1 /* $NetBSD: if_wm.c,v 1.4 2002/05/08 17:53:28 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the Intel i82542 (``Wiseman''), i82543 (``Livengood''),
40 * and i82544 (``Cordova'') Gigabit Ethernet chips.
41 *
42 * TODO (in order of importance):
43 *
44 * - Fix hw VLAN assist.
45 *
46 * - Make GMII work on the Livengood.
47 *
48 * - Fix out-bound IP header checksums.
49 *
50 * - Fix UDP checksums.
51 *
52 * - Jumbo frames -- requires changes to network stack due to
53 * lame buffer length handling on chip.
54 *
55 * ...and, of course, performance tuning.
56 */
57
58 #include "bpfilter.h"
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/callout.h>
63 #include <sys/mbuf.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/socket.h>
67 #include <sys/ioctl.h>
68 #include <sys/errno.h>
69 #include <sys/device.h>
70 #include <sys/queue.h>
71
72 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
73
74 #include <net/if.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_ether.h>
78
79 #if NBPFILTER > 0
80 #include <net/bpf.h>
81 #endif
82
83 #include <netinet/in.h> /* XXX for struct ip */
84 #include <netinet/in_systm.h> /* XXX for struct ip */
85 #include <netinet/ip.h> /* XXX for struct ip */
86
87 #include <machine/bus.h>
88 #include <machine/intr.h>
89 #include <machine/endian.h>
90
91 #include <dev/mii/mii.h>
92 #include <dev/mii/miivar.h>
93 #include <dev/mii/mii_bitbang.h>
94
95 #include <dev/pci/pcireg.h>
96 #include <dev/pci/pcivar.h>
97 #include <dev/pci/pcidevs.h>
98
99 #include <dev/pci/if_wmreg.h>
100
101 #ifdef WM_DEBUG
102 #define WM_DEBUG_LINK 0x01
103 #define WM_DEBUG_TX 0x02
104 #define WM_DEBUG_RX 0x04
105 #define WM_DEBUG_GMII 0x08
106 int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
107
108 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
109 #else
110 #define DPRINTF(x, y) /* nothing */
111 #endif /* WM_DEBUG */
112
113 /*
114 * Transmit descriptor list size. Due to errata, we can only have
115 * 256 hardware descriptors in the ring. We tell the upper layers
116 * that they can queue a lot of packets, and we go ahead and manage
117 * up to 32 of them at a time. We allow up to 16 DMA segments per
118 * packet.
119 */
120 #define WM_NTXSEGS 16
121 #define WM_IFQUEUELEN 256
122 #define WM_TXQUEUELEN 32
123 #define WM_TXQUEUELEN_MASK (WM_TXQUEUELEN - 1)
124 #define WM_NTXDESC 256
125 #define WM_NTXDESC_MASK (WM_NTXDESC - 1)
126 #define WM_NEXTTX(x) (((x) + 1) & WM_NTXDESC_MASK)
127 #define WM_NEXTTXS(x) (((x) + 1) & WM_TXQUEUELEN_MASK)
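/*
 * All of the ring sizes above are powers of two, so the "next index"
 * computation is a single AND rather than a modulo.  Illustrative
 * example: with WM_NTXDESC == 256, WM_NEXTTX(255) == (256 & 0xff) == 0,
 * i.e. the index wraps back around to the start of the ring.
 */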
128
129 /*
130 * The interrupt mitigation feature of the Wiseman is pretty cool -- as
131 * long as you're transmitting, you don't have to take an interrupt at
132 * all. However, we force an interrupt to happen every N + 1 packets
133 * in order to kick us in a reasonable amount of time when we run out
134 * of descriptors.
135 */
136 #define WM_TXINTR_MASK 7
137
138 /*
139 * Receive descriptor list size. We have one Rx buffer for normal
140 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
141 * packet. We allocate 128 receive descriptors, each with a 2k
142 * buffer (MCLBYTES), which gives us room for 25 jumbo packets.
143 */
144 #define WM_NRXDESC 128
145 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
146 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
147 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
148
149 /*
150 * Control structures are DMA'd to the i82542 chip. We allocate them in
151 * a single clump that maps to a single DMA segment to make several things
152 * easier.
153 */
154 struct wm_control_data {
155 /*
156 * The transmit descriptors.
157 */
158 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];
159
160 /*
161 * The receive descriptors.
162 */
163 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
164 };
165
166 #define WM_CDOFF(x) offsetof(struct wm_control_data, x)
167 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
168 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
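/*
 * For example, WM_CDTXOFF(4) is the byte offset of the fifth Tx
 * descriptor within the control-data clump; adding it to the clump's
 * single DMA segment address (see WM_CDTXADDR below) yields the bus
 * address the chip should be given for that descriptor.
 */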
169
170 /*
171 * Software state for transmit jobs.
172 */
173 struct wm_txsoft {
174 struct mbuf *txs_mbuf; /* head of our mbuf chain */
175 bus_dmamap_t txs_dmamap; /* our DMA map */
176 int txs_firstdesc; /* first descriptor in packet */
177 int txs_lastdesc; /* last descriptor in packet */
178 int txs_ndesc; /* # of descriptors used */
179 };
180
181 /*
182 * Software state for receive buffers. Each descriptor gets a
183 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
184 * more than one buffer, we chain them together.
185 */
186 struct wm_rxsoft {
187 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
188 bus_dmamap_t rxs_dmamap; /* our DMA map */
189 };
190
191 /*
192 * Software state per device.
193 */
194 struct wm_softc {
195 struct device sc_dev; /* generic device information */
196 bus_space_tag_t sc_st; /* bus space tag */
197 bus_space_handle_t sc_sh; /* bus space handle */
198 bus_dma_tag_t sc_dmat; /* bus DMA tag */
199 struct ethercom sc_ethercom; /* ethernet common data */
200 void *sc_sdhook; /* shutdown hook */
201
202 int sc_type; /* chip type; see below */
203 int sc_flags; /* flags; see below */
204
205 void *sc_ih; /* interrupt cookie */
206
207 struct mii_data sc_mii; /* MII/media information */
208
209 struct callout sc_tick_ch; /* tick callout */
210
211 bus_dmamap_t sc_cddmamap; /* control data DMA map */
212 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
213
214 /*
215 * Software state for the transmit and receive descriptors.
216 */
217 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
218 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
219
220 /*
221 * Control data structures.
222 */
223 struct wm_control_data *sc_control_data;
224 #define sc_txdescs sc_control_data->wcd_txdescs
225 #define sc_rxdescs sc_control_data->wcd_rxdescs
226
227 #ifdef WM_EVENT_COUNTERS
228 /* Event counters. */
229 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
230 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
231 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
232 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
233 struct evcnt sc_ev_rxintr; /* Rx interrupts */
234 struct evcnt sc_ev_linkintr; /* Link interrupts */
235
236 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
237 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
238 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
239 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
240
241 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
242 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
243
244 struct evcnt sc_ev_tu; /* Tx underrun */
245 #endif /* WM_EVENT_COUNTERS */
246
247 bus_addr_t sc_tdt_reg; /* offset of TDT register */
248
249 int sc_txfree; /* number of free Tx descriptors */
250 int sc_txnext; /* next ready Tx descriptor */
251 int sc_txwin; /* Tx descriptors since last Tx int */
252
253 int sc_txsfree; /* number of free Tx jobs */
254 int sc_txsnext; /* next free Tx job */
255 int sc_txsdirty; /* dirty Tx jobs */
256
257 bus_addr_t sc_rdt_reg; /* offset of RDT register */
258
259 int sc_rxptr; /* next ready Rx descriptor/queue ent */
260 int sc_rxdiscard;
261 int sc_rxlen;
262 struct mbuf *sc_rxhead;
263 struct mbuf *sc_rxtail;
264 struct mbuf **sc_rxtailp;
265
266 uint32_t sc_ctrl; /* prototype CTRL register */
267 #if 0
268 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
269 #endif
270 uint32_t sc_icr; /* prototype interrupt bits */
271 uint32_t sc_tctl; /* prototype TCTL register */
272 uint32_t sc_rctl; /* prototype RCTL register */
273 uint32_t sc_txcw; /* prototype TXCW register */
274 uint32_t sc_tipg; /* prototype TIPG register */
275
276 int sc_tbi_linkup; /* TBI link status */
277 int sc_tbi_anstate; /* autonegotiation state */
278
279 int sc_mchash_type; /* multicast filter offset */
280 };
281
282 #define WM_RXCHAIN_RESET(sc) \
283 do { \
284 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
285 *(sc)->sc_rxtailp = NULL; \
286 (sc)->sc_rxlen = 0; \
287 } while (/*CONSTCOND*/0)
288
289 #define WM_RXCHAIN_LINK(sc, m) \
290 do { \
291 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
292 (sc)->sc_rxtailp = &(m)->m_next; \
293 } while (/*CONSTCOND*/0)
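/*
 * The tail pointer above makes appends O(1): sc_rxtailp always points
 * at the m_next field that currently terminates the chain, so linking
 * a new mbuf never walks the list.  A minimal sketch, with hypothetical
 * mbufs ma and mb:
 *
 *	WM_RXCHAIN_RESET(sc);		sc_rxhead is NULL
 *	WM_RXCHAIN_LINK(sc, ma);	sc_rxhead == ma
 *	WM_RXCHAIN_LINK(sc, mb);	ma->m_next == mb, sc_rxtail == mb
 */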
294
295 /* sc_type */
296 #define WM_T_WISEMAN_2_0 0 /* Wiseman (i82542) 2.0 (really old) */
297 #define WM_T_WISEMAN_2_1 1 /* Wiseman (i82542) 2.1+ (old) */
298 #define WM_T_LIVENGOOD 2 /* Livengood (i82543) */
299 #define WM_T_CORDOVA 3 /* Cordova (i82544) */
300
301 /* sc_flags */
302 #define WM_F_HAS_MII 0x01 /* has MII */
303
304 #ifdef WM_EVENT_COUNTERS
305 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
306 #else
307 #define WM_EVCNT_INCR(ev) /* nothing */
308 #endif
309
310 #define CSR_READ(sc, reg) \
311 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
312 #define CSR_WRITE(sc, reg, val) \
313 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
314
315 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
316 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
317
318 #define WM_CDTXSYNC(sc, x, n, ops) \
319 do { \
320 int __x, __n; \
321 \
322 __x = (x); \
323 __n = (n); \
324 \
325 /* If it will wrap around, sync to the end of the ring. */ \
326 if ((__x + __n) > WM_NTXDESC) { \
327 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
328 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
329 (WM_NTXDESC - __x), (ops)); \
330 __n -= (WM_NTXDESC - __x); \
331 __x = 0; \
332 } \
333 \
334 /* Now sync whatever is left. */ \
335 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
336 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
337 } while (/*CONSTCOND*/0)
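/*
 * Illustrative example of the wrap handling above: with
 * WM_NTXDESC == 256, WM_CDTXSYNC(sc, 250, 10, ops) issues two
 * bus_dmamap_sync() calls -- one covering descriptors 250..255 and
 * one covering 0..3 -- since a single call cannot describe a region
 * that wraps past the end of the ring.
 */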
338
339 #define WM_CDRXSYNC(sc, x, ops) \
340 do { \
341 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
342 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
343 } while (/*CONSTCOND*/0)
344
345 #define WM_INIT_RXDESC(sc, x) \
346 do { \
347 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
348 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
349 struct mbuf *__m = __rxs->rxs_mbuf; \
350 \
351 /* \
352 * Note: We scoot the packet forward 2 bytes in the buffer \
353 * so that the payload after the Ethernet header is aligned \
354 * to a 4-byte boundary. \
355 * \
356 * XXX BRAINDAMAGE ALERT! \
357 * The stupid chip uses the same size for every buffer, which \
358 * is set in the Receive Control register. We are using the 2K \
359 * size option, but what we REALLY want is (2K - 2)! For this \
360 * reason, we can't accept packets longer than the standard \
361 * Ethernet MTU, without incurring a big penalty to copy every \
362 * incoming packet to a new, suitably aligned buffer. \
363 * \
364 * We'll need to make some changes to the layer 3/4 parts of \
365 * the stack (to copy the headers to a new buffer if not \
366 * aligned) in order to support large MTU on this chip. Lame. \
367 */ \
368 __m->m_data = __m->m_ext.ext_buf + 2; \
369 \
370 __rxd->wrx_addr.wa_low = \
371 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
372 __rxd->wrx_addr.wa_high = 0; \
373 __rxd->wrx_len = 0; \
374 __rxd->wrx_cksum = 0; \
375 __rxd->wrx_status = 0; \
376 __rxd->wrx_errors = 0; \
377 __rxd->wrx_special = 0; \
378 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
379 \
380 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
381 } while (/*CONSTCOND*/0)
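/*
 * Why the 2-byte scoot works: the Ethernet header is 14 bytes, so
 * starting the frame at (ext_buf + 2) places the IP header at
 * (ext_buf + 16), a 4-byte-aligned address, which the layer 3 code
 * requires on strict-alignment machines.  The cost, per the note
 * above, is that only (MCLBYTES - 2) bytes of each 2K hardware
 * buffer are really usable.
 */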
382
383 void wm_start(struct ifnet *);
384 void wm_watchdog(struct ifnet *);
385 int wm_ioctl(struct ifnet *, u_long, caddr_t);
386 int wm_init(struct ifnet *);
387 void wm_stop(struct ifnet *, int);
388
389 void wm_shutdown(void *);
390
391 void wm_reset(struct wm_softc *);
392 void wm_rxdrain(struct wm_softc *);
393 int wm_add_rxbuf(struct wm_softc *, int);
394 void wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
395 void wm_tick(void *);
396
397 void wm_set_filter(struct wm_softc *);
398
399 int wm_intr(void *);
400 void wm_txintr(struct wm_softc *);
401 void wm_rxintr(struct wm_softc *);
402 void wm_linkintr(struct wm_softc *, uint32_t);
403
404 void wm_tbi_mediainit(struct wm_softc *);
405 int wm_tbi_mediachange(struct ifnet *);
406 void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
407
408 void wm_tbi_set_linkled(struct wm_softc *);
409 void wm_tbi_check_link(struct wm_softc *);
410
411 void wm_gmii_reset(struct wm_softc *);
412
413 int wm_gmii_livengood_readreg(struct device *, int, int);
414 void wm_gmii_livengood_writereg(struct device *, int, int, int);
415
416 int wm_gmii_cordova_readreg(struct device *, int, int);
417 void wm_gmii_cordova_writereg(struct device *, int, int, int);
418
419 void wm_gmii_statchg(struct device *);
420
421 void wm_gmii_mediainit(struct wm_softc *);
422 int wm_gmii_mediachange(struct ifnet *);
423 void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
424
425 int wm_match(struct device *, struct cfdata *, void *);
426 void wm_attach(struct device *, struct device *, void *);
427
428 int wm_copy_small = 0;
429
430 struct cfattach wm_ca = {
431 sizeof(struct wm_softc), wm_match, wm_attach,
432 };
433
434 /*
435 * Devices supported by this driver.
436 */
437 const struct wm_product {
438 pci_vendor_id_t wmp_vendor;
439 pci_product_id_t wmp_product;
440 const char *wmp_name;
441 int wmp_type;
442 int wmp_flags;
443 #define WMP_F_1000X 0x01
444 #define WMP_F_1000T 0x02
445 } wm_products[] = {
446 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
447 "Intel i82542 1000BASE-X Ethernet",
448 WM_T_WISEMAN_2_1, WMP_F_1000X },
449
450 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543_FIBER,
451 "Intel i82543 1000BASE-X Ethernet",
452 WM_T_LIVENGOOD, WMP_F_1000X },
453
454 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543_SC,
455 "Intel i82543-SC 1000BASE-X Ethernet",
456 WM_T_LIVENGOOD, WMP_F_1000X },
457
458 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543_COPPER,
459 "Intel i82543 1000BASE-T Ethernet",
460 WM_T_LIVENGOOD, WMP_F_1000T },
461
462 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544_XT,
463 "Intel i82544 1000BASE-T Ethernet",
464 WM_T_CORDOVA, WMP_F_1000T },
465
466 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544_XF,
467 "Intel i82544 1000BASE-X Ethernet",
468 WM_T_CORDOVA, WMP_F_1000X },
469
470 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC,
471 "Intel i82544GC 1000BASE-T Ethernet",
472 WM_T_CORDOVA, WMP_F_1000T },
473
474 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_64,
475 "Intel i82544GC 1000BASE-T Ethernet",
476 WM_T_CORDOVA, WMP_F_1000T },
477
478 { 0, 0,
479 NULL,
480 0, 0 },
481 };
482
483 #ifdef WM_EVENT_COUNTERS
484 #if WM_NTXSEGS != 16
485 #error Update wm_txseg_evcnt_names
486 #endif
487 static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
488 "txseg1",
489 "txseg2",
490 "txseg3",
491 "txseg4",
492 "txseg5",
493 "txseg6",
494 "txseg7",
495 "txseg8",
496 "txseg9",
497 "txseg10",
498 "txseg11",
499 "txseg12",
500 "txseg13",
501 "txseg14",
502 "txseg15",
503 "txseg16",
504 };
505 #endif /* WM_EVENT_COUNTERS */
506
507 static const struct wm_product *
508 wm_lookup(const struct pci_attach_args *pa)
509 {
510 const struct wm_product *wmp;
511
512 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
513 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
514 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
515 return (wmp);
516 }
517 return (NULL);
518 }
519
520 int
521 wm_match(struct device *parent, struct cfdata *cf, void *aux)
522 {
523 struct pci_attach_args *pa = aux;
524
525 if (wm_lookup(pa) != NULL)
526 return (1);
527
528 return (0);
529 }
530
531 void
532 wm_attach(struct device *parent, struct device *self, void *aux)
533 {
534 struct wm_softc *sc = (void *) self;
535 struct pci_attach_args *pa = aux;
536 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
537 pci_chipset_tag_t pc = pa->pa_pc;
538 pci_intr_handle_t ih;
539 const char *intrstr = NULL;
540 bus_space_tag_t memt;
541 bus_space_handle_t memh;
542 bus_dma_segment_t seg;
543 int memh_valid;
544 int i, rseg, error;
545 const struct wm_product *wmp;
546 uint8_t enaddr[ETHER_ADDR_LEN];
547 uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
548 pcireg_t preg, memtype;
549 int pmreg;
550
551 callout_init(&sc->sc_tick_ch);
552
553 wmp = wm_lookup(pa);
554 if (wmp == NULL) {
555 printf("\n");
556 panic("wm_attach: impossible");
557 }
558
559 sc->sc_dmat = pa->pa_dmat;
560
561 preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
562 printf(": %s, rev. %d\n", wmp->wmp_name, preg);
563
564 sc->sc_type = wmp->wmp_type;
565 if (sc->sc_type < WM_T_LIVENGOOD) {
566 if (preg < 2) {
567 printf("%s: Wiseman must be at least rev. 2\n",
568 sc->sc_dev.dv_xname);
569 return;
570 }
571 if (preg < 3)
572 sc->sc_type = WM_T_WISEMAN_2_0;
573 }
574
575 /*
576 * Map the device.
577 */
578 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
579 switch (memtype) {
580 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
581 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
582 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
583 memtype, 0, &memt, &memh, NULL, NULL) == 0);
584 break;
585 default:
586 memh_valid = 0;
587 }
588
589 if (memh_valid) {
590 sc->sc_st = memt;
591 sc->sc_sh = memh;
592 } else {
593 printf("%s: unable to map device registers\n",
594 sc->sc_dev.dv_xname);
595 return;
596 }
597
598 /* Enable bus mastering. Disable MWI on the Wiseman 2.0. */
599 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
600 preg |= PCI_COMMAND_MASTER_ENABLE;
601 if (sc->sc_type < WM_T_WISEMAN_2_1)
602 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
603 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
604
605 /* Get it out of power save mode, if needed. */
606 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
607 preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
608 if (preg == 3) {
609 /*
610 * The card has lost all configuration data in
611 * this state, so punt.
612 */
613 printf("%s: unable to wake from power state D3\n",
614 sc->sc_dev.dv_xname);
615 return;
616 }
617 if (preg != 0) {
618 printf("%s: waking up from power state D%d\n",
619 sc->sc_dev.dv_xname, preg);
620 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
621 }
622 }
623
624 /*
625 * Map and establish our interrupt.
626 */
627 if (pci_intr_map(pa, &ih)) {
628 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
629 return;
630 }
631 intrstr = pci_intr_string(pc, ih);
632 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
633 if (sc->sc_ih == NULL) {
634 printf("%s: unable to establish interrupt",
635 sc->sc_dev.dv_xname);
636 if (intrstr != NULL)
637 printf(" at %s", intrstr);
638 printf("\n");
639 return;
640 }
641 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
642
643 /*
644 * Allocate the control data structures, and create and load the
645 * DMA map for it.
646 */
647 if ((error = bus_dmamem_alloc(sc->sc_dmat,
648 sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
649 0)) != 0) {
650 printf("%s: unable to allocate control data, error = %d\n",
651 sc->sc_dev.dv_xname, error);
652 goto fail_0;
653 }
654
655 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
656 sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
657 BUS_DMA_COHERENT)) != 0) {
658 printf("%s: unable to map control data, error = %d\n",
659 sc->sc_dev.dv_xname, error);
660 goto fail_1;
661 }
662
663 if ((error = bus_dmamap_create(sc->sc_dmat,
664 sizeof(struct wm_control_data), 1,
665 sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
666 printf("%s: unable to create control data DMA map, "
667 "error = %d\n", sc->sc_dev.dv_xname, error);
668 goto fail_2;
669 }
670
671 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
672 sc->sc_control_data, sizeof(struct wm_control_data), NULL,
673 0)) != 0) {
674 printf("%s: unable to load control data DMA map, error = %d\n",
675 sc->sc_dev.dv_xname, error);
676 goto fail_3;
677 }
678
679 /*
680 * Create the transmit buffer DMA maps.
681 */
682 for (i = 0; i < WM_TXQUEUELEN; i++) {
683 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
684 WM_NTXSEGS, MCLBYTES, 0, 0,
685 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
686 printf("%s: unable to create Tx DMA map %d, "
687 "error = %d\n", sc->sc_dev.dv_xname, i, error);
688 goto fail_4;
689 }
690 }
691
692 /*
693 * Create the receive buffer DMA maps.
694 */
695 for (i = 0; i < WM_NRXDESC; i++) {
696 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
697 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
698 printf("%s: unable to create Rx DMA map %d, "
699 "error = %d\n", sc->sc_dev.dv_xname, i, error);
700 goto fail_5;
701 }
702 sc->sc_rxsoft[i].rxs_mbuf = NULL;
703 }
704
705 /*
706 * Reset the chip to a known state.
707 */
708 wm_reset(sc);
709
710 /*
711 * Read the Ethernet address from the EEPROM.
712 */
713 wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
714 sizeof(myea) / sizeof(myea[0]), myea);
715 enaddr[0] = myea[0] & 0xff;
716 enaddr[1] = myea[0] >> 8;
717 enaddr[2] = myea[1] & 0xff;
718 enaddr[3] = myea[1] >> 8;
719 enaddr[4] = myea[2] & 0xff;
720 enaddr[5] = myea[2] >> 8;
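	/*
	 * The EEPROM stores the MAC address as three little-endian
	 * 16-bit words.  For example, 00:a0:c9:12:34:56 reads back as
	 * myea[] = { 0xa000, 0x12c9, 0x5634 }, which the shifts and
	 * masks above reassemble byte by byte.
	 */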
721
722 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
723 ether_sprintf(enaddr));
724
725 /*
726 * Read the config info from the EEPROM, and set up various
727 * bits in the control registers based on their contents.
728 */
729 wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
730 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
731 if (sc->sc_type >= WM_T_CORDOVA)
732 wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);
733
734 if (cfg1 & EEPROM_CFG1_ILOS)
735 sc->sc_ctrl |= CTRL_ILOS;
736 if (sc->sc_type >= WM_T_CORDOVA) {
737 sc->sc_ctrl |=
738 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
739 CTRL_SWDPIO_SHIFT;
740 sc->sc_ctrl |=
741 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
742 CTRL_SWDPINS_SHIFT;
743 } else {
744 sc->sc_ctrl |=
745 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
746 CTRL_SWDPIO_SHIFT;
747 }
748
749 #if 0
750 if (sc->sc_type >= WM_T_CORDOVA) {
751 if (cfg1 & EEPROM_CFG1_IPS0)
752 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
753 if (cfg1 & EEPROM_CFG1_IPS1)
754 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
755 sc->sc_ctrl_ext |=
756 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
757 CTRL_EXT_SWDPIO_SHIFT;
758 sc->sc_ctrl_ext |=
759 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
760 CTRL_EXT_SWDPINS_SHIFT;
761 } else {
762 sc->sc_ctrl_ext |=
763 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
764 CTRL_EXT_SWDPIO_SHIFT;
765 }
766 #endif
767
768 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
769 #if 0
770 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
771 #endif
772
773 /*
774 * Set up some register offsets that are different between
775 * the Wiseman and the Livengood and later chips.
776 */
777 if (sc->sc_type < WM_T_LIVENGOOD) {
778 sc->sc_rdt_reg = WMREG_OLD_RDT0;
779 sc->sc_tdt_reg = WMREG_OLD_TDT;
780 } else {
781 sc->sc_rdt_reg = WMREG_RDT;
782 sc->sc_tdt_reg = WMREG_TDT;
783 }
784
785 /*
786 * Determine if we should use flow control. We should
787 * always use it, unless we're on a Wiseman < 2.1.
788 */
789 if (sc->sc_type >= WM_T_WISEMAN_2_1)
790 sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
791
792 /*
793 * Determine if we're TBI or GMII mode, and initialize the
794 * media structures accordingly.
795 */
796 if (sc->sc_type < WM_T_LIVENGOOD ||
797 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
798 if (wmp->wmp_flags & WMP_F_1000T)
799 printf("%s: WARNING: TBIMODE set on 1000BASE-T "
800 "product!\n", sc->sc_dev.dv_xname);
801 wm_tbi_mediainit(sc);
802 } else {
803 if (wmp->wmp_flags & WMP_F_1000X)
804 printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
805 "product!\n", sc->sc_dev.dv_xname);
806 wm_gmii_mediainit(sc);
807 }
808
809 ifp = &sc->sc_ethercom.ec_if;
810 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
811 ifp->if_softc = sc;
812 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
813 ifp->if_ioctl = wm_ioctl;
814 ifp->if_start = wm_start;
815 ifp->if_watchdog = wm_watchdog;
816 ifp->if_init = wm_init;
817 ifp->if_stop = wm_stop;
818 IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
819 IFQ_SET_READY(&ifp->if_snd);
820
821 /*
822 * If we're a Livengood or greater, we can support VLANs.
823 */
824 if (sc->sc_type >= WM_T_LIVENGOOD)
825 sc->sc_ethercom.ec_capabilities |=
826 ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
827
828 /*
829 * We can perform IPv4, TCPv4, and UDPv4 checksums in-bound, but
830 * only on the Livengood and later.
831 */
832 if (sc->sc_type >= WM_T_LIVENGOOD)
833 ifp->if_capabilities |=
834 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
835
836 /*
837 * Attach the interface.
838 */
839 if_attach(ifp);
840 ether_ifattach(ifp, enaddr);
841
842 #ifdef WM_EVENT_COUNTERS
843 /* Attach event counters. */
844 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
845 NULL, sc->sc_dev.dv_xname, "txsstall");
846 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
847 NULL, sc->sc_dev.dv_xname, "txdstall");
848 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
849 NULL, sc->sc_dev.dv_xname, "txdw");
850 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
851 NULL, sc->sc_dev.dv_xname, "txqe");
852 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
853 NULL, sc->sc_dev.dv_xname, "rxintr");
854 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
855 NULL, sc->sc_dev.dv_xname, "linkintr");
856
857 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
858 NULL, sc->sc_dev.dv_xname, "rxipsum");
859 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
860 NULL, sc->sc_dev.dv_xname, "rxtusum");
861 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
862 NULL, sc->sc_dev.dv_xname, "txipsum");
863 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
864 NULL, sc->sc_dev.dv_xname, "txtusum");
865
866 for (i = 0; i < WM_NTXSEGS; i++)
867 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
868 NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
869
870 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
871 NULL, sc->sc_dev.dv_xname, "txdrop");
872
873 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
874 NULL, sc->sc_dev.dv_xname, "tu");
875 #endif /* WM_EVENT_COUNTERS */
876
877 /*
878 * Make sure the interface is shutdown during reboot.
879 */
880 sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
881 if (sc->sc_sdhook == NULL)
882 printf("%s: WARNING: unable to establish shutdown hook\n",
883 sc->sc_dev.dv_xname);
884 return;
885
886 /*
887 * Free any resources we've allocated during the failed attach
888 * attempt. Do this in reverse order and fall through.
889 */
890 fail_5:
891 for (i = 0; i < WM_NRXDESC; i++) {
892 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
893 bus_dmamap_destroy(sc->sc_dmat,
894 sc->sc_rxsoft[i].rxs_dmamap);
895 }
896 fail_4:
897 for (i = 0; i < WM_TXQUEUELEN; i++) {
898 if (sc->sc_txsoft[i].txs_dmamap != NULL)
899 bus_dmamap_destroy(sc->sc_dmat,
900 sc->sc_txsoft[i].txs_dmamap);
901 }
902 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
903 fail_3:
904 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
905 fail_2:
906 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
907 sizeof(struct wm_control_data));
908 fail_1:
909 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
910 fail_0:
911 return;
912 }
913
914 /*
915 * wm_shutdown:
916 *
917 * Make sure the interface is stopped at reboot time.
918 */
919 void
920 wm_shutdown(void *arg)
921 {
922 struct wm_softc *sc = arg;
923
924 wm_stop(&sc->sc_ethercom.ec_if, 1);
925 }
926
927 /*
928 * wm_tx_cksum:
929 *
930 * Set up TCP/IP checksumming parameters for the
931 * specified packet.
932 */
933 static int
934 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
935 uint32_t *fieldsp)
936 {
937 struct mbuf *m0 = txs->txs_mbuf;
938 struct livengood_tcpip_ctxdesc *t;
939 uint32_t fields = 0, tcmd = 0, ipcs, tucs;
940 struct ip *ip;
941 int offset, iphl;
942
943 /*
944 * XXX It would be nice if the mbuf pkthdr had offset
945 * fields for the protocol headers.
946 */
947
948 /* XXX Assumes normal Ethernet encap. */
949 offset = ETHER_HDR_LEN;
950
951 /* XXX */
952 if (m0->m_len < (offset + sizeof(struct ip))) {
953 printf("%s: wm_tx_cksum: need to m_pullup, "
954 "packet dropped\n", sc->sc_dev.dv_xname);
955 return (EINVAL);
956 }
957
958 ip = (struct ip *) (mtod(m0, caddr_t) + offset);
959 iphl = ip->ip_hl << 2;
960
961 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
962 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
963 tcmd |= htole32(WTX_TCPIP_CMD_IP);
964 fields |= htole32(WTX_IXSM);
965 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
966 WTX_TCPIP_IPCSO(offsetof(struct ip, ip_sum)) |
967 WTX_TCPIP_IPCSE(offset + iphl - 1));
968 } else
969 ipcs = 0;
970
971 offset += iphl;
972
973 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
974 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
975 tcmd |= htole32(WTX_TCPIP_CMD_TCP);
976 fields |= htole32(WTX_TXSM);
977 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
978 WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
979 WTX_TCPIP_TUCSE(0) /* rest of packet */);
980 } else
981 tucs = 0;
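	/*
	 * Worked example of the CSS/CSO scheme above: for a TCP packet
	 * with no IP options, offset is 14 (Ethernet header) plus 20
	 * (IP header) == 34, so TUCSS == 34 (start summing at the TCP
	 * header) and TUCSO == 34 + 16 == 50 (csum_data is 16 for TCP,
	 * the offset of th_sum), i.e. the chip stores the result right
	 * on the TCP checksum field.
	 */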
982
983 /* Fill in the context descriptor. */
984 t = (struct livengood_tcpip_ctxdesc *) &sc->sc_txdescs[sc->sc_txnext];
985 t->tcpip_ipcs = ipcs;
986 t->tcpip_tucs = tucs;
987 t->tcpip_cmdlen =
988 htole32(WTX_CMD_DEXT | WTX_CMD_IDE | WTX_DTYP_C) | tcmd;
989 t->tcpip_seg = 0;
990 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
991
992 sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
993 txs->txs_ndesc++;
994
995 *cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
996 *fieldsp = fields;
997
998 return (0);
999 }
1000
1001 /*
1002 * wm_start: [ifnet interface function]
1003 *
1004 * Start packet transmission on the interface.
1005 */
1006 void
1007 wm_start(struct ifnet *ifp)
1008 {
1009 struct wm_softc *sc = ifp->if_softc;
1010 struct mbuf *m0/*, *m*/;
1011 struct wm_txsoft *txs;
1012 bus_dmamap_t dmamap;
1013 int error, nexttx, lasttx, ofree, seg;
1014 uint32_t cksumcmd, cksumfields;
1015
1016 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1017 return;
1018
1019 /*
1020 * Remember the previous number of free descriptors.
1021 */
1022 ofree = sc->sc_txfree;
1023
1024 /*
1025 * Loop through the send queue, setting up transmit descriptors
1026 * until we drain the queue, or use up all available transmit
1027 * descriptors.
1028 */
1029 for (;;) {
1030 /* Grab a packet off the queue. */
1031 IFQ_POLL(&ifp->if_snd, m0);
1032 if (m0 == NULL)
1033 break;
1034
1035 DPRINTF(WM_DEBUG_TX,
1036 ("%s: TX: have packet to transmit: %p\n",
1037 sc->sc_dev.dv_xname, m0));
1038
1039 /* Get a work queue entry. */
1040 if (sc->sc_txsfree == 0) {
1041 DPRINTF(WM_DEBUG_TX,
1042 ("%s: TX: no free job descriptors\n",
1043 sc->sc_dev.dv_xname));
1044 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1045 break;
1046 }
1047
1048 txs = &sc->sc_txsoft[sc->sc_txsnext];
1049 dmamap = txs->txs_dmamap;
1050
1051 /*
1052 * Load the DMA map. If this fails, the packet either
1053 * didn't fit in the allotted number of segments, or we
1054 * were short on resources. For the too-many-segments
1055 * case, we simply report an error and drop the packet,
1056 * since we can't sanely copy a jumbo packet to a single
1057 * buffer.
1058 */
1059 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1060 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1061 if (error) {
1062 if (error == EFBIG) {
1063 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1064 printf("%s: Tx packet consumes too many "
1065 "DMA segments, dropping...\n",
1066 sc->sc_dev.dv_xname);
1067 IFQ_DEQUEUE(&ifp->if_snd, m0);
1068 m_freem(m0);
1069 continue;
1070 }
1071 /*
1072 * Short on resources, just stop for now.
1073 */
1074 DPRINTF(WM_DEBUG_TX,
1075 ("%s: TX: dmamap load failed: %d\n",
1076 sc->sc_dev.dv_xname, error));
1077 break;
1078 }
1079
1080 /*
1081 * Ensure we have enough descriptors free to describe
1082 * the packet. Note, we always reserve one descriptor
1083 * at the end of the ring due to the semantics of the
1084 * TDT register, plus one more in the event we need
1085 * to re-load checksum offload context.
1086 */
1087 if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1088 /*
1089 * Not enough free descriptors to transmit this
1090 * packet. We haven't committed anything yet,
1091 * so just unload the DMA map, put the packet
1092 * back on the queue, and punt. Notify the upper
1093 * layer that there are no more slots left.
1094 */
1095 DPRINTF(WM_DEBUG_TX,
1096 ("%s: TX: need %d descriptors, have %d\n",
1097 sc->sc_dev.dv_xname, dmamap->dm_nsegs,
1098 sc->sc_txfree - 2));
1099 ifp->if_flags |= IFF_OACTIVE;
1100 bus_dmamap_unload(sc->sc_dmat, dmamap);
1101 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1102 break;
1103 }
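		/*
		 * The "one descriptor at the end" reservation exists
		 * because TDT == TDH means "ring empty" to the chip;
		 * if we ever advanced TDT all the way around to TDH,
		 * a completely full ring would be indistinguishable
		 * from an empty one.
		 */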
1104
1105 IFQ_DEQUEUE(&ifp->if_snd, m0);
1106
1107 /*
1108 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1109 */
1110
1111 /* Sync the DMA map. */
1112 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1113 BUS_DMASYNC_PREWRITE);
1114
1115 DPRINTF(WM_DEBUG_TX,
1116 ("%s: TX: packet has %d DMA segments\n",
1117 sc->sc_dev.dv_xname, dmamap->dm_nsegs));
1118
1119 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1120
1121 /*
1122 * Store a pointer to the packet so that we can free it
1123 * later.
1124 *
1125 * Initially, we consider the number of descriptors the
1126 * packet uses to be the number of DMA segments. This may be
1127 * incremented by 1 if we do checksum offload (a descriptor
1128 * is used to set the checksum context).
1129 */
1130 txs->txs_mbuf = m0;
1131 txs->txs_ndesc = dmamap->dm_nsegs;
1132
1133 /*
1134 * Set up checksum offload parameters for
1135 * this packet.
1136 */
1137 if (m0->m_pkthdr.csum_flags &
1138 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1139 if (wm_tx_cksum(sc, txs, &cksumcmd,
1140 &cksumfields) != 0) {
1141 /* Error message already displayed. */
1142 m_freem(m0);
1143 bus_dmamap_unload(sc->sc_dmat, dmamap);
1144 txs->txs_mbuf = NULL;
1145 continue;
1146 }
1147 } else {
1148 cksumcmd = 0;
1149 cksumfields = 0;
1150 }
1151
1152 /*
1153 * Initialize the transmit descriptor.
1154 */
1155 for (nexttx = sc->sc_txnext, seg = 0;
1156 seg < dmamap->dm_nsegs;
1157 seg++, nexttx = WM_NEXTTX(nexttx)) {
1158 /*
1159 * Note: we currently only use 32-bit DMA
1160 * addresses.
1161 */
1162 sc->sc_txdescs[nexttx].wtx_addr.wa_low =
1163 htole32(dmamap->dm_segs[seg].ds_addr);
1164 sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
1165 htole32(dmamap->dm_segs[seg].ds_len);
1166 sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
1167 cksumfields;
1168 lasttx = nexttx;
1169
1170 sc->sc_txwin++;
1171
1172 DPRINTF(WM_DEBUG_TX,
1173 ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1174 sc->sc_dev.dv_xname, nexttx,
1175 (uint32_t) dmamap->dm_segs[seg].ds_addr,
1176 (uint32_t) dmamap->dm_segs[seg].ds_len));
1177 }
1178
1179 /*
1180 * Set up the command byte on the last descriptor of
1181 * the packet. If we're in the interrupt delay window,
1182 * delay the interrupt.
1183 */
1184 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1185 htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RPS);
1186 if (sc->sc_txwin < (WM_NTXDESC * 2 / 3))
1187 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1188 htole32(WTX_CMD_IDE);
1189 else
1190 sc->sc_txwin = 0;
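		/*
		 * In other words: most packets carry WTX_CMD_IDE, so
		 * their completion interrupts are delayed and batched;
		 * once roughly 2/3 of the ring has passed without an
		 * immediate interrupt, we omit IDE on one packet to
		 * force a timely Tx sweep, then restart the window.
		 */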
1191
1192 #if 0 /* XXXJRT */
1193 /*
1194 * If VLANs are enabled and the packet has a VLAN tag, set
1195 * up the descriptor to encapsulate the packet for us.
1196 *
1197 * This is only valid on the last descriptor of the packet.
1198 */
1199 if (sc->sc_ethercom.ec_nvlans != 0 &&
1200 (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1201 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1202 htole32(WTX_CMD_VLE);
1203 sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
1204 = htole16(*mtod(m, int *) & 0xffff);
1205 }
1206 #endif /* XXXJRT */
1207
1208 DPRINTF(WM_DEBUG_TX,
1209 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1210 lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
1211
1212 /* Sync the descriptors we're using. */
1213 WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1214 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1215
1216 /* Give the packet to the chip. */
1217 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1218
1219 DPRINTF(WM_DEBUG_TX,
1220 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1221
1222 /*
1223 * Remember what txdirty will be once the packet is
1224 * done.
1225 *
1226 * Note: If we're doing checksum offload, we are actually
1227 * using one descriptor before firstdesc, but it doesn't
1228 * really matter.
1229 */
1230 txs->txs_firstdesc = sc->sc_txnext;
1231 txs->txs_lastdesc = lasttx;
1232
1233 DPRINTF(WM_DEBUG_TX,
1234 ("%s: TX: finished transmitting packet, job %d\n",
1235 sc->sc_dev.dv_xname, sc->sc_txsnext));
1236
1237 /* Advance the tx pointer. */
1238 sc->sc_txfree -= txs->txs_ndesc;
1239 sc->sc_txnext = nexttx;
1240
1241 sc->sc_txsfree--;
1242 sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
1243
1244 #if NBPFILTER > 0
1245 /* Pass the packet to any BPF listeners. */
1246 if (ifp->if_bpf)
1247 bpf_mtap(ifp->if_bpf, m0);
1248 #endif /* NBPFILTER > 0 */
1249 }
1250
1251 if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1252 /* No more slots; notify upper layer. */
1253 ifp->if_flags |= IFF_OACTIVE;
1254 }
1255
1256 if (sc->sc_txfree != ofree) {
1257 /* Set a watchdog timer in case the chip flakes out. */
1258 ifp->if_timer = 5;
1259 }
1260 }
1261
1262 /*
1263 * wm_watchdog: [ifnet interface function]
1264 *
1265 * Watchdog timer handler.
1266 */
1267 void
1268 wm_watchdog(struct ifnet *ifp)
1269 {
1270 struct wm_softc *sc = ifp->if_softc;
1271
1272 /*
1273 * Since we're using delayed interrupts, sweep up
1274 * before we report an error.
1275 */
1276 wm_txintr(sc);
1277
1278 if (sc->sc_txfree != WM_NTXDESC) {
1279 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1280 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1281 sc->sc_txnext);
1282 ifp->if_oerrors++;
1283
1284 /* Reset the interface. */
1285 (void) wm_init(ifp);
1286 }
1287
1288 /* Try to get more packets going. */
1289 wm_start(ifp);
1290 }
1291
1292 /*
1293 * wm_ioctl: [ifnet interface function]
1294 *
1295 * Handle control requests from the operator.
1296 */
1297 int
1298 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1299 {
1300 struct wm_softc *sc = ifp->if_softc;
1301 struct ifreq *ifr = (struct ifreq *) data;
1302 int s, error;
1303
1304 s = splnet();
1305
1306 switch (cmd) {
1307 case SIOCSIFMEDIA:
1308 case SIOCGIFMEDIA:
1309 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1310 break;
1311
1312 default:
1313 error = ether_ioctl(ifp, cmd, data);
1314 if (error == ENETRESET) {
1315 /*
1316 * Multicast list has changed; set the hardware filter
1317 * accordingly.
1318 */
1319 wm_set_filter(sc);
1320 error = 0;
1321 }
1322 break;
1323 }
1324
1325 /* Try to get more packets going. */
1326 wm_start(ifp);
1327
1328 splx(s);
1329 return (error);
1330 }
1331
1332 /*
1333 * wm_intr:
1334 *
1335 * Interrupt service routine.
1336 */
1337 int
1338 wm_intr(void *arg)
1339 {
1340 struct wm_softc *sc = arg;
1341 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1342 uint32_t icr;
1343 int wantinit, handled = 0;
1344
1345 for (wantinit = 0; wantinit == 0;) {
1346 icr = CSR_READ(sc, WMREG_ICR);
1347 if ((icr & sc->sc_icr) == 0)
1348 break;
1349
1350 handled = 1;
1351
1352 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1353 DPRINTF(WM_DEBUG_RX,
1354 ("%s: RX: got Rx intr 0x%08x\n",
1355 sc->sc_dev.dv_xname,
1356 icr & (ICR_RXDMT0|ICR_RXT0)));
1357 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1358 wm_rxintr(sc);
1359 }
1360
1361 if (icr & (ICR_TXDW|ICR_TXQE)) {
1362 DPRINTF(WM_DEBUG_TX,
1363 ("%s: TX: got TDXW|TXQE interrupt\n",
1364 sc->sc_dev.dv_xname));
1365 #ifdef WM_EVENT_COUNTERS
1366 if (icr & ICR_TXDW)
1367 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1368 else if (icr & ICR_TXQE)
1369 WM_EVCNT_INCR(&sc->sc_ev_txqe);
1370 #endif
1371 wm_txintr(sc);
1372 }
1373
1374 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1375 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1376 wm_linkintr(sc, icr);
1377 }
1378
1379 if (icr & ICR_RXO) {
1380 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1381 wantinit = 1;
1382 }
1383 }
1384
1385 if (handled) {
1386 if (wantinit)
1387 wm_init(ifp);
1388
1389 /* Try to get more packets going. */
1390 wm_start(ifp);
1391 }
1392
1393 return (handled);
1394 }
1395
1396 /*
1397 * wm_txintr:
1398 *
1399 * Helper; handle transmit interrupts.
1400 */
1401 void
1402 wm_txintr(struct wm_softc *sc)
1403 {
1404 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1405 struct wm_txsoft *txs;
1406 uint8_t status;
1407 int i;
1408
1409 ifp->if_flags &= ~IFF_OACTIVE;
1410
1411 /*
1412 * Go through the Tx list and free mbufs for those
1413 * frames which have been transmitted.
1414 */
1415 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1416 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1417 txs = &sc->sc_txsoft[i];
1418
1419 DPRINTF(WM_DEBUG_TX,
1420 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1421
1422 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1423 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1424
1425 status = le32toh(sc->sc_txdescs[
1426 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1427 if ((status & WTX_ST_DD) == 0)
1428 break;
1429
1430 DPRINTF(WM_DEBUG_TX,
1431 ("%s: TX: job %d done: descs %d..%d\n",
1432 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1433 txs->txs_lastdesc));
1434
1435 /*
1436 * XXX We should probably be using the statistics
1437 * XXX registers, but I don't know if they exist
1438 * XXX on chips before the Cordova.
1439 */
1440
1441 #ifdef WM_EVENT_COUNTERS
1442 if (status & WTX_ST_TU)
1443 WM_EVCNT_INCR(&sc->sc_ev_tu);
1444 #endif /* WM_EVENT_COUNTERS */
1445
1446 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1447 ifp->if_oerrors++;
1448 if (status & WTX_ST_LC)
1449 printf("%s: late collision\n",
1450 sc->sc_dev.dv_xname);
1451 else if (status & WTX_ST_EC) {
1452 ifp->if_collisions += 16;
1453 printf("%s: excessive collisions\n",
1454 sc->sc_dev.dv_xname);
1455 }
1456 } else
1457 ifp->if_opackets++;
1458
1459 sc->sc_txfree += txs->txs_ndesc;
1460 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1461 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1462 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1463 m_freem(txs->txs_mbuf);
1464 txs->txs_mbuf = NULL;
1465 }
1466
1467 /* Update the dirty transmit buffer pointer. */
1468 sc->sc_txsdirty = i;
1469 DPRINTF(WM_DEBUG_TX,
1470 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1471
1472 /*
1473 * If there are no more pending transmissions, cancel the watchdog
1474 * timer.
1475 */
1476 if (sc->sc_txsfree == WM_TXQUEUELEN)
1477 ifp->if_timer = 0;
1478 if (sc->sc_txfree == WM_NTXDESC)
1479 sc->sc_txwin = 0;
1480 }
1481
1482 /*
1483 * wm_rxintr:
1484 *
1485 * Helper; handle receive interrupts.
1486 */
1487 void
1488 wm_rxintr(struct wm_softc *sc)
1489 {
1490 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1491 struct wm_rxsoft *rxs;
1492 struct mbuf *m;
1493 int i, len;
1494 uint8_t status, errors;
1495
1496 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1497 rxs = &sc->sc_rxsoft[i];
1498
1499 DPRINTF(WM_DEBUG_RX,
1500 ("%s: RX: checking descriptor %d\n",
1501 sc->sc_dev.dv_xname, i));
1502
1503 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1504
1505 status = sc->sc_rxdescs[i].wrx_status;
1506 errors = sc->sc_rxdescs[i].wrx_errors;
1507 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1508
1509 if ((status & WRX_ST_DD) == 0) {
1510 /*
1511 * We have processed all of the receive descriptors.
1512 */
1513 break;
1514 }
1515
1516 if (__predict_false(sc->sc_rxdiscard)) {
1517 DPRINTF(WM_DEBUG_RX,
1518 ("%s: RX: discarding contents of descriptor %d\n",
1519 sc->sc_dev.dv_xname, i));
1520 WM_INIT_RXDESC(sc, i);
1521 if (status & WRX_ST_EOP) {
1522 /* Reset our state. */
1523 DPRINTF(WM_DEBUG_RX,
1524 ("%s: RX: resetting rxdiscard -> 0\n",
1525 sc->sc_dev.dv_xname));
1526 sc->sc_rxdiscard = 0;
1527 }
1528 continue;
1529 }
1530
1531 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1532 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1533
1534 m = rxs->rxs_mbuf;
1535
1536 /*
1537 * Add a new receive buffer to the ring.
1538 */
1539 if (wm_add_rxbuf(sc, i) != 0) {
1540 /*
1541 * Failed, throw away what we've done so
1542 * far, and discard the rest of the packet.
1543 */
1544 ifp->if_ierrors++;
1545 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1546 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1547 WM_INIT_RXDESC(sc, i);
1548 if ((status & WRX_ST_EOP) == 0)
1549 sc->sc_rxdiscard = 1;
1550 if (sc->sc_rxhead != NULL)
1551 m_freem(sc->sc_rxhead);
1552 WM_RXCHAIN_RESET(sc);
1553 DPRINTF(WM_DEBUG_RX,
1554 ("%s: RX: Rx buffer allocation failed, "
1555 "dropping packet%s\n", sc->sc_dev.dv_xname,
1556 sc->sc_rxdiscard ? " (discard)" : ""));
1557 continue;
1558 }
1559
1560 WM_RXCHAIN_LINK(sc, m);
1561
1562 m->m_len = len;
1563
1564 DPRINTF(WM_DEBUG_RX,
1565 ("%s: RX: buffer at %p len %d\n",
1566 sc->sc_dev.dv_xname, m->m_data, len));
1567
1568 /*
1569 * If this is not the end of the packet, keep
1570 * looking.
1571 */
1572 if ((status & WRX_ST_EOP) == 0) {
1573 sc->sc_rxlen += len;
1574 DPRINTF(WM_DEBUG_RX,
1575 ("%s: RX: not yet EOP, rxlen -> %d\n",
1576 sc->sc_dev.dv_xname, sc->sc_rxlen));
1577 continue;
1578 }
1579
1580 /*
1581 * Okay, we have the entire packet now...
1582 */
1583 *sc->sc_rxtailp = NULL;
1584 m = sc->sc_rxhead;
1585 len += sc->sc_rxlen;
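		/*
		 * sc_rxlen accumulated the bytes of the earlier
		 * (non-EOP) buffers of this frame; e.g. a hypothetical
		 * 4500-byte frame split across 2K buffers would arrive
		 * as 2048 + 2048 + 404, with only the final 404 in
		 * "len" at this point.
		 */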
1586
1587 WM_RXCHAIN_RESET(sc);
1588
1589 DPRINTF(WM_DEBUG_RX,
1590 ("%s: RX: have entire packet, len -> %d\n",
1591 sc->sc_dev.dv_xname, len));
1592
1593 /*
1594 * If an error occurred, update stats and drop the packet.
1595 */
1596 if (errors &
1597 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1598 ifp->if_ierrors++;
1599 if (errors & WRX_ER_SE)
1600 printf("%s: symbol error\n",
1601 sc->sc_dev.dv_xname);
1602 else if (errors & WRX_ER_SEQ)
1603 printf("%s: receive sequence error\n",
1604 sc->sc_dev.dv_xname);
1605 else if (errors & WRX_ER_CE)
1606 printf("%s: CRC error\n",
1607 sc->sc_dev.dv_xname);
1608 m_freem(m);
1609 continue;
1610 }
1611
1612 /*
1613 * No errors. Receive the packet.
1614 *
1615 * Note, we have configured the chip to include the
1616 * CRC with every packet.
1617 */
1618 m->m_flags |= M_HASFCS;
1619 m->m_pkthdr.rcvif = ifp;
1620 m->m_pkthdr.len = len;
1621
1622 #if 0 /* XXXJRT */
1623 /*
1624 * If VLANs are enabled, VLAN packets have been unwrapped
1625 * for us. Associate the tag with the packet.
1626 */
1627 if (sc->sc_ethercom.ec_nvlans != 0 &&
1628 (status & WRX_ST_VP) != 0) {
1629 struct mbuf *vtag;
1630
1631 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1632 if (vtag == NULL) {
1633 ifp->if_ierrors++;
1634 printf("%s: unable to allocate VLAN tag\n",
1635 sc->sc_dev.dv_xname);
1636 m_freem(m);
1637 continue;
1638 }
1639
1640 *mtod(m, int *) =
1641 le16toh(sc->sc_rxdescs[i].wrx_special);
1642 vtag->m_len = sizeof(int);
1643 }
1644 #endif /* XXXJRT */
1645
1646 /*
1647 * Set up checksum info for this packet.
1648 */
1649 if (status & WRX_ST_IPCS) {
1650 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1651 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1652 if (errors & WRX_ER_IPE)
1653 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1654 }
1655 if (status & WRX_ST_TCPCS) {
1656 /*
1657 * Note: we don't know if this was TCP or UDP,
1658 * so we just set both bits, and expect the
1659 * upper layers to deal.
1660 */
1661 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1662 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1663 if (errors & WRX_ER_TCPE)
1664 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1665 }
1666
1667 ifp->if_ipackets++;
1668
1669 #if NBPFILTER > 0
1670 /* Pass this up to any BPF listeners. */
1671 if (ifp->if_bpf)
1672 bpf_mtap(ifp->if_bpf, m);
1673 #endif /* NBPFILTER > 0 */
1674
1675 /* Pass it on. */
1676 (*ifp->if_input)(ifp, m);
1677 }
1678
1679 /* Update the receive pointer. */
1680 sc->sc_rxptr = i;
1681
1682 DPRINTF(WM_DEBUG_RX,
1683 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1684 }
1685
1686 /*
1687 * wm_linkintr:
1688 *
1689 * Helper; handle link interrupts.
1690 */
1691 void
1692 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1693 {
1694 uint32_t status;
1695
1696 /*
1697 * If we get a link status interrupt on a 1000BASE-T
1698 * device, just fall into the normal MII tick path.
1699 */
1700 if (sc->sc_flags & WM_F_HAS_MII) {
1701 if (icr & ICR_LSC) {
1702 DPRINTF(WM_DEBUG_LINK,
1703 ("%s: LINK: LSC -> mii_tick\n",
1704 sc->sc_dev.dv_xname));
1705 mii_tick(&sc->sc_mii);
1706 } else if (icr & ICR_RXSEQ) {
1707 DPRINTF(WM_DEBUG_LINK,
1708 ("%s: LINK Receive sequence error\n",
1709 sc->sc_dev.dv_xname));
1710 }
1711 return;
1712 }
1713
1714 /*
1715 * If we are now receiving /C/, check for link again in
1716 * a couple of link clock ticks.
1717 */
1718 if (icr & ICR_RXCFG) {
1719 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1720 sc->sc_dev.dv_xname));
1721 sc->sc_tbi_anstate = 2;
1722 }
1723
1724 if (icr & ICR_LSC) {
1725 status = CSR_READ(sc, WMREG_STATUS);
1726 if (status & STATUS_LU) {
1727 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1728 sc->sc_dev.dv_xname,
1729 (status & STATUS_FD) ? "FDX" : "HDX"));
1730 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1731 if (status & STATUS_FD)
1732 sc->sc_tctl |=
1733 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1734 else
1735 sc->sc_tctl |=
1736 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1737 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1738 sc->sc_tbi_linkup = 1;
1739 } else {
1740 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1741 sc->sc_dev.dv_xname));
1742 sc->sc_tbi_linkup = 0;
1743 }
1744 sc->sc_tbi_anstate = 2;
1745 wm_tbi_set_linkled(sc);
1746 } else if (icr & ICR_RXSEQ) {
1747 DPRINTF(WM_DEBUG_LINK,
1748 ("%s: LINK: Receive sequence error\n",
1749 sc->sc_dev.dv_xname));
1750 }
1751 }
1752
1753 /*
1754 * wm_tick:
1755 *
1756 * One second timer, used to check link status, sweep up
1757 * completed transmit jobs, etc.
1758 */
1759 void
1760 wm_tick(void *arg)
1761 {
1762 struct wm_softc *sc = arg;
1763 int s;
1764
1765 s = splnet();
1766
1767 if (sc->sc_flags & WM_F_HAS_MII)
1768 mii_tick(&sc->sc_mii);
1769 else
1770 wm_tbi_check_link(sc);
1771
1772 splx(s);
1773
1774 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1775 }
1776
1777 /*
1778 * wm_reset:
1779 *
1780 * Reset the i82542 chip.
1781 */
1782 void
1783 wm_reset(struct wm_softc *sc)
1784 {
1785 int i;
1786
1787 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1788 delay(10000);
1789
1790 for (i = 0; i < 1000; i++) {
1791 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1792 return;
1793 delay(20);
1794 }
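	/*
	 * Note: the loop above polls for up to 1000 * 20us == 20ms on
	 * top of the initial 10ms settle time.  CTRL_RST is
	 * self-clearing, so reading it back as zero means the chip has
	 * finished its internal reset.
	 */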
1795
1796 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1797 printf("%s: WARNING: reset failed to complete\n",
1798 sc->sc_dev.dv_xname);
1799 }
1800
1801 /*
1802 * wm_init: [ifnet interface function]
1803 *
1804 * Initialize the interface. Must be called at splnet().
1805 */
1806 int
1807 wm_init(struct ifnet *ifp)
1808 {
1809 struct wm_softc *sc = ifp->if_softc;
1810 struct wm_rxsoft *rxs;
1811 int i, error = 0;
1812 uint32_t reg;
1813
1814 /* Cancel any pending I/O. */
1815 wm_stop(ifp, 0);
1816
1817 /* Reset the chip to a known state. */
1818 wm_reset(sc);
1819
1820 /* Initialize the transmit descriptor ring. */
1821 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1822 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1823 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1824 sc->sc_txfree = WM_NTXDESC;
1825 sc->sc_txnext = 0;
1826 sc->sc_txwin = 0;
1827
1828 if (sc->sc_type < WM_T_LIVENGOOD) {
1829 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1830 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1831 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1832 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1833 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1834 CSR_WRITE(sc, WMREG_OLD_TIDV, 64);
1835 } else {
1836 CSR_WRITE(sc, WMREG_TBDAH, 0);
1837 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1838 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1839 CSR_WRITE(sc, WMREG_TDH, 0);
1840 CSR_WRITE(sc, WMREG_TDT, 0);
1841 CSR_WRITE(sc, WMREG_TIDV, 64);
1842
1843 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1844 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1845 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1846 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1847 }
1848 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1849 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1850
1851 /* Initialize the transmit job descriptors. */
1852 for (i = 0; i < WM_TXQUEUELEN; i++)
1853 sc->sc_txsoft[i].txs_mbuf = NULL;
1854 sc->sc_txsfree = WM_TXQUEUELEN;
1855 sc->sc_txsnext = 0;
1856 sc->sc_txsdirty = 0;
1857
1858 /*
1859 * Initialize the receive descriptor and receive job
1860 * descriptor rings.
1861 */
1862 if (sc->sc_type < WM_T_LIVENGOOD) {
1863 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1864 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1865 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1866 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1867 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1868 CSR_WRITE(sc, WMREG_OLD_RDTR0, 64 | RDTR_FPD);
1869
1870 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1871 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1872 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1873 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1874 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1875 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1876 } else {
1877 CSR_WRITE(sc, WMREG_RDBAH, 0);
1878 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1879 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1880 CSR_WRITE(sc, WMREG_RDH, 0);
1881 CSR_WRITE(sc, WMREG_RDT, 0);
1882 CSR_WRITE(sc, WMREG_RDTR, 64 | RDTR_FPD);
1883 }
1884 for (i = 0; i < WM_NRXDESC; i++) {
1885 rxs = &sc->sc_rxsoft[i];
1886 if (rxs->rxs_mbuf == NULL) {
1887 if ((error = wm_add_rxbuf(sc, i)) != 0) {
1888 printf("%s: unable to allocate or map rx "
1889 "buffer %d, error = %d\n",
1890 sc->sc_dev.dv_xname, i, error);
1891 /*
1892 * XXX Should attempt to run with fewer receive
1893 * XXX buffers instead of just failing.
1894 */
1895 wm_rxdrain(sc);
1896 goto out;
1897 }
1898 } else
1899 WM_INIT_RXDESC(sc, i);
1900 }
1901 sc->sc_rxptr = 0;
1902 sc->sc_rxdiscard = 0;
1903 WM_RXCHAIN_RESET(sc);
1904
1905 /*
1906 * Clear out the VLAN table -- we don't use it (yet).
1907 */
1908 CSR_WRITE(sc, WMREG_VET, 0);
1909 for (i = 0; i < WM_VLAN_TABSIZE; i++)
1910 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
1911
1912 /*
1913 * Set up flow-control parameters.
1914 *
1915 * XXX Values could probably stand some tuning.
1916 */
1917 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
1918 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
1919 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
1920 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
1921
1922 if (sc->sc_type < WM_T_LIVENGOOD) {
1923 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
1924 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
1925 } else {
1926 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
1927 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
1928 }
1929 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
1930 }
1931
1932 #if 0 /* XXXJRT */
1933 /* Deal with VLAN enables. */
1934 if (sc->sc_ethercom.ec_nvlans != 0)
1935 sc->sc_ctrl |= CTRL_VME;
1936 else
1937 #endif /* XXXJRT */
1938 sc->sc_ctrl &= ~CTRL_VME;
1939
1940 /* Write the control registers. */
1941 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1942 #if 0
1943 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1944 #endif
1945
1946 /*
1947 * Set up checksum offload parameters.
1948 */
1949 reg = CSR_READ(sc, WMREG_RXCSUM);
1950 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
1951 reg |= RXCSUM_IPOFL;
1952 else
1953 reg &= ~RXCSUM_IPOFL;
1954 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
1955 reg |= RXCSUM_TUOFL;
1956 else
1957 reg &= ~RXCSUM_TUOFL;
1958 CSR_WRITE(sc, WMREG_RXCSUM, reg);
1959
1960 /*
1961 * Set up the interrupt registers.
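 *
 * Writing all-ones to IMC first masks every interrupt cause;
 * the IMS write below then enables only the interrupts we
 * service (plus RXCFG on TBI parts, where we must watch
 * autonegotiation ourselves).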
1962 */
1963 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
1964 sc->sc_icr = ICR_TXDW | ICR_TXQE | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
1965 ICR_RXO | ICR_RXT0;
1966 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
1967 sc->sc_icr |= ICR_RXCFG;
1968 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
1969
1970 /* Set up the inter-packet gap. */
1971 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
1972
1973 #if 0 /* XXXJRT */
1974 /* Set the VLAN ethernetype. */
1975 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
1976 #endif
1977
1978 /*
1979 * Set up the transmit control register; we start out with
1980 * a collision distance suitable for FDX, but update it when
1981 * we resolve the media type.
1982 */
1983 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
1984 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1985 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1986
1987 /* Set the media. */
1988 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
1989
1990 /*
1991 * Set up the receive control register; we actually program
1992 * the register when we set the receive filter. Use multicast
1993 * address offset type 0.
1994 *
1995 * Only the Cordova has the ability to strip the incoming
1996 * CRC, so we don't enable that feature.
1997 */
1998 sc->sc_mchash_type = 0;
1999 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2000 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2001
2002 /* Set the receive filter. */
2003 wm_set_filter(sc);
2004
2005 /* Start the one second link check clock. */
2006 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2007
2008 /* ...all done! */
2009 ifp->if_flags |= IFF_RUNNING;
2010 ifp->if_flags &= ~IFF_OACTIVE;
2011
2012 out:
2013 if (error)
2014 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2015 return (error);
2016 }
2017
2018 /*
2019 * wm_rxdrain:
2020 *
2021 * Drain the receive queue.
2022 */
2023 void
2024 wm_rxdrain(struct wm_softc *sc)
2025 {
2026 struct wm_rxsoft *rxs;
2027 int i;
2028
2029 for (i = 0; i < WM_NRXDESC; i++) {
2030 rxs = &sc->sc_rxsoft[i];
2031 if (rxs->rxs_mbuf != NULL) {
2032 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2033 m_freem(rxs->rxs_mbuf);
2034 rxs->rxs_mbuf = NULL;
2035 }
2036 }
2037 }
2038
2039 /*
2040 * wm_stop: [ifnet interface function]
2041 *
2042 * Stop transmission on the interface.
2043 */
2044 void
2045 wm_stop(struct ifnet *ifp, int disable)
2046 {
2047 struct wm_softc *sc = ifp->if_softc;
2048 struct wm_txsoft *txs;
2049 int i;
2050
2051 /* Stop the one second clock. */
2052 callout_stop(&sc->sc_tick_ch);
2053
2054 if (sc->sc_flags & WM_F_HAS_MII) {
2055 /* Down the MII. */
2056 mii_down(&sc->sc_mii);
2057 }
2058
2059 /* Stop the transmit and receive processes. */
2060 CSR_WRITE(sc, WMREG_TCTL, 0);
2061 CSR_WRITE(sc, WMREG_RCTL, 0);
2062
2063 /* Release any queued transmit buffers. */
2064 for (i = 0; i < WM_TXQUEUELEN; i++) {
2065 txs = &sc->sc_txsoft[i];
2066 if (txs->txs_mbuf != NULL) {
2067 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2068 m_freem(txs->txs_mbuf);
2069 txs->txs_mbuf = NULL;
2070 }
2071 }
2072
2073 if (disable)
2074 wm_rxdrain(sc);
2075
2076 /* Mark the interface as down and cancel the watchdog timer. */
2077 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2078 ifp->if_timer = 0;
2079 }
2080
2081 /*
2082 * wm_read_eeprom:
2083 *
2084 * Read data from the serial EEPROM.
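 *
 * The EEPROM is a Microwire device, accessed one 16-bit word at a
 * time by bit-banging EECD: shift in the 3-bit READ opcode and a
 * 6-bit word address on DI, then clock the 16 data bits out of DO
 * MSB-first, with roughly 2us between clock edges.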
2085 */
2086 void
2087 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2088 {
2089 uint32_t reg;
2090 int i, x;
2091
2092 for (i = 0; i < wordcnt; i++) {
2093 /* Send CHIP SELECT for one clock tick. */
2094 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2095 delay(2);
2096
2097 /* Shift in the READ command. */
2098 for (x = 3; x > 0; x--) {
2099 reg = EECD_CS;
2100 if (UWIRE_OPC_READ & (1 << (x - 1)))
2101 reg |= EECD_DI;
2102 CSR_WRITE(sc, WMREG_EECD, reg);
2103 delay(2);
2104 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2105 delay(2);
2106 CSR_WRITE(sc, WMREG_EECD, reg);
2107 delay(2);
2108 }
2109
2110 /* Shift in address. */
2111 for (x = 6; x > 0; x--) {
2112 reg = EECD_CS;
2113 if ((word + i) & (1 << (x - 1)))
2114 reg |= EECD_DI;
2115 CSR_WRITE(sc, WMREG_EECD, reg);
2116 delay(2);
2117 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2118 delay(2);
2119 CSR_WRITE(sc, WMREG_EECD, reg);
2120 delay(2);
2121 }
2122
2123 /* Shift out the data. */
2124 reg = EECD_CS;
2125 data[i] = 0;
2126 for (x = 16; x > 0; x--) {
2127 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2128 delay(2);
2129 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2130 data[i] |= (1 << (x - 1));
2131 CSR_WRITE(sc, WMREG_EECD, reg);
2132 delay(2);
2133 }
2134
2135 /* Clear CHIP SELECT. */
2136 CSR_WRITE(sc, WMREG_EECD, 0);
2137 }
2138 }
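
/*
 * For illustration (a sketch, not a second copy of the attach code):
 * the station address is read as three little-endian words starting
 * at EEPROM_OFF_MACADDR, e.g.:
 *
 *	uint16_t myea[3];
 *
 *	wm_read_eeprom(sc, EEPROM_OFF_MACADDR, 3, myea);
 *	enaddr[0] = myea[0] & 0xff;
 *	enaddr[1] = myea[0] >> 8;
 *	...and so on for the remaining words.
 */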
2139
2140 /*
2141 * wm_add_rxbuf:
2142 *
2143 * Add a receive buffer to the indicated descriptor.
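 *
 * Each receive buffer is a single mbuf cluster: the cluster is
 * loaded into the slot's DMA map, synced PREREAD, and the hardware
 * descriptor is then reinitialized to point at it.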
2144 */
2145 int
2146 wm_add_rxbuf(struct wm_softc *sc, int idx)
2147 {
2148 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2149 struct mbuf *m;
2150 int error;
2151
2152 MGETHDR(m, M_DONTWAIT, MT_DATA);
2153 if (m == NULL)
2154 return (ENOBUFS);
2155
2156 MCLGET(m, M_DONTWAIT);
2157 if ((m->m_flags & M_EXT) == 0) {
2158 m_freem(m);
2159 return (ENOBUFS);
2160 }
2161
2162 if (rxs->rxs_mbuf != NULL)
2163 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2164
2165 rxs->rxs_mbuf = m;
2166
2167 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2168 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2169 BUS_DMA_READ|BUS_DMA_NOWAIT);
2170 if (error) {
2171 printf("%s: unable to load rx DMA map %d, error = %d\n",
2172 sc->sc_dev.dv_xname, idx, error);
2173 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2174 }
2175
2176 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2177 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2178
2179 WM_INIT_RXDESC(sc, idx);
2180
2181 return (0);
2182 }
2183
2184 /*
2185 * wm_set_ral:
2186 *
2187 * Set an entry in the receive address list.
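 *
 * The address is packed little-endian; for example, 00:01:02:03:04:05
 * becomes ral_lo = 0x03020100 and ral_hi = RAL_AV | 0x0504.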
2188 */
2189 static void
2190 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2191 {
2192 uint32_t ral_lo, ral_hi;
2193
2194 if (enaddr != NULL) {
2195 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2196 (enaddr[3] << 24);
2197 ral_hi = enaddr[4] | (enaddr[5] << 8);
2198 ral_hi |= RAL_AV;
2199 } else {
2200 ral_lo = 0;
2201 ral_hi = 0;
2202 }
2203
2204 if (sc->sc_type >= WM_T_CORDOVA) {
2205 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2206 ral_lo);
2207 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2208 ral_hi);
2209 } else {
2210 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2211 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2212 }
2213 }
2214
2215 /*
2216 * wm_mchash:
2217 *
2218 * Compute the hash of the multicast address for the 4096-bit
2219 * multicast filter.
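 *
 * Only the last two bytes of the address participate.  For example,
 * with hash type 0 the address 01:00:5e:00:00:01 yields
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010, which selects bit 16 of
 * MTA word 0 in wm_set_filter() below.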
2220 */
2221 static uint32_t
2222 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2223 {
2224 static const int lo_shift[4] = { 4, 3, 2, 0 };
2225 static const int hi_shift[4] = { 4, 5, 6, 8 };
2226 uint32_t hash;
2227
2228 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2229 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2230
2231 return (hash & 0xfff);
2232 }
2233
2234 /*
2235 * wm_set_filter:
2236 *
2237 * Set up the receive filter.
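 *
 * The station address goes in RAL slot 0 and the remaining slots
 * are cleared; each multicast address then sets one bit in the
 * 4096-bit MTA hash table.  Address ranges (and promiscuous mode)
 * fall back to accepting all multicast packets.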
2238 */
2239 void
2240 wm_set_filter(struct wm_softc *sc)
2241 {
2242 struct ethercom *ec = &sc->sc_ethercom;
2243 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2244 struct ether_multi *enm;
2245 struct ether_multistep step;
2246 bus_addr_t mta_reg;
2247 uint32_t hash, reg, bit;
2248 int i;
2249
2250 if (sc->sc_type >= WM_T_CORDOVA)
2251 mta_reg = WMREG_CORDOVA_MTA;
2252 else
2253 mta_reg = WMREG_MTA;
2254
2255 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2256
2257 if (ifp->if_flags & IFF_BROADCAST)
2258 sc->sc_rctl |= RCTL_BAM;
2259 if (ifp->if_flags & IFF_PROMISC) {
2260 sc->sc_rctl |= RCTL_UPE;
2261 goto allmulti;
2262 }
2263
2264 /*
2265 * Set the station address in the first RAL slot, and
2266 * clear the remaining slots.
2267 */
2268 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2269 for (i = 1; i < WM_RAL_TABSIZE; i++)
2270 wm_set_ral(sc, NULL, i);
2271
2272 /* Clear out the multicast table. */
2273 for (i = 0; i < WM_MC_TABSIZE; i++)
2274 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2275
2276 ETHER_FIRST_MULTI(step, ec, enm);
2277 while (enm != NULL) {
2278 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2279 /*
2280 * We must listen to a range of multicast addresses.
2281 * For now, just accept all multicasts, rather than
2282 * trying to set only those filter bits needed to match
2283 * the range. (At this time, the only use of address
2284 * ranges is for IP multicast routing, for which the
2285 * range is big enough to require all bits set.)
2286 */
2287 goto allmulti;
2288 }
2289
2290 hash = wm_mchash(sc, enm->enm_addrlo);
2291
2292 reg = (hash >> 5) & 0x7f;
2293 bit = hash & 0x1f;
2294
2295 hash = CSR_READ(sc, mta_reg + (reg << 2));
2296 hash |= 1U << bit;
2297
2298 /* XXX Cordova erratum: an odd MTA write can clobber its neighbor. */
2299 if (sc->sc_type == WM_T_CORDOVA && (reg & 1) != 0) {
2300 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2301 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2302 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2303 } else
2304 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2305
2306 ETHER_NEXT_MULTI(step, enm);
2307 }
2308
2309 ifp->if_flags &= ~IFF_ALLMULTI;
2310 goto setit;
2311
2312 allmulti:
2313 ifp->if_flags |= IFF_ALLMULTI;
2314 sc->sc_rctl |= RCTL_MPE;
2315
2316 setit:
2317 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2318 }
2319
2320 /*
2321 * wm_tbi_mediainit:
2322 *
2323 * Initialize media for use on 1000BASE-X devices.
2324 */
2325 void
2326 wm_tbi_mediainit(struct wm_softc *sc)
2327 {
2328 const char *sep = "";
2329
2330 if (sc->sc_type < WM_T_LIVENGOOD)
2331 sc->sc_tipg = TIPG_WM_DFLT;
2332 else
2333 sc->sc_tipg = TIPG_LG_DFLT;
2334
2335 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2336 wm_tbi_mediastatus);
2337
2338 /*
2339 * SWD Pins:
2340 *
2341 * 0 = Link LED (output)
2342 * 1 = Loss Of Signal (input)
2343 */
2344 sc->sc_ctrl |= CTRL_SWDPIO(0);
2345 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2346
2347 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2348
2349 #define ADD(s, m, d) \
2350 do { \
2351 printf("%s%s", sep, s); \
2352 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2353 sep = ", "; \
2354 } while (/*CONSTCOND*/0)
2355
2356 printf("%s: ", sc->sc_dev.dv_xname);
2357 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2358 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2359 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2360 printf("\n");
2361
2362 #undef ADD
2363
2364 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2365 }
2366
2367 /*
2368 * wm_tbi_mediastatus: [ifmedia interface function]
2369 *
2370 * Get the current interface media status on a 1000BASE-X device.
2371 */
2372 void
2373 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2374 {
2375 struct wm_softc *sc = ifp->if_softc;
2376
2377 ifmr->ifm_status = IFM_AVALID;
2378 ifmr->ifm_active = IFM_ETHER;
2379
2380 if (sc->sc_tbi_linkup == 0) {
2381 ifmr->ifm_active |= IFM_NONE;
2382 return;
2383 }
2384
2385 ifmr->ifm_status |= IFM_ACTIVE;
2386 ifmr->ifm_active |= IFM_1000_SX;
2387 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2388 ifmr->ifm_active |= IFM_FDX;
2389 }
2390
2391 /*
2392 * wm_tbi_mediachange: [ifmedia interface function]
2393 *
2394 * Set hardware to newly-selected media on a 1000BASE-X device.
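 *
 * TXCW carries our 802.3z autonegotiation code word: the ability
 * bits recorded for the selected media, pause bits derived from
 * the flow-control settings, and TXCW_ANE to enable
 * autonegotiation itself.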
2395 */
2396 int
2397 wm_tbi_mediachange(struct ifnet *ifp)
2398 {
2399 struct wm_softc *sc = ifp->if_softc;
2400 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2401 uint32_t status;
2402 int i;
2403
2404 sc->sc_txcw = ife->ifm_data;
2405 if (sc->sc_ctrl & CTRL_RFCE)
2406 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2407 if (sc->sc_ctrl & CTRL_TFCE)
2408 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2409 sc->sc_txcw |= TXCW_ANE;
2410
2411 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2412 delay(10000);
2413
2414 sc->sc_tbi_anstate = 0;
2415
2416 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2417 /* Have signal; wait for the link to come up. */
2418 for (i = 0; i < 50; i++) {
2419 delay(10000);
2420 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2421 break;
2422 }
2423
2424 status = CSR_READ(sc, WMREG_STATUS);
2425 if (status & STATUS_LU) {
2426 /* Link is up. */
2427 DPRINTF(WM_DEBUG_LINK,
2428 ("%s: LINK: set media -> link up %s\n",
2429 sc->sc_dev.dv_xname,
2430 (status & STATUS_FD) ? "FDX" : "HDX"));
2431 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2432 if (status & STATUS_FD)
2433 sc->sc_tctl |=
2434 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2435 else
2436 sc->sc_tctl |=
2437 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2438 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2439 sc->sc_tbi_linkup = 1;
2440 } else {
2441 /* Link is down. */
2442 DPRINTF(WM_DEBUG_LINK,
2443 ("%s: LINK: set media -> link down\n",
2444 sc->sc_dev.dv_xname));
2445 sc->sc_tbi_linkup = 0;
2446 }
2447 } else {
2448 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2449 sc->sc_dev.dv_xname));
2450 sc->sc_tbi_linkup = 0;
2451 }
2452
2453 wm_tbi_set_linkled(sc);
2454
2455 return (0);
2456 }
2457
2458 /*
2459 * wm_tbi_set_linkled:
2460 *
2461 * Update the link LED on 1000BASE-X devices.
2462 */
2463 void
2464 wm_tbi_set_linkled(struct wm_softc *sc)
2465 {
2466
2467 if (sc->sc_tbi_linkup)
2468 sc->sc_ctrl |= CTRL_SWDPIN(0);
2469 else
2470 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2471
2472 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2473 }
2474
2475 /*
2476 * wm_tbi_check_link:
2477 *
2478 * Check the link on 1000BASE-X devices.
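 *
 * sc_tbi_anstate is a countdown that gives autonegotiation time to
 * settle: while it is above one we only decrement it, and the link
 * state is actually sampled on the tick where it reaches one.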
2479 */
2480 void
2481 wm_tbi_check_link(struct wm_softc *sc)
2482 {
2483 uint32_t rxcw, ctrl, status;
2484
2485 if (sc->sc_tbi_anstate == 0)
2486 return;
2487 else if (sc->sc_tbi_anstate > 1) {
2488 DPRINTF(WM_DEBUG_LINK,
2489 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2490 sc->sc_tbi_anstate));
2491 sc->sc_tbi_anstate--;
2492 return;
2493 }
2494
2495 sc->sc_tbi_anstate = 0;
2496
2497 rxcw = CSR_READ(sc, WMREG_RXCW);
2498 ctrl = CSR_READ(sc, WMREG_CTRL);
2499 status = CSR_READ(sc, WMREG_STATUS);
2500
2501 if ((status & STATUS_LU) == 0) {
2502 DPRINTF(WM_DEBUG_LINK,
2503 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2504 sc->sc_tbi_linkup = 0;
2505 } else {
2506 DPRINTF(WM_DEBUG_LINK,
2507 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2508 (status & STATUS_FD) ? "FDX" : "HDX"));
2509 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2510 if (status & STATUS_FD)
2511 sc->sc_tctl |=
2512 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2513 else
2514 sc->sc_tctl |=
2515 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2516 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2517 sc->sc_tbi_linkup = 1;
2518 }
2519
2520 wm_tbi_set_linkled(sc);
2521 }
2522
2523 /*
2524 * wm_gmii_reset:
2525 *
2526 * Reset the PHY.
2527 */
2528 void
2529 wm_gmii_reset(struct wm_softc *sc)
2530 {
2531 uint32_t reg;
2532
2533 if (sc->sc_type >= WM_T_CORDOVA) {
2534 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2535 delay(20000);
2536
2537 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2538 delay(20000);
2539 } else {
2540 /* The PHY reset pin is active-low. */
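/*
 * Configure SWDPIN 4 as an output and pulse it low (high, low,
 * high again) to generate the reset, allowing 10us for each
 * transition to settle.
 */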
2541 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2542 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2543 CTRL_EXT_SWDPIN(4));
2544 reg |= CTRL_EXT_SWDPIO(4);
2545
2546 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2547 delay(10);
2548
2549 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2550 delay(10);
2551
2552 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2553 delay(10);
2554 #if 0
2555 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2556 #endif
2557 }
2558 }
2559
2560 /*
2561 * wm_gmii_mediainit:
2562 *
2563 * Initialize media for use on 1000BASE-T devices.
2564 */
2565 void
2566 wm_gmii_mediainit(struct wm_softc *sc)
2567 {
2568 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2569
2570 /* We have MII. */
2571 sc->sc_flags |= WM_F_HAS_MII;
2572
2573 sc->sc_tipg = TIPG_1000T_DFLT;
2574
2575 /*
2576 * Let the chip set speed/duplex on its own based on
2577 * signals from the PHY.
2578 */
2579 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2580 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2581
2582 /* Initialize our media structures and probe the GMII. */
2583 sc->sc_mii.mii_ifp = ifp;
2584
2585 if (sc->sc_type >= WM_T_CORDOVA) {
2586 sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
2587 sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
2588 } else {
2589 sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
2590 sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
2591 }
2592 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2593
2594 wm_gmii_reset(sc);
2595
2596 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2597 wm_gmii_mediastatus);
2598
2599 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2600 MII_OFFSET_ANY, 0);
2601 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2602 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2603 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2604 } else
2605 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2606 }
2607
2608 /*
2609 * wm_gmii_mediastatus: [ifmedia interface function]
2610 *
2611 * Get the current interface media status on a 1000BASE-T device.
2612 */
2613 void
2614 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2615 {
2616 struct wm_softc *sc = ifp->if_softc;
2617
2618 mii_pollstat(&sc->sc_mii);
2619 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2620 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2621 }
2622
2623 /*
2624 * wm_gmii_mediachange: [ifmedia interface function]
2625 *
2626 * Set hardware to newly-selected media on a 1000BASE-T device.
2627 */
2628 int
2629 wm_gmii_mediachange(struct ifnet *ifp)
2630 {
2631 struct wm_softc *sc = ifp->if_softc;
2632
2633 if (ifp->if_flags & IFF_UP)
2634 mii_mediachg(&sc->sc_mii);
2635 return (0);
2636 }
2637
2638 #define MDI_IO CTRL_SWDPIN(2)
2639 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2640 #define MDI_CLK CTRL_SWDPIN(3)
2641
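/*
 * The Livengood bit-bangs MDIO through two software-definable pins
 * in CTRL: pin 2 carries the data and pin 3 the clock.  Frames are
 * shifted MSB-first with roughly a 10us half-period on the clock,
 * preceded by a 32-bit preamble of ones.
 */
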
2642 static void
2643 livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2644 {
2645 uint32_t i, v;
2646
2647 v = CSR_READ(sc, WMREG_CTRL);
2648 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2649 v |= MDI_DIR | CTRL_SWDPIO(3);
2650
2651 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2652 if (data & i)
2653 v |= MDI_IO;
2654 else
2655 v &= ~MDI_IO;
2656 CSR_WRITE(sc, WMREG_CTRL, v);
2657 delay(10);
2658 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2659 delay(10);
2660 CSR_WRITE(sc, WMREG_CTRL, v);
2661 delay(10);
2662 }
2663 }
2664
2665 static uint32_t
2666 livengood_mii_recvbits(struct wm_softc *sc)
2667 {
2668 uint32_t v, i, data = 0;
2669
2670 v = CSR_READ(sc, WMREG_CTRL);
2671 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2672 v |= CTRL_SWDPIO(3);
2673
2674 CSR_WRITE(sc, WMREG_CTRL, v);
2675 delay(10);
2676 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2677 delay(10);
2678 CSR_WRITE(sc, WMREG_CTRL, v);
2679 delay(10);
2680
2681 for (i = 0; i < 16; i++) {
2682 data <<= 1;
2683 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2684 delay(10);
2685 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2686 data |= 1;
2687 CSR_WRITE(sc, WMREG_CTRL, v);
2688 delay(10);
2689 }
2690
2691 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2692 delay(10);
2693 CSR_WRITE(sc, WMREG_CTRL, v);
2694 delay(10);
2695
2696 return (data);
2697 }
2698
2699 #undef MDI_IO
2700 #undef MDI_DIR
2701 #undef MDI_CLK
2702
2703 /*
2704 * wm_gmii_livengood_readreg: [mii interface function]
2705 *
2706 * Read a PHY register on the GMII (Livengood version).
2707 */
2708 int
2709 wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
2710 {
2711 struct wm_softc *sc = (void *) self;
2712 int rv;
2713
2714 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2715 livengood_mii_sendbits(sc, reg | (phy << 5) |
2716 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2717 rv = livengood_mii_recvbits(sc) & 0xffff;
2718
2719 DPRINTF(WM_DEBUG_GMII,
2720 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2721 sc->sc_dev.dv_xname, phy, reg, rv));
2722
2723 return (rv);
2724 }
2725
2726 /*
2727 * wm_gmii_livengood_writereg: [mii interface function]
2728 *
2729 * Write a PHY register on the GMII (Livengood version).
2730 */
2731 void
2732 wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
2733 {
2734 struct wm_softc *sc = (void *) self;
2735
2736 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2737 livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2738 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2739 (MII_COMMAND_START << 30), 32);
2740 }
2741
2742 /*
2743 * wm_gmii_cordova_readreg: [mii interface function]
2744 *
2745 * Read a PHY register on the GMII.
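 *
 * Unlike the Livengood, the Cordova has an MDIC register that runs
 * the MDIO frame in hardware; we start the operation and then poll
 * MDIC_READY for up to about 1ms (100 polls, 10us apart).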
2746 */
2747 int
2748 wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
2749 {
2750 struct wm_softc *sc = (void *) self;
2751 uint32_t mdic;
2752 int i, rv;
2753
2754 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2755 MDIC_REGADD(reg));
2756
2757 for (i = 0; i < 100; i++) {
2758 mdic = CSR_READ(sc, WMREG_MDIC);
2759 if (mdic & MDIC_READY)
2760 break;
2761 delay(10);
2762 }
2763
2764 if ((mdic & MDIC_READY) == 0) {
2765 printf("%s: MDIC read timed out: phy %d reg %d\n",
2766 sc->sc_dev.dv_xname, phy, reg);
2767 rv = 0;
2768 } else if (mdic & MDIC_E) {
2769 #if 0 /* This is normal if no PHY is present. */
2770 printf("%s: MDIC read error: phy %d reg %d\n",
2771 sc->sc_dev.dv_xname, phy, reg);
2772 #endif
2773 rv = 0;
2774 } else {
2775 rv = MDIC_DATA(mdic);
2776 if (rv == 0xffff)
2777 rv = 0;
2778 }
2779
2780 return (rv);
2781 }
2782
2783 /*
2784 * wm_gmii_cordova_writereg: [mii interface function]
2785 *
2786 * Write a PHY register on the GMII.
2787 */
2788 void
2789 wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
2790 {
2791 struct wm_softc *sc = (void *) self;
2792 uint32_t mdic;
2793 int i;
2794
2795 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2796 MDIC_REGADD(reg) | MDIC_DATA(val));
2797
2798 for (i = 0; i < 100; i++) {
2799 mdic = CSR_READ(sc, WMREG_MDIC);
2800 if (mdic & MDIC_READY)
2801 break;
2802 delay(10);
2803 }
2804
2805 if ((mdic & MDIC_READY) == 0)
2806 printf("%s: MDIC write timed out: phy %d reg %d\n",
2807 sc->sc_dev.dv_xname, phy, reg);
2808 else if (mdic & MDIC_E)
2809 printf("%s: MDIC write error: phy %d reg %d\n",
2810 sc->sc_dev.dv_xname, phy, reg);
2811 }
2812
2813 /*
2814 * wm_gmii_statchg: [mii interface function]
2815 *
2816 * Callback from MII layer when media changes.
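 *
 * All we need to do here is update the transmit collision distance
 * for the duplex the MII layer resolved.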
2817 */
2818 void
2819 wm_gmii_statchg(struct device *self)
2820 {
2821 struct wm_softc *sc = (void *) self;
2822
2823 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2824
2825 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2826 DPRINTF(WM_DEBUG_LINK,
2827 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2828 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2829 } else {
2830 DPRINTF(WM_DEBUG_LINK,
2831 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2832 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2833 }
2834
2835 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2836 }
2837