/*	$NetBSD: if_wm.c,v 1.2 2002/05/02 16:33:27 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i82542 (``Wiseman''),
 * i82543 (``Livengood''), and i82544 (``Cordova'') Gigabit
 * Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 *
 *	- Make GMII work on the Livengood.
 *
 *	- Fix out-bound IP header checksums.
 *
 *	- Fix UDP checksums.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 *
 * ...and, of course, performance tuning.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 32 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		32
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
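
/*
 * Note that the NEXTTX/NEXTTXS macros rely on WM_NTXDESC and
 * WM_TXQUEUELEN being powers of two, so the ring indices wrap with a
 * simple AND; e.g. WM_NEXTTX(255) == (256 & 0xff) == 0.
 */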
129
130 /*
131 * The interrupt mitigation feature of the Wiseman is pretty cool -- as
132 * long as you're transmitting, you don't have to take an interrupt at
133 * all. However, we force an interrupt to happen every N + 1 packets
134 * in order to kick us in a reasonable amount of time when we run out
135 * of descriptors.
136 */
137 #define WM_TXINTR_MASK 7
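
/*
 * wm_start() tests (sc_txsnext & WM_TXINTR_MASK); a job whose index is
 * a multiple of (WM_TXINTR_MASK + 1) is queued without the
 * interrupt-delay bit, so a prompt Tx interrupt is requested at least
 * once per 8 packets.
 */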

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 128 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 25 jumbo packets.
 */
#define	WM_NRXDESC		128
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
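
/*
 * sc_rxtailp always points at the m_next field of the last mbuf in the
 * Rx chain (or at sc_rxhead when the chain is empty), so LINK appends
 * in O(1) without walking the chain.
 */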

/* sc_type */
#define	WM_T_WISEMAN_2_0	0	/* Wiseman (i82542) 2.0 (really old) */
#define	WM_T_WISEMAN_2_1	1	/* Wiseman (i82542) 2.1+ (old) */
#define	WM_T_LIVENGOOD		2	/* Livengood (i82543) */
#define	WM_T_CORDOVA		3	/* Cordova (i82544) */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
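
/*
 * The bus address handed to the chip for descriptor N is just the
 * control-block base DMA address plus offsetof() the N'th descriptor;
 * this works because the whole block was loaded as one DMA segment.
 */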

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
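
/*
 * Example: syncing 4 descriptors starting at index 254 of the
 * 256-entry ring syncs 254..255 first, then 0..1, since a dmamap
 * offset range cannot wrap.
 */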

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_livengood_readreg(struct device *, int, int);
void	wm_gmii_livengood_writereg(struct device *, int, int, int);

int	wm_gmii_cordova_readreg(struct device *, int, int);
void	wm_gmii_cordova_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

int	wm_copy_small = 0;

struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	int wmp_type;
	int wmp_flags;
#define	WMP_F_1000X	0x01
#define	WMP_F_1000T	0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_WISEMAN_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_FIBER,
	  "Intel i82543 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_SC,
	  "Intel i82543-SC 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_COPPER,
	  "Intel i82543 1000BASE-T Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XT,
	  "Intel i82544 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XF,
	  "Intel i82544 1000BASE-X Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_64,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_LIVENGOOD) {
		if (preg < 2) {
			printf("%s: Wiseman must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_WISEMAN_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the Wiseman 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_WISEMAN_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
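	/*
	 * Each 16-bit EEPROM word holds two address octets, low byte
	 * first: e.g. myea[0] == 0x2200 yields enaddr[0] = 0x00 and
	 * enaddr[1] = 0x22.
	 */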

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_CORDOVA)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_CORDOVA) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_CORDOVA) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the Wiseman and the Livengood and later chips.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a Wiseman < 2.1.
	 */
	if (sc->sc_type >= WM_T_WISEMAN_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a Livengood or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on the Livengood and later.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct mbuf *m0, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, tcmd = 0, ipcs, tucs;
	struct ip *ip;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* XXX Assumes normal Ethernet encap. */
	offset = ETHER_HDR_LEN;

	/* XXX */
	if (m0->m_len < (offset + sizeof(struct ip))) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		tcmd |= htole32(WTX_TCPIP_CMD_IP);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else
		ipcs = 0;

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		tcmd |= htole32(WTX_TCPIP_CMD_TCP);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else
		tucs = 0;

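	/*
	 * All of these offsets are from the start of the frame.  For
	 * example, with a 14-byte Ethernet header and a 20-byte IP
	 * header: IPCSS = 14, IPCSO = 24 (ip_sum), IPCSE = 33, then
	 * TUCSS = 34 and, for TCP (csum_data = 16), TUCSO = 50.
	 */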
	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *) &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = ipcs;
	t->tcpip_tucs = tucs;
	t->tcpip_cmdlen =
	    htole32(WTX_CMD_DEXT | WTX_CMD_IDE | WTX_DTYP_C) | tcmd;
	t->tcpip_seg = 0;
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: no free job descriptors\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txsstall);
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, m0, &cksumcmd, &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
		if (sc->sc_txsnext & WM_TXINTR_MASK)
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_IDE);
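
		/*
		 * RS requests a status write-back on the last
		 * descriptor of every packet; IDE lets the chip delay
		 * the resulting interrupt.  Jobs whose index is a
		 * multiple of (WM_TXINTR_MASK + 1) omit IDE, forcing a
		 * prompt Tx interrupt at least once per 8 packets.
		 */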

#if 0	/* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
			wm_rxintr(sc);
		}

		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txintr);
			wm_txintr(sc);
		}

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the Cordova.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	if (sc->sc_type < WM_T_LIVENGOOD) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 64);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 64);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 64 | RDTR_FPD);

		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 64 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_LIVENGOOD) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_TUOFL;
	else
		reg &= ~RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
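	/*
	 * Writing all-ones to IMC first disables every interrupt
	 * source; IMS then enables just the bits in sc_icr, so stale
	 * enables from a previous run can't leak through.
	 */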

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
1949 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
1950 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1951 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1952
1953 /* Set the media. */
1954 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
1955
1956 /*
1957 * Set up the receive control register; we actually program
1958 * the register when we set the receive filter. Use multicast
1959 * address offset type 0.
1960 *
1961 	 * Only the Cordova can strip the incoming CRC, so for
1962 	 * consistency we don't enable that feature on any chip.
1963 */
1964 sc->sc_mchash_type = 0;
1965 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
1966 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
1967
1968 /* Set the receive filter. */
1969 wm_set_filter(sc);
1970
1971 /* Start the one second link check clock. */
1972 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1973
1974 /* ...all done! */
1975 ifp->if_flags |= IFF_RUNNING;
1976 ifp->if_flags &= ~IFF_OACTIVE;
1977
1978 out:
1979 if (error)
1980 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1981 return (error);
1982 }
1983
1984 /*
1985 * wm_rxdrain:
1986 *
1987 * Drain the receive queue.
1988 */
1989 void
1990 wm_rxdrain(struct wm_softc *sc)
1991 {
1992 struct wm_rxsoft *rxs;
1993 int i;
1994
1995 for (i = 0; i < WM_NRXDESC; i++) {
1996 rxs = &sc->sc_rxsoft[i];
1997 if (rxs->rxs_mbuf != NULL) {
1998 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1999 m_freem(rxs->rxs_mbuf);
2000 rxs->rxs_mbuf = NULL;
2001 }
2002 }
2003 }
2004
2005 /*
2006 * wm_stop: [ifnet interface function]
2007 *
2008  *	Stop the transmit and receive processes on the interface.
2009 */
2010 void
2011 wm_stop(struct ifnet *ifp, int disable)
2012 {
2013 struct wm_softc *sc = ifp->if_softc;
2014 struct wm_txsoft *txs;
2015 int i;
2016
2017 /* Stop the one second clock. */
2018 callout_stop(&sc->sc_tick_ch);
2019
2020 if (sc->sc_flags & WM_F_HAS_MII) {
2021 /* Down the MII. */
2022 mii_down(&sc->sc_mii);
2023 }
2024
2025 /* Stop the transmit and receive processes. */
2026 CSR_WRITE(sc, WMREG_TCTL, 0);
2027 CSR_WRITE(sc, WMREG_RCTL, 0);
2028
2029 /* Release any queued transmit buffers. */
2030 for (i = 0; i < WM_TXQUEUELEN; i++) {
2031 txs = &sc->sc_txsoft[i];
2032 if (txs->txs_mbuf != NULL) {
2033 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2034 m_freem(txs->txs_mbuf);
2035 txs->txs_mbuf = NULL;
2036 }
2037 }
2038
2039 if (disable)
2040 wm_rxdrain(sc);
2041
2042 /* Mark the interface as down and cancel the watchdog timer. */
2043 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2044 ifp->if_timer = 0;
2045 }
2046
2047 /*
2048 * wm_read_eeprom:
2049 *
2050 * Read data from the serial EEPROM.
2051 */
2052 void
2053 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2054 {
2055 uint32_t reg;
2056 int i, x;
2057
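	/*
	 * The EEPROM is read by bit-banging a Microwire-style
	 * interface: a 3-bit READ opcode, a 6-bit word address
	 * (so a 64x16, 93C46-style part is assumed here), then 16
	 * data bits shifted back out, all MSB-first, one bit per
	 * EECD_SK clock pulse.
	 */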
2058 for (i = 0; i < wordcnt; i++) {
2059 /* Send CHIP SELECT for one clock tick. */
2060 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2061 delay(2);
2062
2063 /* Shift in the READ command. */
2064 for (x = 3; x > 0; x--) {
2065 reg = EECD_CS;
2066 if (UWIRE_OPC_READ & (1 << (x - 1)))
2067 reg |= EECD_DI;
2068 CSR_WRITE(sc, WMREG_EECD, reg);
2069 delay(2);
2070 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2071 delay(2);
2072 CSR_WRITE(sc, WMREG_EECD, reg);
2073 delay(2);
2074 }
2075
2076 /* Shift in address. */
2077 for (x = 6; x > 0; x--) {
2078 reg = EECD_CS;
2079 if ((word + i) & (1 << (x - 1)))
2080 reg |= EECD_DI;
2081 CSR_WRITE(sc, WMREG_EECD, reg);
2082 delay(2);
2083 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2084 delay(2);
2085 CSR_WRITE(sc, WMREG_EECD, reg);
2086 delay(2);
2087 }
2088
2089 /* Shift out the data. */
2090 reg = EECD_CS;
2091 data[i] = 0;
2092 for (x = 16; x > 0; x--) {
2093 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2094 delay(2);
2095 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2096 data[i] |= (1 << (x - 1));
2097 CSR_WRITE(sc, WMREG_EECD, reg);
2098 delay(2);
2099 }
2100
2101 /* Clear CHIP SELECT. */
2102 CSR_WRITE(sc, WMREG_EECD, 0);
2103 }
2104 }
2105
2106 /*
2107 * wm_add_rxbuf:
2108 *
2109  *	Add a receive buffer to the indicated descriptor.
2110 */
2111 int
2112 wm_add_rxbuf(struct wm_softc *sc, int idx)
2113 {
2114 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2115 struct mbuf *m;
2116 int error;
2117
2118 MGETHDR(m, M_DONTWAIT, MT_DATA);
2119 if (m == NULL)
2120 return (ENOBUFS);
2121
2122 MCLGET(m, M_DONTWAIT);
2123 if ((m->m_flags & M_EXT) == 0) {
2124 m_freem(m);
2125 return (ENOBUFS);
2126 }
2127
2128 if (rxs->rxs_mbuf != NULL)
2129 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2130
2131 rxs->rxs_mbuf = m;
2132
2133 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2134 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2135 BUS_DMA_READ|BUS_DMA_NOWAIT);
2136 if (error) {
2137 printf("%s: unable to load rx DMA map %d, error = %d\n",
2138 sc->sc_dev.dv_xname, idx, error);
2139 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2140 }
2141
2142 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2143 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2144
2145 WM_INIT_RXDESC(sc, idx);
2146
2147 return (0);
2148 }
2149
2150 /*
2151 * wm_set_ral:
2152 *
2153  *	Set an entry in the receive address list.
2154 */
2155 static void
2156 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2157 {
2158 uint32_t ral_lo, ral_hi;
2159
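	/*
	 * Each receive address occupies a register pair: the first
	 * four octets go in the low register, the remaining two in
	 * the low half of the high register, along with RAL_AV to
	 * mark the entry valid.
	 */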
2160 if (enaddr != NULL) {
2161 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2162 (enaddr[3] << 24);
2163 ral_hi = enaddr[4] | (enaddr[5] << 8);
2164 ral_hi |= RAL_AV;
2165 } else {
2166 ral_lo = 0;
2167 ral_hi = 0;
2168 }
2169
2170 if (sc->sc_type >= WM_T_CORDOVA) {
2171 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2172 ral_lo);
2173 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2174 ral_hi);
2175 } else {
2176 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2177 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2178 }
2179 }
2180
2181 /*
2182 * wm_mchash:
2183 *
2184 * Compute the hash of the multicast address for the 4096-bit
2185 * multicast filter.
2186 */
2187 static uint32_t
2188 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2189 {
2190 static const int lo_shift[4] = { 4, 3, 2, 0 };
2191 static const int hi_shift[4] = { 4, 5, 6, 8 };
2192 uint32_t hash;
2193
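	/*
	 * The 12-bit result indexes the 4096-bit multicast filter
	 * table; the shift pair selects which 12 bits of the two
	 * high-order address octets are used, according to the
	 * multicast offset type.  E.g. with type 0 and an address
	 * ending in ...:a4:3c, the hash is
	 * ((0xa4 >> 4) | (0x3c << 4)) & 0xfff = 0x3ca.
	 */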
2194 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2195 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2196
2197 return (hash & 0xfff);
2198 }
2199
2200 /*
2201 * wm_set_filter:
2202 *
2203 * Set up the receive filter.
2204 */
2205 void
2206 wm_set_filter(struct wm_softc *sc)
2207 {
2208 struct ethercom *ec = &sc->sc_ethercom;
2209 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2210 struct ether_multi *enm;
2211 struct ether_multistep step;
2212 bus_addr_t mta_reg;
2213 uint32_t hash, reg, bit;
2214 int i;
2215
2216 if (sc->sc_type >= WM_T_CORDOVA)
2217 mta_reg = WMREG_CORDOVA_MTA;
2218 else
2219 mta_reg = WMREG_MTA;
2220
2221 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2222
2223 if (ifp->if_flags & IFF_BROADCAST)
2224 sc->sc_rctl |= RCTL_BAM;
2225 if (ifp->if_flags & IFF_PROMISC) {
2226 sc->sc_rctl |= RCTL_UPE;
2227 goto allmulti;
2228 }
2229
2230 /*
2231 * Set the station address in the first RAL slot, and
2232 * clear the remaining slots.
2233 */
2234 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2235 for (i = 1; i < WM_RAL_TABSIZE; i++)
2236 wm_set_ral(sc, NULL, i);
2237
2238 /* Clear out the multicast table. */
2239 for (i = 0; i < WM_MC_TABSIZE; i++)
2240 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2241
2242 ETHER_FIRST_MULTI(step, ec, enm);
2243 while (enm != NULL) {
2244 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2245 /*
2246 * We must listen to a range of multicast addresses.
2247 * For now, just accept all multicasts, rather than
2248 * trying to set only those filter bits needed to match
2249 * the range. (At this time, the only use of address
2250 * ranges is for IP multicast routing, for which the
2251 * range is big enough to require all bits set.)
2252 */
2253 goto allmulti;
2254 }
2255
2256 hash = wm_mchash(sc, enm->enm_addrlo);
2257
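		/*
		 * The table is 128 32-bit registers; hash bits 11:5
		 * select the register, bits 4:0 the bit within it.
		 */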
2258 reg = (hash >> 5) & 0x7f;
2259 bit = hash & 0x1f;
2260
2261 hash = CSR_READ(sc, mta_reg + (reg << 2));
2262 hash |= 1U << bit;
2263
2264 		/*
		 * XXX Hardware bug??  On the Cordova, a write to an
		 * odd-numbered MTA register appears to clobber its even
		 * neighbor, so save and re-write the neighbor.  Note the
		 * original test here, `(reg & 0xe) == 1', could never be
		 * true; testing for an odd offset (as other drivers for
		 * this chip do) is assumed to be the intended check.
		 */
2265 		if (sc->sc_type == WM_T_CORDOVA && (reg & 1) != 0) {
2266 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2267 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2268 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2269 } else
2270 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2271
2272 ETHER_NEXT_MULTI(step, enm);
2273 }
2274
2275 ifp->if_flags &= ~IFF_ALLMULTI;
2276 goto setit;
2277
2278 allmulti:
2279 ifp->if_flags |= IFF_ALLMULTI;
2280 sc->sc_rctl |= RCTL_MPE;
2281
2282 setit:
2283 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2284 }
2285
2286 /*
2287 * wm_tbi_mediainit:
2288 *
2289 * Initialize media for use on 1000BASE-X devices.
2290 */
2291 void
2292 wm_tbi_mediainit(struct wm_softc *sc)
2293 {
2294 const char *sep = "";
2295
2296 if (sc->sc_type < WM_T_LIVENGOOD)
2297 sc->sc_tipg = TIPG_WM_DFLT;
2298 else
2299 sc->sc_tipg = TIPG_LG_DFLT;
2300
2301 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2302 wm_tbi_mediastatus);
2303
2304 /*
2305 * SWD Pins:
2306 *
2307 * 0 = Link LED (output)
2308 * 1 = Loss Of Signal (input)
2309 */
2310 sc->sc_ctrl |= CTRL_SWDPIO(0);
2311 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2312
2313 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2314
2315 #define ADD(s, m, d) \
2316 do { \
2317 printf("%s%s", sep, s); \
2318 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2319 sep = ", "; \
2320 } while (/*CONSTCOND*/0)
2321
2322 printf("%s: ", sc->sc_dev.dv_xname);
2323 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2324 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2325 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2326 printf("\n");
2327
2328 #undef ADD
2329
2330 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2331 }
2332
2333 /*
2334 * wm_tbi_mediastatus: [ifmedia interface function]
2335 *
2336 * Get the current interface media status on a 1000BASE-X device.
2337 */
2338 void
2339 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2340 {
2341 struct wm_softc *sc = ifp->if_softc;
2342
2343 ifmr->ifm_status = IFM_AVALID;
2344 ifmr->ifm_active = IFM_ETHER;
2345
2346 if (sc->sc_tbi_linkup == 0) {
2347 ifmr->ifm_active |= IFM_NONE;
2348 return;
2349 }
2350
2351 ifmr->ifm_status |= IFM_ACTIVE;
2352 ifmr->ifm_active |= IFM_1000_SX;
2353 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2354 ifmr->ifm_active |= IFM_FDX;
2355 }
2356
2357 /*
2358 * wm_tbi_mediachange: [ifmedia interface function]
2359 *
2360 * Set hardware to newly-selected media on a 1000BASE-X device.
2361 */
2362 int
2363 wm_tbi_mediachange(struct ifnet *ifp)
2364 {
2365 struct wm_softc *sc = ifp->if_softc;
2366 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2367 uint32_t status;
2368 int i;
2369
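	/*
	 * Build the transmit configuration word: the ifmedia entry
	 * carries the 802.3z ability bits to advertise, to which
	 * we add pause (flow control) ability as configured and
	 * enable autonegotiation.
	 */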
2370 sc->sc_txcw = ife->ifm_data;
2371 if (sc->sc_ctrl & CTRL_RFCE)
2372 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2373 if (sc->sc_ctrl & CTRL_TFCE)
2374 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2375 sc->sc_txcw |= TXCW_ANE;
2376
2377 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2378 delay(10000);
2379
2380 sc->sc_tbi_anstate = 0;
2381
2382 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2383 /* Have signal; wait for the link to come up. */
2384 for (i = 0; i < 50; i++) {
2385 delay(10000);
2386 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2387 break;
2388 }
2389
2390 status = CSR_READ(sc, WMREG_STATUS);
2391 if (status & STATUS_LU) {
2392 /* Link is up. */
2393 DPRINTF(WM_DEBUG_LINK,
2394 ("%s: LINK: set media -> link up %s\n",
2395 sc->sc_dev.dv_xname,
2396 (status & STATUS_FD) ? "FDX" : "HDX"));
2397 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2398 if (status & STATUS_FD)
2399 sc->sc_tctl |=
2400 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2401 else
2402 sc->sc_tctl |=
2403 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2404 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2405 sc->sc_tbi_linkup = 1;
2406 } else {
2407 /* Link is down. */
2408 DPRINTF(WM_DEBUG_LINK,
2409 ("%s: LINK: set media -> link down\n",
2410 sc->sc_dev.dv_xname));
2411 sc->sc_tbi_linkup = 0;
2412 }
2413 } else {
2414 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2415 sc->sc_dev.dv_xname));
2416 sc->sc_tbi_linkup = 0;
2417 }
2418
2419 wm_tbi_set_linkled(sc);
2420
2421 return (0);
2422 }
2423
2424 /*
2425 * wm_tbi_set_linkled:
2426 *
2427 * Update the link LED on 1000BASE-X devices.
2428 */
2429 void
2430 wm_tbi_set_linkled(struct wm_softc *sc)
2431 {
2432
2433 if (sc->sc_tbi_linkup)
2434 sc->sc_ctrl |= CTRL_SWDPIN(0);
2435 else
2436 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2437
2438 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2439 }
2440
2441 /*
2442 * wm_tbi_check_link:
2443 *
2444 * Check the link on 1000BASE-X devices.
2445 */
2446 void
2447 wm_tbi_check_link(struct wm_softc *sc)
2448 {
2449 uint32_t rxcw, ctrl, status;
2450
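	/*
	 * sc_tbi_anstate counts down while autonegotiation settles;
	 * until it runs out we just decrement it and defer judging
	 * the link state.
	 */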
2451 if (sc->sc_tbi_anstate == 0)
2452 return;
2453 else if (sc->sc_tbi_anstate > 1) {
2454 DPRINTF(WM_DEBUG_LINK,
2455 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2456 sc->sc_tbi_anstate));
2457 sc->sc_tbi_anstate--;
2458 return;
2459 }
2460
2461 sc->sc_tbi_anstate = 0;
2462
2463 rxcw = CSR_READ(sc, WMREG_RXCW);
2464 ctrl = CSR_READ(sc, WMREG_CTRL);
2465 status = CSR_READ(sc, WMREG_STATUS);
2466
2467 if ((status & STATUS_LU) == 0) {
2468 DPRINTF(WM_DEBUG_LINK,
2469 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2470 sc->sc_tbi_linkup = 0;
2471 } else {
2472 DPRINTF(WM_DEBUG_LINK,
2473 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2474 (status & STATUS_FD) ? "FDX" : "HDX"));
2475 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2476 if (status & STATUS_FD)
2477 sc->sc_tctl |=
2478 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2479 else
2480 sc->sc_tctl |=
2481 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2482 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2483 sc->sc_tbi_linkup = 1;
2484 }
2485
2486 wm_tbi_set_linkled(sc);
2487 }
2488
2489 /*
2490 * wm_gmii_reset:
2491 *
2492 * Reset the PHY.
2493 */
2494 void
2495 wm_gmii_reset(struct wm_softc *sc)
2496 {
2497 uint32_t reg;
2498
2499 if (sc->sc_type >= WM_T_CORDOVA) {
2500 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2501 delay(20000);
2502
2503 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2504 delay(20000);
2505 } else {
2506 /* The PHY reset pin is active-low. */
2507 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2508 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2509 CTRL_EXT_SWDPIN(4));
2510 reg |= CTRL_EXT_SWDPIO(4);
2511
2512 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2513 delay(10);
2514
2515 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2516 delay(10);
2517
2518 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2519 delay(10);
2520 #if 0
2521 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2522 #endif
2523 }
2524 }
2525
2526 /*
2527 * wm_gmii_mediainit:
2528 *
2529 * Initialize media for use on 1000BASE-T devices.
2530 */
2531 void
2532 wm_gmii_mediainit(struct wm_softc *sc)
2533 {
2534 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2535
2536 /* We have MII. */
2537 sc->sc_flags |= WM_F_HAS_MII;
2538
2539 sc->sc_tipg = TIPG_1000T_DFLT;
2540
2541 /*
2542 * Let the chip set speed/duplex on its own based on
2543 * signals from the PHY.
2544 */
2545 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2546 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2547
2548 /* Initialize our media structures and probe the GMII. */
2549 sc->sc_mii.mii_ifp = ifp;
2550
2551 if (sc->sc_type >= WM_T_CORDOVA) {
2552 sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
2553 sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
2554 } else {
2555 sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
2556 sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
2557 }
2558 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2559
2560 wm_gmii_reset(sc);
2561
2562 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2563 wm_gmii_mediastatus);
2564
2565 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2566 MII_OFFSET_ANY, 0);
2567 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2568 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2569 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2570 } else
2571 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2572 }
2573
2574 /*
2575 * wm_gmii_mediastatus: [ifmedia interface function]
2576 *
2577 * Get the current interface media status on a 1000BASE-T device.
2578 */
2579 void
2580 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2581 {
2582 struct wm_softc *sc = ifp->if_softc;
2583
2584 mii_pollstat(&sc->sc_mii);
2585 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2586 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2587 }
2588
2589 /*
2590 * wm_gmii_mediachange: [ifmedia interface function]
2591 *
2592 * Set hardware to newly-selected media on a 1000BASE-T device.
2593 */
2594 int
2595 wm_gmii_mediachange(struct ifnet *ifp)
2596 {
2597 struct wm_softc *sc = ifp->if_softc;
2598
2599 if (ifp->if_flags & IFF_UP)
2600 mii_mediachg(&sc->sc_mii);
2601 return (0);
2602 }
2603
2604 #define MDI_IO CTRL_SWDPIN(2)
2605 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2606 #define MDI_CLK CTRL_SWDPIN(3)
2607
2608 static void
2609 livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2610 {
2611 uint32_t i, v;
2612
2613 v = CSR_READ(sc, WMREG_CTRL);
2614 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2615 v |= MDI_DIR | CTRL_SWDPIO(3);
2616
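	/* Shift the bits out MSB-first, one per MDI_CLK pulse. */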
2617 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2618 if (data & i)
2619 v |= MDI_IO;
2620 else
2621 v &= ~MDI_IO;
2622 CSR_WRITE(sc, WMREG_CTRL, v);
2623 delay(10);
2624 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2625 delay(10);
2626 CSR_WRITE(sc, WMREG_CTRL, v);
2627 delay(10);
2628 }
2629 }
2630
2631 static uint32_t
2632 livengood_mii_recvbits(struct wm_softc *sc)
2633 {
2634 uint32_t v, i, data = 0;
2635
2636 v = CSR_READ(sc, WMREG_CTRL);
2637 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2638 v |= CTRL_SWDPIO(3);
2639
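	/*
	 * One idle clock for the bus turnaround, then 16 data bits
	 * are shifted in MSB-first, sampled while MDI_CLK is high.
	 */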
2640 CSR_WRITE(sc, WMREG_CTRL, v);
2641 delay(10);
2642 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2643 delay(10);
2644 CSR_WRITE(sc, WMREG_CTRL, v);
2645 delay(10);
2646
2647 for (i = 0; i < 16; i++) {
2648 data <<= 1;
2649 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2650 delay(10);
2651 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2652 data |= 1;
2653 CSR_WRITE(sc, WMREG_CTRL, v);
2654 delay(10);
2655 }
2656
2657 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2658 delay(10);
2659 CSR_WRITE(sc, WMREG_CTRL, v);
2660 delay(10);
2661
2662 return (data);
2663 }
2664
2665 #undef MDI_IO
2666 #undef MDI_DIR
2667 #undef MDI_CLK
2668
2669 /*
2670 * wm_gmii_livengood_readreg: [mii interface function]
2671 *
2672 * Read a PHY register on the GMII (Livengood version).
2673 */
2674 int
2675 wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
2676 {
2677 struct wm_softc *sc = (void *) self;
2678 int rv;
2679
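	/*
	 * Frame layout: a 32-bit preamble of all 1s, then the
	 * 14-bit read command (start, opcode, PHY and register
	 * address); the PHY answers with 16 data bits.
	 */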
2680 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2681 livengood_mii_sendbits(sc, reg | (phy << 5) |
2682 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2683 rv = livengood_mii_recvbits(sc) & 0xffff;
2684
2685 DPRINTF(WM_DEBUG_GMII,
2686 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2687 sc->sc_dev.dv_xname, phy, reg, rv));
2688
2689 return (rv);
2690 }
2691
2692 /*
2693 * wm_gmii_livengood_writereg: [mii interface function]
2694 *
2695 * Write a PHY register on the GMII (Livengood version).
2696 */
2697 void
2698 wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
2699 {
2700 struct wm_softc *sc = (void *) self;
2701
2702 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2703 livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2704 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2705 (MII_COMMAND_START << 30), 32);
2706 }
2707
2708 /*
2709 * wm_gmii_cordova_readreg: [mii interface function]
2710 *
2711 * Read a PHY register on the GMII.
2712 */
2713 int
2714 wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
2715 {
2716 struct wm_softc *sc = (void *) self;
2717 uint32_t mdic;
2718 int i, rv;
2719
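	/*
	 * The Cordova has a real MDI control register: issue the
	 * read, then poll up to ~1ms for MDIC_READY, with MDIC_E
	 * flagging an error from the PHY.
	 */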
2720 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2721 MDIC_REGADD(reg));
2722
2723 for (i = 0; i < 100; i++) {
2724 mdic = CSR_READ(sc, WMREG_MDIC);
2725 if (mdic & MDIC_READY)
2726 break;
2727 delay(10);
2728 }
2729
2730 if ((mdic & MDIC_READY) == 0) {
2731 printf("%s: MDIC read timed out: phy %d reg %d\n",
2732 sc->sc_dev.dv_xname, phy, reg);
2733 rv = 0;
2734 } else if (mdic & MDIC_E) {
2735 #if 0 /* This is normal if no PHY is present. */
2736 printf("%s: MDIC read error: phy %d reg %d\n",
2737 sc->sc_dev.dv_xname, phy, reg);
2738 #endif
2739 rv = 0;
2740 } else {
2741 rv = MDIC_DATA(mdic);
2742 if (rv == 0xffff)
2743 rv = 0;
2744 }
2745
2746 return (rv);
2747 }
2748
2749 /*
2750 * wm_gmii_cordova_writereg: [mii interface function]
2751 *
2752 * Write a PHY register on the GMII.
2753 */
2754 void
2755 wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
2756 {
2757 struct wm_softc *sc = (void *) self;
2758 uint32_t mdic;
2759 int i;
2760
2761 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2762 MDIC_REGADD(reg) | MDIC_DATA(val));
2763
2764 for (i = 0; i < 100; i++) {
2765 mdic = CSR_READ(sc, WMREG_MDIC);
2766 if (mdic & MDIC_READY)
2767 break;
2768 delay(10);
2769 }
2770
2771 if ((mdic & MDIC_READY) == 0)
2772 printf("%s: MDIC write timed out: phy %d reg %d\n",
2773 sc->sc_dev.dv_xname, phy, reg);
2774 else if (mdic & MDIC_E)
2775 printf("%s: MDIC write error: phy %d reg %d\n",
2776 sc->sc_dev.dv_xname, phy, reg);
2777 }
2778
2779 /*
2780 * wm_gmii_statchg: [mii interface function]
2781 *
2782 * Callback from MII layer when media changes.
2783 */
2784 void
2785 wm_gmii_statchg(struct device *self)
2786 {
2787 struct wm_softc *sc = (void *) self;
2788
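	/*
	 * All we need to do here is reprogram the transmit
	 * collision distance for the newly-resolved duplex mode.
	 */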
2789 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2790
2791 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2792 DPRINTF(WM_DEBUG_LINK,
2793 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2794 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2795 } else {
2796 DPRINTF(WM_DEBUG_LINK,
2797 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2798 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2799 }
2800
2801 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2802 }
2803