/*	$NetBSD: if_wm.c,v 1.11 2002/07/09 19:47:46 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
40 *
41 * TODO (in order of importance):
42 *
43 * - Fix hw VLAN assist.
44 *
45 * - Make GMII work on the i82543.
46 *
47 * - Fix out-bound IP header checksums.
48 *
49 * - Fix UDP checksums.
50 *
51 * - Jumbo frames -- requires changes to network stack due to
52 * lame buffer length handling on chip.
53 *
54 * ...and, of course, performance tuning.
55 */
56
57 #include "bpfilter.h"
58
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/callout.h>
62 #include <sys/mbuf.h>
63 #include <sys/malloc.h>
64 #include <sys/kernel.h>
65 #include <sys/socket.h>
66 #include <sys/ioctl.h>
67 #include <sys/errno.h>
68 #include <sys/device.h>
69 #include <sys/queue.h>
70
71 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
72
73 #include <net/if.h>
74 #include <net/if_dl.h>
75 #include <net/if_media.h>
76 #include <net/if_ether.h>
77
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #endif
81
82 #include <netinet/in.h> /* XXX for struct ip */
83 #include <netinet/in_systm.h> /* XXX for struct ip */
84 #include <netinet/ip.h> /* XXX for struct ip */
85
86 #include <machine/bus.h>
87 #include <machine/intr.h>
88 #include <machine/endian.h>
89
90 #include <dev/mii/mii.h>
91 #include <dev/mii/miivar.h>
92 #include <dev/mii/mii_bitbang.h>
93
94 #include <dev/pci/pcireg.h>
95 #include <dev/pci/pcivar.h>
96 #include <dev/pci/pcidevs.h>
97
98 #include <dev/pci/if_wmreg.h>
99
100 #ifdef WM_DEBUG
101 #define WM_DEBUG_LINK 0x01
102 #define WM_DEBUG_TX 0x02
103 #define WM_DEBUG_RX 0x04
104 #define WM_DEBUG_GMII 0x08
105 int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
106
107 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
108 #else
109 #define DPRINTF(x, y) /* nothing */
110 #endif /* WM_DEBUG */
111
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
119 #define WM_NTXSEGS 16
120 #define WM_IFQUEUELEN 256
121 #define WM_TXQUEUELEN 64
122 #define WM_TXQUEUELEN_MASK (WM_TXQUEUELEN - 1)
123 #define WM_TXQUEUE_GC (WM_TXQUEUELEN / 8)
124 #define WM_NTXDESC 256
125 #define WM_NTXDESC_MASK (WM_NTXDESC - 1)
126 #define WM_NEXTTX(x) (((x) + 1) & WM_NTXDESC_MASK)
127 #define WM_NEXTTXS(x) (((x) + 1) & WM_TXQUEUELEN_MASK)
128
129 /*
130 * Receive descriptor list size. We have one Rx buffer for normal
131 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
132 * packet. We allocate 256 receive descriptors, each with a 2k
133 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
134 */
135 #define WM_NRXDESC 256
136 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
137 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
138 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
139
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.  Offsets within the clump are
	 * computed by WM_CDTXOFF()/WM_CDRXOFF() below.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};
156
157 #define WM_CDOFF(x) offsetof(struct wm_control_data, x)
158 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
159 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
160
161 /*
162 * Software state for transmit jobs.
163 */
/* One wm_txsoft tracks one queued Tx packet: its mbuf chain, the DMA
   map it is loaded into, and the span of hardware descriptors it uses. */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
171
172 /*
173 * Software state for receive buffers. Each descriptor gets a
174 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
175 * more than one buffer, we chain them together.
176 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
181
182 /*
183 * Software state per device.
184 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* Cached context-descriptor contents; a new context descriptor
	   is queued only when these change (see wm_tx_cksum()). */
	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* NOTE(review): presumably nonzero while
					   dropping the rest of an Rx packet —
					   confirm in wm_rxintr() */
	int sc_rxlen;			/* bytes accumulated in the in-progress
					   Rx chain (reset by WM_RXCHAIN_RESET) */
	struct mbuf *sc_rxhead;		/* head of in-progress Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* tail of in-progress Rx mbuf chain */
	struct mbuf **sc_rxtailp;	/* link point for next Rx mbuf
					   (see WM_RXCHAIN_LINK()) */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};
279
280 #define WM_RXCHAIN_RESET(sc) \
281 do { \
282 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
283 *(sc)->sc_rxtailp = NULL; \
284 (sc)->sc_rxlen = 0; \
285 } while (/*CONSTCOND*/0)
286
287 #define WM_RXCHAIN_LINK(sc, m) \
288 do { \
289 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
290 (sc)->sc_rxtailp = &(m)->m_next; \
291 } while (/*CONSTCOND*/0)
292
293 /* sc_type */
294 #define WM_T_82542_2_0 0 /* i82542 2.0 (really old) */
295 #define WM_T_82542_2_1 1 /* i82542 2.1+ (old) */
296 #define WM_T_82543 2 /* i82543 */
297 #define WM_T_82544 3 /* i82544 */
298 #define WM_T_82540 4 /* i82540 */
299 #define WM_T_82545 5 /* i82545 */
300 #define WM_T_82546 6 /* i82546 */
301
302 /* sc_flags */
303 #define WM_F_HAS_MII 0x01 /* has MII */
304
305 #ifdef WM_EVENT_COUNTERS
306 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
307 #else
308 #define WM_EVCNT_INCR(ev) /* nothing */
309 #endif
310
311 #define CSR_READ(sc, reg) \
312 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
313 #define CSR_WRITE(sc, reg, val) \
314 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
315
316 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
317 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
318
/*
 * WM_CDTXSYNC(sc, x, n, ops):
 *
 *	bus_dmamap_sync() `n' Tx descriptors starting at ring index `x'.
 *	Because the descriptors live in one contiguous DMA clump but the
 *	ring wraps, a range crossing WM_NTXDESC is synced in two pieces:
 *	first from `x' to the end of the ring, then the remainder from
 *	index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
339
340 #define WM_CDRXSYNC(sc, x, ops) \
341 do { \
342 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
343 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
344 } while (/*CONSTCOND*/0)
345
/*
 * WM_INIT_RXDESC(sc, x):
 *
 *	(Re)initialize Rx descriptor `x': point it at its mbuf's cluster
 *	(offset by 2 bytes for payload alignment), clear the writeback
 *	fields, sync the descriptor to the device, and advance the RDT
 *	register so the chip may fill it.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
383
384 void wm_start(struct ifnet *);
385 void wm_watchdog(struct ifnet *);
386 int wm_ioctl(struct ifnet *, u_long, caddr_t);
387 int wm_init(struct ifnet *);
388 void wm_stop(struct ifnet *, int);
389
390 void wm_shutdown(void *);
391
392 void wm_reset(struct wm_softc *);
393 void wm_rxdrain(struct wm_softc *);
394 int wm_add_rxbuf(struct wm_softc *, int);
395 void wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
396 void wm_tick(void *);
397
398 void wm_set_filter(struct wm_softc *);
399
400 int wm_intr(void *);
401 void wm_txintr(struct wm_softc *);
402 void wm_rxintr(struct wm_softc *);
403 void wm_linkintr(struct wm_softc *, uint32_t);
404
405 void wm_tbi_mediainit(struct wm_softc *);
406 int wm_tbi_mediachange(struct ifnet *);
407 void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
408
409 void wm_tbi_set_linkled(struct wm_softc *);
410 void wm_tbi_check_link(struct wm_softc *);
411
412 void wm_gmii_reset(struct wm_softc *);
413
414 int wm_gmii_i82543_readreg(struct device *, int, int);
415 void wm_gmii_i82543_writereg(struct device *, int, int, int);
416
417 int wm_gmii_i82544_readreg(struct device *, int, int);
418 void wm_gmii_i82544_writereg(struct device *, int, int, int);
419
420 void wm_gmii_statchg(struct device *);
421
422 void wm_gmii_mediainit(struct wm_softc *);
423 int wm_gmii_mediachange(struct ifnet *);
424 void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
425
426 int wm_match(struct device *, struct cfdata *, void *);
427 void wm_attach(struct device *, struct device *, void *);
428
429 int wm_copy_small = 0;
430
431 struct cfattach wm_ca = {
432 sizeof(struct wm_softc), wm_match, wm_attach,
433 };
434
435 /*
436 * Devices supported by this driver.
437 */
/*
 * Table of supported devices, searched linearly by wm_lookup().
 * wmp_type selects the WM_T_* chip type; wmp_flags records whether the
 * part is a 1000BASE-X (fiber/TBI) or 1000BASE-T (copper) product.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	/* Terminator entry: wm_lookup() stops at wmp_name == NULL. */
	{ 0,			0,
	  NULL,
	  0,			0 },
};
479
480 #ifdef WM_EVENT_COUNTERS
481 #if WM_NTXSEGS != 16
482 #error Update wm_txseg_evcnt_names
483 #endif
484 static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
485 "txseg1",
486 "txseg2",
487 "txseg3",
488 "txseg4",
489 "txseg5",
490 "txseg6",
491 "txseg7",
492 "txseg8",
493 "txseg9",
494 "txseg10",
495 "txseg11",
496 "txseg12",
497 "txseg13",
498 "txseg14",
499 "txseg15",
500 "txseg16",
501 };
502 #endif /* WM_EVENT_COUNTERS */
503
504 static const struct wm_product *
505 wm_lookup(const struct pci_attach_args *pa)
506 {
507 const struct wm_product *wmp;
508
509 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
510 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
511 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
512 return (wmp);
513 }
514 return (NULL);
515 }
516
517 int
518 wm_match(struct device *parent, struct cfdata *cf, void *aux)
519 {
520 struct pci_attach_args *pa = aux;
521
522 if (wm_lookup(pa) != NULL)
523 return (1);
524
525 return (0);
526 }
527
/*
 * wm_attach:	[autoconfiguration attach function]
 *
 *	Attach a wm interface:
 *
 *	  - look up the product entry and determine the chip type
 *	    (downgrading to WM_T_82542_2_0 for i82542 rev < 3);
 *	  - map the memory-mapped registers, enable bus mastering,
 *	    wake the chip from D1/D2 power states (punting on D3);
 *	  - map and establish the interrupt;
 *	  - allocate the descriptor-ring clump and create/load all
 *	    control-data, Tx and Rx DMA maps;
 *	  - reset the chip, read the MAC address and configuration
 *	    words from the EEPROM, and build the prototype CTRL value;
 *	  - select TBI vs. GMII media handling from the STATUS
 *	    register's TBIMODE bit, fill in the ifnet, and attach the
 *	    interface plus event counters and the shutdown hook.
 *
 *	On failure after DMA resources exist, the fail_* labels below
 *	unwind the allocations in reverse order and fall through.
 *
 *	NOTE(review): the early `return's before the DMA allocations
 *	(bad revision, map failure, D3, interrupt failure) leave the
 *	register mapping (and, after it is established, the interrupt)
 *	in place; there is no detach path in this revision.
 */
void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		/* wm_match() already accepted this device; a miss here
		   can only be a programming error. */
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		/* i82542: revisions below 2 are unsupported; revision 2
		   is the "2.0" stepping with extra restrictions. */
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	/* The EEPROM stores the MAC address as three little-endian words. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	/* Software-defined pin direction/state come from the SWDPIN word
	   on i82544+, from CFG1 on older chips. */
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	/* (Redundant: ifp was already initialized to this above.) */
	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
919
920 /*
921 * wm_shutdown:
922 *
923 * Make sure the interface is stopped at reboot time.
924 */
925 void
926 wm_shutdown(void *arg)
927 {
928 struct wm_softc *sc = arg;
929
930 wm_stop(&sc->sc_ethercom.ec_if, 1);
931 }
932
933 /*
934 * wm_tx_cksum:
935 *
936 * Set up TCP/IP checksumming parameters for the
937 * specified packet.
938 */
939 static int
940 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
941 uint32_t *fieldsp)
942 {
943 struct mbuf *m0 = txs->txs_mbuf;
944 struct livengood_tcpip_ctxdesc *t;
945 uint32_t fields = 0, ipcs, tucs;
946 struct ip *ip;
947 int offset, iphl;
948
949 /*
950 * XXX It would be nice if the mbuf pkthdr had offset
951 * fields for the protocol headers.
952 */
953
954 /* XXX Assumes normal Ethernet encap. */
955 offset = ETHER_HDR_LEN;
956
957 /* XXX */
958 if (m0->m_len < (offset + sizeof(struct ip))) {
959 printf("%s: wm_tx_cksum: need to m_pullup, "
960 "packet dropped\n", sc->sc_dev.dv_xname);
961 return (EINVAL);
962 }
963
964 ip = (struct ip *) (mtod(m0, caddr_t) + offset);
965 iphl = ip->ip_hl << 2;
966
967 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
968 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
969 fields |= htole32(WTX_IXSM);
970 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
971 WTX_TCPIP_IPCSO(offsetof(struct ip, ip_sum)) |
972 WTX_TCPIP_IPCSE(offset + iphl - 1));
973 } else
974 ipcs = 0;
975
976 offset += iphl;
977
978 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
979 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
980 fields |= htole32(WTX_TXSM);
981 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
982 WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
983 WTX_TCPIP_TUCSE(0) /* rest of packet */);
984 } else
985 tucs = 0;
986
987 if (sc->sc_txctx_ipcs == ipcs &&
988 sc->sc_txctx_tucs == tucs) {
989 /* Cached context is fine. */
990 WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
991 } else {
992 /* Fill in the context descriptor. */
993 #ifdef WM_EVENT_COUNTERS
994 if (sc->sc_txctx_ipcs == 0xffffffff &&
995 sc->sc_txctx_tucs == 0xffffffff)
996 WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
997 else
998 WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
999 #endif
1000 t = (struct livengood_tcpip_ctxdesc *)
1001 &sc->sc_txdescs[sc->sc_txnext];
1002 t->tcpip_ipcs = ipcs;
1003 t->tcpip_tucs = tucs;
1004 t->tcpip_cmdlen =
1005 htole32(WTX_CMD_DEXT | WTX_DTYP_C);
1006 t->tcpip_seg = 0;
1007 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1008
1009 sc->sc_txctx_ipcs = ipcs;
1010 sc->sc_txctx_tucs = tucs;
1011
1012 sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
1013 txs->txs_ndesc++;
1014 }
1015
1016 *cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
1017 *fieldsp = fields;
1018
1019 return (0);
1020 }
1021
1022 /*
1023 * wm_start: [ifnet interface function]
1024 *
1025 * Start packet transmission on the interface.
1026 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	/* Do nothing unless we are up and output is not already blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors, so we can
	 * tell at the end whether we queued anything (and thus whether
	 * to arm the watchdog).
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/*
		 * Grab a packet off the queue.  POLL (not DEQUEUE) so the
		 * packet stays queued if we have to bail out below.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/*
		 * Get a work queue entry.  When we run low, reap
		 * completed jobs before giving up.
		 */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/* All checks passed; actually take the packet now. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.  wm_tx_cksum() may consume one extra
		 * descriptor (the context descriptor) and bump
		 * txs_ndesc/sc_txnext accordingly.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Request interrupt-delay (IDE) on the data descriptors. */
		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor -- one per DMA
		 * segment of the packet.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Give the packet to the chip.  Writing TDT (the tail
		 * pointer) hands every descriptor before `nexttx' over
		 * to the hardware.
		 */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1272
1273 /*
1274 * wm_watchdog: [ifnet interface function]
1275 *
1276 * Watchdog timer handler.
1277 */
1278 void
1279 wm_watchdog(struct ifnet *ifp)
1280 {
1281 struct wm_softc *sc = ifp->if_softc;
1282
1283 /*
1284 * Since we're using delayed interrupts, sweep up
1285 * before we report an error.
1286 */
1287 wm_txintr(sc);
1288
1289 if (sc->sc_txfree != WM_NTXDESC) {
1290 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1291 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1292 sc->sc_txnext);
1293 ifp->if_oerrors++;
1294
1295 /* Reset the interface. */
1296 (void) wm_init(ifp);
1297 }
1298
1299 /* Try to get more packets going. */
1300 wm_start(ifp);
1301 }
1302
1303 /*
1304 * wm_ioctl: [ifnet interface function]
1305 *
1306 * Handle control requests from the operator.
1307 */
1308 int
1309 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1310 {
1311 struct wm_softc *sc = ifp->if_softc;
1312 struct ifreq *ifr = (struct ifreq *) data;
1313 int s, error;
1314
1315 s = splnet();
1316
1317 switch (cmd) {
1318 case SIOCSIFMEDIA:
1319 case SIOCGIFMEDIA:
1320 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1321 break;
1322
1323 default:
1324 error = ether_ioctl(ifp, cmd, data);
1325 if (error == ENETRESET) {
1326 /*
1327 * Multicast list has changed; set the hardware filter
1328 * accordingly.
1329 */
1330 wm_set_filter(sc);
1331 error = 0;
1332 }
1333 break;
1334 }
1335
1336 /* Try to get more packets going. */
1337 wm_start(ifp);
1338
1339 splx(s);
1340 return (error);
1341 }
1342
1343 /*
1344 * wm_intr:
1345 *
1346 * Interrupt service routine.
1347 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	/*
	 * Service causes until none of ours remain pending, or until a
	 * receive overrun forces a full reinitialization.
	 */
	for (wantinit = 0; wantinit == 0;) {
		/*
		 * NOTE(review): on this chip family reading ICR also
		 * acknowledges the reported causes -- confirm against
		 * the i8254x manual.
		 */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/*
		 * Rx and Tx are swept unconditionally (not only when
		 * their cause bits are set), since interrupts are
		 * delayed/coalesced by the TIDV/RDTR timers.
		 */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TDXW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		/* Link status changes are handled separately. */
		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receiver overrun: reinit to recover. */
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
1405
1406 /*
1407 * wm_txintr:
1408 *
1409 * Helper; handle transmit interrupts.
1410 */
1411 void
1412 wm_txintr(struct wm_softc *sc)
1413 {
1414 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1415 struct wm_txsoft *txs;
1416 uint8_t status;
1417 int i;
1418
1419 ifp->if_flags &= ~IFF_OACTIVE;
1420
1421 /*
1422 * Go through the Tx list and free mbufs for those
1423 * frams which have been transmitted.
1424 */
1425 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1426 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1427 txs = &sc->sc_txsoft[i];
1428
1429 DPRINTF(WM_DEBUG_TX,
1430 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1431
1432 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1433 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1434
1435 status = le32toh(sc->sc_txdescs[
1436 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1437 if ((status & WTX_ST_DD) == 0)
1438 break;
1439
1440 DPRINTF(WM_DEBUG_TX,
1441 ("%s: TX: job %d done: descs %d..%d\n",
1442 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1443 txs->txs_lastdesc));
1444
1445 /*
1446 * XXX We should probably be using the statistics
1447 * XXX registers, but I don't know if they exist
1448 * XXX on chips before the i82544.
1449 */
1450
1451 #ifdef WM_EVENT_COUNTERS
1452 if (status & WTX_ST_TU)
1453 WM_EVCNT_INCR(&sc->sc_ev_tu);
1454 #endif /* WM_EVENT_COUNTERS */
1455
1456 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1457 ifp->if_oerrors++;
1458 if (status & WTX_ST_LC)
1459 printf("%s: late collision\n",
1460 sc->sc_dev.dv_xname);
1461 else if (status & WTX_ST_EC) {
1462 ifp->if_collisions += 16;
1463 printf("%s: excessive collisions\n",
1464 sc->sc_dev.dv_xname);
1465 }
1466 } else
1467 ifp->if_opackets++;
1468
1469 sc->sc_txfree += txs->txs_ndesc;
1470 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1471 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1472 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1473 m_freem(txs->txs_mbuf);
1474 txs->txs_mbuf = NULL;
1475 }
1476
1477 /* Update the dirty transmit buffer pointer. */
1478 sc->sc_txsdirty = i;
1479 DPRINTF(WM_DEBUG_TX,
1480 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1481
1482 /*
1483 * If there are no more pending transmissions, cancel the watchdog
1484 * timer.
1485 */
1486 if (sc->sc_txsfree == WM_TXQUEUELEN)
1487 ifp->if_timer = 0;
1488 }
1489
1490 /*
1491 * wm_rxintr:
1492 *
1493 * Helper; handle receive interrupts.
1494 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	/* Walk the ring from where we left off last time. */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		/*
		 * In discard mode (set when a replacement buffer could
		 * not be allocated mid-packet), just recycle descriptors
		 * until the end-of-packet descriptor goes by.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the chain being assembled. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 * Terminate the chain and compute the total length
		 * (this descriptor's len plus all earlier fragments).
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.  Hardware only
		 * tells us the checksum was checked (and whether it was
		 * bad); translate that into the csum_flags bits the
		 * stack expects.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
1693
1694 /*
1695 * wm_linkintr:
1696 *
1697 * Helper; handle link interrupts.
1698 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * TBI (fiber) path from here on.
	 *
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * Reprogram the collision distance for the
			 * duplex the link came up at.
			 */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
1760
1761 /*
1762 * wm_tick:
1763 *
1764 * One second timer, used to check link status, sweep up
1765 * completed transmit jobs, etc.
1766 */
1767 void
1768 wm_tick(void *arg)
1769 {
1770 struct wm_softc *sc = arg;
1771 int s;
1772
1773 s = splnet();
1774
1775 if (sc->sc_flags & WM_F_HAS_MII)
1776 mii_tick(&sc->sc_mii);
1777 else
1778 wm_tbi_check_link(sc);
1779
1780 splx(s);
1781
1782 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1783 }
1784
1785 /*
1786 * wm_reset:
1787 *
1788 * Reset the i82542 chip.
1789 */
1790 void
1791 wm_reset(struct wm_softc *sc)
1792 {
1793 int i;
1794
1795 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1796 delay(10000);
1797
1798 for (i = 0; i < 1000; i++) {
1799 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1800 return;
1801 delay(20);
1802 }
1803
1804 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1805 printf("%s: WARNING: reset failed to complete\n",
1806 sc->sc_dev.dv_xname);
1807 }
1808
1809 /*
1810 * wm_init: [ifnet interface function]
1811 *
1812 * Initialize the interface. Must be called at splnet().
1813 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Invalidate the cached checksum context so the first
	 * offloaded packet always loads a fresh context descriptor.
	 */
	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	/*
	 * Program the Tx ring base/length and head/tail registers.
	 * Pre-82543 chips use the old register layout.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second Rx ring is unused; zero it out. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_TUOFL;
	else
		reg &= ~RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers: mask everything off,
	 * then enable only the causes we handle in wm_intr().
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
2027
2028 /*
2029 * wm_rxdrain:
2030 *
2031 * Drain the receive queue.
2032 */
2033 void
2034 wm_rxdrain(struct wm_softc *sc)
2035 {
2036 struct wm_rxsoft *rxs;
2037 int i;
2038
2039 for (i = 0; i < WM_NRXDESC; i++) {
2040 rxs = &sc->sc_rxsoft[i];
2041 if (rxs->rxs_mbuf != NULL) {
2042 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2043 m_freem(rxs->rxs_mbuf);
2044 rxs->rxs_mbuf = NULL;
2045 }
2046 }
2047 }
2048
2049 /*
2050 * wm_stop: [ifnet interface function]
2051 *
2052 * Stop transmission on the interface.
2053 */
2054 void
2055 wm_stop(struct ifnet *ifp, int disable)
2056 {
2057 struct wm_softc *sc = ifp->if_softc;
2058 struct wm_txsoft *txs;
2059 int i;
2060
2061 /* Stop the one second clock. */
2062 callout_stop(&sc->sc_tick_ch);
2063
2064 if (sc->sc_flags & WM_F_HAS_MII) {
2065 /* Down the MII. */
2066 mii_down(&sc->sc_mii);
2067 }
2068
2069 /* Stop the transmit and receive processes. */
2070 CSR_WRITE(sc, WMREG_TCTL, 0);
2071 CSR_WRITE(sc, WMREG_RCTL, 0);
2072
2073 /* Release any queued transmit buffers. */
2074 for (i = 0; i < WM_TXQUEUELEN; i++) {
2075 txs = &sc->sc_txsoft[i];
2076 if (txs->txs_mbuf != NULL) {
2077 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2078 m_freem(txs->txs_mbuf);
2079 txs->txs_mbuf = NULL;
2080 }
2081 }
2082
2083 if (disable)
2084 wm_rxdrain(sc);
2085
2086 /* Mark the interface as down and cancel the watchdog timer. */
2087 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2088 ifp->if_timer = 0;
2089 }
2090
2091 /*
2092 * wm_read_eeprom:
2093 *
2094 * Read data from the serial EEPROM.
2095 */
2096 void
2097 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2098 {
2099 uint32_t reg;
2100 int i, x;
2101
2102 for (i = 0; i < wordcnt; i++) {
2103 /* Send CHIP SELECT for one clock tick. */
2104 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2105 delay(2);
2106
2107 /* Shift in the READ command. */
2108 for (x = 3; x > 0; x--) {
2109 reg = EECD_CS;
2110 if (UWIRE_OPC_READ & (1 << (x - 1)))
2111 reg |= EECD_DI;
2112 CSR_WRITE(sc, WMREG_EECD, reg);
2113 delay(2);
2114 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2115 delay(2);
2116 CSR_WRITE(sc, WMREG_EECD, reg);
2117 delay(2);
2118 }
2119
2120 /* Shift in address. */
2121 for (x = 6; x > 0; x--) {
2122 reg = EECD_CS;
2123 if ((word + i) & (1 << (x - 1)))
2124 reg |= EECD_DI;
2125 CSR_WRITE(sc, WMREG_EECD, reg);
2126 delay(2);
2127 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2128 delay(2);
2129 CSR_WRITE(sc, WMREG_EECD, reg);
2130 delay(2);
2131 }
2132
2133 /* Shift out the data. */
2134 reg = EECD_CS;
2135 data[i] = 0;
2136 for (x = 16; x > 0; x--) {
2137 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2138 delay(2);
2139 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2140 data[i] |= (1 << (x - 1));
2141 CSR_WRITE(sc, WMREG_EECD, reg);
2142 delay(2);
2143 }
2144
2145 /* Clear CHIP SELECT. */
2146 CSR_WRITE(sc, WMREG_EECD, 0);
2147 }
2148 }
2149
2150 /*
2151 * wm_add_rxbuf:
2152 *
2153 * Add a receive buffer to the indiciated descriptor.
2154 */
int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate a packet-header mbuf and attach a cluster. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		/* No cluster available; give the mbuf back. */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unmap any buffer previously installed in this slot. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/*
	 * Map the whole cluster for DMA.  NOTE(review): a load failure
	 * here panics rather than unwinding; returning an error would
	 * leave rxs_mbuf in a state the callers' recovery paths
	 * (WM_INIT_RXDESC) do not currently handle.
	 */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");	/* XXX XXX XXX */
	}

	/* Make the buffer device-visible before handing it over. */
	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}
2193
2194 /*
2195 * wm_set_ral:
2196 *
2197 * Set an entery in the receive address list.
2198 */
2199 static void
2200 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2201 {
2202 uint32_t ral_lo, ral_hi;
2203
2204 if (enaddr != NULL) {
2205 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2206 (enaddr[3] << 24);
2207 ral_hi = enaddr[4] | (enaddr[5] << 8);
2208 ral_hi |= RAL_AV;
2209 } else {
2210 ral_lo = 0;
2211 ral_hi = 0;
2212 }
2213
2214 if (sc->sc_type >= WM_T_82544) {
2215 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2216 ral_lo);
2217 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2218 ral_hi);
2219 } else {
2220 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2221 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2222 }
2223 }
2224
2225 /*
2226 * wm_mchash:
2227 *
2228 * Compute the hash of the multicast address for the 4096-bit
2229 * multicast filter.
2230 */
2231 static uint32_t
2232 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2233 {
2234 static const int lo_shift[4] = { 4, 3, 2, 0 };
2235 static const int hi_shift[4] = { 4, 5, 6, 8 };
2236 uint32_t hash;
2237
2238 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2239 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2240
2241 return (hash & 0xfff);
2242 }
2243
2244 /*
2245 * wm_set_filter:
2246 *
2247 * Set up the receive filter.
2248 */
2249 void
2250 wm_set_filter(struct wm_softc *sc)
2251 {
2252 struct ethercom *ec = &sc->sc_ethercom;
2253 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2254 struct ether_multi *enm;
2255 struct ether_multistep step;
2256 bus_addr_t mta_reg;
2257 uint32_t hash, reg, bit;
2258 int i;
2259
2260 if (sc->sc_type >= WM_T_82544)
2261 mta_reg = WMREG_CORDOVA_MTA;
2262 else
2263 mta_reg = WMREG_MTA;
2264
2265 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2266
2267 if (ifp->if_flags & IFF_BROADCAST)
2268 sc->sc_rctl |= RCTL_BAM;
2269 if (ifp->if_flags & IFF_PROMISC) {
2270 sc->sc_rctl |= RCTL_UPE;
2271 goto allmulti;
2272 }
2273
2274 /*
2275 * Set the station address in the first RAL slot, and
2276 * clear the remaining slots.
2277 */
2278 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2279 for (i = 1; i < WM_RAL_TABSIZE; i++)
2280 wm_set_ral(sc, NULL, i);
2281
2282 /* Clear out the multicast table. */
2283 for (i = 0; i < WM_MC_TABSIZE; i++)
2284 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2285
2286 ETHER_FIRST_MULTI(step, ec, enm);
2287 while (enm != NULL) {
2288 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2289 /*
2290 * We must listen to a range of multicast addresses.
2291 * For now, just accept all multicasts, rather than
2292 * trying to set only those filter bits needed to match
2293 * the range. (At this time, the only use of address
2294 * ranges is for IP multicast routing, for which the
2295 * range is big enough to require all bits set.)
2296 */
2297 goto allmulti;
2298 }
2299
2300 hash = wm_mchash(sc, enm->enm_addrlo);
2301
2302 reg = (hash >> 5) & 0x7f;
2303 bit = hash & 0x1f;
2304
2305 hash = CSR_READ(sc, mta_reg + (reg << 2));
2306 hash |= 1U << bit;
2307
2308 /* XXX Hardware bug?? */
2309 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2310 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2311 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2312 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2313 } else
2314 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2315
2316 ETHER_NEXT_MULTI(step, enm);
2317 }
2318
2319 ifp->if_flags &= ~IFF_ALLMULTI;
2320 goto setit;
2321
2322 allmulti:
2323 ifp->if_flags |= IFF_ALLMULTI;
2324 sc->sc_rctl |= RCTL_MPE;
2325
2326 setit:
2327 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2328 }
2329
2330 /*
2331 * wm_tbi_mediainit:
2332 *
2333 * Initialize media for use on 1000BASE-X devices.
2334 */
void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	/* Select the default transmit inter-packet gap for this chip. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/*
	 * Register each supported medium and announce it on the console.
	 * The entry's ifm_data carries the ANAR_X ability bits that
	 * wm_tbi_mediachange() will advertise when that medium is
	 * selected.
	 */
#define	ADD(s, m, d)							\
do {									\
	printf("%s%s", sep, s);						\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
2376
2377 /*
2378 * wm_tbi_mediastatus: [ifmedia interface function]
2379 *
2380 * Get the current interface media status on a 1000BASE-X device.
2381 */
2382 void
2383 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2384 {
2385 struct wm_softc *sc = ifp->if_softc;
2386
2387 ifmr->ifm_status = IFM_AVALID;
2388 ifmr->ifm_active = IFM_ETHER;
2389
2390 if (sc->sc_tbi_linkup == 0) {
2391 ifmr->ifm_active |= IFM_NONE;
2392 return;
2393 }
2394
2395 ifmr->ifm_status |= IFM_ACTIVE;
2396 ifmr->ifm_active |= IFM_1000_SX;
2397 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2398 ifmr->ifm_active |= IFM_FDX;
2399 }
2400
2401 /*
2402 * wm_tbi_mediachange: [ifmedia interface function]
2403 *
2404 * Set hardware to newly-selected media on a 1000BASE-X device.
2405 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Build the transmit configuration word: the selected media
	 * entry's ifm_data holds the ANAR_X ability bits to advertise
	 * (see wm_tbi_mediainit); fold in pause advertisement matching
	 * the flow-control bits currently set in CTRL, and enable
	 * autonegotiation.
	 */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWDPIN 1 is the Loss Of Signal input; low means we have signal. */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			/* Poll for link-up every 10ms, up to ~500ms. */
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/*
			 * Link is up.  Program the transmit collision
			 * distance to match the negotiated duplex.
			 */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		/* Loss-of-signal asserted; no point waiting for link. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	/* Reflect the new link state on the link LED. */
	wm_tbi_set_linkled(sc);

	return (0);
}
2467
2468 /*
2469 * wm_tbi_set_linkled:
2470 *
2471 * Update the link LED on 1000BASE-X devices.
2472 */
2473 void
2474 wm_tbi_set_linkled(struct wm_softc *sc)
2475 {
2476
2477 if (sc->sc_tbi_linkup)
2478 sc->sc_ctrl |= CTRL_SWDPIN(0);
2479 else
2480 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2481
2482 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2483 }
2484
2485 /*
2486 * wm_tbi_check_link:
2487 *
2488 * Check the link on 1000BASE-X devices.
2489 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * sc_tbi_anstate acts as a countdown that holds off link checks
	 * (0 = nothing pending; > 1 = keep waiting, tick down once per
	 * call; 1 = check now).  NOTE(review): the code that arms this
	 * counter is outside this chunk -- confirm against the interrupt
	 * handler before relying on this description.
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	/*
	 * NOTE(review): rxcw and ctrl are read but never examined below;
	 * the reads may be deliberate (register access side effects) or
	 * leftovers -- verify before removing them.
	 */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		/* Link is down. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		/*
		 * Link is up; program the transmit collision distance
		 * to match the duplex reported by STATUS.
		 */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	/* Reflect the (possibly changed) link state on the link LED. */
	wm_tbi_set_linkled(sc);
}
2532
2533 /*
2534 * wm_gmii_reset:
2535 *
2536 * Reset the PHY.
2537 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		/*
		 * These chips expose a PHY reset bit directly in CTRL:
		 * assert it, wait, then deassert and wait again.
		 */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/*
		 * The PHY reset pin is active-low, wired to software-
		 * definable pin 4 in CTRL_EXT.  Configure pin 4 as an
		 * output, then pulse it high-low-high to reset the PHY.
		 */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
2569
2570 /*
2571 * wm_gmii_mediainit:
2572 *
2573 * Initialize media for use on 1000BASE-T devices.
2574 */
2575 void
2576 wm_gmii_mediainit(struct wm_softc *sc)
2577 {
2578 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2579
2580 /* We have MII. */
2581 sc->sc_flags |= WM_F_HAS_MII;
2582
2583 sc->sc_tipg = TIPG_1000T_DFLT;
2584
2585 /*
2586 * Let the chip set speed/duplex on its own based on
2587 * signals from the PHY.
2588 */
2589 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2590 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2591
2592 /* Initialize our media structures and probe the GMII. */
2593 sc->sc_mii.mii_ifp = ifp;
2594
2595 if (sc->sc_type >= WM_T_82544) {
2596 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2597 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2598 } else {
2599 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2600 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2601 }
2602 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2603
2604 wm_gmii_reset(sc);
2605
2606 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2607 wm_gmii_mediastatus);
2608
2609 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2610 MII_OFFSET_ANY, 0);
2611 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2612 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2613 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2614 } else
2615 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2616 }
2617
2618 /*
2619 * wm_gmii_mediastatus: [ifmedia interface function]
2620 *
2621 * Get the current interface media status on a 1000BASE-T device.
2622 */
2623 void
2624 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2625 {
2626 struct wm_softc *sc = ifp->if_softc;
2627
2628 mii_pollstat(&sc->sc_mii);
2629 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2630 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2631 }
2632
2633 /*
2634 * wm_gmii_mediachange: [ifmedia interface function]
2635 *
2636 * Set hardware to newly-selected media on a 1000BASE-T device.
2637 */
2638 int
2639 wm_gmii_mediachange(struct ifnet *ifp)
2640 {
2641 struct wm_softc *sc = ifp->if_softc;
2642
2643 if (ifp->if_flags & IFF_UP)
2644 mii_mediachg(&sc->sc_mii);
2645 return (0);
2646 }
2647
/*
 * Bit-banged MDIO via software-definable pins in the CTRL register:
 * SWDPIN 2 carries the data bit (direction controlled by SWDPIO 2),
 * SWDPIN 3 the clock.  Used by the i82543 MII accessors below.
 */
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * i82543_mii_sendbits:
 *
 *	Clock out the low `nbits' bits of `data', MSB first, on the
 *	bit-banged MDIO interface.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Drive both the data and clock pins as outputs. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the data bit, then pulse the clock. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

/*
 * i82543_mii_recvbits:
 *
 *	Clock in a 16-bit value, MSB first, from the bit-banged MDIO
 *	interface (data pin left as an input, i.e. PHY -> host).
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Clock pin is an output; the data pin stays an input. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One leading clock cycle before sampling data bits. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		/* Raise the clock, then sample the data pin. */
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing clock cycle to finish the frame. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK
2712
2713 /*
2714 * wm_gmii_i82543_readreg: [mii interface function]
2715 *
2716 * Read a PHY register on the GMII (i82543 version).
2717 */
2718 int
2719 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2720 {
2721 struct wm_softc *sc = (void *) self;
2722 int rv;
2723
2724 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2725 i82543_mii_sendbits(sc, reg | (phy << 5) |
2726 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2727 rv = i82543_mii_recvbits(sc) & 0xffff;
2728
2729 DPRINTF(WM_DEBUG_GMII,
2730 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2731 sc->sc_dev.dv_xname, phy, reg, rv));
2732
2733 return (rv);
2734 }
2735
2736 /*
2737 * wm_gmii_i82543_writereg: [mii interface function]
2738 *
2739 * Write a PHY register on the GMII (i82543 version).
2740 */
2741 void
2742 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2743 {
2744 struct wm_softc *sc = (void *) self;
2745
2746 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2747 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2748 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2749 (MII_COMMAND_START << 30), 32);
2750 }
2751
2752 /*
2753 * wm_gmii_i82544_readreg: [mii interface function]
2754 *
2755 * Read a PHY register on the GMII.
2756 */
2757 int
2758 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2759 {
2760 struct wm_softc *sc = (void *) self;
2761 uint32_t mdic;
2762 int i, rv;
2763
2764 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2765 MDIC_REGADD(reg));
2766
2767 for (i = 0; i < 100; i++) {
2768 mdic = CSR_READ(sc, WMREG_MDIC);
2769 if (mdic & MDIC_READY)
2770 break;
2771 delay(10);
2772 }
2773
2774 if ((mdic & MDIC_READY) == 0) {
2775 printf("%s: MDIC read timed out: phy %d reg %d\n",
2776 sc->sc_dev.dv_xname, phy, reg);
2777 rv = 0;
2778 } else if (mdic & MDIC_E) {
2779 #if 0 /* This is normal if no PHY is present. */
2780 printf("%s: MDIC read error: phy %d reg %d\n",
2781 sc->sc_dev.dv_xname, phy, reg);
2782 #endif
2783 rv = 0;
2784 } else {
2785 rv = MDIC_DATA(mdic);
2786 if (rv == 0xffff)
2787 rv = 0;
2788 }
2789
2790 return (rv);
2791 }
2792
2793 /*
2794 * wm_gmii_i82544_writereg: [mii interface function]
2795 *
2796 * Write a PHY register on the GMII.
2797 */
2798 void
2799 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2800 {
2801 struct wm_softc *sc = (void *) self;
2802 uint32_t mdic;
2803 int i;
2804
2805 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2806 MDIC_REGADD(reg) | MDIC_DATA(val));
2807
2808 for (i = 0; i < 100; i++) {
2809 mdic = CSR_READ(sc, WMREG_MDIC);
2810 if (mdic & MDIC_READY)
2811 break;
2812 delay(10);
2813 }
2814
2815 if ((mdic & MDIC_READY) == 0)
2816 printf("%s: MDIC write timed out: phy %d reg %d\n",
2817 sc->sc_dev.dv_xname, phy, reg);
2818 else if (mdic & MDIC_E)
2819 printf("%s: MDIC write error: phy %d reg %d\n",
2820 sc->sc_dev.dv_xname, phy, reg);
2821 }
2822
2823 /*
2824 * wm_gmii_statchg: [mii interface function]
2825 *
2826 * Callback from MII layer when media changes.
2827 */
2828 void
2829 wm_gmii_statchg(struct device *self)
2830 {
2831 struct wm_softc *sc = (void *) self;
2832
2833 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2834
2835 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2836 DPRINTF(WM_DEBUG_LINK,
2837 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2838 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2839 } else {
2840 DPRINTF(WM_DEBUG_LINK,
2841 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2842 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2843 }
2844
2845 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2846 }
2847