/*	$NetBSD: if_wm.c,v 1.12 2002/07/09 21:05:03 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
40 *
41 * TODO (in order of importance):
42 *
43 * - Fix TCP/UDP checksums.
44 *
45 * - Make GMII work on the i82543.
46 *
47 * - Fix hw VLAN assist.
48 *
49 * - Jumbo frames -- requires changes to network stack due to
50 * lame buffer length handling on chip.
51 */
52
53 #include "bpfilter.h"
54
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/callout.h>
58 #include <sys/mbuf.h>
59 #include <sys/malloc.h>
60 #include <sys/kernel.h>
61 #include <sys/socket.h>
62 #include <sys/ioctl.h>
63 #include <sys/errno.h>
64 #include <sys/device.h>
65 #include <sys/queue.h>
66
67 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
68
69 #include <net/if.h>
70 #include <net/if_dl.h>
71 #include <net/if_media.h>
72 #include <net/if_ether.h>
73
74 #if NBPFILTER > 0
75 #include <net/bpf.h>
76 #endif
77
78 #include <netinet/in.h> /* XXX for struct ip */
79 #include <netinet/in_systm.h> /* XXX for struct ip */
80 #include <netinet/ip.h> /* XXX for struct ip */
81
82 #include <machine/bus.h>
83 #include <machine/intr.h>
84 #include <machine/endian.h>
85
86 #include <dev/mii/mii.h>
87 #include <dev/mii/miivar.h>
88 #include <dev/mii/mii_bitbang.h>
89
90 #include <dev/pci/pcireg.h>
91 #include <dev/pci/pcivar.h>
92 #include <dev/pci/pcidevs.h>
93
94 #include <dev/pci/if_wmreg.h>
95
96 #ifdef WM_DEBUG
97 #define WM_DEBUG_LINK 0x01
98 #define WM_DEBUG_TX 0x02
99 #define WM_DEBUG_RX 0x04
100 #define WM_DEBUG_GMII 0x08
101 int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
102
103 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
104 #else
105 #define DPRINTF(x, y) /* nothing */
106 #endif /* WM_DEBUG */
107
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
115 #define WM_NTXSEGS 16
116 #define WM_IFQUEUELEN 256
117 #define WM_TXQUEUELEN 64
118 #define WM_TXQUEUELEN_MASK (WM_TXQUEUELEN - 1)
119 #define WM_TXQUEUE_GC (WM_TXQUEUELEN / 8)
120 #define WM_NTXDESC 256
121 #define WM_NTXDESC_MASK (WM_NTXDESC - 1)
122 #define WM_NEXTTX(x) (((x) + 1) & WM_NTXDESC_MASK)
123 #define WM_NEXTTXS(x) (((x) + 1) & WM_TXQUEUELEN_MASK)
124
125 /*
126 * Receive descriptor list size. We have one Rx buffer for normal
127 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
128 * packet. We allocate 256 receive descriptors, each with a 2k
129 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
130 */
131 #define WM_NRXDESC 256
132 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
133 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
134 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
135
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.  Hardware ring of WM_NTXDESC (256)
	 * entries; the chip reads these via DMA.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.  Hardware ring of WM_NRXDESC (256)
	 * entries; the chip reads and writes these via DMA.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};
152
153 #define WM_CDOFF(x) offsetof(struct wm_control_data, x)
154 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
155 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
156
157 /*
158 * Software state for transmit jobs.
159 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used (includes
					   any checksum context descriptor) */
};
167
168 /*
169 * Software state for receive buffers. Each descriptor gets a
170 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
171 * more than one buffer, we chain them together.
172 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* mbuf cluster backing this slot */
	bus_dmamap_t rxs_dmamap;	/* our DMA map (1 seg, MCLBYTES) */
};
177
178 /*
179 * Software state per device.
180 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures (both descriptor rings, one DMA clump).
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* NOTE(review): appears to flag that
					   the current Rx packet is being
					   discarded; set/used in wm_rxintr,
					   which is not in view -- confirm */
	int sc_rxlen;			/* running length of Rx chain being
					   assembled (see WM_RXCHAIN_RESET) */
	struct mbuf *sc_rxhead;		/* first mbuf of Rx chain in progress */
	struct mbuf *sc_rxtail;		/* last mbuf of Rx chain in progress */
	struct mbuf **sc_rxtailp;	/* points at link to append next mbuf */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};
275
/*
 * WM_RXCHAIN_RESET:
 *
 *	Reset the in-progress Rx mbuf chain to empty: head pointer NULL,
 *	tail-link pointer back at the head, running length zero.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * WM_RXCHAIN_LINK:
 *
 *	Append mbuf (m) to the in-progress Rx chain and advance the
 *	tail-link pointer to (m)'s m_next field.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
288
289 /* sc_type */
290 #define WM_T_82542_2_0 0 /* i82542 2.0 (really old) */
291 #define WM_T_82542_2_1 1 /* i82542 2.1+ (old) */
292 #define WM_T_82543 2 /* i82543 */
293 #define WM_T_82544 3 /* i82544 */
294 #define WM_T_82540 4 /* i82540 */
295 #define WM_T_82545 5 /* i82545 */
296 #define WM_T_82546 6 /* i82546 */
297
298 /* sc_flags */
299 #define WM_F_HAS_MII 0x01 /* has MII */
300
301 #ifdef WM_EVENT_COUNTERS
302 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
303 #else
304 #define WM_EVCNT_INCR(ev) /* nothing */
305 #endif
306
307 #define CSR_READ(sc, reg) \
308 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
309 #define CSR_WRITE(sc, reg, val) \
310 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
311
312 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
313 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
314
/*
 * WM_CDTXSYNC:
 *
 *	bus_dmamap_sync() the range of (n) Tx descriptors starting at
 *	ring index (x).  If the range wraps past the end of the ring,
 *	the sync is split in two: end-of-ring first, then from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
335
/*
 * WM_CDRXSYNC:
 *
 *	bus_dmamap_sync() the single Rx descriptor at ring index (x).
 */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
341
/*
 * WM_INIT_RXDESC:
 *
 *	(Re)initialize Rx descriptor (x) to point at its mbuf cluster,
 *	clear the status/error/length fields, sync it for the chip, and
 *	advance the Receive Descriptor Tail register to hand it over.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
379
380 void wm_start(struct ifnet *);
381 void wm_watchdog(struct ifnet *);
382 int wm_ioctl(struct ifnet *, u_long, caddr_t);
383 int wm_init(struct ifnet *);
384 void wm_stop(struct ifnet *, int);
385
386 void wm_shutdown(void *);
387
388 void wm_reset(struct wm_softc *);
389 void wm_rxdrain(struct wm_softc *);
390 int wm_add_rxbuf(struct wm_softc *, int);
391 void wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
392 void wm_tick(void *);
393
394 void wm_set_filter(struct wm_softc *);
395
396 int wm_intr(void *);
397 void wm_txintr(struct wm_softc *);
398 void wm_rxintr(struct wm_softc *);
399 void wm_linkintr(struct wm_softc *, uint32_t);
400
401 void wm_tbi_mediainit(struct wm_softc *);
402 int wm_tbi_mediachange(struct ifnet *);
403 void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
404
405 void wm_tbi_set_linkled(struct wm_softc *);
406 void wm_tbi_check_link(struct wm_softc *);
407
408 void wm_gmii_reset(struct wm_softc *);
409
410 int wm_gmii_i82543_readreg(struct device *, int, int);
411 void wm_gmii_i82543_writereg(struct device *, int, int, int);
412
413 int wm_gmii_i82544_readreg(struct device *, int, int);
414 void wm_gmii_i82544_writereg(struct device *, int, int, int);
415
416 void wm_gmii_statchg(struct device *);
417
418 void wm_gmii_mediainit(struct wm_softc *);
419 int wm_gmii_mediachange(struct ifnet *);
420 void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
421
422 int wm_match(struct device *, struct cfdata *, void *);
423 void wm_attach(struct device *, struct device *, void *);
424
425 int wm_copy_small = 0;
426
427 struct cfattach wm_ca = {
428 sizeof(struct wm_softc), wm_match, wm_attach,
429 };
430
431 /*
432 * Devices supported by this driver.
433 */
434 const struct wm_product {
435 pci_vendor_id_t wmp_vendor;
436 pci_product_id_t wmp_product;
437 const char *wmp_name;
438 int wmp_type;
439 int wmp_flags;
440 #define WMP_F_1000X 0x01
441 #define WMP_F_1000T 0x02
442 } wm_products[] = {
443 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
444 "Intel i82542 1000BASE-X Ethernet",
445 WM_T_82542_2_1, WMP_F_1000X },
446
447 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
448 "Intel i82543GC 1000BASE-X Ethernet",
449 WM_T_82543, WMP_F_1000X },
450
451 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
452 "Intel i82543GC 1000BASE-T Ethernet",
453 WM_T_82543, WMP_F_1000T },
454
455 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
456 "Intel i82544EI 1000BASE-T Ethernet",
457 WM_T_82544, WMP_F_1000T },
458
459 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
460 "Intel i82544EI 1000BASE-X Ethernet",
461 WM_T_82544, WMP_F_1000X },
462
463 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
464 "Intel i82544GC 1000BASE-T Ethernet",
465 WM_T_82544, WMP_F_1000T },
466
467 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
468 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
469 WM_T_82544, WMP_F_1000T },
470
471 { 0, 0,
472 NULL,
473 0, 0 },
474 };
475
476 #ifdef WM_EVENT_COUNTERS
477 #if WM_NTXSEGS != 16
478 #error Update wm_txseg_evcnt_names
479 #endif
480 static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
481 "txseg1",
482 "txseg2",
483 "txseg3",
484 "txseg4",
485 "txseg5",
486 "txseg6",
487 "txseg7",
488 "txseg8",
489 "txseg9",
490 "txseg10",
491 "txseg11",
492 "txseg12",
493 "txseg13",
494 "txseg14",
495 "txseg15",
496 "txseg16",
497 };
498 #endif /* WM_EVENT_COUNTERS */
499
500 static const struct wm_product *
501 wm_lookup(const struct pci_attach_args *pa)
502 {
503 const struct wm_product *wmp;
504
505 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
506 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
507 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
508 return (wmp);
509 }
510 return (NULL);
511 }
512
513 int
514 wm_match(struct device *parent, struct cfdata *cf, void *aux)
515 {
516 struct pci_attach_args *pa = aux;
517
518 if (wm_lookup(pa) != NULL)
519 return (1);
520
521 return (0);
522 }
523
/*
 * wm_attach: [autoconfiguration attach function]
 *
 *	Bring up a wm interface: map the device registers, establish
 *	the interrupt, allocate/map/load the DMA-able control data
 *	(both descriptor rings in one clump), create per-packet Tx/Rx
 *	DMA maps, reset the chip, read the station address and
 *	configuration words from the EEPROM, then attach the ifnet and
 *	the media layer (TBI or GMII, chosen from chip type/STATUS).
 *
 *	On a resource failure partway through, falls into the fail_*
 *	labels, which unwind in reverse order of acquisition.
 */
void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	/* wm_match() already succeeded, so a miss here is a driver bug. */
	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		/*
		 * i82542 rev. < 2 is unsupported; rev. 2.0 is handled
		 * specially (e.g. MWI must stay disabled, see below).
		 */
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  Accept either a 32-bit or 64-bit memory BAR.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		/* Low two bits of PMCSR are the current power state. */
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  One physically contiguous segment, page
	 * aligned, holding both descriptor rings.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps (jumbo-capable, up to
	 * WM_NTXSEGS segments per packet).
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps (one cluster each).
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM (three 16-bit
	 * little-endian words -> six bytes).
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		/* i82544+: software-definable pins come from SWDPIN word. */
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0	/* CTRL_EXT programming currently disabled. */
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.  Warn if the mode contradicts
	 * the product table's copper/fiber flag.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	/* (Re-assignment; ifp was already initialized above.) */
	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
915
916 /*
917 * wm_shutdown:
918 *
919 * Make sure the interface is stopped at reboot time.
920 */
921 void
922 wm_shutdown(void *arg)
923 {
924 struct wm_softc *sc = arg;
925
926 wm_stop(&sc->sc_ethercom.ec_if, 1);
927 }
928
/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.  If the required checksum context differs
 *	from the cached one, a context descriptor is written into the
 *	Tx ring (consuming one descriptor: sc_txnext is advanced and
 *	txs->txs_ndesc incremented).  On success, returns 0 and fills
 *	*cmdp/*fieldsp with the per-data-descriptor command and fields
 *	bits.  Returns EINVAL if the IP header is not contiguous in
 *	the first mbuf (caller drops the packet).
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* XXX Assumes normal Ethernet encap. */
	offset = ETHER_HDR_LEN;

	/* XXX Require the IP header to sit entirely in the first mbuf. */
	if (m0->m_len < (offset + sizeof(struct ip))) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;		/* IP header length in bytes */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		/* IP checksum: start/offset/end of the IP header. */
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else
		ipcs = 0;

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		/* TCP/UDP checksum: csum_data is the offset of the field. */
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else
		tucs = 0;

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		/*
		 * 0xffffffff/0xffffffff is the "no context yet" value;
		 * presumably set at init time elsewhere -- not in view.
		 */
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	/*
	 * NOTE(review): "WTC_DTYP_D" looks like a typo for WTX_DTYP_D
	 * (cf. WTX_DTYP_C above) -- confirm against if_wmreg.h.  Also
	 * note *cmdp is not htole32()'d here while `fields` is; verify
	 * the caller's byte-order handling.
	 */
	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
1017
1018 /*
1019 * wm_start: [ifnet interface function]
1020 *
1021 * Start packet transmission on the interface.
1022 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	/* Nothing to do unless we're running and not already stalled. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors, so we can
	 * tell at the end whether we queued anything (and should arm
	 * the watchdog).
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/*
		 * Grab a packet off the queue.  POLL (not DEQUEUE) so we
		 * can leave it on the queue if we run out of resources.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/*
		 * Get a work queue entry.  Reclaim completed jobs first
		 * when we're running low.
		 */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Set IDE on every descriptor of the packet. */
		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor -- one per DMA
		 * segment of the packet.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip by advancing the tail. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1268
1269 /*
1270 * wm_watchdog: [ifnet interface function]
1271 *
1272 * Watchdog timer handler.
1273 */
1274 void
1275 wm_watchdog(struct ifnet *ifp)
1276 {
1277 struct wm_softc *sc = ifp->if_softc;
1278
1279 /*
1280 * Since we're using delayed interrupts, sweep up
1281 * before we report an error.
1282 */
1283 wm_txintr(sc);
1284
1285 if (sc->sc_txfree != WM_NTXDESC) {
1286 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1287 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1288 sc->sc_txnext);
1289 ifp->if_oerrors++;
1290
1291 /* Reset the interface. */
1292 (void) wm_init(ifp);
1293 }
1294
1295 /* Try to get more packets going. */
1296 wm_start(ifp);
1297 }
1298
1299 /*
1300 * wm_ioctl: [ifnet interface function]
1301 *
1302 * Handle control requests from the operator.
1303 */
1304 int
1305 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1306 {
1307 struct wm_softc *sc = ifp->if_softc;
1308 struct ifreq *ifr = (struct ifreq *) data;
1309 int s, error;
1310
1311 s = splnet();
1312
1313 switch (cmd) {
1314 case SIOCSIFMEDIA:
1315 case SIOCGIFMEDIA:
1316 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1317 break;
1318
1319 default:
1320 error = ether_ioctl(ifp, cmd, data);
1321 if (error == ENETRESET) {
1322 /*
1323 * Multicast list has changed; set the hardware filter
1324 * accordingly.
1325 */
1326 wm_set_filter(sc);
1327 error = 0;
1328 }
1329 break;
1330 }
1331
1332 /* Try to get more packets going. */
1333 wm_start(ifp);
1334
1335 splx(s);
1336 return (error);
1337 }
1338
1339 /*
1340 * wm_intr:
1341 *
1342 * Interrupt service routine.
1343 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	/*
	 * Service interrupt causes until none of ours remain pending,
	 * or an Rx overrun forces a full reinitialization.
	 */
	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;		/* no causes we care about */

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Sweep the Rx ring unconditionally, not just on Rx causes. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TDXW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		/* Likewise, always reap completed transmit jobs. */
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Rx overrun: reinitialize after this pass. */
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
1401
1402 /*
1403 * wm_txintr:
1404 *
1405 * Helper; handle transmit interrupts.
1406 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* Descriptors may free up below; clear the stall flag. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * NOTE(review): wtxu_bits is converted with le32toh() but
		 * assigned to a uint8_t, keeping only the low byte.  The
		 * WTX_ST_* bits presumably all live in that byte -- confirm
		 * against the descriptor layout.
		 */
		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0)
			break;		/* still owned by the chip */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		/* Count errors/collisions reported in the status byte. */
		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		/* Return the job's descriptors and release its mbuf. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}
1485
1486 /*
1487 * wm_rxintr:
1488 *
1489 * Helper; handle receive interrupts.
1490 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	/*
	 * Walk the ring from the last processed descriptor, linking
	 * buffers into a chain (sc_rxhead/sc_rxtailp) until a
	 * descriptor with EOP completes the packet.
	 */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		/*
		 * An earlier buffer-allocation failure poisoned the
		 * remainder of this packet; keep recycling descriptors
		 * until EOP clears the discard state.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			/* Recycle the old buffer into the descriptor. */
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the chain under construction. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;		/* terminate the chain */
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;		/* total packet length */

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet, from the Rx
		 * descriptor's hardware-verified checksum status bits.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
1689
1690 /*
1691 * wm_linkintr:
1692 *
1693 * Helper; handle link interrupts.
1694 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/* From here down: non-MII (TBI, see sc_tbi_* state) handling. */

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * Link is up: reprogram the collision distance
			 * to match the resolved duplex.
			 */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
1756
1757 /*
1758 * wm_tick:
1759 *
1760 * One second timer, used to check link status, sweep up
1761 * completed transmit jobs, etc.
1762 */
1763 void
1764 wm_tick(void *arg)
1765 {
1766 struct wm_softc *sc = arg;
1767 int s;
1768
1769 s = splnet();
1770
1771 if (sc->sc_flags & WM_F_HAS_MII)
1772 mii_tick(&sc->sc_mii);
1773 else
1774 wm_tbi_check_link(sc);
1775
1776 splx(s);
1777
1778 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1779 }
1780
1781 /*
1782 * wm_reset:
1783 *
1784 * Reset the i82542 chip.
1785 */
1786 void
1787 wm_reset(struct wm_softc *sc)
1788 {
1789 int i;
1790
1791 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1792 delay(10000);
1793
1794 for (i = 0; i < 1000; i++) {
1795 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1796 return;
1797 delay(20);
1798 }
1799
1800 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1801 printf("%s: WARNING: reset failed to complete\n",
1802 sc->sc_dev.dv_xname);
1803 }
1804
1805 /*
1806 * wm_init: [ifnet interface function]
1807 *
1808 * Initialize the interface. Must be called at splnet().
1809 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Invalidate the cached checksum-offload context; all-ones is
	 * the "no context loaded" sentinel (see wm_tx_cksum).
	 */
	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	/*
	 * Program the Tx ring base/length/head/tail and the transmit
	 * interrupt delay.  Pre-82543 chips use the old register layout.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second (old-layout) receive ring is unused. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	/* TCP/UDP offload also requires the IP offload bit. */
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers: mask everything, then enable
	 * only the causes we service in wm_intr().
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* Non-MII parts also need /C/ notification (see wm_linkintr). */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
2026
2027 /*
2028 * wm_rxdrain:
2029 *
2030 * Drain the receive queue.
2031 */
2032 void
2033 wm_rxdrain(struct wm_softc *sc)
2034 {
2035 struct wm_rxsoft *rxs;
2036 int i;
2037
2038 for (i = 0; i < WM_NRXDESC; i++) {
2039 rxs = &sc->sc_rxsoft[i];
2040 if (rxs->rxs_mbuf != NULL) {
2041 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2042 m_freem(rxs->rxs_mbuf);
2043 rxs->rxs_mbuf = NULL;
2044 }
2045 }
2046 }
2047
2048 /*
2049 * wm_stop: [ifnet interface function]
2050 *
2051 * Stop transmission on the interface.
2052 */
2053 void
2054 wm_stop(struct ifnet *ifp, int disable)
2055 {
2056 struct wm_softc *sc = ifp->if_softc;
2057 struct wm_txsoft *txs;
2058 int i;
2059
2060 /* Stop the one second clock. */
2061 callout_stop(&sc->sc_tick_ch);
2062
2063 if (sc->sc_flags & WM_F_HAS_MII) {
2064 /* Down the MII. */
2065 mii_down(&sc->sc_mii);
2066 }
2067
2068 /* Stop the transmit and receive processes. */
2069 CSR_WRITE(sc, WMREG_TCTL, 0);
2070 CSR_WRITE(sc, WMREG_RCTL, 0);
2071
2072 /* Release any queued transmit buffers. */
2073 for (i = 0; i < WM_TXQUEUELEN; i++) {
2074 txs = &sc->sc_txsoft[i];
2075 if (txs->txs_mbuf != NULL) {
2076 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2077 m_freem(txs->txs_mbuf);
2078 txs->txs_mbuf = NULL;
2079 }
2080 }
2081
2082 if (disable)
2083 wm_rxdrain(sc);
2084
2085 /* Mark the interface as down and cancel the watchdog timer. */
2086 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2087 ifp->if_timer = 0;
2088 }
2089
2090 /*
2091 * wm_read_eeprom:
2092 *
2093 * Read data from the serial EEPROM.
2094 */
void
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg;
	int i, x;

	/*
	 * Bit-bang the serial (microwire) EEPROM through the EECD
	 * register: for each word, assert chip select, clock out the
	 * 3-bit READ opcode and 6-bit word address MSB-first, then
	 * clock in 16 data bits MSB-first.
	 */
	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		CSR_WRITE(sc, WMREG_EECD, EECD_CS);
		delay(2);

		/*
		 * Shift in the READ command.  Each bit is presented on
		 * DI and latched by pulsing SK high then low.
		 */
		for (x = 3; x > 0; x--) {
			reg = EECD_CS;
			if (UWIRE_OPC_READ & (1 << (x - 1)))
				reg |= EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift in address (6 bits, MSB first). */
		for (x = 6; x > 0; x--) {
			reg = EECD_CS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/*
		 * Shift out the data: sample DO after each rising
		 * clock edge, MSB first.
		 */
		reg = EECD_CS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Clear CHIP SELECT. */
		CSR_WRITE(sc, WMREG_EECD, 0);
	}
}
2148
2149 /*
2150 * wm_add_rxbuf:
2151 *
 * Add a receive buffer to the indicated descriptor.
2153 */
2154 int
2155 wm_add_rxbuf(struct wm_softc *sc, int idx)
2156 {
2157 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2158 struct mbuf *m;
2159 int error;
2160
2161 MGETHDR(m, M_DONTWAIT, MT_DATA);
2162 if (m == NULL)
2163 return (ENOBUFS);
2164
2165 MCLGET(m, M_DONTWAIT);
2166 if ((m->m_flags & M_EXT) == 0) {
2167 m_freem(m);
2168 return (ENOBUFS);
2169 }
2170
2171 if (rxs->rxs_mbuf != NULL)
2172 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2173
2174 rxs->rxs_mbuf = m;
2175
2176 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2177 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2178 BUS_DMA_READ|BUS_DMA_NOWAIT);
2179 if (error) {
2180 printf("%s: unable to load rx DMA map %d, error = %d\n",
2181 sc->sc_dev.dv_xname, idx, error);
2182 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2183 }
2184
2185 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2186 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2187
2188 WM_INIT_RXDESC(sc, idx);
2189
2190 return (0);
2191 }
2192
2193 /*
2194 * wm_set_ral:
2195 *
 * Set an entry in the receive address list.
2197 */
2198 static void
2199 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2200 {
2201 uint32_t ral_lo, ral_hi;
2202
2203 if (enaddr != NULL) {
2204 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2205 (enaddr[3] << 24);
2206 ral_hi = enaddr[4] | (enaddr[5] << 8);
2207 ral_hi |= RAL_AV;
2208 } else {
2209 ral_lo = 0;
2210 ral_hi = 0;
2211 }
2212
2213 if (sc->sc_type >= WM_T_82544) {
2214 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2215 ral_lo);
2216 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2217 ral_hi);
2218 } else {
2219 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2220 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2221 }
2222 }
2223
2224 /*
2225 * wm_mchash:
2226 *
2227 * Compute the hash of the multicast address for the 4096-bit
2228 * multicast filter.
2229 */
2230 static uint32_t
2231 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2232 {
2233 static const int lo_shift[4] = { 4, 3, 2, 0 };
2234 static const int hi_shift[4] = { 4, 5, 6, 8 };
2235 uint32_t hash;
2236
2237 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2238 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2239
2240 return (hash & 0xfff);
2241 }
2242
2243 /*
2244 * wm_set_filter:
2245 *
2246 * Set up the receive filter.
2247 */
2248 void
2249 wm_set_filter(struct wm_softc *sc)
2250 {
2251 struct ethercom *ec = &sc->sc_ethercom;
2252 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2253 struct ether_multi *enm;
2254 struct ether_multistep step;
2255 bus_addr_t mta_reg;
2256 uint32_t hash, reg, bit;
2257 int i;
2258
2259 if (sc->sc_type >= WM_T_82544)
2260 mta_reg = WMREG_CORDOVA_MTA;
2261 else
2262 mta_reg = WMREG_MTA;
2263
2264 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2265
2266 if (ifp->if_flags & IFF_BROADCAST)
2267 sc->sc_rctl |= RCTL_BAM;
2268 if (ifp->if_flags & IFF_PROMISC) {
2269 sc->sc_rctl |= RCTL_UPE;
2270 goto allmulti;
2271 }
2272
2273 /*
2274 * Set the station address in the first RAL slot, and
2275 * clear the remaining slots.
2276 */
2277 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2278 for (i = 1; i < WM_RAL_TABSIZE; i++)
2279 wm_set_ral(sc, NULL, i);
2280
2281 /* Clear out the multicast table. */
2282 for (i = 0; i < WM_MC_TABSIZE; i++)
2283 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2284
2285 ETHER_FIRST_MULTI(step, ec, enm);
2286 while (enm != NULL) {
2287 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2288 /*
2289 * We must listen to a range of multicast addresses.
2290 * For now, just accept all multicasts, rather than
2291 * trying to set only those filter bits needed to match
2292 * the range. (At this time, the only use of address
2293 * ranges is for IP multicast routing, for which the
2294 * range is big enough to require all bits set.)
2295 */
2296 goto allmulti;
2297 }
2298
2299 hash = wm_mchash(sc, enm->enm_addrlo);
2300
2301 reg = (hash >> 5) & 0x7f;
2302 bit = hash & 0x1f;
2303
2304 hash = CSR_READ(sc, mta_reg + (reg << 2));
2305 hash |= 1U << bit;
2306
2307 /* XXX Hardware bug?? */
2308 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2309 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2310 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2311 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2312 } else
2313 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2314
2315 ETHER_NEXT_MULTI(step, enm);
2316 }
2317
2318 ifp->if_flags &= ~IFF_ALLMULTI;
2319 goto setit;
2320
2321 allmulti:
2322 ifp->if_flags |= IFF_ALLMULTI;
2323 sc->sc_rctl |= RCTL_MPE;
2324
2325 setit:
2326 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2327 }
2328
2329 /*
2330 * wm_tbi_mediainit:
2331 *
2332 * Initialize media for use on 1000BASE-X devices.
2333 */
2334 void
2335 wm_tbi_mediainit(struct wm_softc *sc)
2336 {
2337 const char *sep = "";
2338
2339 if (sc->sc_type < WM_T_82543)
2340 sc->sc_tipg = TIPG_WM_DFLT;
2341 else
2342 sc->sc_tipg = TIPG_LG_DFLT;
2343
2344 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2345 wm_tbi_mediastatus);
2346
2347 /*
2348 * SWD Pins:
2349 *
2350 * 0 = Link LED (output)
2351 * 1 = Loss Of Signal (input)
2352 */
2353 sc->sc_ctrl |= CTRL_SWDPIO(0);
2354 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2355
2356 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2357
2358 #define ADD(s, m, d) \
2359 do { \
2360 printf("%s%s", sep, s); \
2361 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2362 sep = ", "; \
2363 } while (/*CONSTCOND*/0)
2364
2365 printf("%s: ", sc->sc_dev.dv_xname);
2366 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2367 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2368 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2369 printf("\n");
2370
2371 #undef ADD
2372
2373 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2374 }
2375
2376 /*
2377 * wm_tbi_mediastatus: [ifmedia interface function]
2378 *
2379 * Get the current interface media status on a 1000BASE-X device.
2380 */
2381 void
2382 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2383 {
2384 struct wm_softc *sc = ifp->if_softc;
2385
2386 ifmr->ifm_status = IFM_AVALID;
2387 ifmr->ifm_active = IFM_ETHER;
2388
2389 if (sc->sc_tbi_linkup == 0) {
2390 ifmr->ifm_active |= IFM_NONE;
2391 return;
2392 }
2393
2394 ifmr->ifm_status |= IFM_ACTIVE;
2395 ifmr->ifm_active |= IFM_1000_SX;
2396 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2397 ifmr->ifm_active |= IFM_FDX;
2398 }
2399
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 *	Always returns 0; the resulting link state is reflected in
 *	sc_tbi_linkup and on the link LED.
 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Build the transmit config word from the selected media's
	 * ability bits (stored in ifm_data), add pause bits matching
	 * the current flow-control settings, and enable autonegotiation.
	 */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWD pin 1 is Loss Of Signal; low means we have signal. */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/*
			 * Link is up; set the transmit collision distance
			 * to match the negotiated duplex.
			 */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		/* No signal on the fiber; cannot have link. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
2466
2467 /*
2468 * wm_tbi_set_linkled:
2469 *
2470 * Update the link LED on 1000BASE-X devices.
2471 */
2472 void
2473 wm_tbi_set_linkled(struct wm_softc *sc)
2474 {
2475
2476 if (sc->sc_tbi_linkup)
2477 sc->sc_ctrl |= CTRL_SWDPIN(0);
2478 else
2479 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2480
2481 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2482 }
2483
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * sc_tbi_anstate is a countdown that lets autonegotiation
	 * settle: do nothing at 0, just tick down while above 1, and
	 * actually sample the link when it reaches 1.
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	/* NOTE(review): rxcw and ctrl are read but currently unused. */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		/* Match the collision distance to the reported duplex. */
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}
2531
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		/* 82544+ has a dedicated PHY reset bit in CTRL. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/*
		 * On older parts the PHY reset is wired to extended
		 * SWD pin 4.  The PHY reset pin is active-low, so
		 * pulse it: high -> low -> high.
		 */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);	/* pin 4 as output */

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
2568
/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* 82544+ has an on-chip MDIC unit; older parts bit-bang MDIO. */
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	/* Reset the PHY before probing for it. */
	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	/* Probe for PHYs; fall back to media "none" if nothing attached. */
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
2616
2617 /*
2618 * wm_gmii_mediastatus: [ifmedia interface function]
2619 *
2620 * Get the current interface media status on a 1000BASE-T device.
2621 */
2622 void
2623 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2624 {
2625 struct wm_softc *sc = ifp->if_softc;
2626
2627 mii_pollstat(&sc->sc_mii);
2628 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2629 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2630 }
2631
2632 /*
2633 * wm_gmii_mediachange: [ifmedia interface function]
2634 *
2635 * Set hardware to newly-selected media on a 1000BASE-T device.
2636 */
2637 int
2638 wm_gmii_mediachange(struct ifnet *ifp)
2639 {
2640 struct wm_softc *sc = ifp->if_softc;
2641
2642 if (ifp->if_flags & IFF_UP)
2643 mii_mediachg(&sc->sc_mii);
2644 return (0);
2645 }
2646
2647 #define MDI_IO CTRL_SWDPIN(2)
2648 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2649 #define MDI_CLK CTRL_SWDPIN(3)
2650
/*
 * i82543_mii_sendbits:
 *
 *	Clock the least-significant `nbits' bits of `data' out to the
 *	PHY over the bit-banged MDIO interface, MSB first.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure MDIO and MDC as host-driven outputs. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	/*
	 * NOTE(review): for nbits == 32 the initial mask is 1 << 31 on
	 * a signed int, which is technically undefined; 1U would be
	 * strictly correct.
	 */
	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		/* Present the data bit, then pulse the clock low-high-low. */
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
2673
/*
 * i82543_mii_recvbits:
 *
 *	Clock 16 bits of data in from the PHY over the bit-banged
 *	MDIO interface, MSB first, framed by turnaround clocks.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* MDC stays an output; MDI_IO is left as an input for the PHY. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One turnaround clock cycle before sampling. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Sample one bit per rising clock edge, 16 bits total. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* Trailing idle clock cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
2707
2708 #undef MDI_IO
2709 #undef MDI_DIR
2710 #undef MDI_CLK
2711
2712 /*
2713 * wm_gmii_i82543_readreg: [mii interface function]
2714 *
2715 * Read a PHY register on the GMII (i82543 version).
2716 */
2717 int
2718 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2719 {
2720 struct wm_softc *sc = (void *) self;
2721 int rv;
2722
2723 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2724 i82543_mii_sendbits(sc, reg | (phy << 5) |
2725 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2726 rv = i82543_mii_recvbits(sc) & 0xffff;
2727
2728 DPRINTF(WM_DEBUG_GMII,
2729 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2730 sc->sc_dev.dv_xname, phy, reg, rv));
2731
2732 return (rv);
2733 }
2734
2735 /*
2736 * wm_gmii_i82543_writereg: [mii interface function]
2737 *
2738 * Write a PHY register on the GMII (i82543 version).
2739 */
2740 void
2741 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2742 {
2743 struct wm_softc *sc = (void *) self;
2744
2745 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2746 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2747 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2748 (MII_COMMAND_START << 30), 32);
2749 }
2750
2751 /*
2752 * wm_gmii_i82544_readreg: [mii interface function]
2753 *
2754 * Read a PHY register on the GMII.
2755 */
2756 int
2757 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2758 {
2759 struct wm_softc *sc = (void *) self;
2760 uint32_t mdic;
2761 int i, rv;
2762
2763 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2764 MDIC_REGADD(reg));
2765
2766 for (i = 0; i < 100; i++) {
2767 mdic = CSR_READ(sc, WMREG_MDIC);
2768 if (mdic & MDIC_READY)
2769 break;
2770 delay(10);
2771 }
2772
2773 if ((mdic & MDIC_READY) == 0) {
2774 printf("%s: MDIC read timed out: phy %d reg %d\n",
2775 sc->sc_dev.dv_xname, phy, reg);
2776 rv = 0;
2777 } else if (mdic & MDIC_E) {
2778 #if 0 /* This is normal if no PHY is present. */
2779 printf("%s: MDIC read error: phy %d reg %d\n",
2780 sc->sc_dev.dv_xname, phy, reg);
2781 #endif
2782 rv = 0;
2783 } else {
2784 rv = MDIC_DATA(mdic);
2785 if (rv == 0xffff)
2786 rv = 0;
2787 }
2788
2789 return (rv);
2790 }
2791
2792 /*
2793 * wm_gmii_i82544_writereg: [mii interface function]
2794 *
2795 * Write a PHY register on the GMII.
2796 */
2797 void
2798 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2799 {
2800 struct wm_softc *sc = (void *) self;
2801 uint32_t mdic;
2802 int i;
2803
2804 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2805 MDIC_REGADD(reg) | MDIC_DATA(val));
2806
2807 for (i = 0; i < 100; i++) {
2808 mdic = CSR_READ(sc, WMREG_MDIC);
2809 if (mdic & MDIC_READY)
2810 break;
2811 delay(10);
2812 }
2813
2814 if ((mdic & MDIC_READY) == 0)
2815 printf("%s: MDIC write timed out: phy %d reg %d\n",
2816 sc->sc_dev.dv_xname, phy, reg);
2817 else if (mdic & MDIC_E)
2818 printf("%s: MDIC write error: phy %d reg %d\n",
2819 sc->sc_dev.dv_xname, phy, reg);
2820 }
2821
2822 /*
2823 * wm_gmii_statchg: [mii interface function]
2824 *
2825 * Callback from MII layer when media changes.
2826 */
2827 void
2828 wm_gmii_statchg(struct device *self)
2829 {
2830 struct wm_softc *sc = (void *) self;
2831
2832 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2833
2834 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2835 DPRINTF(WM_DEBUG_LINK,
2836 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2837 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2838 } else {
2839 DPRINTF(WM_DEBUG_LINK,
2840 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2841 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2842 }
2843
2844 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2845 }
2846