/*	$NetBSD: if_wm.c,v 1.13 2002/07/14 01:12:28 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix TCP/UDP checksums.
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */
52
53 #include "bpfilter.h"
54
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/callout.h>
58 #include <sys/mbuf.h>
59 #include <sys/malloc.h>
60 #include <sys/kernel.h>
61 #include <sys/socket.h>
62 #include <sys/ioctl.h>
63 #include <sys/errno.h>
64 #include <sys/device.h>
65 #include <sys/queue.h>
66
67 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
68
69 #include <net/if.h>
70 #include <net/if_dl.h>
71 #include <net/if_media.h>
72 #include <net/if_ether.h>
73
74 #if NBPFILTER > 0
75 #include <net/bpf.h>
76 #endif
77
78 #include <netinet/in.h> /* XXX for struct ip */
79 #include <netinet/in_systm.h> /* XXX for struct ip */
80 #include <netinet/ip.h> /* XXX for struct ip */
81 #include <netinet/tcp.h> /* XXX for struct tcphdr */
82
83 #include <machine/bus.h>
84 #include <machine/intr.h>
85 #include <machine/endian.h>
86
87 #include <dev/mii/mii.h>
88 #include <dev/mii/miivar.h>
89 #include <dev/mii/mii_bitbang.h>
90
91 #include <dev/pci/pcireg.h>
92 #include <dev/pci/pcivar.h>
93 #include <dev/pci/pcidevs.h>
94
95 #include <dev/pci/if_wmreg.h>
96
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

/*
 * Wrapped in do/while so DPRINTF() expands to a single statement and
 * cannot capture a following "else" (dangling-else hazard of a bare if).
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
108
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
/* Ring-index advance; masks rely on the sizes being powers of two. */
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
136
137 /*
138 * Control structures are DMA'd to the i82542 chip. We allocate them in
139 * a single clump that maps to a single DMA segment to make serveral things
140 * easier.
141 */
142 struct wm_control_data {
143 /*
144 * The transmit descriptors.
145 */
146 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];
147
148 /*
149 * The receive descriptors.
150 */
151 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
152 };
153
154 #define WM_CDOFF(x) offsetof(struct wm_control_data, x)
155 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
156 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
157
158 /*
159 * Software state for transmit jobs.
160 */
161 struct wm_txsoft {
162 struct mbuf *txs_mbuf; /* head of our mbuf chain */
163 bus_dmamap_t txs_dmamap; /* our DMA map */
164 int txs_firstdesc; /* first descriptor in packet */
165 int txs_lastdesc; /* last descriptor in packet */
166 int txs_ndesc; /* # of descriptors used */
167 };
168
169 /*
170 * Software state for receive buffers. Each descriptor gets a
171 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
172 * more than one buffer, we chain them together.
173 */
174 struct wm_rxsoft {
175 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
176 bus_dmamap_t rxs_dmamap; /* our DMA map */
177 };
178
179 /*
180 * Software state per device.
181 */
182 struct wm_softc {
183 struct device sc_dev; /* generic device information */
184 bus_space_tag_t sc_st; /* bus space tag */
185 bus_space_handle_t sc_sh; /* bus space handle */
186 bus_dma_tag_t sc_dmat; /* bus DMA tag */
187 struct ethercom sc_ethercom; /* ethernet common data */
188 void *sc_sdhook; /* shutdown hook */
189
190 int sc_type; /* chip type; see below */
191 int sc_flags; /* flags; see below */
192
193 void *sc_ih; /* interrupt cookie */
194
195 struct mii_data sc_mii; /* MII/media information */
196
197 struct callout sc_tick_ch; /* tick callout */
198
199 bus_dmamap_t sc_cddmamap; /* control data DMA map */
200 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
201
202 /*
203 * Software state for the transmit and receive descriptors.
204 */
205 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
206 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
207
208 /*
209 * Control data structures.
210 */
211 struct wm_control_data *sc_control_data;
212 #define sc_txdescs sc_control_data->wcd_txdescs
213 #define sc_rxdescs sc_control_data->wcd_rxdescs
214
215 #ifdef WM_EVENT_COUNTERS
216 /* Event counters. */
217 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
218 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
219 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
220 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
221 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
222 struct evcnt sc_ev_rxintr; /* Rx interrupts */
223 struct evcnt sc_ev_linkintr; /* Link interrupts */
224
225 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
226 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
227 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
228 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
229
230 struct evcnt sc_ev_txctx_init; /* Tx cksum context cache initialized */
231 struct evcnt sc_ev_txctx_hit; /* Tx cksum context cache hit */
232 struct evcnt sc_ev_txctx_miss; /* Tx cksum context cache miss */
233
234 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
235 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
236
237 struct evcnt sc_ev_tu; /* Tx underrun */
238 #endif /* WM_EVENT_COUNTERS */
239
240 bus_addr_t sc_tdt_reg; /* offset of TDT register */
241
242 int sc_txfree; /* number of free Tx descriptors */
243 int sc_txnext; /* next ready Tx descriptor */
244
245 int sc_txsfree; /* number of free Tx jobs */
246 int sc_txsnext; /* next free Tx job */
247 int sc_txsdirty; /* dirty Tx jobs */
248
249 uint32_t sc_txctx_ipcs; /* cached Tx IP cksum ctx */
250 uint32_t sc_txctx_tucs; /* cached Tx TCP/UDP cksum ctx */
251
252 bus_addr_t sc_rdt_reg; /* offset of RDT register */
253
254 int sc_rxptr; /* next ready Rx descriptor/queue ent */
255 int sc_rxdiscard;
256 int sc_rxlen;
257 struct mbuf *sc_rxhead;
258 struct mbuf *sc_rxtail;
259 struct mbuf **sc_rxtailp;
260
261 uint32_t sc_ctrl; /* prototype CTRL register */
262 #if 0
263 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
264 #endif
265 uint32_t sc_icr; /* prototype interrupt bits */
266 uint32_t sc_tctl; /* prototype TCTL register */
267 uint32_t sc_rctl; /* prototype RCTL register */
268 uint32_t sc_txcw; /* prototype TXCW register */
269 uint32_t sc_tipg; /* prototype TIPG register */
270
271 int sc_tbi_linkup; /* TBI link status */
272 int sc_tbi_anstate; /* autonegotiation state */
273
274 int sc_mchash_type; /* multicast filter offset */
275 };
276
/* Reset the Rx mbuf-chain accumulator: empty chain, zero length. */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the current Rx chain and advance the tail pointer. */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
289
/* sc_type: chip generations, ordered so "<" / ">=" comparisons work. */
#define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
#define	WM_T_82543		2	/* i82543 */
#define	WM_T_82544		3	/* i82544 */
#define	WM_T_82540		4	/* i82540 */
#define	WM_T_82545		5	/* i82545 */
#define	WM_T_82546		6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

/* 32-bit CSR accessors for the memory-mapped register window. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

/* DMA (bus) address of Tx/Rx descriptor (x). */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
315
/*
 * Sync (n) Tx descriptors starting at index (x), splitting the
 * bus_dmamap_sync() in two when the range wraps the ring.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync the single Rx descriptor at index (x). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
342
/*
 * (Re)initialize Rx descriptor (x) to point at its software buffer,
 * then hand it back to the chip by advancing RDT.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
380
381 void wm_start(struct ifnet *);
382 void wm_watchdog(struct ifnet *);
383 int wm_ioctl(struct ifnet *, u_long, caddr_t);
384 int wm_init(struct ifnet *);
385 void wm_stop(struct ifnet *, int);
386
387 void wm_shutdown(void *);
388
389 void wm_reset(struct wm_softc *);
390 void wm_rxdrain(struct wm_softc *);
391 int wm_add_rxbuf(struct wm_softc *, int);
392 void wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
393 void wm_tick(void *);
394
395 void wm_set_filter(struct wm_softc *);
396
397 int wm_intr(void *);
398 void wm_txintr(struct wm_softc *);
399 void wm_rxintr(struct wm_softc *);
400 void wm_linkintr(struct wm_softc *, uint32_t);
401
402 void wm_tbi_mediainit(struct wm_softc *);
403 int wm_tbi_mediachange(struct ifnet *);
404 void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
405
406 void wm_tbi_set_linkled(struct wm_softc *);
407 void wm_tbi_check_link(struct wm_softc *);
408
409 void wm_gmii_reset(struct wm_softc *);
410
411 int wm_gmii_i82543_readreg(struct device *, int, int);
412 void wm_gmii_i82543_writereg(struct device *, int, int, int);
413
414 int wm_gmii_i82544_readreg(struct device *, int, int);
415 void wm_gmii_i82544_writereg(struct device *, int, int, int);
416
417 void wm_gmii_statchg(struct device *);
418
419 void wm_gmii_mediainit(struct wm_softc *);
420 int wm_gmii_mediachange(struct ifnet *);
421 void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
422
423 int wm_match(struct device *, struct cfdata *, void *);
424 void wm_attach(struct device *, struct device *, void *);
425
426 int wm_copy_small = 0;
427
428 struct cfattach wm_ca = {
429 sizeof(struct wm_softc), wm_match, wm_attach,
430 };
431
432 /*
433 * Devices supported by this driver.
434 */
435 const struct wm_product {
436 pci_vendor_id_t wmp_vendor;
437 pci_product_id_t wmp_product;
438 const char *wmp_name;
439 int wmp_type;
440 int wmp_flags;
441 #define WMP_F_1000X 0x01
442 #define WMP_F_1000T 0x02
443 } wm_products[] = {
444 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
445 "Intel i82542 1000BASE-X Ethernet",
446 WM_T_82542_2_1, WMP_F_1000X },
447
448 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
449 "Intel i82543GC 1000BASE-X Ethernet",
450 WM_T_82543, WMP_F_1000X },
451
452 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
453 "Intel i82543GC 1000BASE-T Ethernet",
454 WM_T_82543, WMP_F_1000T },
455
456 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
457 "Intel i82544EI 1000BASE-T Ethernet",
458 WM_T_82544, WMP_F_1000T },
459
460 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
461 "Intel i82544EI 1000BASE-X Ethernet",
462 WM_T_82544, WMP_F_1000X },
463
464 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
465 "Intel i82544GC 1000BASE-T Ethernet",
466 WM_T_82544, WMP_F_1000T },
467
468 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
469 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
470 WM_T_82544, WMP_F_1000T },
471
472 { 0, 0,
473 NULL,
474 0, 0 },
475 };
476
#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
/* Event-counter names for Tx packets using 1..WM_NTXSEGS DMA segments. */
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */
500
501 static const struct wm_product *
502 wm_lookup(const struct pci_attach_args *pa)
503 {
504 const struct wm_product *wmp;
505
506 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
507 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
508 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
509 return (wmp);
510 }
511 return (NULL);
512 }
513
/*
 * wm_match: [autoconf match function]
 *
 *	Return nonzero if the device described by the attach args
 *	is supported by this driver.
 */
int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}
524
525 void
526 wm_attach(struct device *parent, struct device *self, void *aux)
527 {
528 struct wm_softc *sc = (void *) self;
529 struct pci_attach_args *pa = aux;
530 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
531 pci_chipset_tag_t pc = pa->pa_pc;
532 pci_intr_handle_t ih;
533 const char *intrstr = NULL;
534 bus_space_tag_t memt;
535 bus_space_handle_t memh;
536 bus_dma_segment_t seg;
537 int memh_valid;
538 int i, rseg, error;
539 const struct wm_product *wmp;
540 uint8_t enaddr[ETHER_ADDR_LEN];
541 uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
542 pcireg_t preg, memtype;
543 int pmreg;
544
545 callout_init(&sc->sc_tick_ch);
546
547 wmp = wm_lookup(pa);
548 if (wmp == NULL) {
549 printf("\n");
550 panic("wm_attach: impossible");
551 }
552
553 sc->sc_dmat = pa->pa_dmat;
554
555 preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
556 printf(": %s, rev. %d\n", wmp->wmp_name, preg);
557
558 sc->sc_type = wmp->wmp_type;
559 if (sc->sc_type < WM_T_82543) {
560 if (preg < 2) {
561 printf("%s: i82542 must be at least rev. 2\n",
562 sc->sc_dev.dv_xname);
563 return;
564 }
565 if (preg < 3)
566 sc->sc_type = WM_T_82542_2_0;
567 }
568
569 /*
570 * Map the device.
571 */
572 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
573 switch (memtype) {
574 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
575 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
576 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
577 memtype, 0, &memt, &memh, NULL, NULL) == 0);
578 break;
579 default:
580 memh_valid = 0;
581 }
582
583 if (memh_valid) {
584 sc->sc_st = memt;
585 sc->sc_sh = memh;
586 } else {
587 printf("%s: unable to map device registers\n",
588 sc->sc_dev.dv_xname);
589 return;
590 }
591
592 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
593 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
594 preg |= PCI_COMMAND_MASTER_ENABLE;
595 if (sc->sc_type < WM_T_82542_2_1)
596 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
597 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
598
599 /* Get it out of power save mode, if needed. */
600 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
601 preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
602 if (preg == 3) {
603 /*
604 * The card has lost all configuration data in
605 * this state, so punt.
606 */
607 printf("%s: unable to wake from power state D3\n",
608 sc->sc_dev.dv_xname);
609 return;
610 }
611 if (preg != 0) {
612 printf("%s: waking up from power state D%d\n",
613 sc->sc_dev.dv_xname, preg);
614 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
615 }
616 }
617
618 /*
619 * Map and establish our interrupt.
620 */
621 if (pci_intr_map(pa, &ih)) {
622 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
623 return;
624 }
625 intrstr = pci_intr_string(pc, ih);
626 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
627 if (sc->sc_ih == NULL) {
628 printf("%s: unable to establish interrupt",
629 sc->sc_dev.dv_xname);
630 if (intrstr != NULL)
631 printf(" at %s", intrstr);
632 printf("\n");
633 return;
634 }
635 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
636
637 /*
638 * Allocate the control data structures, and create and load the
639 * DMA map for it.
640 */
641 if ((error = bus_dmamem_alloc(sc->sc_dmat,
642 sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
643 0)) != 0) {
644 printf("%s: unable to allocate control data, error = %d\n",
645 sc->sc_dev.dv_xname, error);
646 goto fail_0;
647 }
648
649 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
650 sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
651 BUS_DMA_COHERENT)) != 0) {
652 printf("%s: unable to map control data, error = %d\n",
653 sc->sc_dev.dv_xname, error);
654 goto fail_1;
655 }
656
657 if ((error = bus_dmamap_create(sc->sc_dmat,
658 sizeof(struct wm_control_data), 1,
659 sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
660 printf("%s: unable to create control data DMA map, "
661 "error = %d\n", sc->sc_dev.dv_xname, error);
662 goto fail_2;
663 }
664
665 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
666 sc->sc_control_data, sizeof(struct wm_control_data), NULL,
667 0)) != 0) {
668 printf("%s: unable to load control data DMA map, error = %d\n",
669 sc->sc_dev.dv_xname, error);
670 goto fail_3;
671 }
672
673 /*
674 * Create the transmit buffer DMA maps.
675 */
676 for (i = 0; i < WM_TXQUEUELEN; i++) {
677 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
678 WM_NTXSEGS, MCLBYTES, 0, 0,
679 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
680 printf("%s: unable to create Tx DMA map %d, "
681 "error = %d\n", sc->sc_dev.dv_xname, i, error);
682 goto fail_4;
683 }
684 }
685
686 /*
687 * Create the receive buffer DMA maps.
688 */
689 for (i = 0; i < WM_NRXDESC; i++) {
690 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
691 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
692 printf("%s: unable to create Rx DMA map %d, "
693 "error = %d\n", sc->sc_dev.dv_xname, i, error);
694 goto fail_5;
695 }
696 sc->sc_rxsoft[i].rxs_mbuf = NULL;
697 }
698
699 /*
700 * Reset the chip to a known state.
701 */
702 wm_reset(sc);
703
704 /*
705 * Read the Ethernet address from the EEPROM.
706 */
707 wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
708 sizeof(myea) / sizeof(myea[0]), myea);
709 enaddr[0] = myea[0] & 0xff;
710 enaddr[1] = myea[0] >> 8;
711 enaddr[2] = myea[1] & 0xff;
712 enaddr[3] = myea[1] >> 8;
713 enaddr[4] = myea[2] & 0xff;
714 enaddr[5] = myea[2] >> 8;
715
716 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
717 ether_sprintf(enaddr));
718
719 /*
720 * Read the config info from the EEPROM, and set up various
721 * bits in the control registers based on their contents.
722 */
723 wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
724 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
725 if (sc->sc_type >= WM_T_82544)
726 wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);
727
728 if (cfg1 & EEPROM_CFG1_ILOS)
729 sc->sc_ctrl |= CTRL_ILOS;
730 if (sc->sc_type >= WM_T_82544) {
731 sc->sc_ctrl |=
732 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
733 CTRL_SWDPIO_SHIFT;
734 sc->sc_ctrl |=
735 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
736 CTRL_SWDPINS_SHIFT;
737 } else {
738 sc->sc_ctrl |=
739 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
740 CTRL_SWDPIO_SHIFT;
741 }
742
743 #if 0
744 if (sc->sc_type >= WM_T_82544) {
745 if (cfg1 & EEPROM_CFG1_IPS0)
746 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
747 if (cfg1 & EEPROM_CFG1_IPS1)
748 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
749 sc->sc_ctrl_ext |=
750 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
751 CTRL_EXT_SWDPIO_SHIFT;
752 sc->sc_ctrl_ext |=
753 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
754 CTRL_EXT_SWDPINS_SHIFT;
755 } else {
756 sc->sc_ctrl_ext |=
757 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
758 CTRL_EXT_SWDPIO_SHIFT;
759 }
760 #endif
761
762 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
763 #if 0
764 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
765 #endif
766
767 /*
768 * Set up some register offsets that are different between
769 * the i82542 and the i82543 and later chips.
770 */
771 if (sc->sc_type < WM_T_82543) {
772 sc->sc_rdt_reg = WMREG_OLD_RDT0;
773 sc->sc_tdt_reg = WMREG_OLD_TDT;
774 } else {
775 sc->sc_rdt_reg = WMREG_RDT;
776 sc->sc_tdt_reg = WMREG_TDT;
777 }
778
779 /*
780 * Determine if we should use flow control. We should
781 * always use it, unless we're on a i82542 < 2.1.
782 */
783 if (sc->sc_type >= WM_T_82542_2_1)
784 sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
785
786 /*
787 * Determine if we're TBI or GMII mode, and initialize the
788 * media structures accordingly.
789 */
790 if (sc->sc_type < WM_T_82543 ||
791 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
792 if (wmp->wmp_flags & WMP_F_1000T)
793 printf("%s: WARNING: TBIMODE set on 1000BASE-T "
794 "product!\n", sc->sc_dev.dv_xname);
795 wm_tbi_mediainit(sc);
796 } else {
797 if (wmp->wmp_flags & WMP_F_1000X)
798 printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
799 "product!\n", sc->sc_dev.dv_xname);
800 wm_gmii_mediainit(sc);
801 }
802
803 ifp = &sc->sc_ethercom.ec_if;
804 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
805 ifp->if_softc = sc;
806 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
807 ifp->if_ioctl = wm_ioctl;
808 ifp->if_start = wm_start;
809 ifp->if_watchdog = wm_watchdog;
810 ifp->if_init = wm_init;
811 ifp->if_stop = wm_stop;
812 IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
813 IFQ_SET_READY(&ifp->if_snd);
814
815 /*
816 * If we're a i82543 or greater, we can support VLANs.
817 */
818 if (sc->sc_type >= WM_T_82543)
819 sc->sc_ethercom.ec_capabilities |=
820 ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
821
822 /*
823 * We can perform TCPv4 and UDPv4 checkums in-bound. Only
824 * on i82543 and later.
825 */
826 if (sc->sc_type >= WM_T_82543)
827 ifp->if_capabilities |=
828 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
829
830 /*
831 * Attach the interface.
832 */
833 if_attach(ifp);
834 ether_ifattach(ifp, enaddr);
835
836 #ifdef WM_EVENT_COUNTERS
837 /* Attach event counters. */
838 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
839 NULL, sc->sc_dev.dv_xname, "txsstall");
840 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
841 NULL, sc->sc_dev.dv_xname, "txdstall");
842 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
843 NULL, sc->sc_dev.dv_xname, "txforceintr");
844 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
845 NULL, sc->sc_dev.dv_xname, "txdw");
846 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
847 NULL, sc->sc_dev.dv_xname, "txqe");
848 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
849 NULL, sc->sc_dev.dv_xname, "rxintr");
850 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
851 NULL, sc->sc_dev.dv_xname, "linkintr");
852
853 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
854 NULL, sc->sc_dev.dv_xname, "rxipsum");
855 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
856 NULL, sc->sc_dev.dv_xname, "rxtusum");
857 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
858 NULL, sc->sc_dev.dv_xname, "txipsum");
859 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
860 NULL, sc->sc_dev.dv_xname, "txtusum");
861
862 evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
863 NULL, sc->sc_dev.dv_xname, "txctx init");
864 evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
865 NULL, sc->sc_dev.dv_xname, "txctx hit");
866 evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
867 NULL, sc->sc_dev.dv_xname, "txctx miss");
868
869 for (i = 0; i < WM_NTXSEGS; i++)
870 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
871 NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
872
873 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
874 NULL, sc->sc_dev.dv_xname, "txdrop");
875
876 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
877 NULL, sc->sc_dev.dv_xname, "tu");
878 #endif /* WM_EVENT_COUNTERS */
879
880 /*
881 * Make sure the interface is shutdown during reboot.
882 */
883 sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
884 if (sc->sc_sdhook == NULL)
885 printf("%s: WARNING: unable to establish shutdown hook\n",
886 sc->sc_dev.dv_xname);
887 return;
888
889 /*
890 * Free any resources we've allocated during the failed attach
891 * attempt. Do this in reverse order and fall through.
892 */
893 fail_5:
894 for (i = 0; i < WM_NRXDESC; i++) {
895 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
896 bus_dmamap_destroy(sc->sc_dmat,
897 sc->sc_rxsoft[i].rxs_dmamap);
898 }
899 fail_4:
900 for (i = 0; i < WM_TXQUEUELEN; i++) {
901 if (sc->sc_txsoft[i].txs_dmamap != NULL)
902 bus_dmamap_destroy(sc->sc_dmat,
903 sc->sc_txsoft[i].txs_dmamap);
904 }
905 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
906 fail_3:
907 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
908 fail_2:
909 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
910 sizeof(struct wm_control_data));
911 fail_1:
912 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
913 fail_0:
914 return;
915 }
916
917 /*
918 * wm_shutdown:
919 *
920 * Make sure the interface is stopped at reboot time.
921 */
922 void
923 wm_shutdown(void *arg)
924 {
925 struct wm_softc *sc = arg;
926
927 wm_stop(&sc->sc_ethercom.ec_if, 1);
928 }
929
930 /*
931 * wm_tx_cksum:
932 *
933 * Set up TCP/IP checksumming parameters for the
934 * specified packet.
935 */
936 static int
937 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
938 uint32_t *fieldsp)
939 {
940 struct mbuf *m0 = txs->txs_mbuf;
941 struct livengood_tcpip_ctxdesc *t;
942 uint32_t fields = 0, ipcs, tucs;
943 struct ip *ip;
944 struct ether_header *eh;
945 int offset, iphl;
946
947 /*
948 * XXX It would be nice if the mbuf pkthdr had offset
949 * fields for the protocol headers.
950 */
951
952 eh = mtod(m0, struct ether_header *);
953 switch (htons(eh->ether_type)) {
954 case ETHERTYPE_IP:
955 iphl = sizeof(struct ip);
956 offset = ETHER_HDR_LEN;
957 break;
958
959 default:
960 /*
961 * Don't support this protocol or encapsulation.
962 */
963 *fieldsp = 0;
964 *cmdp = 0;
965 return (0);
966 }
967
968 /* XXX */
969 if (m0->m_len < (offset + iphl)) {
970 printf("%s: wm_tx_cksum: need to m_pullup, "
971 "packet dropped\n", sc->sc_dev.dv_xname);
972 return (EINVAL);
973 }
974
975 ip = (struct ip *) (mtod(m0, caddr_t) + offset);
976 iphl = ip->ip_hl << 2;
977
978 /*
979 * NOTE: Even if we're not using the IP or TCP/UDP checksum
980 * offload feature, if we load the context descriptor, we
981 * MUST provide valid values for IPCSS and TUCSS fields.
982 */
983
984 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
985 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
986 fields |= htole32(WTX_IXSM);
987 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
988 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
989 WTX_TCPIP_IPCSE(offset + iphl - 1));
990 } else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
991 /* Use the cached value. */
992 ipcs = sc->sc_txctx_ipcs;
993 } else {
994 /* Just initialize it to the likely value anyway. */
995 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
996 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
997 WTX_TCPIP_IPCSE(offset + iphl - 1));
998 }
999
1000 offset += iphl;
1001
1002 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1003 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1004 fields |= htole32(WTX_TXSM);
1005 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1006 WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
1007 WTX_TCPIP_TUCSE(0) /* rest of packet */);
1008 } else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
1009 /* Use the cached value. */
1010 tucs = sc->sc_txctx_tucs;
1011 } else {
1012 /* Just initialize it to a valid TCP context. */
1013 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1014 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1015 WTX_TCPIP_TUCSE(0) /* rest of packet */);
1016 }
1017
1018 if (sc->sc_txctx_ipcs == ipcs &&
1019 sc->sc_txctx_tucs == tucs) {
1020 /* Cached context is fine. */
1021 WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
1022 } else {
1023 /* Fill in the context descriptor. */
1024 #ifdef WM_EVENT_COUNTERS
1025 if (sc->sc_txctx_ipcs == 0xffffffff &&
1026 sc->sc_txctx_tucs == 0xffffffff)
1027 WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
1028 else
1029 WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1030 #endif
1031 t = (struct livengood_tcpip_ctxdesc *)
1032 &sc->sc_txdescs[sc->sc_txnext];
1033 t->tcpip_ipcs = ipcs;
1034 t->tcpip_tucs = tucs;
1035 t->tcpip_cmdlen =
1036 htole32(WTX_CMD_DEXT | WTX_DTYP_C);
1037 t->tcpip_seg = 0;
1038 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1039
1040 sc->sc_txctx_ipcs = ipcs;
1041 sc->sc_txctx_tucs = tucs;
1042
1043 sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
1044 txs->txs_ndesc++;
1045 }
1046
1047 *cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
1048 *fieldsp = fields;
1049
1050 return (0);
1051 }
1052
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 *
 *	Drains the interface send queue into the transmit descriptor
 *	ring until the queue is empty or we run out of descriptors
 *	or job slots.  Runs at splnet().
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors, so we
	 * can tell at the end whether any work was committed.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue (peek only; we dequeue
		 * once we know we can transmit it). */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry.  If we're low, try to reap
		 * completed jobs before giving up. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* NOTE(review): IDE (interrupt delay enable) is OR'd into
		 * every descriptor, including no-offload packets --
		 * presumably to batch Tx completion interrupts; confirm
		 * against the i8254x manual. */
		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using before the chip
		 * can see them. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip: advance the tail pointer
		 * past the last descriptor we filled in. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1303
1304 /*
1305 * wm_watchdog: [ifnet interface function]
1306 *
1307 * Watchdog timer handler.
1308 */
1309 void
1310 wm_watchdog(struct ifnet *ifp)
1311 {
1312 struct wm_softc *sc = ifp->if_softc;
1313
1314 /*
1315 * Since we're using delayed interrupts, sweep up
1316 * before we report an error.
1317 */
1318 wm_txintr(sc);
1319
1320 if (sc->sc_txfree != WM_NTXDESC) {
1321 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1322 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1323 sc->sc_txnext);
1324 ifp->if_oerrors++;
1325
1326 /* Reset the interface. */
1327 (void) wm_init(ifp);
1328 }
1329
1330 /* Try to get more packets going. */
1331 wm_start(ifp);
1332 }
1333
1334 /*
1335 * wm_ioctl: [ifnet interface function]
1336 *
1337 * Handle control requests from the operator.
1338 */
1339 int
1340 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1341 {
1342 struct wm_softc *sc = ifp->if_softc;
1343 struct ifreq *ifr = (struct ifreq *) data;
1344 int s, error;
1345
1346 s = splnet();
1347
1348 switch (cmd) {
1349 case SIOCSIFMEDIA:
1350 case SIOCGIFMEDIA:
1351 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1352 break;
1353
1354 default:
1355 error = ether_ioctl(ifp, cmd, data);
1356 if (error == ENETRESET) {
1357 /*
1358 * Multicast list has changed; set the hardware filter
1359 * accordingly.
1360 */
1361 wm_set_filter(sc);
1362 error = 0;
1363 }
1364 break;
1365 }
1366
1367 /* Try to get more packets going. */
1368 wm_start(ifp);
1369
1370 splx(s);
1371 return (error);
1372 }
1373
/*
 * wm_intr:
 *
 *	Interrupt service routine.
 *
 *	Returns non-zero if any interrupt cause belonging to this
 *	device was handled.  Loops until the chip reports no more
 *	pending causes, or an Rx overrun forces a reinitialization.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		/* Reading ICR fetches (and acknowledges) the pending
		 * interrupt causes. */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx and Tx are always swept, regardless of which
		 * cause bits fired. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TDXW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receiver overran its ring; schedule a full
			 * reinitialization below. */
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
1436
/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 *
 *	Walks the transmit job list from the oldest outstanding job,
 *	reclaiming descriptors, DMA maps and mbufs for every job whose
 *	last descriptor the chip has marked done (DD).
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We may be able to queue packets again. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Only the low 8 status bits are of interest here;
		 * the assignment deliberately truncates to uint8_t. */
		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		/* Return the job's resources to the free pools. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}
1520
/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the receive ring from the last processed slot, assembling
 *	multi-descriptor frames into an mbuf chain (sc_rxhead/sc_rxtailp)
 *	and handing completed, error-free packets to the stack.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			/*
			 * A previous buffer shortage forced us to drop a
			 * partial packet; keep discarding descriptors
			 * until we see its final (EOP) fragment.
			 */
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this fragment to the chain being assembled. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;	/* total over all fragments */

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
1724
/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 *
 *	`icr' is the cause mask read from ICR; only the link-related
 *	bits (LSC, RXSEQ, RXCFG) are examined here.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/* Below here is the TBI (fiber/serdes) path. */

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/* Reprogram the collision distance to match the
			 * duplex the link came up in. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
1791
1792 /*
1793 * wm_tick:
1794 *
1795 * One second timer, used to check link status, sweep up
1796 * completed transmit jobs, etc.
1797 */
1798 void
1799 wm_tick(void *arg)
1800 {
1801 struct wm_softc *sc = arg;
1802 int s;
1803
1804 s = splnet();
1805
1806 if (sc->sc_flags & WM_F_HAS_MII)
1807 mii_tick(&sc->sc_mii);
1808 else
1809 wm_tbi_check_link(sc);
1810
1811 splx(s);
1812
1813 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1814 }
1815
1816 /*
1817 * wm_reset:
1818 *
1819 * Reset the i82542 chip.
1820 */
1821 void
1822 wm_reset(struct wm_softc *sc)
1823 {
1824 int i;
1825
1826 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1827 delay(10000);
1828
1829 for (i = 0; i < 1000; i++) {
1830 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1831 return;
1832 delay(20);
1833 }
1834
1835 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1836 printf("%s: WARNING: reset failed to complete\n",
1837 sc->sc_dev.dv_xname);
1838 }
1839
/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops any pending I/O, resets the chip, programs the Tx/Rx
 *	rings and all operating registers, and marks the interface
 *	RUNNING.  Returns 0 on success or an errno on failure.
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	/* Invalidate the cached checksum-offload context. */
	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	/* Pre-82543 chips use the old register layout. */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second Rx ring is unused; zero it out. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.  Note TCP/UDP offload
	 * (TUOFL) requires IP offload (IPOFL) to be enabled as well.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
2061
2062 /*
2063 * wm_rxdrain:
2064 *
2065 * Drain the receive queue.
2066 */
2067 void
2068 wm_rxdrain(struct wm_softc *sc)
2069 {
2070 struct wm_rxsoft *rxs;
2071 int i;
2072
2073 for (i = 0; i < WM_NRXDESC; i++) {
2074 rxs = &sc->sc_rxsoft[i];
2075 if (rxs->rxs_mbuf != NULL) {
2076 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2077 m_freem(rxs->rxs_mbuf);
2078 rxs->rxs_mbuf = NULL;
2079 }
2080 }
2081 }
2082
2083 /*
2084 * wm_stop: [ifnet interface function]
2085 *
2086 * Stop transmission on the interface.
2087 */
2088 void
2089 wm_stop(struct ifnet *ifp, int disable)
2090 {
2091 struct wm_softc *sc = ifp->if_softc;
2092 struct wm_txsoft *txs;
2093 int i;
2094
2095 /* Stop the one second clock. */
2096 callout_stop(&sc->sc_tick_ch);
2097
2098 if (sc->sc_flags & WM_F_HAS_MII) {
2099 /* Down the MII. */
2100 mii_down(&sc->sc_mii);
2101 }
2102
2103 /* Stop the transmit and receive processes. */
2104 CSR_WRITE(sc, WMREG_TCTL, 0);
2105 CSR_WRITE(sc, WMREG_RCTL, 0);
2106
2107 /* Release any queued transmit buffers. */
2108 for (i = 0; i < WM_TXQUEUELEN; i++) {
2109 txs = &sc->sc_txsoft[i];
2110 if (txs->txs_mbuf != NULL) {
2111 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2112 m_freem(txs->txs_mbuf);
2113 txs->txs_mbuf = NULL;
2114 }
2115 }
2116
2117 if (disable)
2118 wm_rxdrain(sc);
2119
2120 /* Mark the interface as down and cancel the watchdog timer. */
2121 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2122 ifp->if_timer = 0;
2123 }
2124
/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 *
 *	Bit-bangs a Microwire-style READ opcode and a 6-bit word
 *	address out through the EECD register, then clocks in 16 data
 *	bits, for each of `wordcnt' words starting at word `word'.
 *	Bits go out/in MSB first, one bit per SK clock pulse.
 */
void
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg;
	int i, x;

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		CSR_WRITE(sc, WMREG_EECD, EECD_CS);
		delay(2);

		/* Shift in the READ command. */
		for (x = 3; x > 0; x--) {
			reg = EECD_CS;
			if (UWIRE_OPC_READ & (1 << (x - 1)))
				reg |= EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			/* One full SK clock pulse per bit. */
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			reg = EECD_CS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift out the data. */
		reg = EECD_CS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			/* Sample DO while the clock is high. */
			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Clear CHIP SELECT. */
		CSR_WRITE(sc, WMREG_EECD, 0);
	}
}
2183
2184 /*
2185 * wm_add_rxbuf:
2186 *
2187 * Add a receive buffer to the indiciated descriptor.
2188 */
2189 int
2190 wm_add_rxbuf(struct wm_softc *sc, int idx)
2191 {
2192 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2193 struct mbuf *m;
2194 int error;
2195
2196 MGETHDR(m, M_DONTWAIT, MT_DATA);
2197 if (m == NULL)
2198 return (ENOBUFS);
2199
2200 MCLGET(m, M_DONTWAIT);
2201 if ((m->m_flags & M_EXT) == 0) {
2202 m_freem(m);
2203 return (ENOBUFS);
2204 }
2205
2206 if (rxs->rxs_mbuf != NULL)
2207 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2208
2209 rxs->rxs_mbuf = m;
2210
2211 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2212 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2213 BUS_DMA_READ|BUS_DMA_NOWAIT);
2214 if (error) {
2215 printf("%s: unable to load rx DMA map %d, error = %d\n",
2216 sc->sc_dev.dv_xname, idx, error);
2217 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2218 }
2219
2220 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2221 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2222
2223 WM_INIT_RXDESC(sc, idx);
2224
2225 return (0);
2226 }
2227
2228 /*
2229 * wm_set_ral:
2230 *
2231 * Set an entery in the receive address list.
2232 */
2233 static void
2234 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2235 {
2236 uint32_t ral_lo, ral_hi;
2237
2238 if (enaddr != NULL) {
2239 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2240 (enaddr[3] << 24);
2241 ral_hi = enaddr[4] | (enaddr[5] << 8);
2242 ral_hi |= RAL_AV;
2243 } else {
2244 ral_lo = 0;
2245 ral_hi = 0;
2246 }
2247
2248 if (sc->sc_type >= WM_T_82544) {
2249 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2250 ral_lo);
2251 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2252 ral_hi);
2253 } else {
2254 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2255 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2256 }
2257 }
2258
2259 /*
2260 * wm_mchash:
2261 *
2262 * Compute the hash of the multicast address for the 4096-bit
2263 * multicast filter.
2264 */
2265 static uint32_t
2266 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2267 {
2268 static const int lo_shift[4] = { 4, 3, 2, 0 };
2269 static const int hi_shift[4] = { 4, 5, 6, 8 };
2270 uint32_t hash;
2271
2272 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2273 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2274
2275 return (hash & 0xfff);
2276 }
2277
2278 /*
2279 * wm_set_filter:
2280 *
2281 * Set up the receive filter.
2282 */
2283 void
2284 wm_set_filter(struct wm_softc *sc)
2285 {
2286 struct ethercom *ec = &sc->sc_ethercom;
2287 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2288 struct ether_multi *enm;
2289 struct ether_multistep step;
2290 bus_addr_t mta_reg;
2291 uint32_t hash, reg, bit;
2292 int i;
2293
2294 if (sc->sc_type >= WM_T_82544)
2295 mta_reg = WMREG_CORDOVA_MTA;
2296 else
2297 mta_reg = WMREG_MTA;
2298
2299 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2300
2301 if (ifp->if_flags & IFF_BROADCAST)
2302 sc->sc_rctl |= RCTL_BAM;
2303 if (ifp->if_flags & IFF_PROMISC) {
2304 sc->sc_rctl |= RCTL_UPE;
2305 goto allmulti;
2306 }
2307
2308 /*
2309 * Set the station address in the first RAL slot, and
2310 * clear the remaining slots.
2311 */
2312 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2313 for (i = 1; i < WM_RAL_TABSIZE; i++)
2314 wm_set_ral(sc, NULL, i);
2315
2316 /* Clear out the multicast table. */
2317 for (i = 0; i < WM_MC_TABSIZE; i++)
2318 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2319
2320 ETHER_FIRST_MULTI(step, ec, enm);
2321 while (enm != NULL) {
2322 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2323 /*
2324 * We must listen to a range of multicast addresses.
2325 * For now, just accept all multicasts, rather than
2326 * trying to set only those filter bits needed to match
2327 * the range. (At this time, the only use of address
2328 * ranges is for IP multicast routing, for which the
2329 * range is big enough to require all bits set.)
2330 */
2331 goto allmulti;
2332 }
2333
2334 hash = wm_mchash(sc, enm->enm_addrlo);
2335
2336 reg = (hash >> 5) & 0x7f;
2337 bit = hash & 0x1f;
2338
2339 hash = CSR_READ(sc, mta_reg + (reg << 2));
2340 hash |= 1U << bit;
2341
2342 /* XXX Hardware bug?? */
2343 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2344 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2345 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2346 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2347 } else
2348 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2349
2350 ETHER_NEXT_MULTI(step, enm);
2351 }
2352
2353 ifp->if_flags &= ~IFF_ALLMULTI;
2354 goto setit;
2355
2356 allmulti:
2357 ifp->if_flags |= IFF_ALLMULTI;
2358 sc->sc_rctl |= RCTL_MPE;
2359
2360 setit:
2361 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2362 }
2363
2364 /*
2365 * wm_tbi_mediainit:
2366 *
2367 * Initialize media for use on 1000BASE-X devices.
2368 */
2369 void
2370 wm_tbi_mediainit(struct wm_softc *sc)
2371 {
2372 const char *sep = "";
2373
2374 if (sc->sc_type < WM_T_82543)
2375 sc->sc_tipg = TIPG_WM_DFLT;
2376 else
2377 sc->sc_tipg = TIPG_LG_DFLT;
2378
2379 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2380 wm_tbi_mediastatus);
2381
2382 /*
2383 * SWD Pins:
2384 *
2385 * 0 = Link LED (output)
2386 * 1 = Loss Of Signal (input)
2387 */
2388 sc->sc_ctrl |= CTRL_SWDPIO(0);
2389 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2390
2391 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2392
2393 #define ADD(s, m, d) \
2394 do { \
2395 printf("%s%s", sep, s); \
2396 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2397 sep = ", "; \
2398 } while (/*CONSTCOND*/0)
2399
2400 printf("%s: ", sc->sc_dev.dv_xname);
2401 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2402 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2403 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2404 printf("\n");
2405
2406 #undef ADD
2407
2408 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2409 }
2410
2411 /*
2412 * wm_tbi_mediastatus: [ifmedia interface function]
2413 *
2414 * Get the current interface media status on a 1000BASE-X device.
2415 */
2416 void
2417 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2418 {
2419 struct wm_softc *sc = ifp->if_softc;
2420
2421 ifmr->ifm_status = IFM_AVALID;
2422 ifmr->ifm_active = IFM_ETHER;
2423
2424 if (sc->sc_tbi_linkup == 0) {
2425 ifmr->ifm_active |= IFM_NONE;
2426 return;
2427 }
2428
2429 ifmr->ifm_status |= IFM_ACTIVE;
2430 ifmr->ifm_active |= IFM_1000_SX;
2431 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2432 ifmr->ifm_active |= IFM_FDX;
2433 }
2434
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Build the transmit configuration word from the selected
	 * media entry's ability bits, mirroring the flow-control
	 * settings in CTRL, and enable autonegotiation.
	 */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWDPIN 1 is the Loss Of Signal input; clear means signal OK. */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			/* Poll for link-up, up to 50 x 10ms. */
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * Program the transmit collision distance to
			 * match the negotiated duplex.
			 */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		/* No signal at all; mark the link down. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	/* Reflect the new link state on the link LED. */
	wm_tbi_set_linkled(sc);

	return (0);
}
2501
2502 /*
2503 * wm_tbi_set_linkled:
2504 *
2505 * Update the link LED on 1000BASE-X devices.
2506 */
2507 void
2508 wm_tbi_set_linkled(struct wm_softc *sc)
2509 {
2510
2511 if (sc->sc_tbi_linkup)
2512 sc->sc_ctrl |= CTRL_SWDPIN(0);
2513 else
2514 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2515
2516 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2517 }
2518
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * sc_tbi_anstate counts down calls to skip while autonegotiation
	 * settles; 0 means no check is pending, 1 means check now.
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	/*
	 * NOTE(review): rxcw and ctrl are read but never used below.
	 * The reads may matter to the hardware (e.g. clearing latched
	 * status) -- confirm before removing them.
	 */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		/* Link is down. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		/*
		 * Link is up; program the transmit collision distance
		 * to match the negotiated duplex.
		 */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	/* Reflect the new link state on the link LED. */
	wm_tbi_set_linkled(sc);
}
2566
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		/* 82544 and later have a PHY reset bit in CTRL. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/*
		 * Older chips drive the PHY reset through software-
		 * defined pin 4.  The PHY reset pin is active-low.
		 */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		/* Clear all SWD direction bits, then make pin 4 an output. */
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Drive pin 4 high, low, then high again to pulse reset. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
2603
2604 /*
2605 * wm_gmii_mediainit:
2606 *
2607 * Initialize media for use on 1000BASE-T devices.
2608 */
2609 void
2610 wm_gmii_mediainit(struct wm_softc *sc)
2611 {
2612 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2613
2614 /* We have MII. */
2615 sc->sc_flags |= WM_F_HAS_MII;
2616
2617 sc->sc_tipg = TIPG_1000T_DFLT;
2618
2619 /*
2620 * Let the chip set speed/duplex on its own based on
2621 * signals from the PHY.
2622 */
2623 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2624 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2625
2626 /* Initialize our media structures and probe the GMII. */
2627 sc->sc_mii.mii_ifp = ifp;
2628
2629 if (sc->sc_type >= WM_T_82544) {
2630 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2631 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2632 } else {
2633 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2634 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2635 }
2636 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2637
2638 wm_gmii_reset(sc);
2639
2640 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2641 wm_gmii_mediastatus);
2642
2643 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2644 MII_OFFSET_ANY, 0);
2645 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2646 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2647 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2648 } else
2649 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2650 }
2651
2652 /*
2653 * wm_gmii_mediastatus: [ifmedia interface function]
2654 *
2655 * Get the current interface media status on a 1000BASE-T device.
2656 */
2657 void
2658 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2659 {
2660 struct wm_softc *sc = ifp->if_softc;
2661
2662 mii_pollstat(&sc->sc_mii);
2663 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2664 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2665 }
2666
2667 /*
2668 * wm_gmii_mediachange: [ifmedia interface function]
2669 *
2670 * Set hardware to newly-selected media on a 1000BASE-T device.
2671 */
2672 int
2673 wm_gmii_mediachange(struct ifnet *ifp)
2674 {
2675 struct wm_softc *sc = ifp->if_softc;
2676
2677 if (ifp->if_flags & IFF_UP)
2678 mii_mediachg(&sc->sc_mii);
2679 return (0);
2680 }
2681
2682 #define MDI_IO CTRL_SWDPIN(2)
2683 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2684 #define MDI_CLK CTRL_SWDPIN(3)
2685
2686 static void
2687 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2688 {
2689 uint32_t i, v;
2690
2691 v = CSR_READ(sc, WMREG_CTRL);
2692 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2693 v |= MDI_DIR | CTRL_SWDPIO(3);
2694
2695 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2696 if (data & i)
2697 v |= MDI_IO;
2698 else
2699 v &= ~MDI_IO;
2700 CSR_WRITE(sc, WMREG_CTRL, v);
2701 delay(10);
2702 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2703 delay(10);
2704 CSR_WRITE(sc, WMREG_CTRL, v);
2705 delay(10);
2706 }
2707 }
2708
/*
 * i82543_mii_recvbits:
 *
 *	Clock in and return 16 bits (MSB first) from the bit-banged
 *	MII interface: SWDPIN 2 is the data line (left as an input
 *	here), SWDPIN 3 is the clock we drive.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	/* Clear data/clock and all direction bits... */
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	/* ...then make only the clock pin an output. */
	v |= CTRL_SWDPIO(3);

	/* One clock cycle before the data bits. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Clock in 16 data bits, sampling while the clock is high. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing clock cycle after the data bits. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
2742
2743 #undef MDI_IO
2744 #undef MDI_DIR
2745 #undef MDI_CLK
2746
2747 /*
2748 * wm_gmii_i82543_readreg: [mii interface function]
2749 *
2750 * Read a PHY register on the GMII (i82543 version).
2751 */
2752 int
2753 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2754 {
2755 struct wm_softc *sc = (void *) self;
2756 int rv;
2757
2758 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2759 i82543_mii_sendbits(sc, reg | (phy << 5) |
2760 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2761 rv = i82543_mii_recvbits(sc) & 0xffff;
2762
2763 DPRINTF(WM_DEBUG_GMII,
2764 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2765 sc->sc_dev.dv_xname, phy, reg, rv));
2766
2767 return (rv);
2768 }
2769
2770 /*
2771 * wm_gmii_i82543_writereg: [mii interface function]
2772 *
2773 * Write a PHY register on the GMII (i82543 version).
2774 */
2775 void
2776 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2777 {
2778 struct wm_softc *sc = (void *) self;
2779
2780 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2781 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2782 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2783 (MII_COMMAND_START << 30), 32);
2784 }
2785
2786 /*
2787 * wm_gmii_i82544_readreg: [mii interface function]
2788 *
2789 * Read a PHY register on the GMII.
2790 */
2791 int
2792 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2793 {
2794 struct wm_softc *sc = (void *) self;
2795 uint32_t mdic;
2796 int i, rv;
2797
2798 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2799 MDIC_REGADD(reg));
2800
2801 for (i = 0; i < 100; i++) {
2802 mdic = CSR_READ(sc, WMREG_MDIC);
2803 if (mdic & MDIC_READY)
2804 break;
2805 delay(10);
2806 }
2807
2808 if ((mdic & MDIC_READY) == 0) {
2809 printf("%s: MDIC read timed out: phy %d reg %d\n",
2810 sc->sc_dev.dv_xname, phy, reg);
2811 rv = 0;
2812 } else if (mdic & MDIC_E) {
2813 #if 0 /* This is normal if no PHY is present. */
2814 printf("%s: MDIC read error: phy %d reg %d\n",
2815 sc->sc_dev.dv_xname, phy, reg);
2816 #endif
2817 rv = 0;
2818 } else {
2819 rv = MDIC_DATA(mdic);
2820 if (rv == 0xffff)
2821 rv = 0;
2822 }
2823
2824 return (rv);
2825 }
2826
2827 /*
2828 * wm_gmii_i82544_writereg: [mii interface function]
2829 *
2830 * Write a PHY register on the GMII.
2831 */
2832 void
2833 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2834 {
2835 struct wm_softc *sc = (void *) self;
2836 uint32_t mdic;
2837 int i;
2838
2839 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2840 MDIC_REGADD(reg) | MDIC_DATA(val));
2841
2842 for (i = 0; i < 100; i++) {
2843 mdic = CSR_READ(sc, WMREG_MDIC);
2844 if (mdic & MDIC_READY)
2845 break;
2846 delay(10);
2847 }
2848
2849 if ((mdic & MDIC_READY) == 0)
2850 printf("%s: MDIC write timed out: phy %d reg %d\n",
2851 sc->sc_dev.dv_xname, phy, reg);
2852 else if (mdic & MDIC_E)
2853 printf("%s: MDIC write error: phy %d reg %d\n",
2854 sc->sc_dev.dv_xname, phy, reg);
2855 }
2856
2857 /*
2858 * wm_gmii_statchg: [mii interface function]
2859 *
2860 * Callback from MII layer when media changes.
2861 */
2862 void
2863 wm_gmii_statchg(struct device *self)
2864 {
2865 struct wm_softc *sc = (void *) self;
2866
2867 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2868
2869 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2870 DPRINTF(WM_DEBUG_LINK,
2871 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2872 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2873 } else {
2874 DPRINTF(WM_DEBUG_LINK,
2875 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2876 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2877 }
2878
2879 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2880 }
2881