/*	$NetBSD: if_vr.c,v 1.18 1999/02/12 00:36:48 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with strict alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */
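
/*
 * To illustrate the 2-byte shift: an Ethernet header is 14 bytes, so a
 * frame whose first byte lands on a 4-byte boundary has its IP header
 * at offset 14, which is only 2-byte aligned.  Copying the frame in at
 * offset 2 pushes the IP header to offset 16, restoring the 4-byte
 * alignment that strict-alignment architectures require.
 */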

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <vm/vm.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if defined(INET)
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define	VR_USEIOSPACE

#define	ETHER_CRC_LEN	4	/* XXX Should be in a common header. */

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
	  "VIA VT3043 (Rhine) 10/100 Ethernet" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
	  "VIA VT86C100A (Rhine-II) 10/100 Ethernet" },
	{ 0, 0, NULL }
};

/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
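
/*
 * Note that the "advance and mask" idiom in VR_NEXTTX()/VR_NEXTRX()
 * only wraps correctly because the ring sizes are powers of two; if
 * either size is changed it must remain a power of two.  For example,
 * with VR_NTXDESC == 64, VR_NEXTTX(63) is (64 & 63) == 0, wrapping
 * back to the first descriptor.
 */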

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)	offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)	VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)	VR_CDOFF(vr_rxdescs[(x)])

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t		ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t		vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */
};

#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = VR_CDRXADDR((sc), VR_NEXTRX((i)));		\
	__d->vr_status = VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG |	\
	    VR_RXSTAT_OWN;						\
	__d->vr_data = __ds->ds_dmamap->dm_segs[0].ds_addr;		\
	__d->vr_ctl = VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |		\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN);				\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)
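
/*
 * The (MCLBYTES - 1) above deserves a note: the descriptor's buffer
 * length field is only 11 bits wide (see the comment above
 * vr_add_rxbuf() below), so with MCLBYTES == 2048 we advertise 2047
 * usable bytes.  Since MCLBYTES is a power of two, masking
 * (MCLBYTES - 1) with VR_RXCTL_BUFLEN yields the largest length the
 * field can encode.
 */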

/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)	\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)	\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)	\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define	VR_TIMEOUT	1000

static int vr_add_rxbuf __P((struct vr_softc *, int));

static void vr_rxeof __P((struct vr_softc *));
static void vr_rxeoc __P((struct vr_softc *));
static void vr_txeof __P((struct vr_softc *));
static int vr_intr __P((void *));
static void vr_start __P((struct ifnet *));
static int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
static void vr_init __P((void *));
static void vr_stop __P((struct vr_softc *));
static void vr_watchdog __P((struct ifnet *));
static void vr_tick __P((void *));

static int vr_ifmedia_upd __P((struct ifnet *));
static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

static void vr_mii_sync __P((struct vr_softc *));
static void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
static int vr_mii_readreg __P((struct device *, int, int));
static void vr_mii_writereg __P((struct device *, int, int, int));
static void vr_mii_statchg __P((struct device *));

static u_int8_t vr_calchash __P((u_int8_t *));
static void vr_setmulti __P((struct vr_softc *));
static void vr_reset __P((struct vr_softc *));

#define	VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) | x)

#define	VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) & ~x)

#define	VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) | x)

#define	VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) & ~x)

#define	VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) | x)

#define	VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) & ~x)

#define	SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
	    CSR_READ_1(sc, VR_MIICMD) | x)

#define	SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
	    CSR_READ_1(sc, VR_MIICMD) & ~x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
vr_mii_sync(sc)
	struct vr_softc *sc;
{
	int i;

	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAOUT);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
vr_mii_send(sc, bits, cnt)
	struct vr_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAOUT);
		} else {
			SIO_CLR(VR_MIICMD_DATAOUT);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
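
/*
 * For reference, the bit-banged management frame clocked out below
 * follows the standard IEEE 802.3 MII format:
 *
 *	<start:01> <opcode:2> <phy addr:5> <reg addr:5> <turnaround:2>
 *	<data:16>
 *
 * where the opcode is 10 for a read and 01 for a write, and the
 * turnaround bits are where the PHY drives its acknowledgement on a
 * read.
 */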
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (struct vr_softc *)self;
	int i, ack, val = 0;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, MII_COMMAND_START, 2);
	vr_mii_send(sc, MII_COMMAND_READ, 2);
	vr_mii_send(sc, phy, 5);
	vr_mii_send(sc, reg, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAOUT));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN;

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN)
				val |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

 fail:

	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	return (val);
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	vr_mii_send(sc, MII_COMMAND_START, 2);
	vr_mii_send(sc, MII_COMMAND_WRITE, 2);
	vr_mii_send(sc, phy, 5);
	vr_mii_send(sc, reg, 5);
	vr_mii_send(sc, MII_COMMAND_ACK, 2);
	vr_mii_send(sc, val, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);
}

static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);

	/* XXX Update ifp->if_baudrate */
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
static u_int8_t
vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

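	/*
	 * One subtlety below: after `crc <<= 1' the low bit of crc is
	 * always zero, so when carry is set, (crc ^ 0x04c11db6) | carry
	 * is identical to crc ^ 0x04c11db7, the standard Ethernet
	 * CRC-32 polynomial.  The odd-looking constant is not a typo.
	 */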
	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return ((crc >> 26) & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	u_int8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
 allmulti:
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			/*
			 * A range of multicast addresses can't be
			 * represented in the hash filter; just accept
			 * all multicast frames.
			 */
			goto allmulti;
		}

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT)
		printf("%s: reset never completed!\n",
		    sc->vr_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = d->vr_status;

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(d->vr_status);

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Try to conjure up a new mbuf cluster.  If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue.  This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		m = ds->ds_mbuf;
		if (vr_add_rxbuf(sc, i) == ENOBUFS) {
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto dropit;
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
			if ((ifp->if_flags & IFF_PROMISC) != 0 &&
			    (rxstat & (VR_RXSTAT_RX_PHYS | VR_RXSTAT_RX_BROAD |
			     VR_RXSTAT_RX_MULTI)) == 0) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}

static void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	     i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = d->vr_status;
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}

static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(sc);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			vr_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}

/*
 * Main transmit routine.  Where we can, we map mbuf data regions
 * directly into the transmit descriptors to avoid copies; packets
 * that are misaligned or span multiple DMA segments are first copied
 * into a freshly allocated mbuf or cluster.  We also keep a software
 * pointer to each mbuf so it can be freed later, since the descriptors
 * themselves only hold physical (bus) addresses.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 */
		if ((mtod(m0, bus_addr_t) & 3) != 0 ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname,
				    error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.  The Rhine
		 * doesn't auto-pad, so we have to do this ourselves.
		 */
		d->vr_data = ds->ds_dmamap->dm_segs[0].ds_addr;
		d->vr_ctl = m0->m_pkthdr.len < VR_MIN_FRAMELEN ?
		    VR_MIN_FRAMELEN : m0->m_pkthdr.len;
		d->vr_ctl |=
		    VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|VR_TXCTL_LASTFRAG;

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = VR_TXSTAT_OWN;

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= VR_TXCTL_FINT;
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = VR_TXSTAT_OWN;
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Initialize the interface.  Must be called at splnet.
 */
static void
vr_init(xsc)
	void *xsc;
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	int i;

	/* Cancel pending I/O. */
	vr_stop(sc);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = VR_CDTXADDR(sc, VR_NEXTTX(i));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.  The buffers are
	 * already allocated.
	 */
	for (i = 0; i < VR_NRXDESC; i++)
		VR_INIT_RXDESC(sc, i);
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	timeout(vr_tick, sc, hz);

	/* Attempt to start output on the interface. */
	vr_start(ifp);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			vr_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			vr_init(sc);
			break;
		}
		break;

	case SIOCGIFADDR:
		bcopy((caddr_t) sc->vr_enaddr,
		    (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
		    ETHER_ADDR_LEN);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			vr_stop(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			vr_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			vr_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (command == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->vr_ec);
		else
			error = ether_delmulti(ifr, &sc->vr_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media,
		    command);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	vr_init(sc);
}

/*
 * One second timer, used to tick MII.
 */
static void
vr_tick(arg)
	void *arg;
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	timeout(vr_tick, sc, hz);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(sc)
	struct vr_softc *sc;
{
	struct vr_descsoft *ds;
	struct ifnet *ifp;
	int i;

	/* Cancel one second timer. */
	untimeout(vr_tick, sc);

	ifp = &sc->vr_ec.ec_if;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static struct vr_type *vr_lookup __P((struct pci_attach_args *));
static int vr_probe __P((struct device *, struct cfdata *, void *));
static void vr_attach __P((struct device *, struct device *, void *));
static void vr_shutdown __P((void *));

struct cfattach vr_ca = {
	sizeof (struct vr_softc), vr_probe, vr_attach
};

static struct vr_type *
vr_lookup(pa)
	struct pci_attach_args *pa;
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(arg)
	void *arg;
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(sc);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	u_int32_t command;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, rseg, error;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */

	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = PCI_CONF_READ(VR_PCI_LOIO);
			membase = PCI_CONF_READ(VR_PCI_LOMEM);
			irq = PCI_CONF_READ(VR_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n",
			    sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	command |= (PCI_COMMAND_IO_ENABLE |
	    PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE);
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);

	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
		    PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
		    PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt */
		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
		    pa->pa_intrline, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
			    sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
		    vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return;
		}
		printf("%s: interrupting at %s\n",
		    sc->vr_dev.dv_xname, intrstr);
	}

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected.  Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
	    sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
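	/*
	 * This is the usual four-step bus_dma sequence:
	 * bus_dmamem_alloc() obtains DMA-safe memory,
	 * bus_dmamem_map() makes it visible in kernel virtual space,
	 * bus_dmamap_create() builds a map for it, and
	 * bus_dmamap_load() binds the memory to the map, which is what
	 * yields the bus address used in VR_CDTXADDR()/VR_CDRXADDR().
	 */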
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = vr_add_rxbuf(sc, i)) != 0) {
			printf("%s: unable to allocate or map rx buffer %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_6;
		}
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_baudrate = 10000000;
	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd,
	    vr_ifmedia_sts);
	mii_phy_probe(&sc->vr_dev, &sc->vr_mii, 0xffffffff);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

#if NBPFILTER > 0
	bpfattach(&sc->vr_ec.ec_if.if_bpf,
	    ifp, DLT_EN10MB, sizeof (struct ether_header));
#endif

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
		    sc->vr_dev.dv_xname);
	return;

 fail_6:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
			(void) m_freem(sc->vr_rxsoft[i].ds_mbuf);
		}
	}
 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}