/*	$NetBSD: if_vr.c,v 1.19.2.3 1999/09/22 03:25:16 cgd Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with struct alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */
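
/*
 * To illustrate the receive fix-up: an Ethernet header is 14 bytes
 * long, so when a frame is DMA'd to a longword-aligned buffer, the
 * IP header that follows it starts at offset 14 and is only 2-byte
 * aligned.  Copying the frame into a buffer whose data pointer has
 * been advanced by 2 bytes (the m->m_data += 2 in vr_rxeof() below)
 * puts the payload back on a 4-byte boundary, at the cost of one
 * memcpy per received packet.
 */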

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <vm/vm.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if defined(INET)
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

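/*
 * The Rhine reads its DMA descriptors in PCI (little-endian) byte
 * order, so on big-endian hosts every descriptor word must be
 * byte-swapped on the way in and out; on little-endian hosts the
 * conversion is free.
 */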
#if BYTE_ORDER == BIG_ENDIAN
#include <machine/bswap.h>
#define	htopci(x)	bswap32(x)
#define	pcitoh(x)	bswap32(x)
#else
#define	htopci(x)	(x)
#define	pcitoh(x)	(x)
#endif

#define	VR_USEIOSPACE

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
	  "VIA VT3043 (Rhine) 10/100 Ethernet" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
	  "VIA VT86C100A (Rhine-II) 10/100 Ethernet" },
	{ 0, 0, NULL }
};

/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)

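/*
 * Both ring sizes are powers of two, so the VR_NEXTTX() and
 * VR_NEXTRX() macros can wrap an index around the ring with a cheap
 * AND mask rather than a modulo operation.
 */
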
/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)	offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)	VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)	VR_CDOFF(vr_rxdescs[(x)])
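
/*
 * These byte offsets into the control-data clump serve two purposes:
 * added to the clump's DMA address they yield the device-visible
 * address of an individual descriptor (VR_CDTXADDR/VR_CDRXADDR
 * below), and they let us bus_dmamap_sync() a single descriptor at
 * a time (VR_CDTXSYNC/VR_CDRXSYNC below).
 */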

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t		ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t		vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */
};
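
/*
 * Transmit ring bookkeeping: vr_txdirty is the oldest in-flight
 * descriptor, vr_txlast the newest, and vr_txpending counts the
 * descriptors between them (inclusive).  VR_NEXTTX(vr_txlast) is
 * therefore the next free slot, and vr_start() stops queueing once
 * vr_txpending reaches VR_NTXDESC.
 */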

#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two (2048) below:
 * MCLBYTES - 1 (0x7ff) then exactly fills the 11-bit buffer length
 * field of the descriptor.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htopci(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_status = htopci(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	__d->vr_data = htopci(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htopci(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)

/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)		\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)		\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)		\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

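/*
 * Spin budget for reset polling: vr_reset() waits 10us per iteration,
 * giving the chip roughly 10ms to come out of reset.
 */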
#define	VR_TIMEOUT		1000

static int	vr_add_rxbuf __P((struct vr_softc *, int));

static void	vr_rxeof __P((struct vr_softc *));
static void	vr_rxeoc __P((struct vr_softc *));
static void	vr_txeof __P((struct vr_softc *));
static int	vr_intr __P((void *));
static void	vr_start __P((struct ifnet *));
static int	vr_ioctl __P((struct ifnet *, u_long, caddr_t));
static void	vr_init __P((void *));
static void	vr_stop __P((struct vr_softc *));
static void	vr_watchdog __P((struct ifnet *));
static void	vr_tick __P((void *));

static int	vr_ifmedia_upd __P((struct ifnet *));
static void	vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

static void	vr_mii_sync __P((struct vr_softc *));
static void	vr_mii_send __P((struct vr_softc *, u_int32_t, int));
static int	vr_mii_readreg __P((struct device *, int, int));
static void	vr_mii_writereg __P((struct device *, int, int, int));
static void	vr_mii_statchg __P((struct device *));

static u_int8_t	vr_calchash __P((u_int8_t *));
static void	vr_setmulti __P((struct vr_softc *));
static void	vr_reset __P((struct vr_softc *));

#define	VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) | x)

#define	VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) & ~x)

#define	VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) | x)

#define	VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) & ~x)

#define	VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) | x)

#define	VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) & ~x)

#define	SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
	    CSR_READ_1(sc, VR_MIICMD) | x)

#define	SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
	    CSR_READ_1(sc, VR_MIICMD) & ~x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
vr_mii_sync(sc)
	struct vr_softc *sc;
{
	int i;

	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAOUT);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
vr_mii_send(sc, bits, cnt)
	struct vr_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAOUT);
		} else {
			SIO_CLR(VR_MIICMD_DATAOUT);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (struct vr_softc *)self;
	int i, ack, val = 0;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
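	/*
	 * An IEEE 802.3 clause-22 MII management read frame looks
	 * like this on the wire:
	 *
	 *	<start:01> <opcode:10> <phy addr:5> <reg addr:5>
	 *	<turnaround:2> <data:16>
	 *
	 * The start, opcode, and address bits are clocked out here;
	 * the turnaround and data bits are handled below.
	 */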
	vr_mii_send(sc, MII_COMMAND_START, 2);
	vr_mii_send(sc, MII_COMMAND_READ, 2);
	vr_mii_send(sc, phy, 5);
	vr_mii_send(sc, reg, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAOUT));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN;

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN)
				val |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

 fail:

	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	return (val);
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	vr_mii_send(sc, MII_COMMAND_START, 2);
	vr_mii_send(sc, MII_COMMAND_WRITE, 2);
	vr_mii_send(sc, phy, 5);
	vr_mii_send(sc, reg, 5);
	vr_mii_send(sc, MII_COMMAND_ACK, 2);
	vr_mii_send(sc, val, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);
}

static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);

	/* XXX Update ifp->if_baudrate */
}

/*
 * Calculate the CRC of a multicast group address and return the upper
 * 6 bits, which index the 64-bit hash filter.
 */
static u_int8_t
vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
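			/*
			 * When carry is set, the shift above has just
			 * cleared the low bit of crc, so OR-ing carry
			 * back in makes the update below equivalent to
			 * crc ^= 0x04c11db7, the standard Ethernet
			 * CRC-32 polynomial.
			 */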
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return ((crc >> 26) & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	u_int8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			/*
			 * A range of multicast addresses can't be
			 * matched by the hash filter, so just accept
			 * all multicast frames.
			 */
			rxfilt |= VR_RXCFG_RX_MULTI;
			CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
			CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
			CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
			return;
		}

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT)
		printf("%s: reset never completed!\n",
		    sc->vr_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = pcitoh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(pcitoh(d->vr_status));

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Try to conjure up a new mbuf cluster.  If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue.  This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		m = ds->ds_mbuf;
		if (vr_add_rxbuf(sc, i) == ENOBUFS) {
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
			if ((ifp->if_flags & IFF_PROMISC) != 0 &&
			    ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
			    memcmp(eh->ether_dhost, LLADDR(ifp->if_sadl),
				ETHER_ADDR_LEN) != 0) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}

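/*
 * Restart the receiver after an RX error: drain any completed frames,
 * idle the RX engine, point it at the next ready descriptor, and turn
 * it back on.
 */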
static void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	    i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = pcitoh(d->vr_status);
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}

static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(sc);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			vr_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}

/*
 * Main transmit routine.  We map each outgoing mbuf chain directly
 * for DMA when it is longword aligned and fits in a single segment;
 * otherwise we copy it into a freshly allocated, aligned mbuf first.
 * A pointer to each mbuf is saved so it can be freed once transmission
 * completes.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 */
		if ((mtod(m0, bus_addr_t) & 3) != 0 ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		     BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.  The Rhine
		 * doesn't auto-pad, so we have to do this ourselves.
		 */
		d->vr_data = htopci(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htopci(m0->m_pkthdr.len < VR_MIN_FRAMELEN ?
		    VR_MIN_FRAMELEN : m0->m_pkthdr.len);
		d->vr_ctl |=
		    htopci(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htopci(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htopci(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htopci(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Initialize the interface.  Must be called at splnet.
 */
static void
vr_init(xsc)
	void *xsc;
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	int i;

	/* Cancel pending I/O. */
	vr_stop(sc);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htopci(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.  The buffers are
	 * already allocated.
	 */
	for (i = 0; i < VR_NRXDESC; i++)
		VR_INIT_RXDESC(sc, i);
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	timeout(vr_tick, sc, hz);

	/* Attempt to start output on the interface. */
	vr_start(ifp);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			vr_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			vr_init(sc);
			break;
		}
		break;

	case SIOCGIFADDR:
		bcopy((caddr_t) sc->vr_enaddr,
		    (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
		    ETHER_ADDR_LEN);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			vr_stop(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			vr_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			vr_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (command == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->vr_ec);
		else
			error = ether_delmulti(ifr, &sc->vr_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	vr_init(sc);
}

/*
 * One second timer, used to tick MII.
 */
static void
vr_tick(arg)
	void *arg;
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	timeout(vr_tick, sc, hz);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(sc)
	struct vr_softc *sc;
{
	struct vr_descsoft *ds;
	struct ifnet *ifp;
	int i;

	/* Cancel one second timer. */
	untimeout(vr_tick, sc);

	ifp = &sc->vr_ec.ec_if;
	ifp->if_timer = 0;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static struct vr_type *vr_lookup __P((struct pci_attach_args *));
static int vr_probe __P((struct device *, struct cfdata *, void *));
static void vr_attach __P((struct device *, struct device *, void *));
static void vr_shutdown __P((void *));

struct cfattach vr_ca = {
	sizeof (struct vr_softc), vr_probe, vr_attach
};

static struct vr_type *
vr_lookup(pa)
	struct pci_attach_args *pa;
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(arg)
	void *arg;
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(sc);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	u_int32_t command;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, rseg, error;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */

	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = PCI_CONF_READ(VR_PCI_LOIO);
			membase = PCI_CONF_READ(VR_PCI_LOMEM);
			irq = PCI_CONF_READ(VR_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n",
			    sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
		}
	}

	/* Make sure bus mastering is enabled. */
	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
		    PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
		    PCI_MAPREG_TYPE_MEM |
		    PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt */
		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
		    pa->pa_intrline, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
			    sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
		    vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
			    sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return;
		}
		printf("%s: interrupting at %s\n",
		    sc->vr_dev.dv_xname, intrstr);
	}

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected.  Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
	    sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = vr_add_rxbuf(sc, i)) != 0) {
			printf("%s: unable to allocate or map rx buffer %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_6;
		}
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_baudrate = 10000000;
	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
	mii_phy_probe(&sc->vr_dev, &sc->vr_mii, 0xffffffff);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

#if NBPFILTER > 0
	bpfattach(&sc->vr_ec.ec_if.if_bpf,
	    ifp, DLT_EN10MB, sizeof (struct ether_header));
#endif

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
		    sc->vr_dev.dv_xname);
	return;

 fail_6:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
			(void) m_freem(sc->vr_rxsoft[i].ds_mbuf);
		}
	}
 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}