/*
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $Id: if_vr.c,v 1.1 1999/01/21 11:55:22 sakamoto Exp $
33 */
34
35 /*
36 * VIA Rhine fast ethernet PCI NIC driver
37 *
38 * Supports various network adapters based on the VIA Rhine
39 * and Rhine II PCI controllers, including the D-Link DFE530TX.
40 * Datasheets are available at http://www.via.com.tw.
41 *
42 * Written by Bill Paul <wpaul (at) ctr.columbia.edu>
43 * Electrical Engineering Department
44 * Columbia University, New York City
45 */
46
47 /*
48 * The VIA Rhine controllers are similar in some respects to the
49 * the DEC tulip chips, except less complicated. The controller
50 * uses an MII bus and an external physical layer interface. The
51 * receiver has a one entry perfect filter and a 64-bit hash table
52 * multicast filter. Transmit and receive descriptors are similar
53 * to the tulip.
54 *
55 * The Rhine has a serious flaw in its transmit DMA mechanism:
56 * transmit buffers must be longword aligned. Unfortunately,
57 * FreeBSD doesn't guarantee that mbufs will be filled in starting
58 * at longword boundaries, so we have to do a buffer copy before
59 * transmission.
60 */
61
62 #include "bpfilter.h"
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/sockio.h>
67 #include <sys/mbuf.h>
68 #include <sys/malloc.h>
69 #include <sys/kernel.h>
70 #include <sys/socket.h>
71
72 #include <net/if.h>
73 #include <net/if_arp.h>
74 #include <net/ethernet.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77
78 #if NBPFILTER > 0
79 #include <net/bpf.h>
80 #endif
81
82 #include <vm/vm.h> /* for vtophys */
83 #include <vm/pmap.h> /* for vtophys */
84 #include <machine/clock.h> /* for DELAY */
85 #include <machine/bus_pio.h>
86 #include <machine/bus_memio.h>
87 #include <machine/bus.h>
88
89 #include <pci/pcireg.h>
90 #include <pci/pcivar.h>
91
92 #define VR_USEIOSPACE
93
94 /* #define VR_BACKGROUND_AUTONEG */
95
96 #include <pci/if_vrreg.h>
97
98 #ifndef lint
99 static const char rcsid[] =
100 "$Id: if_vr.c,v 1.1 1999/01/21 11:55:22 sakamoto Exp $";
101 #endif
102
/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
		"VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
		"VIA VT86C100A Rhine II 10/100BaseTX" },
	/* Sentinel: vr_probe() stops when vr_name is NULL. */
	{ 0, 0, NULL }
};
113
/*
 * Various supported PHY vendors/types and their names. Note that
 * this driver will work with pretty much any MII-compliant PHY,
 * so failure to positively identify the chip is not a fatal error.
 */

static struct vr_type vr_phys[] = {
	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
	/* Catch-all used when the PHY cannot be positively identified. */
	{ 0, 0, "<MII-compliant physical interface>" }
};
129
/* NOTE(review): vr_count is not referenced anywhere in this chunk. */
static unsigned long vr_count = 0;

/* PCI probe/attach entry points. */
static const char *vr_probe __P((pcici_t, pcidi_t));
static void vr_attach __P((pcici_t, int));

/* RX buffer allocation and TX encapsulation. */
static int vr_newbuf __P((struct vr_softc *,
					struct vr_chain_onefrag *));
static int vr_encap __P((struct vr_softc *, struct vr_chain *,
						struct mbuf * ));

/* Interrupt-time completion handlers and ifnet methods. */
static void vr_rxeof __P((struct vr_softc *));
static void vr_rxeoc __P((struct vr_softc *));
static void vr_txeof __P((struct vr_softc *));
static void vr_txeoc __P((struct vr_softc *));
static void vr_intr __P((void *));
static void vr_start __P((struct ifnet *));
static int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
static void vr_init __P((void *));
static void vr_stop __P((struct vr_softc *));
static void vr_watchdog __P((struct ifnet *));
static void vr_shutdown __P((int, void *));
static int vr_ifmedia_upd __P((struct ifnet *));
static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));

/* Bit-banged MII access primitives. */
static void vr_mii_sync __P((struct vr_softc *));
static void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
static int vr_mii_readreg __P((struct vr_softc *, struct vr_mii_frame *));
static int vr_mii_writereg __P((struct vr_softc *, struct vr_mii_frame *));
static u_int16_t vr_phy_readreg __P((struct vr_softc *, int));
static void vr_phy_writereg __P((struct vr_softc *, u_int16_t, u_int16_t));

/* Media selection, autonegotiation, and chip housekeeping. */
static void vr_autoneg_xmit __P((struct vr_softc *));
static void vr_autoneg_mii __P((struct vr_softc *, int, int));
static void vr_setmode_mii __P((struct vr_softc *, int));
static void vr_getmode_mii __P((struct vr_softc *));
static void vr_setcfg __P((struct vr_softc *, u_int16_t));
static u_int8_t vr_calchash __P((u_int8_t *));
static void vr_setmulti __P((struct vr_softc *));
static void vr_reset __P((struct vr_softc *));
static int vr_list_rx_init __P((struct vr_softc *));
static int vr_list_tx_init __P((struct vr_softc *));
170
/*
 * Register access helpers: read-modify-write a CSR, setting or
 * clearing the bits in 'x'.  The argument is parenthesized so that
 * compound expressions (e.g. A|B) behave correctly under '~' --
 * previously "& ~x" with x == A|B would have expanded to
 * "(... & ~A) | B".
 */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Bit-bang helpers for the MII serial interface (VR_MIICMD register). */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~(x))
202
203 /*
204 * Sync the PHYs by setting data bit and strobing the clock 32 times.
205 */
206 static void vr_mii_sync(sc)
207 struct vr_softc *sc;
208 {
209 register int i;
210
211 SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
212
213 for (i = 0; i < 32; i++) {
214 SIO_SET(VR_MIICMD_CLK);
215 DELAY(1);
216 SIO_CLR(VR_MIICMD_CLK);
217 DELAY(1);
218 }
219
220 return;
221 }
222
223 /*
224 * Clock a series of bits through the MII.
225 */
/*
 * Clock a series of bits through the MII.
 *
 * Shifts the low 'cnt' bits of 'bits' out on the MDIO pin
 * (VR_MIICMD_DATAIN), most-significant bit first, toggling the
 * MDC clock for each bit.
 */
static void vr_mii_send(sc, bits, cnt)
	struct vr_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	/* Walk a one-bit mask down from bit (cnt - 1) to bit 0. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		/*
		 * NOTE(review): the clock is dropped and then raised after
		 * the data bit is set up (data latched on the rising edge);
		 * this ordering is deliberate -- do not "simplify" it.
		 */
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}
247
248 /*
249 * Read an PHY register through the MII.
250 */
/*
 * Read a PHY register through the MII (bit-banged via VR_MIICMD).
 *
 * Returns 0 and fills frame->mii_data on success; returns 1 if the
 * PHY failed to drive the ack bit low.  Runs at splimp() so the
 * serial transaction is not interleaved with other MII accesses.
 */
static int vr_mii_readreg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Select direct (bit-bang) programming mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack: the PHY pulls DATAOUT low to acknowledge. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, most-significant bit first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	/* One final clock cycle to leave the bus idle. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
341
342 /*
343 * Write to a PHY register through the MII.
344 */
/*
 * Write to a PHY register through the MII (bit-banged via VR_MIICMD).
 *
 * Always returns 0.  Runs at splimp() for the duration of the serial
 * transaction.
 */
static int vr_mii_writereg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int s;

	s = splimp();

	/* Select direct (bit-bang) programming mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Clock out the complete write frame. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	splx(s);

	return(0);
}
394
395 static u_int16_t vr_phy_readreg(sc, reg)
396 struct vr_softc *sc;
397 int reg;
398 {
399 struct vr_mii_frame frame;
400
401 bzero((char *)&frame, sizeof(frame));
402
403 frame.mii_phyaddr = sc->vr_phy_addr;
404 frame.mii_regaddr = reg;
405 vr_mii_readreg(sc, &frame);
406
407 return(frame.mii_data);
408 }
409
410 static void vr_phy_writereg(sc, reg, data)
411 struct vr_softc *sc;
412 u_int16_t reg;
413 u_int16_t data;
414 {
415 struct vr_mii_frame frame;
416
417 bzero((char *)&frame, sizeof(frame));
418
419 frame.mii_phyaddr = sc->vr_phy_addr;
420 frame.mii_regaddr = reg;
421 frame.mii_data = data;
422
423 vr_mii_writereg(sc, &frame);
424
425 return;
426 }
427
428 /*
429 * Calculate CRC of a multicast group address, return the lower 6 bits.
430 */
/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 *
 * This is the Ethernet CRC-32 (polynomial 0x04c11db7) with each byte
 * fed LSB-first; the hash index is the top six bits of the result.
 * (The original wrote the update as "(crc ^ 0x04c11db6) | carry",
 * which is identical since bit 0 is always clear after the shift.)
 */
static u_int8_t vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t	crc;
	u_int8_t	data;
	int		byte, bit;

	crc = 0xFFFFFFFF;	/* initial value */

	for (byte = 0; byte < 6; byte++) {
		for (data = addr[byte], bit = 0; bit < 8; bit++, data >>= 1) {
			if (((crc >> 31) ^ data) & 0x01)
				crc = (crc << 1) ^ 0x04c11db7;
			else
				crc <<= 1;
		}
	}

	/* return the filter bit position */
	return((crc >> 26) & 0x0000003F);
}
455
456 /*
457 * Program the 64-bit multicast hash filter.
458 */
459 static void vr_setmulti(sc)
460 struct vr_softc *sc;
461 {
462 struct ifnet *ifp;
463 int h = 0;
464 u_int32_t hashes[2] = { 0, 0 };
465 struct ifmultiaddr *ifma;
466 u_int8_t rxfilt;
467 int mcnt = 0;
468
469 ifp = &sc->arpcom.ac_if;
470
471 rxfilt = CSR_READ_1(sc, VR_RXCFG);
472
473 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
474 rxfilt |= VR_RXCFG_RX_MULTI;
475 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
476 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
477 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
478 return;
479 }
480
481 /* first, zot all the existing hash bits */
482 CSR_WRITE_4(sc, VR_MAR0, 0);
483 CSR_WRITE_4(sc, VR_MAR1, 0);
484
485 /* now program new ones */
486 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
487 ifma = ifma->ifma_link.le_next) {
488 if (ifma->ifma_addr->sa_family != AF_LINK)
489 continue;
490 h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
491 if (h < 32)
492 hashes[0] |= (1 << h);
493 else
494 hashes[1] |= (1 << (h - 32));
495 mcnt++;
496 }
497
498 if (mcnt)
499 rxfilt |= VR_RXCFG_RX_MULTI;
500 else
501 rxfilt &= ~VR_RXCFG_RX_MULTI;
502
503 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
504 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
505 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
506
507 return;
508 }
509
510 /*
511 * Initiate an autonegotiation session.
512 */
513 static void vr_autoneg_xmit(sc)
514 struct vr_softc *sc;
515 {
516 u_int16_t phy_sts;
517
518 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
519 DELAY(500);
520 while(vr_phy_readreg(sc, PHY_BMCR)
521 & PHY_BMCR_RESET);
522
523 phy_sts = vr_phy_readreg(sc, PHY_BMCR);
524 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
525 vr_phy_writereg(sc, PHY_BMCR, phy_sts);
526
527 return;
528 }
529
530 /*
531 * Invoke autonegotiation on a PHY.
532 */
/*
 * Invoke autonegotiation on a PHY.
 *
 * 'flag' selects how the negotiation delay is handled:
 *   VR_FLAG_FORCEDELAY - start a session and busy-wait 5 seconds
 *                        (probe time only; see the XXX below),
 *   VR_FLAG_SCHEDDELAY - start a session and arrange for the
 *                        watchdog to call back in 5 seconds,
 *   VR_FLAG_DELAYTIMEO - watchdog callback: collect the results.
 * 'verbose' enables progress messages on the console.
 */
static void vr_autoneg_mii(sc, flag, verbose)
	struct vr_softc *sc;
	int flag;
	int verbose;
{
	u_int16_t phy_sts = 0, media, advert, ability;
	struct ifnet *ifp;
	struct ifmedia *ifm;

	ifm = &sc->ifmedia;
	ifp = &sc->arpcom.ac_if;

	ifm->ifm_media = IFM_ETHER | IFM_AUTO;

	/*
	 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
	 * bit cleared in the status register, but has the 'autoneg enabled'
	 * bit set in the control register. This is a contradiction, and
	 * I'm not sure how to handle it. If you want to force an attempt
	 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
	 * and see what happens.
	 */
#ifndef FORCE_AUTONEG_TFOUR
	/*
	 * First, see if autoneg is supported. If not, there's
	 * no point in continuing.
	 */
	phy_sts = vr_phy_readreg(sc, PHY_BMSR);
	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
		if (verbose)
			printf("vr%d: autonegotiation not supported\n",
							sc->vr_unit);
		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
		return;
	}
#endif

	switch (flag) {
	case VR_FLAG_FORCEDELAY:
		/*
		 * XXX Never use this option anywhere but in the probe
		 * routine: making the kernel stop dead in its tracks
		 * for three whole seconds after we've gone multi-user
		 * is really bad manners.
		 */
		vr_autoneg_xmit(sc);
		DELAY(5000000);
		break;
	case VR_FLAG_SCHEDDELAY:
		/*
		 * Wait for the transmitter to go idle before starting
		 * an autoneg session, otherwise vr_start() may clobber
		 * our timeout, and we don't want to allow transmission
		 * during an autoneg session since that can screw it up.
		 */
		if (sc->vr_cdata.vr_tx_head != NULL) {
			sc->vr_want_auto = 1;
			return;
		}
		vr_autoneg_xmit(sc);
		ifp->if_timer = 5;
		sc->vr_autoneg = 1;
		sc->vr_want_auto = 0;
		return;
		break;
	case VR_FLAG_DELAYTIMEO:
		/* Watchdog fired: session is over, fall through and
		 * collect the results below. */
		ifp->if_timer = 0;
		sc->vr_autoneg = 0;
		break;
	default:
		printf("vr%d: invalid autoneg flag: %d\n", sc->vr_unit, flag);
		return;
	}

	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
		if (verbose)
			printf("vr%d: autoneg complete, ", sc->vr_unit);
		phy_sts = vr_phy_readreg(sc, PHY_BMSR);
	} else {
		if (verbose)
			printf("vr%d: autoneg not complete, ", sc->vr_unit);
	}

	media = vr_phy_readreg(sc, PHY_BMCR);

	/* Link is good. Report modes and set duplex mode. */
	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
		if (verbose)
			printf("link status good ");
		/* Intersect our advertised modes with the partner's,
		 * best mode first. */
		advert = vr_phy_readreg(sc, PHY_ANAR);
		ability = vr_phy_readreg(sc, PHY_LPAR);

		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
			ifm->ifm_media = IFM_ETHER|IFM_100_T4;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(100baseT4)\n");
		} else if (advert & PHY_ANAR_100BTXFULL &&
			ability & PHY_ANAR_100BTXFULL) {
			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
			media |= PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_100BTXHALF &&
			ability & PHY_ANAR_100BTXHALF) {
			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_10BTFULL &&
			ability & PHY_ANAR_10BTFULL) {
			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 10Mbps)\n");
		} else {
			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 10Mbps)\n");
		}

		/* Negotiation is done: run with the settled mode. */
		media &= ~PHY_BMCR_AUTONEGENBL;

		/* Set ASIC's duplex mode to match the PHY. */
		vr_setcfg(sc, media);
		vr_phy_writereg(sc, PHY_BMCR, media);
	} else {
		if (verbose)
			printf("no carrier\n");
	}

	vr_init(sc);

	/* Restart any transmission that was deferred for the session. */
	if (sc->vr_tx_pend) {
		sc->vr_autoneg = 0;
		sc->vr_tx_pend = 0;
		vr_start(ifp);
	}

	return;
}
675
676 static void vr_getmode_mii(sc)
677 struct vr_softc *sc;
678 {
679 u_int16_t bmsr;
680 struct ifnet *ifp;
681
682 ifp = &sc->arpcom.ac_if;
683
684 bmsr = vr_phy_readreg(sc, PHY_BMSR);
685 if (bootverbose)
686 printf("vr%d: PHY status word: %x\n", sc->vr_unit, bmsr);
687
688 /* fallback */
689 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
690
691 if (bmsr & PHY_BMSR_10BTHALF) {
692 if (bootverbose)
693 printf("vr%d: 10Mbps half-duplex mode supported\n",
694 sc->vr_unit);
695 ifmedia_add(&sc->ifmedia,
696 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
697 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
698 }
699
700 if (bmsr & PHY_BMSR_10BTFULL) {
701 if (bootverbose)
702 printf("vr%d: 10Mbps full-duplex mode supported\n",
703 sc->vr_unit);
704 ifmedia_add(&sc->ifmedia,
705 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
706 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
707 }
708
709 if (bmsr & PHY_BMSR_100BTXHALF) {
710 if (bootverbose)
711 printf("vr%d: 100Mbps half-duplex mode supported\n",
712 sc->vr_unit);
713 ifp->if_baudrate = 100000000;
714 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
715 ifmedia_add(&sc->ifmedia,
716 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
717 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
718 }
719
720 if (bmsr & PHY_BMSR_100BTXFULL) {
721 if (bootverbose)
722 printf("vr%d: 100Mbps full-duplex mode supported\n",
723 sc->vr_unit);
724 ifp->if_baudrate = 100000000;
725 ifmedia_add(&sc->ifmedia,
726 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
727 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
728 }
729
730 /* Some also support 100BaseT4. */
731 if (bmsr & PHY_BMSR_100BT4) {
732 if (bootverbose)
733 printf("vr%d: 100baseT4 mode supported\n", sc->vr_unit);
734 ifp->if_baudrate = 100000000;
735 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
736 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
737 #ifdef FORCE_AUTONEG_TFOUR
738 if (bootverbose)
739 printf("vr%d: forcing on autoneg support for BT4\n",
740 sc->vr_unit);
741 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0 NULL):
742 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
743 #endif
744 }
745
746 if (bmsr & PHY_BMSR_CANAUTONEG) {
747 if (bootverbose)
748 printf("vr%d: autoneg supported\n", sc->vr_unit);
749 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
750 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
751 }
752
753 return;
754 }
755
756 /*
757 * Set speed and duplex mode.
758 */
759 static void vr_setmode_mii(sc, media)
760 struct vr_softc *sc;
761 int media;
762 {
763 u_int16_t bmcr;
764 struct ifnet *ifp;
765
766 ifp = &sc->arpcom.ac_if;
767
768 /*
769 * If an autoneg session is in progress, stop it.
770 */
771 if (sc->vr_autoneg) {
772 printf("vr%d: canceling autoneg session\n", sc->vr_unit);
773 ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0;
774 bmcr = vr_phy_readreg(sc, PHY_BMCR);
775 bmcr &= ~PHY_BMCR_AUTONEGENBL;
776 vr_phy_writereg(sc, PHY_BMCR, bmcr);
777 }
778
779 printf("vr%d: selecting MII, ", sc->vr_unit);
780
781 bmcr = vr_phy_readreg(sc, PHY_BMCR);
782
783 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
784 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
785
786 if (IFM_SUBTYPE(media) == IFM_100_T4) {
787 printf("100Mbps/T4, half-duplex\n");
788 bmcr |= PHY_BMCR_SPEEDSEL;
789 bmcr &= ~PHY_BMCR_DUPLEX;
790 }
791
792 if (IFM_SUBTYPE(media) == IFM_100_TX) {
793 printf("100Mbps, ");
794 bmcr |= PHY_BMCR_SPEEDSEL;
795 }
796
797 if (IFM_SUBTYPE(media) == IFM_10_T) {
798 printf("10Mbps, ");
799 bmcr &= ~PHY_BMCR_SPEEDSEL;
800 }
801
802 if ((media & IFM_GMASK) == IFM_FDX) {
803 printf("full duplex\n");
804 bmcr |= PHY_BMCR_DUPLEX;
805 } else {
806 printf("half duplex\n");
807 bmcr &= ~PHY_BMCR_DUPLEX;
808 }
809
810 vr_setcfg(sc, bmcr);
811 vr_phy_writereg(sc, PHY_BMCR, bmcr);
812
813 return;
814 }
815
816 /*
817 * In order to fiddle with the
818 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
819 * first have to put the transmit and/or receive logic in the idle state.
820 */
821 static void vr_setcfg(sc, bmcr)
822 struct vr_softc *sc;
823 u_int16_t bmcr;
824 {
825 int restart = 0;
826
827 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
828 restart = 1;
829 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
830 }
831
832 if (bmcr & PHY_BMCR_DUPLEX)
833 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
834 else
835 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
836
837 if (restart)
838 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
839
840 return;
841 }
842
843 static void vr_reset(sc)
844 struct vr_softc *sc;
845 {
846 register int i;
847
848 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
849
850 for (i = 0; i < VR_TIMEOUT; i++) {
851 DELAY(10);
852 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
853 break;
854 }
855 if (i == VR_TIMEOUT)
856 printf("vr%d: reset never completed!\n", sc->vr_unit);
857
858 /* Wait a little while for the chip to get its brains in order. */
859 DELAY(1000);
860
861 return;
862 }
863
864 /*
865 * Probe for a VIA Rhine chip. Check the PCI vendor and device
866 * IDs against our list and return a device name if we find a match.
867 */
868 static const char *
869 vr_probe(config_id, device_id)
870 pcici_t config_id;
871 pcidi_t device_id;
872 {
873 struct vr_type *t;
874
875 t = vr_devs;
876
877 while(t->vr_name != NULL) {
878 if ((device_id & 0xFFFF) == t->vr_vid &&
879 ((device_id >> 16) & 0xFFFF) == t->vr_did) {
880 return(t->vr_name);
881 }
882 t++;
883 }
884
885 return(NULL);
886 }
887
888 /*
889 * Attach the interface. Allocate softc structures, do ifmedia
890 * setup and ethernet/BPF attach.
891 */
892 static void
893 vr_attach(config_id, unit)
894 pcici_t config_id;
895 int unit;
896 {
897 int s, i;
898 #ifndef VR_USEIOSPACE
899 vm_offset_t pbase, vbase;
900 #endif
901 u_char eaddr[ETHER_ADDR_LEN];
902 u_int32_t command;
903 struct vr_softc *sc;
904 struct ifnet *ifp;
905 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
906 unsigned int round;
907 caddr_t roundptr;
908 struct vr_type *p;
909 u_int16_t phy_vid, phy_did, phy_sts;
910
911 s = splimp();
912
913 sc = malloc(sizeof(struct vr_softc), M_DEVBUF, M_NOWAIT);
914 if (sc == NULL) {
915 printf("vr%d: no memory for softc struct!\n", unit);
916 return;
917 }
918 bzero(sc, sizeof(struct vr_softc));
919
920 /*
921 * Handle power management nonsense.
922 */
923
924 command = pci_conf_read(config_id, VR_PCI_CAPID) & 0x000000FF;
925 if (command == 0x01) {
926
927 command = pci_conf_read(config_id, VR_PCI_PWRMGMTCTRL);
928 if (command & VR_PSTATE_MASK) {
929 u_int32_t iobase, membase, irq;
930
931 /* Save important PCI config data. */
932 iobase = pci_conf_read(config_id, VR_PCI_LOIO);
933 membase = pci_conf_read(config_id, VR_PCI_LOMEM);
934 irq = pci_conf_read(config_id, VR_PCI_INTLINE);
935
936 /* Reset the power state. */
937 printf("vr%d: chip is in D%d power mode "
938 "-- setting to D0\n", unit, command & VR_PSTATE_MASK);
939 command &= 0xFFFFFFFC;
940 pci_conf_write(config_id, VR_PCI_PWRMGMTCTRL, command);
941
942 /* Restore PCI config data. */
943 pci_conf_write(config_id, VR_PCI_LOIO, iobase);
944 pci_conf_write(config_id, VR_PCI_LOMEM, membase);
945 pci_conf_write(config_id, VR_PCI_INTLINE, irq);
946 }
947 }
948
949 /*
950 * Map control/status registers.
951 */
952 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
953 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
954 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
955 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
956
957 #ifdef VR_USEIOSPACE
958 if (!(command & PCIM_CMD_PORTEN)) {
959 printf("vr%d: failed to enable I/O ports!\n", unit);
960 free(sc, M_DEVBUF);
961 goto fail;
962 }
963
964 if (!pci_map_port(config_id, VR_PCI_LOIO,
965 (u_int16_t *)(&sc->vr_bhandle))) {
966 printf ("vr%d: couldn't map ports\n", unit);
967 goto fail;
968 }
969 sc->vr_btag = I386_BUS_SPACE_IO;
970 #else
971 if (!(command & PCIM_CMD_MEMEN)) {
972 printf("vr%d: failed to enable memory mapping!\n", unit);
973 goto fail;
974 }
975
976 if (!pci_map_mem(config_id, VR_PCI_LOMEM, &vbase, &pbase)) {
977 printf ("vr%d: couldn't map memory\n", unit);
978 goto fail;
979 }
980
981 sc->vr_bhandle = vbase;
982 sc->vr_btag = I386_BUS_SPACE_MEM;
983 #endif
984
985 /* Allocate interrupt */
986 if (!pci_map_int(config_id, vr_intr, sc, &net_imask)) {
987 printf("vr%d: couldn't map interrupt\n", unit);
988 goto fail;
989 }
990
991 /* Reset the adapter. */
992 vr_reset(sc);
993
994 /*
995 * Get station address. The way the Rhine chips work,
996 * you're not allowed to directly access the EEPROM once
997 * they've been programmed a special way. Consequently,
998 * we need to read the node address from the PAR0 and PAR1
999 * registers.
1000 */
1001 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1002 DELAY(200);
1003 for (i = 0; i < ETHER_ADDR_LEN; i++)
1004 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1005
1006 /*
1007 * A Rhine chip was detected. Inform the world.
1008 */
1009 printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
1010
1011 sc->vr_unit = unit;
1012 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1013
1014 sc->vr_ldata_ptr = malloc(sizeof(struct vr_list_data) + 8,
1015 M_DEVBUF, M_NOWAIT);
1016 if (sc->vr_ldata_ptr == NULL) {
1017 free(sc, M_DEVBUF);
1018 printf("vr%d: no memory for list buffers!\n", unit);
1019 return;
1020 }
1021
1022 sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
1023 round = (unsigned int)sc->vr_ldata_ptr & 0xF;
1024 roundptr = sc->vr_ldata_ptr;
1025 for (i = 0; i < 8; i++) {
1026 if (round % 8) {
1027 round++;
1028 roundptr++;
1029 } else
1030 break;
1031 }
1032 sc->vr_ldata = (struct vr_list_data *)roundptr;
1033 bzero(sc->vr_ldata, sizeof(struct vr_list_data));
1034
1035 ifp = &sc->arpcom.ac_if;
1036 ifp->if_softc = sc;
1037 ifp->if_unit = unit;
1038 ifp->if_name = "vr";
1039 ifp->if_mtu = ETHERMTU;
1040 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1041 ifp->if_ioctl = vr_ioctl;
1042 ifp->if_output = ether_output;
1043 ifp->if_start = vr_start;
1044 ifp->if_watchdog = vr_watchdog;
1045 ifp->if_init = vr_init;
1046 ifp->if_baudrate = 10000000;
1047
1048 if (bootverbose)
1049 printf("vr%d: probing for a PHY\n", sc->vr_unit);
1050 for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
1051 if (bootverbose)
1052 printf("vr%d: checking address: %d\n",
1053 sc->vr_unit, i);
1054 sc->vr_phy_addr = i;
1055 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1056 DELAY(500);
1057 while(vr_phy_readreg(sc, PHY_BMCR)
1058 & PHY_BMCR_RESET);
1059 if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
1060 break;
1061 }
1062 if (phy_sts) {
1063 phy_vid = vr_phy_readreg(sc, PHY_VENID);
1064 phy_did = vr_phy_readreg(sc, PHY_DEVID);
1065 if (bootverbose)
1066 printf("vr%d: found PHY at address %d, ",
1067 sc->vr_unit, sc->vr_phy_addr);
1068 if (bootverbose)
1069 printf("vendor id: %x device id: %x\n",
1070 phy_vid, phy_did);
1071 p = vr_phys;
1072 while(p->vr_vid) {
1073 if (phy_vid == p->vr_vid &&
1074 (phy_did | 0x000F) == p->vr_did) {
1075 sc->vr_pinfo = p;
1076 break;
1077 }
1078 p++;
1079 }
1080 if (sc->vr_pinfo == NULL)
1081 sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
1082 if (bootverbose)
1083 printf("vr%d: PHY type: %s\n",
1084 sc->vr_unit, sc->vr_pinfo->vr_name);
1085 } else {
1086 printf("vr%d: MII without any phy!\n", sc->vr_unit);
1087 goto fail;
1088 }
1089
1090 /*
1091 * Do ifmedia setup.
1092 */
1093 ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1094
1095 vr_getmode_mii(sc);
1096 vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
1097 media = sc->ifmedia.ifm_media;
1098 vr_stop(sc);
1099
1100 ifmedia_set(&sc->ifmedia, media);
1101
1102 /*
1103 * Call MI attach routines.
1104 */
1105 if_attach(ifp);
1106 ether_ifattach(ifp);
1107
1108 #if NBPFILTER > 0
1109 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1110 #endif
1111
1112 at_shutdown(vr_shutdown, sc, SHUTDOWN_POST_SYNC);
1113
1114 fail:
1115 splx(s);
1116 return;
1117 }
1118
1119 /*
1120 * Initialize the transmit descriptors.
1121 */
1122 static int vr_list_tx_init(sc)
1123 struct vr_softc *sc;
1124 {
1125 struct vr_chain_data *cd;
1126 struct vr_list_data *ld;
1127 int i;
1128
1129 cd = &sc->vr_cdata;
1130 ld = sc->vr_ldata;
1131 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1132 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
1133 if (i == (VR_TX_LIST_CNT - 1))
1134 cd->vr_tx_chain[i].vr_nextdesc =
1135 &cd->vr_tx_chain[0];
1136 else
1137 cd->vr_tx_chain[i].vr_nextdesc =
1138 &cd->vr_tx_chain[i + 1];
1139 }
1140
1141 cd->vr_tx_free = &cd->vr_tx_chain[0];
1142 cd->vr_tx_tail = cd->vr_tx_head = NULL;
1143
1144 return(0);
1145 }
1146
1147
1148 /*
1149 * Initialize the RX descriptors and allocate mbufs for them. Note that
1150 * we arrange the descriptors in a closed ring, so that the last descriptor
1151 * points back to the first.
1152 */
1153 static int vr_list_rx_init(sc)
1154 struct vr_softc *sc;
1155 {
1156 struct vr_chain_data *cd;
1157 struct vr_list_data *ld;
1158 int i;
1159
1160 cd = &sc->vr_cdata;
1161 ld = sc->vr_ldata;
1162
1163 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1164 cd->vr_rx_chain[i].vr_ptr =
1165 (struct vr_desc *)&ld->vr_rx_list[i];
1166 if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
1167 return(ENOBUFS);
1168 if (i == (VR_RX_LIST_CNT - 1)) {
1169 cd->vr_rx_chain[i].vr_nextdesc =
1170 &cd->vr_rx_chain[0];
1171 ld->vr_rx_list[i].vr_next =
1172 vtophys(&ld->vr_rx_list[0]);
1173 } else {
1174 cd->vr_rx_chain[i].vr_nextdesc =
1175 &cd->vr_rx_chain[i + 1];
1176 ld->vr_rx_list[i].vr_next =
1177 vtophys(&ld->vr_rx_list[i + 1]);
1178 }
1179 }
1180
1181 cd->vr_rx_head = &cd->vr_rx_chain[0];
1182
1183 return(0);
1184 }
1185
1186 /*
1187 * Initialize an RX descriptor and attach an MBUF cluster.
1188 * Note: the length fields are only 11 bits wide, which means the
1189 * largest size we can specify is 2047. This is important because
1190 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1191 * overflow the field and make a mess.
1192 */
1193 static int vr_newbuf(sc, c)
1194 struct vr_softc *sc;
1195 struct vr_chain_onefrag *c;
1196 {
1197 struct mbuf *m_new = NULL;
1198
1199 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1200 if (m_new == NULL) {
1201 printf("vr%d: no memory for rx list -- packet dropped!\n",
1202 sc->vr_unit);
1203 return(ENOBUFS);
1204 }
1205
1206 MCLGET(m_new, M_DONTWAIT);
1207 if (!(m_new->m_flags & M_EXT)) {
1208 printf("vr%d: no memory for rx list -- packet dropped!\n",
1209 sc->vr_unit);
1210 m_freem(m_new);
1211 return(ENOBUFS);
1212 }
1213
1214 c->vr_mbuf = m_new;
1215 c->vr_ptr->vr_status = VR_RXSTAT;
1216 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
1217 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
1218
1219 return(0);
1220 }
1221
1222 /*
1223 * A frame has been uploaded: pass the resulting mbuf chain up to
1224 * the higher level protocols.
1225 */
1226 static void vr_rxeof(sc)
1227 struct vr_softc *sc;
1228 {
1229 struct ether_header *eh;
1230 struct mbuf *m;
1231 struct ifnet *ifp;
1232 struct vr_chain_onefrag *cur_rx;
1233 int total_len = 0;
1234 u_int32_t rxstat;
1235
1236 ifp = &sc->arpcom.ac_if;
1237
1238 while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
1239 VR_RXSTAT_OWN)) {
1240 cur_rx = sc->vr_cdata.vr_rx_head;
1241 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
1242
1243 /*
1244 * If an error occurs, update stats, clear the
1245 * status word and leave the mbuf cluster in place:
1246 * it should simply get re-used next time this descriptor
1247 * comes up in the ring.
1248 */
1249 if (rxstat & VR_RXSTAT_RXERR) {
1250 ifp->if_ierrors++;
1251 printf("vr%d: rx error: ", sc->vr_unit);
1252 switch(rxstat & 0x000000FF) {
1253 case VR_RXSTAT_CRCERR:
1254 printf("crc error\n");
1255 break;
1256 case VR_RXSTAT_FRAMEALIGNERR:
1257 printf("frame alignment error\n");
1258 break;
1259 case VR_RXSTAT_FIFOOFLOW:
1260 printf("FIFO overflow\n");
1261 break;
1262 case VR_RXSTAT_GIANT:
1263 printf("received giant packet\n");
1264 break;
1265 case VR_RXSTAT_RUNT:
1266 printf("received runt packet\n");
1267 break;
1268 case VR_RXSTAT_BUSERR:
1269 printf("system bus error\n");
1270 break;
1271 case VR_RXSTAT_BUFFERR:
1272 printf("rx buffer error\n");
1273 break;
1274 default:
1275 printf("unknown rx error\n");
1276 break;
1277 }
1278 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
1279 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
1280 continue;
1281 }
1282
1283 /* No errors; receive the packet. */
1284 m = cur_rx->vr_mbuf;
1285 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1286
1287 /*
1288 * XXX The VIA Rhine chip includes the CRC with every
1289 * received frame, and there's no way to turn this
1290 * behavior off (at least, I can't find anything in
1291 * the manual that explains how to do it) so we have
1292 * to trim off the CRC manually.
1293 */
1294 total_len -= ETHER_CRC_LEN;
1295
1296 /*
1297 * Try to conjure up a new mbuf cluster. If that
1298 * fails, it means we have an out of memory condition and
1299 * should leave the buffer in place and continue. This will
1300 * result in a lost packet, but there's little else we
1301 * can do in this situation.
1302 */
1303 if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
1304 ifp->if_ierrors++;
1305 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
1306 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
1307 continue;
1308 }
1309
1310 ifp->if_ipackets++;
1311 eh = mtod(m, struct ether_header *);
1312 m->m_pkthdr.rcvif = ifp;
1313 m->m_pkthdr.len = m->m_len = total_len;
1314 #if NBPFILTER > 0
1315 /*
1316 * Handle BPF listeners. Let the BPF user see the packet, but
1317 * don't pass it up to the ether_input() layer unless it's
1318 * a broadcast packet, multicast packet, matches our ethernet
1319 * address or the interface is in promiscuous mode.
1320 */
1321 if (ifp->if_bpf) {
1322 bpf_mtap(ifp, m);
1323 if (ifp->if_flags & IFF_PROMISC &&
1324 (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1325 ETHER_ADDR_LEN) &&
1326 (eh->ether_dhost[0] & 1) == 0)) {
1327 m_freem(m);
1328 continue;
1329 }
1330 }
1331 #endif
1332 /* Remove header from mbuf and pass it on. */
1333 m_adj(m, sizeof(struct ether_header));
1334 ether_input(ifp, eh, m);
1335 }
1336
1337 return;
1338 }
1339
1340 void vr_rxeoc(sc)
1341 struct vr_softc *sc;
1342 {
1343
1344 vr_rxeof(sc);
1345 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1346 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1347 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1348 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1349
1350 return;
1351 }
1352
1353 /*
1354 * A frame was downloaded to the chip. It's safe for us to clean up
1355 * the list buffers.
1356 */
1357
1358 static void vr_txeof(sc)
1359 struct vr_softc *sc;
1360 {
1361 struct vr_chain *cur_tx;
1362 struct ifnet *ifp;
1363 register struct mbuf *n;
1364
1365 ifp = &sc->arpcom.ac_if;
1366
1367 /* Clear the timeout timer. */
1368 ifp->if_timer = 0;
1369
1370 /* Sanity check. */
1371 if (sc->vr_cdata.vr_tx_head == NULL)
1372 return;
1373
1374 /*
1375 * Go through our tx list and free mbufs for those
1376 * frames that have been transmitted.
1377 */
1378 while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1379 u_int32_t txstat;
1380
1381 cur_tx = sc->vr_cdata.vr_tx_head;
1382 txstat = cur_tx->vr_ptr->vr_status;
1383
1384 if (txstat & VR_TXSTAT_OWN)
1385 break;
1386
1387 if (txstat & VR_TXSTAT_ERRSUM) {
1388 ifp->if_oerrors++;
1389 if (txstat & VR_TXSTAT_DEFER)
1390 ifp->if_collisions++;
1391 if (txstat & VR_TXSTAT_LATECOLL)
1392 ifp->if_collisions++;
1393 }
1394
1395 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;
1396
1397 ifp->if_opackets++;
1398 MFREE(cur_tx->vr_mbuf, n);
1399 cur_tx->vr_mbuf = NULL;
1400
1401 if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1402 sc->vr_cdata.vr_tx_head = NULL;
1403 sc->vr_cdata.vr_tx_tail = NULL;
1404 break;
1405 }
1406
1407 sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1408 }
1409
1410 return;
1411 }
1412
1413 /*
1414 * TX 'end of channel' interrupt handler.
1415 */
1416 static void vr_txeoc(sc)
1417 struct vr_softc *sc;
1418 {
1419 struct ifnet *ifp;
1420
1421 ifp = &sc->arpcom.ac_if;
1422
1423 ifp->if_timer = 0;
1424
1425 if (sc->vr_cdata.vr_tx_head == NULL) {
1426 ifp->if_flags &= ~IFF_OACTIVE;
1427 sc->vr_cdata.vr_tx_tail = NULL;
1428 if (sc->vr_want_auto)
1429 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1430 }
1431
1432 return;
1433 }
1434
1435 static void vr_intr(arg)
1436 void *arg;
1437 {
1438 struct vr_softc *sc;
1439 struct ifnet *ifp;
1440 u_int16_t status;
1441
1442 sc = arg;
1443 ifp = &sc->arpcom.ac_if;
1444
1445 /* Supress unwanted interrupts. */
1446 if (!(ifp->if_flags & IFF_UP)) {
1447 vr_stop(sc);
1448 return;
1449 }
1450
1451 /* Disable interrupts. */
1452 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1453
1454 for (;;) {
1455
1456 status = CSR_READ_2(sc, VR_ISR);
1457 if (status)
1458 CSR_WRITE_2(sc, VR_ISR, status);
1459
1460 if ((status & VR_INTRS) == 0)
1461 break;
1462
1463 if (status & VR_ISR_RX_OK)
1464 vr_rxeof(sc);
1465
1466 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1467 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) ||
1468 (status & VR_ISR_RX_DROPPED)) {
1469 vr_rxeof(sc);
1470 vr_rxeoc(sc);
1471 }
1472
1473 if (status & VR_ISR_TX_OK) {
1474 vr_txeof(sc);
1475 vr_txeoc(sc);
1476 }
1477
1478 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){
1479 ifp->if_oerrors++;
1480 vr_txeof(sc);
1481 if (sc->vr_cdata.vr_tx_head != NULL) {
1482 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1483 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1484 }
1485 }
1486
1487 if (status & VR_ISR_BUSERR) {
1488 vr_reset(sc);
1489 vr_init(sc);
1490 }
1491 }
1492
1493 /* Re-enable interrupts. */
1494 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1495
1496 if (ifp->if_snd.ifq_head != NULL) {
1497 vr_start(ifp);
1498 }
1499
1500 return;
1501 }
1502
1503 /*
1504 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1505 * pointers to the fragment pointers.
1506 */
1507 static int vr_encap(sc, c, m_head)
1508 struct vr_softc *sc;
1509 struct vr_chain *c;
1510 struct mbuf *m_head;
1511 {
1512 int frag = 0;
1513 struct vr_desc *f = NULL;
1514 int total_len;
1515 struct mbuf *m;
1516
1517 m = m_head;
1518 total_len = 0;
1519
1520 /*
1521 * The VIA Rhine wants packet buffers to be longword
1522 * aligned, but very often our mbufs aren't. Rather than
1523 * waste time trying to decide when to copy and when not
1524 * to copy, just do it all the time.
1525 */
1526 if (m != NULL) {
1527 struct mbuf *m_new = NULL;
1528
1529 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1530 if (m_new == NULL) {
1531 printf("vr%d: no memory for tx list", sc->vr_unit);
1532 return(1);
1533 }
1534 if (m_head->m_pkthdr.len > MHLEN) {
1535 MCLGET(m_new, M_DONTWAIT);
1536 if (!(m_new->m_flags & M_EXT)) {
1537 m_freem(m_new);
1538 printf("vr%d: no memory for tx list",
1539 sc->vr_unit);
1540 return(1);
1541 }
1542 }
1543 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1544 mtod(m_new, caddr_t));
1545 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1546 m_freem(m_head);
1547 m_head = m_new;
1548 /*
1549 * The Rhine chip doesn't auto-pad, so we have to make
1550 * sure to pad short frames out to the minimum frame length
1551 * ourselves.
1552 */
1553 if (m_head->m_len < VR_MIN_FRAMELEN) {
1554 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1555 m_new->m_len = m_new->m_pkthdr.len;
1556 }
1557 f = c->vr_ptr;
1558 f->vr_data = vtophys(mtod(m_new, caddr_t));
1559 f->vr_ctl = total_len = m_new->m_len;
1560 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1561 f->vr_status = 0;
1562 frag = 1;
1563 }
1564
1565 c->vr_mbuf = m_head;
1566 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1567 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1568
1569 return(0);
1570 }
1571
1572 /*
1573 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1574 * to the mbuf data regions directly in the transmit lists. We also save a
1575 * copy of the pointers since the transmit list fragment pointers are
1576 * physical addresses.
1577 */
1578
1579 static void vr_start(ifp)
1580 struct ifnet *ifp;
1581 {
1582 struct vr_softc *sc;
1583 struct mbuf *m_head = NULL;
1584 struct vr_chain *cur_tx = NULL, *start_tx;
1585
1586 sc = ifp->if_softc;
1587
1588 if (sc->vr_autoneg) {
1589 sc->vr_tx_pend = 1;
1590 return;
1591 }
1592
1593 /*
1594 * Check for an available queue slot. If there are none,
1595 * punt.
1596 */
1597 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1598 ifp->if_flags |= IFF_OACTIVE;
1599 return;
1600 }
1601
1602 start_tx = sc->vr_cdata.vr_tx_free;
1603
1604 while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1605 IF_DEQUEUE(&ifp->if_snd, m_head);
1606 if (m_head == NULL)
1607 break;
1608
1609 /* Pick a descriptor off the free list. */
1610 cur_tx = sc->vr_cdata.vr_tx_free;
1611 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1612
1613 /* Pack the data into the descriptor. */
1614 vr_encap(sc, cur_tx, m_head);
1615
1616 if (cur_tx != start_tx)
1617 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1618
1619 #if NBPFILTER > 0
1620 /*
1621 * If there's a BPF listener, bounce a copy of this frame
1622 * to him.
1623 */
1624 if (ifp->if_bpf)
1625 bpf_mtap(ifp, cur_tx->vr_mbuf);
1626 #endif
1627 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1628 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1629 }
1630
1631 /*
1632 * If there are no frames queued, bail.
1633 */
1634 if (cur_tx == NULL)
1635 return;
1636
1637 sc->vr_cdata.vr_tx_tail = cur_tx;
1638
1639 if (sc->vr_cdata.vr_tx_head == NULL)
1640 sc->vr_cdata.vr_tx_head = start_tx;
1641
1642 /*
1643 * Set a timeout in case the chip goes out to lunch.
1644 */
1645 ifp->if_timer = 5;
1646
1647 return;
1648 }
1649
/*
 * Initialize the hardware: reset the chip, set up the RX/TX rings,
 * program filtering and thresholds, and enable the receiver and
 * transmitter.  Safe to call to re-init a running interface.
 */
static void vr_init(xsc)
	void *xsc;
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	u_int16_t phy_bmcr = 0;
	int s;

	/* Don't touch the PHY while autonegotiation is running. */
	if (sc->vr_autoneg)
		return;

	s = splimp();

	/* Save the PHY control bits so they survive the reset below. */
	if (sc->vr_pinfo != NULL)
		phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/* Store-and-forward thresholds for both RX and TX. */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("vr%d: initialization failed: no "
			"memory for rx buffers\n", sc->vr_unit);
		vr_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				VR_CMD_TX_ON|VR_CMD_RX_ON|
				VR_CMD_RX_GO);

	/* Match chip duplex/speed config to the current PHY settings. */
	vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/*
	 * Enable interrupts.  Writing all-ones to the ISR first clears
	 * any stale pending status.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	if (sc->vr_pinfo != NULL)
		vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	return;
}
1740
1741 /*
1742 * Set media options.
1743 */
1744 static int vr_ifmedia_upd(ifp)
1745 struct ifnet *ifp;
1746 {
1747 struct vr_softc *sc;
1748 struct ifmedia *ifm;
1749
1750 sc = ifp->if_softc;
1751 ifm = &sc->ifmedia;
1752
1753 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1754 return(EINVAL);
1755
1756 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1757 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1758 else
1759 vr_setmode_mii(sc, ifm->ifm_media);
1760
1761 return(0);
1762 }
1763
1764 /*
1765 * Report current media status.
1766 */
1767 static void vr_ifmedia_sts(ifp, ifmr)
1768 struct ifnet *ifp;
1769 struct ifmediareq *ifmr;
1770 {
1771 struct vr_softc *sc;
1772 u_int16_t advert = 0, ability = 0;
1773
1774 sc = ifp->if_softc;
1775
1776 ifmr->ifm_active = IFM_ETHER;
1777
1778 if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1779 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1780 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1781 else
1782 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1783 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1784 ifmr->ifm_active |= IFM_FDX;
1785 else
1786 ifmr->ifm_active |= IFM_HDX;
1787 return;
1788 }
1789
1790 ability = vr_phy_readreg(sc, PHY_LPAR);
1791 advert = vr_phy_readreg(sc, PHY_ANAR);
1792 if (advert & PHY_ANAR_100BT4 &&
1793 ability & PHY_ANAR_100BT4) {
1794 ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
1795 } else if (advert & PHY_ANAR_100BTXFULL &&
1796 ability & PHY_ANAR_100BTXFULL) {
1797 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
1798 } else if (advert & PHY_ANAR_100BTXHALF &&
1799 ability & PHY_ANAR_100BTXHALF) {
1800 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
1801 } else if (advert & PHY_ANAR_10BTFULL &&
1802 ability & PHY_ANAR_10BTFULL) {
1803 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
1804 } else if (advert & PHY_ANAR_10BTHALF &&
1805 ability & PHY_ANAR_10BTHALF) {
1806 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
1807 }
1808
1809 return;
1810 }
1811
1812 static int vr_ioctl(ifp, command, data)
1813 struct ifnet *ifp;
1814 u_long command;
1815 caddr_t data;
1816 {
1817 struct vr_softc *sc = ifp->if_softc;
1818 struct ifreq *ifr = (struct ifreq *) data;
1819 int s, error = 0;
1820
1821 s = splimp();
1822
1823 switch(command) {
1824 case SIOCSIFADDR:
1825 case SIOCGIFADDR:
1826 case SIOCSIFMTU:
1827 error = ether_ioctl(ifp, command, data);
1828 break;
1829 case SIOCSIFFLAGS:
1830 if (ifp->if_flags & IFF_UP) {
1831 vr_init(sc);
1832 } else {
1833 if (ifp->if_flags & IFF_RUNNING)
1834 vr_stop(sc);
1835 }
1836 error = 0;
1837 break;
1838 case SIOCADDMULTI:
1839 case SIOCDELMULTI:
1840 vr_setmulti(sc);
1841 error = 0;
1842 break;
1843 case SIOCGIFMEDIA:
1844 case SIOCSIFMEDIA:
1845 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1846 break;
1847 default:
1848 error = EINVAL;
1849 break;
1850 }
1851
1852 (void)splx(s);
1853
1854 return(error);
1855 }
1856
1857 static void vr_watchdog(ifp)
1858 struct ifnet *ifp;
1859 {
1860 struct vr_softc *sc;
1861
1862 sc = ifp->if_softc;
1863
1864 if (sc->vr_autoneg) {
1865 vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
1866 return;
1867 }
1868
1869 ifp->if_oerrors++;
1870 printf("vr%d: watchdog timeout\n", sc->vr_unit);
1871
1872 if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1873 printf("vr%d: no carrier - transceiver cable problem?\n",
1874 sc->vr_unit);
1875
1876 vr_stop(sc);
1877 vr_reset(sc);
1878 vr_init(sc);
1879
1880 if (ifp->if_snd.ifq_head != NULL)
1881 vr_start(ifp);
1882
1883 return;
1884 }
1885
1886 /*
1887 * Stop the adapter and free any mbufs allocated to the
1888 * RX and TX lists.
1889 */
1890 static void vr_stop(sc)
1891 struct vr_softc *sc;
1892 {
1893 register int i;
1894 struct ifnet *ifp;
1895
1896 ifp = &sc->arpcom.ac_if;
1897 ifp->if_timer = 0;
1898
1899 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1900 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1901 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1902 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1903 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1904
1905 /*
1906 * Free data in the RX lists.
1907 */
1908 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1909 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1910 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1911 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1912 }
1913 }
1914 bzero((char *)&sc->vr_ldata->vr_rx_list,
1915 sizeof(sc->vr_ldata->vr_rx_list));
1916
1917 /*
1918 * Free the TX list buffers.
1919 */
1920 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1921 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1922 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1923 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1924 }
1925 }
1926
1927 bzero((char *)&sc->vr_ldata->vr_tx_list,
1928 sizeof(sc->vr_ldata->vr_tx_list));
1929
1930 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1931
1932 return;
1933 }
1934
1935 /*
1936 * Stop all chip I/O so that the kernel's probe routines don't
1937 * get confused by errant DMAs when rebooting.
1938 */
1939 static void vr_shutdown(howto, arg)
1940 int howto;
1941 void *arg;
1942 {
1943 struct vr_softc *sc = (struct vr_softc *)arg;
1944
1945 vr_stop(sc);
1946
1947 return;
1948 }
1949
/*
 * PCI autoconfiguration glue for the "vr" driver: device name,
 * probe/attach entry points and unit counter, registered in the
 * pcidevice linker set via DATA_SET.
 */
static struct pci_device vr_device = {
	"vr",
	vr_probe,
	vr_attach,
	&vr_count,
	NULL
};
DATA_SET(pcidevice_set, vr_device);
1958