if_vr.c revision 1.17 1 /* $NetBSD: if_vr.c,v 1.17 1999/02/05 22:09:46 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
35 */
36
37 /*
38 * VIA Rhine fast ethernet PCI NIC driver
39 *
40 * Supports various network adapters based on the VIA Rhine
41 * and Rhine II PCI controllers, including the D-Link DFE530TX.
42 * Datasheets are available at http://www.via.com.tw.
43 *
44 * Written by Bill Paul <wpaul (at) ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49 /*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
52 * uses an MII bus and an external physical layer interface. The
53 * receiver has a one entry perfect filter and a 64-bit hash table
54 * multicast filter. Transmit and receive descriptors are similar
55 * to the tulip.
56 *
57 * The Rhine has a serious flaw in its transmit DMA mechanism:
58 * transmit buffers must be longword aligned. Unfortunately,
59 * the kernel doesn't guarantee that mbufs will be filled in starting
60 * at longword boundaries, so we have to do a buffer copy before
61 * transmission.
62 *
63 * Apparently, the receive DMA mechanism also has the same flaw. This
64 * means that on systems with struct alignment requirements, incoming
65 * frames must be copied to a new buffer which shifts the data forward
66 * 2 bytes so that the payload is aligned on a 4-byte boundary.
67 */
68
69 #include "opt_inet.h"
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/sockio.h>
74 #include <sys/mbuf.h>
75 #include <sys/malloc.h>
76 #include <sys/kernel.h>
77 #include <sys/socket.h>
78 #include <sys/device.h>
79
80 #include <net/if.h>
81 #include <net/if_arp.h>
82 #include <net/if_dl.h>
83 #include <net/if_media.h>
84 #include <net/if_ether.h>
85
86 #if defined(INET)
87 #include <netinet/in.h>
88 #include <netinet/if_inarp.h>
89 #endif
90
91 #include "bpfilter.h"
92 #if NBPFILTER > 0
93 #include <net/bpf.h>
94 #endif
95
96 #include <vm/vm.h> /* for vtophys */
97
98 #include <machine/bus.h>
99 #include <machine/intr.h>
100
101 #include <dev/mii/mii.h>
102 #include <dev/mii/miivar.h>
103
104 #include <dev/pci/pcireg.h>
105 #include <dev/pci/pcivar.h>
106 #include <dev/pci/pcidevs.h>
107
108 #include <dev/pci/if_vrreg.h>
109
110 #if defined(__NetBSD__) && defined(__alpha__)
111 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
112 #undef vtophys
113 #define vtophys(va) alpha_XXX_dmamap((vaddr_t)(va))
114 #endif
115
116 #define VR_USEIOSPACE
117
118 #define ETHER_CRC_LEN 4 /* XXX Should be in a common header. */
119
/*
 * Various supported device vendors/types and their names.
 * The list is terminated by an all-zero sentinel entry.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;		/* PCI vendor ID */
	pci_product_id_t	vr_did;		/* PCI product ID */
	const char		*vr_name;	/* device description */
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
		"VIA VT3043 Rhine I 10/100BaseTX" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
		"VIA VT86C100A Rhine II 10/100BaseTX" },
	{ 0, 0, NULL }
};
134
/*
 * The hardware RX and TX descriptor rings, kept together in one
 * chunk of memory so a single allocation covers both.
 */
struct vr_list_data {
	struct vr_desc		vr_rx_list[VR_RX_LIST_CNT];	/* RX ring */
	struct vr_desc		vr_tx_list[VR_TX_LIST_CNT];	/* TX ring */
};

/* Software shadow of one transmit descriptor. */
struct vr_chain {
	struct vr_desc		*vr_ptr;	/* hardware descriptor */
	struct mbuf		*vr_mbuf;	/* mbuf being transmitted */
	struct vr_chain		*vr_nextdesc;	/* ring successor */
};

/* Software shadow of one receive descriptor (single fragment). */
struct vr_chain_onefrag {
	struct vr_desc		*vr_ptr;	/* hardware descriptor */
	struct mbuf		*vr_mbuf;	/* cluster receiving data */
	struct vr_chain_onefrag	*vr_nextdesc;	/* ring successor */
};

/* Per-interface descriptor ring bookkeeping. */
struct vr_chain_data {
	struct vr_chain_onefrag	vr_rx_chain[VR_RX_LIST_CNT];
	struct vr_chain		vr_tx_chain[VR_TX_LIST_CNT];

	struct vr_chain_onefrag	*vr_rx_head;	/* next RX entry to check */

	struct vr_chain		*vr_tx_head;	/* oldest pending TX entry */
	struct vr_chain		*vr_tx_tail;	/* newest pending TX entry */
	struct vr_chain		*vr_tx_free;	/* next free TX entry */
};
162
struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t		vr_enaddr[ETHER_ADDR_LEN]; /* station address */
	struct mii_data		vr_mii;		/* MII/media info */
	caddr_t			vr_ldata_ptr;	/* raw descriptor memory;
						 * presumably the unaligned
						 * allocation backing vr_ldata
						 * -- attach code not in view */
	struct vr_list_data	*vr_ldata;	/* descriptor rings */
	struct vr_chain_data	vr_cdata;	/* ring software state */
};
177
/*
 * register space access macros
 *
 * All chip CSRs are reached through the bus space tag/handle stored
 * in the softc; `reg' is a byte offset into the register window.
 */
#define CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define CSR_READ_4(sc, reg)		\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define CSR_READ_2(sc, reg)		\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define CSR_READ_1(sc, reg)		\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

/* Iteration bound for register polling loops (see vr_reset()). */
#define VR_TIMEOUT		1000
196
197 static int vr_newbuf __P((struct vr_softc *,
198 struct vr_chain_onefrag *));
199 static int vr_encap __P((struct vr_softc *, struct vr_chain *,
200 struct mbuf *));
201
202 static void vr_rxeof __P((struct vr_softc *));
203 static void vr_rxeoc __P((struct vr_softc *));
204 static void vr_txeof __P((struct vr_softc *));
205 static void vr_txeoc __P((struct vr_softc *));
206 static int vr_intr __P((void *));
207 static void vr_start __P((struct ifnet *));
208 static int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
209 static void vr_init __P((void *));
210 static void vr_stop __P((struct vr_softc *));
211 static void vr_watchdog __P((struct ifnet *));
212 static void vr_tick __P((void *));
213
214 static int vr_ifmedia_upd __P((struct ifnet *));
215 static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
216
217 static void vr_mii_sync __P((struct vr_softc *));
218 static void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
219 static int vr_mii_readreg __P((struct device *, int, int));
220 static void vr_mii_writereg __P((struct device *, int, int, int));
221 static void vr_mii_statchg __P((struct device *));
222
223 static u_int8_t vr_calchash __P((u_int8_t *));
224 static void vr_setmulti __P((struct vr_softc *));
225 static void vr_reset __P((struct vr_softc *));
226 static int vr_list_rx_init __P((struct vr_softc *));
227 static int vr_list_tx_init __P((struct vr_softc *));
228
/* Read-modify-write helpers: set bits in an 8-bit register. */
#define VR_SETBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
		CSR_READ_1(sc, reg) | x)

/* Clear bits in an 8-bit register. */
#define VR_CLRBIT(sc, reg, x)			\
	CSR_WRITE_1(sc, reg,			\
		CSR_READ_1(sc, reg) & ~x)

/* Set bits in a 16-bit register. */
#define VR_SETBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
		CSR_READ_2(sc, reg) | x)

/* Clear bits in a 16-bit register. */
#define VR_CLRBIT16(sc, reg, x)			\
	CSR_WRITE_2(sc, reg,			\
		CSR_READ_2(sc, reg) & ~x)

/* Set bits in a 32-bit register. */
#define VR_SETBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
		CSR_READ_4(sc, reg) | x)

/* Clear bits in a 32-bit register. */
#define VR_CLRBIT32(sc, reg, x)			\
	CSR_WRITE_4(sc, reg,			\
		CSR_READ_4(sc, reg) & ~x)

/*
 * Shorthands for the bit-banged MII interface: set/clear bits in the
 * MII command register.  An `sc' must be in scope at the use site.
 */
#define SIO_SET(x)				\
	CSR_WRITE_1(sc, VR_MIICMD,		\
		CSR_READ_1(sc, VR_MIICMD) | x)

#define SIO_CLR(x)				\
	CSR_WRITE_1(sc, VR_MIICMD,		\
		CSR_READ_1(sc, VR_MIICMD) & ~x)
260
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 * This drives the serial management interface to a known state before
 * a new read/write transaction is started.
 */
static void
vr_mii_sync(sc)
	struct vr_softc *sc;
{
	int i;

	/* Drive MDIO as an output and hold the data line high. */
	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAOUT);

	/* 32 clock cycles with data high. */
	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}

	return;
}
281
/*
 * Clock a series of bits through the MII.  The low-order `cnt' bits
 * of `bits' are shifted out most-significant bit first, one per
 * clock cycle.
 */
static void
vr_mii_send(sc, bits, cnt)
	struct vr_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	/* Walk a one-bit mask from the MSB of the field downward. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAOUT);
		} else {
			SIO_CLR(VR_MIICMD_DATAOUT);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}
307
/*
 * Read a PHY register through the MII (bit-banged via VR_MIICMD).
 * Returns the 16-bit register value, or 0 if the PHY did not
 * acknowledge the transaction.
 */
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (struct vr_softc *)self;
	int i, ack, val = 0;

	/* Put the management interface into direct (bit-bang) mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, MII_COMMAND_START, 2);
	vr_mii_send(sc, MII_COMMAND_READ, 2);
	vr_mii_send(sc, phy, 5);
	vr_mii_send(sc, reg, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAOUT));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack (line pulled low by the PHY means acknowledged). */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN)
				val |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	/* One final clock cycle to leave the bus idle. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	return (val);
}
388
/*
 * Write to a PHY register through the MII (bit-banged via VR_MIICMD).
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/* Put the management interface into direct (bit-bang) mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Frame: start, write opcode, PHY addr, reg addr, turnaround, data. */
	vr_mii_send(sc, MII_COMMAND_START, 2);
	vr_mii_send(sc, MII_COMMAND_WRITE, 2);
	vr_mii_send(sc, phy, 5);
	vr_mii_send(sc, reg, 5);
	vr_mii_send(sc, MII_COMMAND_ACK, 2);
	vr_mii_send(sc, val, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);
}
427
/*
 * Callback from the MII layer when the media status changes:
 * propagate the current duplex setting into the chip's command
 * register.
 */
static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;
	int restart = 0;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	/* Mirror the active media's duplex flag into the chip. */
	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	/* Resume TX/RX if we idled them above. */
	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);

	/* XXX Update ifp->if_baudrate */
}
455
/*
 * Calculate the CRC of a multicast group address and return the
 * upper 6 bits of the result, which index the chip's 64-bit
 * multicast hash filter.
 */
static u_int8_t
vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc;
	int byte, bit;

	/* Bitwise big-endian CRC-32 (polynomial 0x04c11db7). */
	crc = 0xFFFFFFFF;		/* initial value */

	for (byte = 0; byte < 6; byte++) {
		u_int8_t data = addr[byte];

		for (bit = 0; bit < 8; bit++) {
			u_int32_t mix = ((crc >> 31) ^ data) & 0x01;

			crc <<= 1;
			data >>= 1;
			if (mix)
				crc = (crc ^ 0x04c11db6) | mix;
		}
	}

	/* return the filter bit position */
	return ((crc >> 26) & 0x0000003F);
}
484
485 /*
486 * Program the 64-bit multicast hash filter.
487 */
488 static void
489 vr_setmulti(sc)
490 struct vr_softc *sc;
491 {
492 struct ifnet *ifp;
493 int h = 0;
494 u_int32_t hashes[2] = { 0, 0 };
495 struct ether_multistep step;
496 struct ether_multi *enm;
497 int mcnt = 0;
498 u_int8_t rxfilt;
499
500 ifp = &sc->vr_ec.ec_if;
501
502 rxfilt = CSR_READ_1(sc, VR_RXCFG);
503
504 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
505 rxfilt |= VR_RXCFG_RX_MULTI;
506 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
507 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
508 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
509 return;
510 }
511
512 /* first, zot all the existing hash bits */
513 CSR_WRITE_4(sc, VR_MAR0, 0);
514 CSR_WRITE_4(sc, VR_MAR1, 0);
515
516 /* now program new ones */
517 ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
518 while (enm != NULL) {
519 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0)
520 continue;
521
522 h = vr_calchash(enm->enm_addrlo);
523
524 if (h < 32)
525 hashes[0] |= (1 << h);
526 else
527 hashes[1] |= (1 << (h - 32));
528 ETHER_NEXT_MULTI(step, enm);
529 mcnt++;
530 }
531
532 if (mcnt)
533 rxfilt |= VR_RXCFG_RX_MULTI;
534 else
535 rxfilt &= ~VR_RXCFG_RX_MULTI;
536
537 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
538 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
539 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
540
541 return;
542 }
543
544 static void
545 vr_reset(sc)
546 struct vr_softc *sc;
547 {
548 int i;
549
550 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
551
552 for (i = 0; i < VR_TIMEOUT; i++) {
553 DELAY(10);
554 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
555 break;
556 }
557 if (i == VR_TIMEOUT)
558 printf("%s: reset never completed!\n",
559 sc->vr_dev.dv_xname);
560
561 /* Wait a little while for the chip to get its brains in order. */
562 DELAY(1000);
563
564 return;
565 }
566
567 /*
568 * Initialize the transmit descriptors.
569 */
570 static int
571 vr_list_tx_init(sc)
572 struct vr_softc *sc;
573 {
574 struct vr_chain_data *cd;
575 struct vr_list_data *ld;
576 int i;
577
578 cd = &sc->vr_cdata;
579 ld = sc->vr_ldata;
580 for (i = 0; i < VR_TX_LIST_CNT; i++) {
581 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
582 if (i == (VR_TX_LIST_CNT - 1))
583 cd->vr_tx_chain[i].vr_nextdesc =
584 &cd->vr_tx_chain[0];
585 else
586 cd->vr_tx_chain[i].vr_nextdesc =
587 &cd->vr_tx_chain[i + 1];
588 }
589
590 cd->vr_tx_free = &cd->vr_tx_chain[0];
591 cd->vr_tx_tail = cd->vr_tx_head = NULL;
592
593 return (0);
594 }
595
596
597 /*
598 * Initialize the RX descriptors and allocate mbufs for them. Note that
599 * we arrange the descriptors in a closed ring, so that the last descriptor
600 * points back to the first.
601 */
602 static int
603 vr_list_rx_init(sc)
604 struct vr_softc *sc;
605 {
606 struct vr_chain_data *cd;
607 struct vr_list_data *ld;
608 int i;
609
610 cd = &sc->vr_cdata;
611 ld = sc->vr_ldata;
612
613 for (i = 0; i < VR_RX_LIST_CNT; i++) {
614 cd->vr_rx_chain[i].vr_ptr =
615 (struct vr_desc *)&ld->vr_rx_list[i];
616 if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
617 return (ENOBUFS);
618 if (i == (VR_RX_LIST_CNT - 1)) {
619 cd->vr_rx_chain[i].vr_nextdesc =
620 &cd->vr_rx_chain[0];
621 ld->vr_rx_list[i].vr_next =
622 vtophys(&ld->vr_rx_list[0]);
623 } else {
624 cd->vr_rx_chain[i].vr_nextdesc =
625 &cd->vr_rx_chain[i + 1];
626 ld->vr_rx_list[i].vr_next =
627 vtophys(&ld->vr_rx_list[i + 1]);
628 }
629 }
630
631 cd->vr_rx_head = &cd->vr_rx_chain[0];
632
633 return (0);
634 }
635
636 /*
637 * Initialize an RX descriptor and attach an MBUF cluster.
638 * Note: the length fields are only 11 bits wide, which means the
639 * largest size we can specify is 2047. This is important because
640 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
641 * overflow the field and make a mess.
642 */
643 static int
644 vr_newbuf(sc, c)
645 struct vr_softc *sc;
646 struct vr_chain_onefrag *c;
647 {
648 struct mbuf *m_new = NULL;
649
650 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
651 if (m_new == NULL) {
652 printf("%s: no memory for rx list -- packet dropped!\n",
653 sc->vr_dev.dv_xname);
654 return (ENOBUFS);
655 }
656
657 MCLGET(m_new, M_DONTWAIT);
658 if (!(m_new->m_flags & M_EXT)) {
659 printf("%s: no memory for rx list -- packet dropped!\n",
660 sc->vr_dev.dv_xname);
661 m_freem(m_new);
662 return (ENOBUFS);
663 }
664
665 c->vr_mbuf = m_new;
666 c->vr_ptr->vr_status = VR_RXSTAT;
667 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
668 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
669
670 return (0);
671 }
672
673 /*
674 * A frame has been uploaded: pass the resulting mbuf chain up to
675 * the higher level protocols.
676 */
677 static void
678 vr_rxeof(sc)
679 struct vr_softc *sc;
680 {
681 struct ether_header *eh;
682 struct mbuf *m;
683 struct ifnet *ifp;
684 struct vr_chain_onefrag *cur_rx;
685 int total_len = 0;
686 u_int32_t rxstat;
687
688 ifp = &sc->vr_ec.ec_if;
689
690 while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
691 VR_RXSTAT_OWN)) {
692 cur_rx = sc->vr_cdata.vr_rx_head;
693 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
694
695 /*
696 * If an error occurs, update stats, clear the
697 * status word and leave the mbuf cluster in place:
698 * it should simply get re-used next time this descriptor
699 * comes up in the ring.
700 */
701 if (rxstat & VR_RXSTAT_RXERR) {
702 ifp->if_ierrors++;
703 printf("%s: rx error: ", sc->vr_dev.dv_xname);
704 switch (rxstat & 0x000000FF) {
705 case VR_RXSTAT_CRCERR:
706 printf("crc error\n");
707 break;
708 case VR_RXSTAT_FRAMEALIGNERR:
709 printf("frame alignment error\n");
710 break;
711 case VR_RXSTAT_FIFOOFLOW:
712 printf("FIFO overflow\n");
713 break;
714 case VR_RXSTAT_GIANT:
715 printf("received giant packet\n");
716 break;
717 case VR_RXSTAT_RUNT:
718 printf("received runt packet\n");
719 break;
720 case VR_RXSTAT_BUSERR:
721 printf("system bus error\n");
722 break;
723 case VR_RXSTAT_BUFFERR:
724 printf("rx buffer error\n");
725 break;
726 default:
727 printf("unknown rx error\n");
728 break;
729 }
730 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
731 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
732 continue;
733 }
734
735 /* No errors; receive the packet. */
736 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
737
738 /*
739 * XXX The VIA Rhine chip includes the CRC with every
740 * received frame, and there's no way to turn this
741 * behavior off (at least, I can't find anything in
742 * the manual that explains how to do it) so we have
743 * to trim off the CRC manually.
744 */
745 total_len -= ETHER_CRC_LEN;
746
747 #ifdef __NO_STRICT_ALIGNMENT
748 /*
749 * Try to conjure up a new mbuf cluster. If that
750 * fails, it means we have an out of memory condition and
751 * should leave the buffer in place and continue. This will
752 * result in a lost packet, but there's little else we
753 * can do in this situation.
754 */
755 m = cur_rx->vr_mbuf;
756 if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
757 ifp->if_ierrors++;
758 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
759 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
760 continue;
761 }
762 #else
763 /*
764 * The Rhine's packet buffers must be 4-byte aligned.
765 * But this means that the data after the Ethernet header
766 * is misaligned. We must allocate a new buffer and
767 * copy the data, shifted forward 2 bytes.
768 */
769 MGETHDR(m, M_DONTWAIT, MT_DATA);
770 if (m == NULL) {
771 dropit:
772 ifp->if_ierrors++;
773 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
774 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
775 continue;
776 }
777 if (total_len > (MHLEN - 2)) {
778 MCLGET(m, M_DONTWAIT);
779 if (m == NULL)
780 goto dropit;
781 }
782 m->m_data += 2;
783
784 /*
785 * Note that we use clusters for incoming frames, so the
786 * buffer is virtually contiguous.
787 */
788 memcpy(mtod(m, caddr_t), mtod(cur_rx->vr_mbuf, caddr_t),
789 total_len);
790
791 /* Allow the recieve descriptor to continue using its mbuf. */
792 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
793 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
794 #endif /* __NO_STRICT_ALIGNMENT */
795
796 ifp->if_ipackets++;
797 eh = mtod(m, struct ether_header *);
798 m->m_pkthdr.rcvif = ifp;
799 m->m_pkthdr.len = m->m_len = total_len;
800 #if NBPFILTER > 0
801 /*
802 * Handle BPF listeners. Let the BPF user see the packet, but
803 * don't pass it up to the ether_input() layer unless it's
804 * a broadcast packet, multicast packet, matches our ethernet
805 * address or the interface is in promiscuous mode.
806 */
807 if (ifp->if_bpf) {
808 bpf_mtap(ifp->if_bpf, m);
809 if (ifp->if_flags & IFF_PROMISC &&
810 (memcmp(eh->ether_dhost, sc->vr_enaddr,
811 ETHER_ADDR_LEN) &&
812 (eh->ether_dhost[0] & 1) == 0)) {
813 m_freem(m);
814 continue;
815 }
816 }
817 #endif
818 /* Remove header from mbuf and pass it on. */
819 m_adj(m, sizeof (struct ether_header));
820 ether_input(ifp, eh, m);
821 }
822 }
823
/*
 * RX 'end of channel': drain any completed frames, then restart the
 * receiver.  The sequence matters: stop RX, reload the descriptor
 * pointer, re-enable RX, then tell it to go.
 */
void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}
835
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * Walks the pending list from vr_tx_head, freeing the mbuf and
 * updating statistics for every descriptor the chip has released
 * (VR_TXSTAT_OWN clear).  Stops at the first descriptor still owned
 * by the chip, or once the tail entry has been reclaimed.
 */

static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;
	register struct mbuf *n;

	ifp = &sc->vr_ec.ec_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Sanity check. */
	if (sc->vr_cdata.vr_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while (sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->vr_cdata.vr_tx_head;
		txstat = cur_tx->vr_ptr->vr_status;

		/* Chip still owns it; transmission not finished. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Add the frame's collision count field. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		/*
		 * vr_encap() copied the frame into a single mbuf, so
		 * freeing just the head mbuf releases the whole frame.
		 */
		MFREE(cur_tx->vr_mbuf, n);
		cur_tx->vr_mbuf = NULL;

		/* Reached the tail: pending queue is now empty. */
		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
			sc->vr_cdata.vr_tx_head = NULL;
			sc->vr_cdata.vr_tx_tail = NULL;
			break;
		}

		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
	}
}
894
895 /*
896 * TX 'end of channel' interrupt handler.
897 */
898 static void
899 vr_txeoc(sc)
900 struct vr_softc *sc;
901 {
902 struct ifnet *ifp;
903
904 ifp = &sc->vr_ec.ec_if;
905
906 ifp->if_timer = 0;
907
908 if (sc->vr_cdata.vr_tx_head == NULL) {
909 ifp->if_flags &= ~IFF_OACTIVE;
910 sc->vr_cdata.vr_tx_tail = NULL;
911 }
912 }
913
914 static int
915 vr_intr(arg)
916 void *arg;
917 {
918 struct vr_softc *sc;
919 struct ifnet *ifp;
920 u_int16_t status;
921 int handled = 0;
922
923 sc = arg;
924 ifp = &sc->vr_ec.ec_if;
925
926 /* Supress unwanted interrupts. */
927 if ((ifp->if_flags & IFF_UP) == 0) {
928 vr_stop(sc);
929 return (0);
930 }
931
932 /* Disable interrupts. */
933 CSR_WRITE_2(sc, VR_IMR, 0x0000);
934
935 for (;;) {
936 status = CSR_READ_2(sc, VR_ISR);
937 if (status)
938 CSR_WRITE_2(sc, VR_ISR, status);
939
940 if ((status & VR_INTRS) == 0)
941 break;
942
943 handled = 1;
944
945 if (status & VR_ISR_RX_OK)
946 vr_rxeof(sc);
947
948 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
949 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) ||
950 (status & VR_ISR_RX_DROPPED)) {
951 vr_rxeof(sc);
952 vr_rxeoc(sc);
953 }
954
955 if (status & VR_ISR_TX_OK) {
956 vr_txeof(sc);
957 vr_txeoc(sc);
958 }
959
960 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)) {
961 ifp->if_oerrors++;
962 vr_txeof(sc);
963 if (sc->vr_cdata.vr_tx_head != NULL) {
964 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
965 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
966 }
967 }
968
969 if (status & VR_ISR_BUSERR) {
970 vr_reset(sc);
971 vr_init(sc);
972 }
973 }
974
975 /* Re-enable interrupts. */
976 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
977
978 if (ifp->if_snd.ifq_head != NULL) {
979 vr_start(ifp);
980 }
981
982 return (handled);
983 }
984
985 /*
986 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
987 * pointers to the fragment pointers.
988 */
989 static int
990 vr_encap(sc, c, m_head)
991 struct vr_softc *sc;
992 struct vr_chain *c;
993 struct mbuf *m_head;
994 {
995 int frag = 0;
996 struct vr_desc *f = NULL;
997 int total_len;
998 struct mbuf *m;
999
1000 m = m_head;
1001 total_len = 0;
1002
1003 /*
1004 * The VIA Rhine wants packet buffers to be longword
1005 * aligned, but very often our mbufs aren't. Rather than
1006 * waste time trying to decide when to copy and when not
1007 * to copy, just do it all the time.
1008 */
1009 if (m != NULL) {
1010 struct mbuf *m_new = NULL;
1011
1012 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1013 if (m_new == NULL) {
1014 printf("%s: no memory for tx list",
1015 sc->vr_dev.dv_xname);
1016 return (1);
1017 }
1018 if (m_head->m_pkthdr.len > MHLEN) {
1019 MCLGET(m_new, M_DONTWAIT);
1020 if (!(m_new->m_flags & M_EXT)) {
1021 m_freem(m_new);
1022 printf("%s: no memory for tx list",
1023 sc->vr_dev.dv_xname);
1024 return (1);
1025 }
1026 }
1027 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1028 mtod(m_new, caddr_t));
1029 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1030 m_freem(m_head);
1031 m_head = m_new;
1032 /*
1033 * The Rhine chip doesn't auto-pad, so we have to make
1034 * sure to pad short frames out to the minimum frame length
1035 * ourselves.
1036 */
1037 if (m_head->m_len < VR_MIN_FRAMELEN) {
1038 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1039 m_new->m_len = m_new->m_pkthdr.len;
1040 }
1041 f = c->vr_ptr;
1042 f->vr_data = vtophys(mtod(m_new, caddr_t));
1043 f->vr_ctl = total_len = m_new->m_len;
1044 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1045 f->vr_status = 0;
1046 frag = 1;
1047 }
1048
1049 c->vr_mbuf = m_head;
1050 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1051 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1052
1053 return (0);
1054 }
1055
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc;
	struct mbuf *m_head = NULL;
	struct vr_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->vr_cdata.vr_tx_free;

	/* Queue frames until the free ring or the send queue runs dry. */
	while (sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->vr_cdata.vr_tx_free;
		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;

		/* Pack the data into the descriptor. */
		/*
		 * NOTE(review): vr_encap() can fail (returns 1) and its
		 * return value is ignored here, leaving this descriptor
		 * queued with stale contents -- worth confirming/fixing.
		 */
		vr_encap(sc, cur_tx, m_head);

		/* Hand descriptors after the first to the chip now. */
		if (cur_tx != start_tx)
			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf);
#endif
		/* Give this one to the chip too and (re)start transmit. */
		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
	}

	/*
	 * If there are no frames queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	sc->vr_cdata.vr_tx_tail = cur_tx;

	if (sc->vr_cdata.vr_tx_head == NULL)
		sc->vr_cdata.vr_tx_head = start_tx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1126
/*
 * Initialize the interface. Must be called at splnet.
 *
 * Resets the chip, rebuilds both descriptor rings, programs the
 * receive filter and DMA thresholds, and (re)starts the receiver,
 * transmitter and the one-second tick timer.
 */
static void
vr_init(xsc)
	void *xsc;
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->vr_ec.ec_if;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/* Use store-and-forward on both the RX and TX sides. */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->vr_dev.dv_xname);
		vr_stop(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/*
	 * Enable interrupts.  Ack any stale status bits first.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	timeout(vr_tick, sc, hz);
}
1206
1207 /*
1208 * Set media options.
1209 */
1210 static int
1211 vr_ifmedia_upd(ifp)
1212 struct ifnet *ifp;
1213 {
1214 struct vr_softc *sc = ifp->if_softc;
1215
1216 if (ifp->if_flags & IFF_UP)
1217 mii_mediachg(&sc->vr_mii);
1218 return (0);
1219 }
1220
1221 /*
1222 * Report current media status.
1223 */
1224 static void
1225 vr_ifmedia_sts(ifp, ifmr)
1226 struct ifnet *ifp;
1227 struct ifmediareq *ifmr;
1228 {
1229 struct vr_softc *sc = ifp->if_softc;
1230
1231 mii_pollstat(&sc->vr_mii);
1232 ifmr->ifm_status = sc->vr_mii.mii_media_status;
1233 ifmr->ifm_active = sc->vr_mii.mii_media_active;
1234 }
1235
/*
 * Process an ioctl request on the interface.  Runs at splnet so the
 * interrupt handler cannot touch driver state mid-update.
 */
static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			vr_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			vr_init(sc);
			break;
		}
		break;

	case SIOCGIFADDR:
		/* Return the station (Ethernet) address. */
		bcopy((caddr_t) sc->vr_enaddr,
		    (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
		    ETHER_ADDR_LEN);
		break;

	case SIOCSIFMTU:
		/* Reject MTUs larger than a standard Ethernet frame. */
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Reinitialize to pick up flag changes such as
			 * IFF_PROMISC; vr_init() reprograms RX config.
			 */
			vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (command == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->vr_ec);
		else
			error = ether_delmulti(ifr, &sc->vr_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list changed; reprogram the chip's
			 * hardware filter to match.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Hand media ioctls off to the generic ifmedia layer. */
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return (error);
}
1313
1314 static void
1315 vr_watchdog(ifp)
1316 struct ifnet *ifp;
1317 {
1318 struct vr_softc *sc;
1319
1320 sc = ifp->if_softc;
1321
1322 ifp->if_oerrors++;
1323 printf("%s: watchdog timeout\n", sc->vr_dev.dv_xname);
1324
1325 vr_stop(sc);
1326 vr_reset(sc);
1327 vr_init(sc);
1328
1329 if (ifp->if_snd.ifq_head != NULL)
1330 vr_start(ifp);
1331
1332 return;
1333 }
1334
1335 /*
1336 * One second timer, used to tick MII.
1337 */
1338 static void
1339 vr_tick(arg)
1340 void *arg;
1341 {
1342 struct vr_softc *sc = arg;
1343 int s;
1344
1345 s = splnet();
1346 mii_tick(&sc->vr_mii);
1347 splx(s);
1348
1349 timeout(vr_tick, sc, hz);
1350 }
1351
1352 /*
1353 * Stop the adapter and free any mbufs allocated to the
1354 * RX and TX lists.
1355 */
1356 static void
1357 vr_stop(sc)
1358 struct vr_softc *sc;
1359 {
1360 struct ifnet *ifp;
1361 int i;
1362
1363 /* Cancel one second timer. */
1364 untimeout(vr_tick, sc);
1365
1366 ifp = &sc->vr_ec.ec_if;
1367 ifp->if_timer = 0;
1368
1369 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1370 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1371 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1372 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1373 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1374
1375 /*
1376 * Free data in the RX lists.
1377 */
1378 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1379 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1380 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1381 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1382 }
1383 }
1384 bzero((char *)&sc->vr_ldata->vr_rx_list,
1385 sizeof (sc->vr_ldata->vr_rx_list));
1386
1387 /*
1388 * Free the TX list buffers.
1389 */
1390 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1391 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1392 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1393 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1394 }
1395 }
1396
1397 bzero((char *)&sc->vr_ldata->vr_tx_list,
1398 sizeof (sc->vr_ldata->vr_tx_list));
1399
1400 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1401 }
1402
/* Autoconfiguration entry points and helpers. */
static struct vr_type *vr_lookup __P((struct pci_attach_args *));
static int vr_probe __P((struct device *, struct cfdata *, void *));
static void vr_attach __P((struct device *, struct device *, void *));
static void vr_shutdown __P((void *));

/* Configuration attachment: softc size plus match/attach routines. */
struct cfattach vr_ca = {
	sizeof (struct vr_softc), vr_probe, vr_attach
};
1411
1412 static struct vr_type *
1413 vr_lookup(pa)
1414 struct pci_attach_args *pa;
1415 {
1416 struct vr_type *vrt;
1417
1418 for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
1419 if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1420 PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1421 return (vrt);
1422 }
1423 return (NULL);
1424 }
1425
1426 static int
1427 vr_probe(parent, match, aux)
1428 struct device *parent;
1429 struct cfdata *match;
1430 void *aux;
1431 {
1432 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1433
1434 if (vr_lookup(pa) != NULL)
1435 return (1);
1436
1437 return (0);
1438 }
1439
1440 /*
1441 * Stop all chip I/O so that the kernel's probe routines don't
1442 * get confused by errant DMAs when rebooting.
1443 */
1444 static void
1445 vr_shutdown(arg)
1446 void *arg;
1447 {
1448 struct vr_softc *sc = (struct vr_softc *)arg;
1449
1450 vr_stop(sc);
1451 }
1452
1453 /*
1454 * Attach the interface. Allocate softc structures, do ifmedia
1455 * setup and ethernet/BPF attach.
1456 */
1457 static void
1458 vr_attach(parent, self, aux)
1459 struct device *parent;
1460 struct device *self;
1461 void *aux;
1462 {
1463 struct vr_softc *sc = (struct vr_softc *) self;
1464 struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1465 struct vr_type *vrt;
1466 int i;
1467 u_int32_t command;
1468 struct ifnet *ifp;
1469 unsigned int round;
1470 caddr_t roundptr;
1471 u_char eaddr[ETHER_ADDR_LEN];
1472
1473 #define PCI_CONF_WRITE(r, v) pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
1474 #define PCI_CONF_READ(r) pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
1475
1476 vrt = vr_lookup(pa);
1477 if (vrt == NULL) {
1478 printf("\n");
1479 panic("vr_attach: impossible");
1480 }
1481
1482 printf(": %s Ethernet\n", vrt->vr_name);
1483
1484 /*
1485 * Handle power management nonsense.
1486 */
1487
1488 command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
1489 if (command == 0x01) {
1490 command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
1491 if (command & VR_PSTATE_MASK) {
1492 u_int32_t iobase, membase, irq;
1493
1494 /* Save important PCI config data. */
1495 iobase = PCI_CONF_READ(VR_PCI_LOIO);
1496 membase = PCI_CONF_READ(VR_PCI_LOMEM);
1497 irq = PCI_CONF_READ(VR_PCI_INTLINE);
1498
1499 /* Reset the power state. */
1500 printf("%s: chip is in D%d power mode "
1501 "-- setting to D0\n",
1502 sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
1503 command &= 0xFFFFFFFC;
1504 PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
1505
1506 /* Restore PCI config data. */
1507 PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
1508 PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
1509 PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
1510 }
1511 }
1512
1513 /*
1514 * Map control/status registers.
1515 */
1516 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1517 command |= (PCI_COMMAND_IO_ENABLE |
1518 PCI_COMMAND_MEM_ENABLE |
1519 PCI_COMMAND_MASTER_ENABLE);
1520 PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
1521 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1522
1523 {
1524 bus_space_tag_t iot, memt;
1525 bus_space_handle_t ioh, memh;
1526 int ioh_valid, memh_valid;
1527 pci_intr_handle_t intrhandle;
1528 const char *intrstr;
1529
1530 ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1531 PCI_MAPREG_TYPE_IO, 0,
1532 &iot, &ioh, NULL, NULL) == 0);
1533 memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1534 PCI_MAPREG_TYPE_MEM |
1535 PCI_MAPREG_MEM_TYPE_32BIT,
1536 0, &memt, &memh, NULL, NULL) == 0);
1537 #if defined(VR_USEIOSPACE)
1538 if (ioh_valid) {
1539 sc->vr_bst = iot;
1540 sc->vr_bsh = ioh;
1541 } else if (memh_valid) {
1542 sc->vr_bst = memt;
1543 sc->vr_bsh = memh;
1544 }
1545 #else
1546 if (memh_valid) {
1547 sc->vr_bst = memt;
1548 sc->vr_bsh = memh;
1549 } else if (ioh_valid) {
1550 sc->vr_bst = iot;
1551 sc->vr_bsh = ioh;
1552 }
1553 #endif
1554 else {
1555 printf(": unable to map device registers\n");
1556 return;
1557 }
1558
1559 /* Allocate interrupt */
1560 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
1561 pa->pa_intrline, &intrhandle)) {
1562 printf("%s: couldn't map interrupt\n",
1563 sc->vr_dev.dv_xname);
1564 return;
1565 }
1566 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
1567 sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1568 vr_intr, sc);
1569 if (sc->vr_ih == NULL) {
1570 printf("%s: couldn't establish interrupt",
1571 sc->vr_dev.dv_xname);
1572 if (intrstr != NULL)
1573 printf(" at %s", intrstr);
1574 printf("\n");
1575 }
1576 printf("%s: interrupting at %s\n",
1577 sc->vr_dev.dv_xname, intrstr);
1578 }
1579 sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1580 if (sc->vr_ats == NULL)
1581 printf("%s: warning: couldn't establish shutdown hook\n",
1582 sc->vr_dev.dv_xname);
1583
1584 /* Reset the adapter. */
1585 vr_reset(sc);
1586
1587 /*
1588 * Get station address. The way the Rhine chips work,
1589 * you're not allowed to directly access the EEPROM once
1590 * they've been programmed a special way. Consequently,
1591 * we need to read the node address from the PAR0 and PAR1
1592 * registers.
1593 */
1594 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1595 DELAY(200);
1596 for (i = 0; i < ETHER_ADDR_LEN; i++)
1597 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1598
1599 /*
1600 * A Rhine chip was detected. Inform the world.
1601 */
1602 printf("%s: Ethernet address: %s\n",
1603 sc->vr_dev.dv_xname, ether_sprintf(eaddr));
1604
1605 bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
1606
1607 sc->vr_ldata_ptr = malloc(sizeof (struct vr_list_data) + 8,
1608 M_DEVBUF, M_NOWAIT);
1609 if (sc->vr_ldata_ptr == NULL) {
1610 free(sc, M_DEVBUF);
1611 printf("%s: no memory for list buffers!\n",
1612 sc->vr_dev.dv_xname);
1613 return;
1614 }
1615
1616 sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
1617 round = (unsigned long)sc->vr_ldata_ptr & 0xF;
1618 roundptr = sc->vr_ldata_ptr;
1619 for (i = 0; i < 8; i++) {
1620 if (round % 8) {
1621 round++;
1622 roundptr++;
1623 } else
1624 break;
1625 }
1626 sc->vr_ldata = (struct vr_list_data *)roundptr;
1627 bzero(sc->vr_ldata, sizeof (struct vr_list_data));
1628
1629 ifp = &sc->vr_ec.ec_if;
1630 ifp->if_softc = sc;
1631 ifp->if_mtu = ETHERMTU;
1632 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1633 ifp->if_ioctl = vr_ioctl;
1634 ifp->if_output = ether_output;
1635 ifp->if_start = vr_start;
1636 ifp->if_watchdog = vr_watchdog;
1637 ifp->if_baudrate = 10000000;
1638 bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1639
1640 /*
1641 * Initialize MII/media info.
1642 */
1643 sc->vr_mii.mii_ifp = ifp;
1644 sc->vr_mii.mii_readreg = vr_mii_readreg;
1645 sc->vr_mii.mii_writereg = vr_mii_writereg;
1646 sc->vr_mii.mii_statchg = vr_mii_statchg;
1647 ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1648 mii_phy_probe(&sc->vr_dev, &sc->vr_mii, 0xffffffff);
1649 if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
1650 ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1651 ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
1652 } else
1653 ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
1654
1655 /*
1656 * Call MI attach routines.
1657 */
1658 if_attach(ifp);
1659 ether_ifattach(ifp, sc->vr_enaddr);
1660
1661 #if NBPFILTER > 0
1662 bpfattach(&sc->vr_ec.ec_if.if_bpf,
1663 ifp, DLT_EN10MB, sizeof (struct ether_header));
1664 #endif
1665
1666 sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1667 if (sc->vr_ats == NULL)
1668 printf("%s: warning: couldn't establish shutdown hook\n",
1669 sc->vr_dev.dv_xname);
1670 }
1671