/*	$NetBSD: if_vr.c,v 1.11 1999/02/05 02:58:38 thorpej Exp $	*/

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */
63
64 #include "opt_inet.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/sockio.h>
69 #include <sys/mbuf.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/socket.h>
73 #include <sys/device.h>
74
75 #include <net/if.h>
76 #include <net/if_arp.h>
77 #include <net/if_dl.h>
78 #include <net/if_media.h>
79 #include <net/if_ether.h>
80
81 #if defined(INET)
82 #include <netinet/in.h>
83 #include <netinet/if_inarp.h>
84 #endif
85
86 #include "bpfilter.h"
87 #if NBPFILTER > 0
88 #include <net/bpf.h>
89 #endif
90
91 #include <vm/vm.h> /* for vtophys */
92
93 #include <machine/bus.h>
94 #include <machine/intr.h>
95
96 #include <dev/mii/mii.h>
97 #include <dev/mii/miivar.h>
98
99 #include <dev/pci/pcireg.h>
100 #include <dev/pci/pcivar.h>
101 #include <dev/pci/pcidevs.h>
102
103 #include <dev/pci/if_vrreg.h>
104
105 #if defined(__NetBSD__) && defined(__alpha__)
106 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
107 #undef vtophys
108 #define vtophys(va) alpha_XXX_dmamap((vaddr_t)(va))
109 #endif
110
111 #define VR_USEIOSPACE
112
113 #define ETHER_CRC_LEN 4 /* XXX Should be in a common header. */
114
115 /*
116 * Various supported device vendors/types and their names.
117 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;		/* PCI vendor ID */
	pci_product_id_t	vr_did;		/* PCI product (device) ID */
	const char		*vr_name;	/* description for attach message */
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
		"VIA VT3043 Rhine I 10/100BaseTX" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
		"VIA VT86C100A Rhine II 10/100BaseTX" },
	/* table terminator */
	{ 0, 0, NULL }
};
129
/*
 * Hardware descriptor memory: the RX and TX descriptor rings, laid out
 * back to back so one allocation covers both.
 */
struct vr_list_data {
	struct vr_desc		vr_rx_list[VR_RX_LIST_CNT];
	struct vr_desc		vr_tx_list[VR_TX_LIST_CNT];
};

/* Software state for one TX descriptor. */
struct vr_chain {
	struct vr_desc		*vr_ptr;	/* hardware descriptor */
	struct mbuf		*vr_mbuf;	/* mbuf being transmitted */
	struct vr_chain		*vr_nextdesc;	/* next entry in the ring */
};

/* Software state for one RX descriptor (always a single fragment). */
struct vr_chain_onefrag {
	struct vr_desc		*vr_ptr;	/* hardware descriptor */
	struct mbuf		*vr_mbuf;	/* cluster receiving the frame */
	struct vr_chain_onefrag	*vr_nextdesc;	/* next entry in the ring */
};

/* Per-interface bookkeeping for both descriptor rings. */
struct vr_chain_data {
	struct vr_chain_onefrag	vr_rx_chain[VR_RX_LIST_CNT];
	struct vr_chain		vr_tx_chain[VR_TX_LIST_CNT];

	struct vr_chain_onefrag	*vr_rx_head;	/* next RX descriptor to poll */

	struct vr_chain		*vr_tx_head;	/* oldest in-flight TX entry */
	struct vr_chain		*vr_tx_tail;	/* newest in-flight TX entry */
	struct vr_chain		*vr_tx_free;	/* next free TX entry */
};
157
/*
 * Per-instance driver state ("softc").
 */
struct vr_softc {
	struct device		vr_dev;		/* generic device glue; must be first */
	void			*vr_ih;		/* interrupt handler cookie -- established outside this chunk */
	void			*vr_ats;	/* NOTE(review): presumably shutdown hook cookie; set outside this chunk -- confirm */
	bus_space_tag_t		vr_bustag;
	bus_space_handle_t	vr_bushandle;
	pci_chipset_tag_t	vr_pc;		/* PCI chipset tag */
	struct ethercom		vr_ec;		/* common ethernet state (ec_if, multicast list) */
	u_int8_t		vr_enaddr[ETHER_ADDR_LEN];	/* station address */
	struct mii_data		vr_mii;		/* MII/media info */
	bus_space_handle_t	vr_bhandle;	/* bus space handle */
	bus_space_tag_t		vr_btag;	/* bus space tag */
	caddr_t			vr_ldata_ptr;	/* raw descriptor allocation -- presumably unaligned backing for vr_ldata; set outside this chunk */
	struct vr_list_data	*vr_ldata;	/* descriptor rings (see vr_list_data) */
	struct vr_chain_data	vr_cdata;	/* software ring bookkeeping */
};
174
175 /*
176 * register space access macros
177 */
/* sc is a struct vr_softc *; reg is a byte offset into the CSR window. */
#define CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->vr_btag, sc->vr_bhandle, reg, val)
#define CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->vr_btag, sc->vr_bhandle, reg, val)
#define CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->vr_btag, sc->vr_bhandle, reg, val)

#define CSR_READ_4(sc, reg)	\
	bus_space_read_4(sc->vr_btag, sc->vr_bhandle, reg)
#define CSR_READ_2(sc, reg)	\
	bus_space_read_2(sc->vr_btag, sc->vr_bhandle, reg)
#define CSR_READ_1(sc, reg)	\
	bus_space_read_1(sc->vr_btag, sc->vr_bhandle, reg)

/* Number of 10us polls before giving up on a chip reset (see vr_reset). */
#define VR_TIMEOUT		1000

/* Forward declarations (__P() for pre-ANSI compiler compatibility). */
static int vr_newbuf		__P((struct vr_softc *,
					struct vr_chain_onefrag *));
static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
					struct mbuf *));

static void vr_rxeof		__P((struct vr_softc *));
static void vr_rxeoc		__P((struct vr_softc *));
static void vr_txeof		__P((struct vr_softc *));
static void vr_txeoc		__P((struct vr_softc *));
static void vr_intr		__P((void *));
static void vr_start		__P((struct ifnet *));
static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
static void vr_init		__P((void *));
static void vr_stop		__P((struct vr_softc *));
static void vr_watchdog		__P((struct ifnet *));
static void vr_tick		__P((void *));

static int vr_ifmedia_upd	__P((struct ifnet *));
static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

static void vr_mii_sync		__P((struct vr_softc *));
static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
static int vr_mii_readreg	__P((struct device *, int, int));
static void vr_mii_writereg	__P((struct device *, int, int, int));
static void vr_mii_statchg	__P((struct device *));

static u_int8_t vr_calchash	__P((u_int8_t *));
static void vr_setmulti		__P((struct vr_softc *));
static void vr_reset		__P((struct vr_softc *));
static int vr_list_rx_init	__P((struct vr_softc *));
static int vr_list_tx_init	__P((struct vr_softc *));

/* Read-modify-write helpers for 8-, 16- and 32-bit CSRs. */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | x)

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~x)

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | x)

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~x)

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

/*
 * MII bit-bang helpers: set/clear bits in the MIICMD register.
 * NB: these expect a local variable named `sc' to be in scope.
 */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~x)
257
258 /*
259 * Sync the PHYs by setting data bit and strobing the clock 32 times.
260 */
261 static void vr_mii_sync(sc)
262 struct vr_softc *sc;
263 {
264 register int i;
265
266 SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAOUT);
267
268 for (i = 0; i < 32; i++) {
269 SIO_SET(VR_MIICMD_CLK);
270 DELAY(1);
271 SIO_CLR(VR_MIICMD_CLK);
272 DELAY(1);
273 }
274
275 return;
276 }
277
278 /*
279 * Clock a series of bits through the MII.
280 */
281 static void vr_mii_send(sc, bits, cnt)
282 struct vr_softc *sc;
283 u_int32_t bits;
284 int cnt;
285 {
286 int i;
287
288 SIO_CLR(VR_MIICMD_CLK);
289
290 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
291 if (bits & i) {
292 SIO_SET(VR_MIICMD_DATAOUT);
293 } else {
294 SIO_CLR(VR_MIICMD_DATAOUT);
295 }
296 DELAY(1);
297 SIO_CLR(VR_MIICMD_CLK);
298 DELAY(1);
299 SIO_SET(VR_MIICMD_CLK);
300 }
301 }
302
303 /*
304 * Read an PHY register through the MII.
305 */
/*
 * Read a PHY register through the MII by bit-banging the management
 * frame on the MIICMD register in direct-programming mode.
 *
 * self: our device (actually a struct vr_softc *)
 * phy:  PHY address on the MII bus
 * reg:  register number within that PHY
 *
 * Returns the 16-bit register value, or 0 if the PHY did not ack.
 */
static int vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (struct vr_softc *)self;
	int i, ack, s, val = 0;

	s = splimp();		/* keep the bit-bang sequence atomic */

	/* Select direct (bit-bang) programming of the MII. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, MII_COMMAND_START, 2);
	vr_mii_send(sc, MII_COMMAND_READ, 2);
	vr_mii_send(sc, phy, 5);
	vr_mii_send(sc, reg, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAOUT));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN)
				val |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	/* One extra clock cycle to leave the bus idle. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	splx(s);

	return (val);
}
386
387 /*
388 * Write to a PHY register through the MII.
389 */
/*
 * Write to a PHY register through the MII by bit-banging the write
 * frame in direct-programming mode.
 *
 * self: our device (actually a struct vr_softc *)
 * phy:  PHY address; reg: register number; val: 16-bit value to write.
 */
static void vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (struct vr_softc *)self;
	int s;

	s = splimp();		/* keep the bit-bang sequence atomic */

	/* Select direct (bit-bang) programming of the MII. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Frame: start, write opcode, PHY addr, reg addr, turnaround, data. */
	vr_mii_send(sc, MII_COMMAND_START, 2);
	vr_mii_send(sc, MII_COMMAND_WRITE, 2);
	vr_mii_send(sc, phy, 5);
	vr_mii_send(sc, reg, 5);
	vr_mii_send(sc, MII_COMMAND_ACK, 2);
	vr_mii_send(sc, val, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	splx(s);
}
429
430 static void vr_mii_statchg(self)
431 struct device *self;
432 {
433 struct vr_softc *sc = (struct vr_softc *)self;
434 int restart = 0;
435
436 /*
437 * In order to fiddle with the 'full-duplex' bit in the netconfig
438 * register, we first have to put the transmit and/or receive logic
439 * in the idle state.
440 */
441 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
442 restart = 1;
443 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
444 }
445
446 if (sc->vr_mii.mii_media_active & IFM_FDX)
447 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
448 else
449 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
450
451 if (restart)
452 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
453
454 /* XXX Update ifp->if_baudrate */
455 }
456
457 /*
458 * Calculate CRC of a multicast group address, return the lower 6 bits.
459 */
/*
 * Compute the multicast hash for an ethernet address: run the address
 * through the chip's CRC and return the top 6 bits, which select one
 * bit of the 64-bit hash filter.
 */
static u_int8_t vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t state;
	u_int8_t octet;
	int byte, bit;

	state = 0xFFFFFFFF;	/* CRC seed */

	for (byte = 0; byte < 6; byte++) {
		octet = addr[byte];
		for (bit = 0; bit < 8; bit++) {
			u_int32_t mix;

			/* Mix the next data bit with the CRC high bit. */
			mix = ((state >> 31) ^ octet) & 1;
			state <<= 1;
			octet >>= 1;
			if (mix)
				state = (state ^ 0x04c11db6) | 1;
		}
	}

	/* The filter bit position is the upper 6 bits of the CRC. */
	return ((state >> 26) & 0x0000003F);
}
484
485 /*
486 * Program the 64-bit multicast hash filter.
487 */
488 static void vr_setmulti(sc)
489 struct vr_softc *sc;
490 {
491 struct ifnet *ifp;
492 int h = 0;
493 u_int32_t hashes[2] = { 0, 0 };
494 struct ether_multistep step;
495 struct ether_multi *enm;
496 int mcnt = 0;
497 u_int8_t rxfilt;
498
499 ifp = &sc->vr_ec.ec_if;
500
501 rxfilt = CSR_READ_1(sc, VR_RXCFG);
502
503 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
504 rxfilt |= VR_RXCFG_RX_MULTI;
505 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
506 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
507 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
508 return;
509 }
510
511 /* first, zot all the existing hash bits */
512 CSR_WRITE_4(sc, VR_MAR0, 0);
513 CSR_WRITE_4(sc, VR_MAR1, 0);
514
515 /* now program new ones */
516 ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
517 while (enm != NULL) {
518 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0)
519 continue;
520
521 h = vr_calchash(enm->enm_addrlo);
522
523 if (h < 32)
524 hashes[0] |= (1 << h);
525 else
526 hashes[1] |= (1 << (h - 32));
527 ETHER_NEXT_MULTI(step, enm);
528 mcnt++;
529 }
530
531 if (mcnt)
532 rxfilt |= VR_RXCFG_RX_MULTI;
533 else
534 rxfilt &= ~VR_RXCFG_RX_MULTI;
535
536 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
537 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
538 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
539
540 return;
541 }
542
543 static void vr_reset(sc)
544 struct vr_softc *sc;
545 {
546 register int i;
547
548 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
549
550 for (i = 0; i < VR_TIMEOUT; i++) {
551 DELAY(10);
552 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
553 break;
554 }
555 if (i == VR_TIMEOUT)
556 printf("%s: reset never completed!\n",
557 sc->vr_dev.dv_xname);
558
559 /* Wait a little while for the chip to get its brains in order. */
560 DELAY(1000);
561
562 return;
563 }
564
565 /*
566 * Initialize the transmit descriptors.
567 */
568 static int vr_list_tx_init(sc)
569 struct vr_softc *sc;
570 {
571 struct vr_chain_data *cd;
572 struct vr_list_data *ld;
573 int i;
574
575 cd = &sc->vr_cdata;
576 ld = sc->vr_ldata;
577 for (i = 0; i < VR_TX_LIST_CNT; i++) {
578 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
579 if (i == (VR_TX_LIST_CNT - 1))
580 cd->vr_tx_chain[i].vr_nextdesc =
581 &cd->vr_tx_chain[0];
582 else
583 cd->vr_tx_chain[i].vr_nextdesc =
584 &cd->vr_tx_chain[i + 1];
585 }
586
587 cd->vr_tx_free = &cd->vr_tx_chain[0];
588 cd->vr_tx_tail = cd->vr_tx_head = NULL;
589
590 return (0);
591 }
592
593
594 /*
595 * Initialize the RX descriptors and allocate mbufs for them. Note that
596 * we arrange the descriptors in a closed ring, so that the last descriptor
597 * points back to the first.
598 */
599 static int vr_list_rx_init(sc)
600 struct vr_softc *sc;
601 {
602 struct vr_chain_data *cd;
603 struct vr_list_data *ld;
604 int i;
605
606 cd = &sc->vr_cdata;
607 ld = sc->vr_ldata;
608
609 for (i = 0; i < VR_RX_LIST_CNT; i++) {
610 cd->vr_rx_chain[i].vr_ptr =
611 (struct vr_desc *)&ld->vr_rx_list[i];
612 if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
613 return (ENOBUFS);
614 if (i == (VR_RX_LIST_CNT - 1)) {
615 cd->vr_rx_chain[i].vr_nextdesc =
616 &cd->vr_rx_chain[0];
617 ld->vr_rx_list[i].vr_next =
618 vtophys(&ld->vr_rx_list[0]);
619 } else {
620 cd->vr_rx_chain[i].vr_nextdesc =
621 &cd->vr_rx_chain[i + 1];
622 ld->vr_rx_list[i].vr_next =
623 vtophys(&ld->vr_rx_list[i + 1]);
624 }
625 }
626
627 cd->vr_rx_head = &cd->vr_rx_chain[0];
628
629 return (0);
630 }
631
632 /*
633 * Initialize an RX descriptor and attach an MBUF cluster.
634 * Note: the length fields are only 11 bits wide, which means the
635 * largest size we can specify is 2047. This is important because
636 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
637 * overflow the field and make a mess.
638 */
639 static int vr_newbuf(sc, c)
640 struct vr_softc *sc;
641 struct vr_chain_onefrag *c;
642 {
643 struct mbuf *m_new = NULL;
644
645 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
646 if (m_new == NULL) {
647 printf("%s: no memory for rx list -- packet dropped!\n",
648 sc->vr_dev.dv_xname);
649 return (ENOBUFS);
650 }
651
652 MCLGET(m_new, M_DONTWAIT);
653 if (!(m_new->m_flags & M_EXT)) {
654 printf("%s: no memory for rx list -- packet dropped!\n",
655 sc->vr_dev.dv_xname);
656 m_freem(m_new);
657 return (ENOBUFS);
658 }
659
660 c->vr_mbuf = m_new;
661 c->vr_ptr->vr_status = VR_RXSTAT;
662 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
663 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
664
665 return (0);
666 }
667
668 /*
669 * A frame has been uploaded: pass the resulting mbuf chain up to
670 * the higher level protocols.
671 */
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Walks the RX ring from vr_rx_head until a descriptor still owned by
 * the chip is found.  Errored frames are counted and their descriptor
 * recycled in place; good frames get a fresh cluster attached (via
 * vr_newbuf) before being handed to ether_input().
 */
static void vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	/* Loop while the chip has released descriptors back to us. */
	while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
	    VR_RXSTAT_OWN)) {
		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			printf("%s: rx error: ", sc->vr_dev.dv_xname);
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				printf("crc error\n");
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				printf("frame alignment error\n");
				break;
			case VR_RXSTAT_FIFOOFLOW:
				printf("FIFO overflow\n");
				break;
			case VR_RXSTAT_GIANT:
				printf("received giant packet\n");
				break;
			case VR_RXSTAT_RUNT:
				printf("received runt packet\n");
				break;
			case VR_RXSTAT_BUSERR:
				printf("system bus error\n");
				break;
			case VR_RXSTAT_BUFFERR:
				printf("rx buffer error\n");
				break;
			default:
				printf("unknown rx error\n");
				break;
			}
			/* Recycle the descriptor: hand it back to the chip. */
			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->vr_mbuf;
		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
			continue;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
			if (ifp->if_flags & IFF_PROMISC &&
			    (memcmp(eh->ether_dhost, sc->vr_enaddr,
			    ETHER_ADDR_LEN) &&
			    (eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof (struct ether_header));
		ether_input(ifp, eh, m);
	}

	return;
}
785
786 void vr_rxeoc(sc)
787 struct vr_softc *sc;
788 {
789
790 vr_rxeof(sc);
791 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
792 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
793 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
794 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
795
796 return;
797 }
798
799 /*
800 * A frame was downloaded to the chip. It's safe for us to clean up
801 * the list buffers.
802 */
803
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * Walks the in-flight TX list from vr_tx_head, freeing the mbuf of
 * every descriptor the chip has released (OWN bit clear) and updating
 * the error/collision counters from the status word.
 */
static void vr_txeof(sc)
	struct vr_softc *sc;
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;
	register struct mbuf *n;

	ifp = &sc->vr_ec.ec_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Sanity check. */
	if (sc->vr_cdata.vr_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while (sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->vr_cdata.vr_tx_head;
		txstat = cur_tx->vr_ptr->vr_status;

		/* Still owned by the chip: transmission not finished. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Accumulate the hardware's collision count field. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		/*
		 * NOTE(review): MFREE frees only a single mbuf, not a
		 * chain; vr_encap() always hands the chip one mbuf, so
		 * this does not leak -- confirm if vr_encap changes.
		 */
		MFREE(cur_tx->vr_mbuf, n);
		cur_tx->vr_mbuf = NULL;

		/* Reached the newest in-flight entry: list is now empty. */
		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
			sc->vr_cdata.vr_tx_head = NULL;
			sc->vr_cdata.vr_tx_tail = NULL;
			break;
		}

		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
	}

	return;
}
858
859 /*
860 * TX 'end of channel' interrupt handler.
861 */
862 static void vr_txeoc(sc)
863 struct vr_softc *sc;
864 {
865 struct ifnet *ifp;
866
867 ifp = &sc->vr_ec.ec_if;
868
869 ifp->if_timer = 0;
870
871 if (sc->vr_cdata.vr_tx_head == NULL) {
872 ifp->if_flags &= ~IFF_OACTIVE;
873 sc->vr_cdata.vr_tx_tail = NULL;
874 }
875
876 return;
877 }
878
879 static void vr_intr(arg)
880 void *arg;
881 {
882 struct vr_softc *sc;
883 struct ifnet *ifp;
884 u_int16_t status;
885
886 sc = arg;
887 ifp = &sc->vr_ec.ec_if;
888
889 /* Supress unwanted interrupts. */
890 if (!(ifp->if_flags & IFF_UP)) {
891 vr_stop(sc);
892 return;
893 }
894
895 /* Disable interrupts. */
896 CSR_WRITE_2(sc, VR_IMR, 0x0000);
897
898 for (;;) {
899
900 status = CSR_READ_2(sc, VR_ISR);
901 if (status)
902 CSR_WRITE_2(sc, VR_ISR, status);
903
904 if ((status & VR_INTRS) == 0)
905 break;
906
907 if (status & VR_ISR_RX_OK)
908 vr_rxeof(sc);
909
910 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
911 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) ||
912 (status & VR_ISR_RX_DROPPED)) {
913 vr_rxeof(sc);
914 vr_rxeoc(sc);
915 }
916
917 if (status & VR_ISR_TX_OK) {
918 vr_txeof(sc);
919 vr_txeoc(sc);
920 }
921
922 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)) {
923 ifp->if_oerrors++;
924 vr_txeof(sc);
925 if (sc->vr_cdata.vr_tx_head != NULL) {
926 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
927 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
928 }
929 }
930
931 if (status & VR_ISR_BUSERR) {
932 vr_reset(sc);
933 vr_init(sc);
934 }
935 }
936
937 /* Re-enable interrupts. */
938 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
939
940 if (ifp->if_snd.ifq_head != NULL) {
941 vr_start(ifp);
942 }
943
944 return;
945 }
946
947 /*
948 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
949 * pointers to the fragment pointers.
950 */
951 static int vr_encap(sc, c, m_head)
952 struct vr_softc *sc;
953 struct vr_chain *c;
954 struct mbuf *m_head;
955 {
956 int frag = 0;
957 struct vr_desc *f = NULL;
958 int total_len;
959 struct mbuf *m;
960
961 m = m_head;
962 total_len = 0;
963
964 /*
965 * The VIA Rhine wants packet buffers to be longword
966 * aligned, but very often our mbufs aren't. Rather than
967 * waste time trying to decide when to copy and when not
968 * to copy, just do it all the time.
969 */
970 if (m != NULL) {
971 struct mbuf *m_new = NULL;
972
973 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
974 if (m_new == NULL) {
975 printf("%s: no memory for tx list",
976 sc->vr_dev.dv_xname);
977 return (1);
978 }
979 if (m_head->m_pkthdr.len > MHLEN) {
980 MCLGET(m_new, M_DONTWAIT);
981 if (!(m_new->m_flags & M_EXT)) {
982 m_freem(m_new);
983 printf("%s: no memory for tx list",
984 sc->vr_dev.dv_xname);
985 return (1);
986 }
987 }
988 m_copydata(m_head, 0, m_head->m_pkthdr.len,
989 mtod(m_new, caddr_t));
990 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
991 m_freem(m_head);
992 m_head = m_new;
993 /*
994 * The Rhine chip doesn't auto-pad, so we have to make
995 * sure to pad short frames out to the minimum frame length
996 * ourselves.
997 */
998 if (m_head->m_len < VR_MIN_FRAMELEN) {
999 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1000 m_new->m_len = m_new->m_pkthdr.len;
1001 }
1002 f = c->vr_ptr;
1003 f->vr_data = vtophys(mtod(m_new, caddr_t));
1004 f->vr_ctl = total_len = m_new->m_len;
1005 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1006 f->vr_status = 0;
1007 frag = 1;
1008 }
1009
1010 c->vr_mbuf = m_head;
1011 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1012 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1013
1014 return (0);
1015 }
1016
1017 /*
1018 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1019 * to the mbuf data regions directly in the transmit lists. We also save a
1020 * copy of the pointers since the transmit list fragment pointers are
1021 * physical addresses.
1022 */
1023
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc;
	struct mbuf *m_head = NULL;
	struct vr_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->vr_cdata.vr_tx_free;

	/* Fill descriptors until the free ring or the send queue runs dry. */
	while (sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->vr_cdata.vr_tx_free;
		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;

		/*
		 * Pack the data into the descriptor.
		 * NOTE(review): the return value is ignored; on
		 * allocation failure the dequeued mbuf is dropped and
		 * an empty descriptor handed to the chip -- confirm.
		 */
		vr_encap(sc, cur_tx, m_head);

		/*
		 * NOTE(review): OWN is set again unconditionally below,
		 * so this early hand-off for non-first descriptors is
		 * effectively redundant -- kept as-is.
		 */
		if (cur_tx != start_tx)
			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf);
#endif
		/* Give the descriptor to the chip and (re)start transmit. */
		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
	}

	/*
	 * If there are no frames queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	sc->vr_cdata.vr_tx_tail = cur_tx;

	if (sc->vr_cdata.vr_tx_head == NULL)
		sc->vr_cdata.vr_tx_head = start_tx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
1089
/*
 * (Re)initialize the chip and bring the interface up: reset, rebuild
 * the descriptor rings, program the RX filter, start TX/RX, and enable
 * interrupts.  Runs at splimp; also called from the interrupt handler
 * and watchdog after fatal errors.
 */
static void vr_init(xsc)
	void *xsc;
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	int s;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/* Use store-and-forward for both the RX and TX thresholds. */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->vr_dev.dv_xname);
		vr_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Tell the chip where the TX descriptor ring lives. */
	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/*
	 * Enable interrupts (ack anything pending first).
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	/* Start one second timer. */
	timeout(vr_tick, sc, hz);

	return;
}
1173
1174 /*
1175 * Set media options.
1176 */
1177 static int vr_ifmedia_upd(ifp)
1178 struct ifnet *ifp;
1179 {
1180 struct vr_softc *sc = ifp->if_softc;
1181
1182 if (ifp->if_flags & IFF_UP)
1183 mii_mediachg(&sc->vr_mii);
1184 return (0);
1185 }
1186
1187 /*
1188 * Report current media status.
1189 */
1190 static void vr_ifmedia_sts(ifp, ifmr)
1191 struct ifnet *ifp;
1192 struct ifmediareq *ifmr;
1193 {
1194 struct vr_softc *sc = ifp->if_softc;
1195
1196 mii_pollstat(&sc->vr_mii);
1197 ifmr->ifm_status = sc->vr_mii.mii_media_status;
1198 ifmr->ifm_active = sc->vr_mii.mii_media_active;
1199 }
1200
/*
 * Handle socket ioctls for the interface.  Runs at splimp.
 * Returns 0 on success or an errno.
 */
static int vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splimp();

	switch (command) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			vr_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			vr_init(sc);
			break;
		}
		break;

	case SIOCGIFADDR:
		/* Return the station address through ifr_data. */
		bcopy((caddr_t) sc->vr_enaddr,
		    (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
		    ETHER_ADDR_LEN);
		break;

	case SIOCSIFMTU:
		/* The MTU may be lowered but never raised past ETHERMTU. */
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		/*
		 * NOTE(review): any flag change while up (e.g. toggling
		 * PROMISC) triggers a full reinit rather than a lighter
		 * RX-filter update.
		 */
		if (ifp->if_flags & IFF_UP) {
			vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Update the ethercom multicast list... */
		if (command == SIOCADDMULTI)
			error = ether_addmulti(ifr, &sc->vr_ec);
		else
			error = ether_delmulti(ifr, &sc->vr_ec);

		/* ...and reprogram the hash filter if the list changed. */
		if (error == ENETRESET) {
			vr_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Delegate media ioctls to the ifmedia/MII layer. */
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return (error);
}
1277
1278 static void vr_watchdog(ifp)
1279 struct ifnet *ifp;
1280 {
1281 struct vr_softc *sc;
1282
1283 sc = ifp->if_softc;
1284
1285 ifp->if_oerrors++;
1286 printf("%s: watchdog timeout\n", sc->vr_dev.dv_xname);
1287
1288 vr_stop(sc);
1289 vr_reset(sc);
1290 vr_init(sc);
1291
1292 if (ifp->if_snd.ifq_head != NULL)
1293 vr_start(ifp);
1294
1295 return;
1296 }
1297
1298 /*
1299 * One second timer, used to tick MII.
1300 */
1301 static void
1302 vr_tick(arg)
1303 void *arg;
1304 {
1305 struct vr_softc *sc = arg;
1306 int s;
1307
1308 s = splimp();
1309 mii_tick(&sc->vr_mii);
1310 splx(s);
1311
1312 timeout(vr_tick, sc, hz);
1313 }
1314
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void vr_stop(sc)
	struct vr_softc *sc;
{
	register int i;
	struct ifnet *ifp;

	/* Cancel one second timer. */
	untimeout(vr_tick, sc);

	ifp = &sc->vr_ec.ec_if;
	/* No transmission in progress any more: disarm the watchdog. */
	ifp->if_timer = 0;

	/* Halt the chip, disable the DMA engines, mask all interrupts. */
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	/* Clear the descriptor base registers so no stale DMA occurs. */
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
	}
	/* Scrub the RX descriptors themselves. */
	bzero((char *)&sc->vr_ldata->vr_rx_list,
	    sizeof (sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
		}
	}

	/* Scrub the TX descriptors as well. */
	bzero((char *)&sc->vr_ldata->vr_tx_list,
	    sizeof (sc->vr_ldata->vr_tx_list));

	/* Mark the interface down and idle. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}
1366
/* Autoconfiguration glue: match/attach/shutdown entry points. */
static struct vr_type *vr_lookup __P((struct pci_attach_args *));
static int vr_probe __P((struct device *, struct cfdata *, void *));
static void vr_attach __P((struct device *, struct device *, void *));
static void vr_shutdown __P((void *));

/* Attachment record: softc size plus the match and attach functions. */
struct cfattach vr_ca = {
	sizeof (struct vr_softc), vr_probe, vr_attach
};
1375
1376 static struct vr_type *
1377 vr_lookup(pa)
1378 struct pci_attach_args *pa;
1379 {
1380 struct vr_type *vrt;
1381
1382 for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
1383 if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1384 PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1385 return (vrt);
1386 }
1387 return (NULL);
1388 }
1389
1390 static int
1391 vr_probe(parent, match, aux)
1392 struct device *parent;
1393 struct cfdata *match;
1394 void *aux;
1395 {
1396 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1397
1398 if (vr_lookup(pa) != NULL)
1399 return (1);
1400
1401 return (0);
1402 }
1403
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void vr_shutdown(arg)
	void *arg;
{
	vr_stop((struct vr_softc *)arg);
}
1417
1418 /*
1419 * Attach the interface. Allocate softc structures, do ifmedia
1420 * setup and ethernet/BPF attach.
1421 */
1422 static void
1423 vr_attach(parent, self, aux)
1424 struct device * const parent;
1425 struct device * const self;
1426 void * const aux;
1427 {
1428 #define PCI_CONF_WRITE(r, v) pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
1429 #define PCI_CONF_READ(r) pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
1430 struct vr_softc * const sc = (struct vr_softc *) self;
1431 struct pci_attach_args * const pa = (struct pci_attach_args *) aux;
1432 struct vr_type *vrt;
1433 int i;
1434 u_int32_t command;
1435 struct ifnet *ifp;
1436 unsigned int round;
1437 caddr_t roundptr;
1438 u_char eaddr[ETHER_ADDR_LEN];
1439
1440 vrt = vr_lookup(pa);
1441 if (vrt == NULL) {
1442 printf("\n");
1443 panic("vr_attach: impossible");
1444 }
1445
1446 printf(": %s Ethernet\n", vrt->vr_name);
1447
1448 /*
1449 * Handle power management nonsense.
1450 */
1451
1452 command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
1453 if (command == 0x01) {
1454
1455 command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
1456 if (command & VR_PSTATE_MASK) {
1457 u_int32_t iobase, membase, irq;
1458
1459 /* Save important PCI config data. */
1460 iobase = PCI_CONF_READ(VR_PCI_LOIO);
1461 membase = PCI_CONF_READ(VR_PCI_LOMEM);
1462 irq = PCI_CONF_READ(VR_PCI_INTLINE);
1463
1464 /* Reset the power state. */
1465 printf("%s: chip is in D%d power mode "
1466 "-- setting to D0\n",
1467 sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
1468 command &= 0xFFFFFFFC;
1469 PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
1470
1471 /* Restore PCI config data. */
1472 PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
1473 PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
1474 PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
1475 }
1476 }
1477
1478 /*
1479 * Map control/status registers.
1480 */
1481 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1482 command |= (PCI_COMMAND_IO_ENABLE |
1483 PCI_COMMAND_MEM_ENABLE |
1484 PCI_COMMAND_MASTER_ENABLE);
1485 PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
1486 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1487
1488 {
1489 bus_space_tag_t iot, memt;
1490 bus_space_handle_t ioh, memh;
1491 int ioh_valid, memh_valid;
1492 pci_intr_handle_t intrhandle;
1493 const char *intrstr;
1494
1495 ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1496 PCI_MAPREG_TYPE_IO, 0,
1497 &iot, &ioh, NULL, NULL) == 0);
1498 memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1499 PCI_MAPREG_TYPE_MEM |
1500 PCI_MAPREG_MEM_TYPE_32BIT,
1501 0, &memt, &memh, NULL, NULL) == 0);
1502 #if defined(VR_USEIOSPACE)
1503 if (ioh_valid) {
1504 sc->vr_btag = iot;
1505 sc->vr_bhandle = ioh;
1506 } else if (memh_valid) {
1507 sc->vr_btag = memt;
1508 sc->vr_bhandle = memh;
1509 }
1510 #else
1511 if (memh_valid) {
1512 sc->vr_btag = memt;
1513 sc->vr_bhandle = memh;
1514 } else if (ioh_valid) {
1515 sc->vr_btag = iot;
1516 sc->vr_bhandle = ioh;
1517 }
1518 #endif
1519 else {
1520 printf(": unable to map device registers\n");
1521 return;
1522 }
1523
1524 /* Allocate interrupt */
1525 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
1526 pa->pa_intrline, &intrhandle)) {
1527 printf("%s: couldn't map interrupt\n",
1528 sc->vr_dev.dv_xname);
1529 goto fail;
1530 }
1531 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
1532 sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1533 (void *)vr_intr, sc);
1534 if (sc->vr_ih == NULL) {
1535 printf("%s: couldn't establish interrupt",
1536 sc->vr_dev.dv_xname);
1537 if (intrstr != NULL)
1538 printf(" at %s", intrstr);
1539 printf("\n");
1540 }
1541 printf("%s: interrupting at %s\n",
1542 sc->vr_dev.dv_xname, intrstr);
1543 }
1544 sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1545 if (sc->vr_ats == NULL)
1546 printf("%s: warning: couldn't establish shutdown hook\n",
1547 sc->vr_dev.dv_xname);
1548
1549 /* Reset the adapter. */
1550 vr_reset(sc);
1551
1552 /*
1553 * Get station address. The way the Rhine chips work,
1554 * you're not allowed to directly access the EEPROM once
1555 * they've been programmed a special way. Consequently,
1556 * we need to read the node address from the PAR0 and PAR1
1557 * registers.
1558 */
1559 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1560 DELAY(200);
1561 for (i = 0; i < ETHER_ADDR_LEN; i++)
1562 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1563
1564 /*
1565 * A Rhine chip was detected. Inform the world.
1566 */
1567 printf("%s: Ethernet address: %s\n",
1568 sc->vr_dev.dv_xname, ether_sprintf(eaddr));
1569
1570 bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
1571
1572 sc->vr_ldata_ptr = malloc(sizeof (struct vr_list_data) + 8,
1573 M_DEVBUF, M_NOWAIT);
1574 if (sc->vr_ldata_ptr == NULL) {
1575 free(sc, M_DEVBUF);
1576 printf("%s: no memory for list buffers!\n",
1577 sc->vr_dev.dv_xname);
1578 return;
1579 }
1580
1581 sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
1582 round = (unsigned long)sc->vr_ldata_ptr & 0xF;
1583 roundptr = sc->vr_ldata_ptr;
1584 for (i = 0; i < 8; i++) {
1585 if (round % 8) {
1586 round++;
1587 roundptr++;
1588 } else
1589 break;
1590 }
1591 sc->vr_ldata = (struct vr_list_data *)roundptr;
1592 bzero(sc->vr_ldata, sizeof (struct vr_list_data));
1593
1594 ifp = &sc->vr_ec.ec_if;
1595 ifp->if_softc = sc;
1596 ifp->if_mtu = ETHERMTU;
1597 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1598 ifp->if_ioctl = vr_ioctl;
1599 ifp->if_output = ether_output;
1600 ifp->if_start = vr_start;
1601 ifp->if_watchdog = vr_watchdog;
1602 ifp->if_baudrate = 10000000;
1603 bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1604
1605 /*
1606 * Initialize MII/media info.
1607 */
1608 sc->vr_mii.mii_ifp = ifp;
1609 sc->vr_mii.mii_readreg = vr_mii_readreg;
1610 sc->vr_mii.mii_writereg = vr_mii_writereg;
1611 sc->vr_mii.mii_statchg = vr_mii_statchg;
1612 ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1613 mii_phy_probe(&sc->vr_dev, &sc->vr_mii, 0xffffffff);
1614 if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
1615 ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1616 ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
1617 } else
1618 ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
1619
1620 /*
1621 * Call MI attach routines.
1622 */
1623 if_attach(ifp);
1624 ether_ifattach(ifp, sc->vr_enaddr);
1625
1626 #if NBPFILTER > 0
1627 bpfattach(&sc->vr_ec.ec_if.if_bpf,
1628 ifp, DLT_EN10MB, sizeof (struct ether_header));
1629 #endif
1630
1631 sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1632 if (sc->vr_ats == NULL)
1633 printf("%s: warning: couldn't establish shutdown hook\n",
1634 sc->vr_dev.dv_xname);
1635
1636 fail:
1637 return;
1638 }
1639