/*	$NetBSD: if_vr.c,v 1.13 1999/02/05 08:21:31 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 1997, 1998
5 * Bill Paul <wpaul (at) ctr.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
35 */
36
37 /*
38 * VIA Rhine fast ethernet PCI NIC driver
39 *
40 * Supports various network adapters based on the VIA Rhine
41 * and Rhine II PCI controllers, including the D-Link DFE530TX.
42 * Datasheets are available at http://www.via.com.tw.
43 *
44 * Written by Bill Paul <wpaul (at) ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49 /*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
52 * uses an MII bus and an external physical layer interface. The
53 * receiver has a one entry perfect filter and a 64-bit hash table
54 * multicast filter. Transmit and receive descriptors are similar
55 * to the tulip.
56 *
57 * The Rhine has a serious flaw in its transmit DMA mechanism:
58 * transmit buffers must be longword aligned. Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
60 * at longword boundaries, so we have to do a buffer copy before
61 * transmission.
62 */
63
64 #include "opt_inet.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/sockio.h>
69 #include <sys/mbuf.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/socket.h>
73 #include <sys/device.h>
74
75 #include <net/if.h>
76 #include <net/if_arp.h>
77 #include <net/if_dl.h>
78 #include <net/if_media.h>
79 #include <net/if_ether.h>
80
81 #if defined(INET)
82 #include <netinet/in.h>
83 #include <netinet/if_inarp.h>
84 #endif
85
86 #include "bpfilter.h"
87 #if NBPFILTER > 0
88 #include <net/bpf.h>
89 #endif
90
91 #include <vm/vm.h> /* for vtophys */
92
93 #include <machine/bus.h>
94 #include <machine/intr.h>
95
96 #include <dev/mii/mii.h>
97 #include <dev/mii/miivar.h>
98
99 #include <dev/pci/pcireg.h>
100 #include <dev/pci/pcivar.h>
101 #include <dev/pci/pcidevs.h>
102
103 #include <dev/pci/if_vrreg.h>
104
105 #if defined(__NetBSD__) && defined(__alpha__)
106 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
107 #undef vtophys
108 #define vtophys(va) alpha_XXX_dmamap((vaddr_t)(va))
109 #endif
110
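/*
 * When VR_USEIOSPACE is defined, the driver prefers the I/O space
 * register mapping over the memory space mapping when both succeed.
 */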
111 #define VR_USEIOSPACE
112
113 #define ETHER_CRC_LEN 4 /* XXX Should be in a common header. */
114
115 /*
116 * Various supported device vendors/types and their names.
117 */
118 static struct vr_type {
119 pci_vendor_id_t vr_vid;
120 pci_product_id_t vr_did;
121 const char *vr_name;
122 } vr_devs[] = {
123 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
124 "VIA VT3043 Rhine I 10/100BaseTX" },
125 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
126 "VIA VT86C100A Rhine II 10/100BaseTX" },
127 { 0, 0, NULL }
128 };
129
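/*
 * The RX and TX descriptor rings are kept together in one contiguous
 * allocation, which vr_attach() aligns before handing it to the chip.
 */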
130 struct vr_list_data {
131 struct vr_desc vr_rx_list[VR_RX_LIST_CNT];
132 struct vr_desc vr_tx_list[VR_TX_LIST_CNT];
133 };
134
135 struct vr_chain {
136 struct vr_desc *vr_ptr;
137 struct mbuf *vr_mbuf;
138 struct vr_chain *vr_nextdesc;
139 };
140
141 struct vr_chain_onefrag {
142 struct vr_desc *vr_ptr;
143 struct mbuf *vr_mbuf;
144 struct vr_chain_onefrag *vr_nextdesc;
145 };
146
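/*
 * Software state for the descriptor rings: each hardware descriptor is
 * shadowed by a chain element recording the mbuf attached to it and the
 * next element in the ring, plus head/tail/free pointers for bookkeeping.
 */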
147 struct vr_chain_data {
148 struct vr_chain_onefrag vr_rx_chain[VR_RX_LIST_CNT];
149 struct vr_chain vr_tx_chain[VR_TX_LIST_CNT];
150
151 struct vr_chain_onefrag *vr_rx_head;
152
153 struct vr_chain *vr_tx_head;
154 struct vr_chain *vr_tx_tail;
155 struct vr_chain *vr_tx_free;
156 };
157
158 struct vr_softc {
159 struct device vr_dev;
160 void *vr_ih;
161 void *vr_ats;
162 bus_space_tag_t vr_bustag;
163 bus_space_handle_t vr_bushandle;
164 pci_chipset_tag_t vr_pc;
165 struct ethercom vr_ec;
166 u_int8_t vr_enaddr[ETHER_ADDR_LEN];
167 struct mii_data vr_mii; /* MII/media info */
168 bus_space_handle_t vr_bhandle; /* bus space handle */
169 bus_space_tag_t vr_btag; /* bus space tag */
170 caddr_t vr_ldata_ptr;
171 struct vr_list_data *vr_ldata;
172 struct vr_chain_data vr_cdata;
173 };
174
175 /*
176 * register space access macros
177 */
178 #define CSR_WRITE_4(sc, reg, val) \
179 bus_space_write_4(sc->vr_btag, sc->vr_bhandle, reg, val)
180 #define CSR_WRITE_2(sc, reg, val) \
181 bus_space_write_2(sc->vr_btag, sc->vr_bhandle, reg, val)
182 #define CSR_WRITE_1(sc, reg, val) \
183 bus_space_write_1(sc->vr_btag, sc->vr_bhandle, reg, val)
184
185 #define CSR_READ_4(sc, reg) \
186 bus_space_read_4(sc->vr_btag, sc->vr_bhandle, reg)
187 #define CSR_READ_2(sc, reg) \
188 bus_space_read_2(sc->vr_btag, sc->vr_bhandle, reg)
189 #define CSR_READ_1(sc, reg) \
190 bus_space_read_1(sc->vr_btag, sc->vr_bhandle, reg)
191
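/*
 * Number of 10 microsecond polls vr_reset() makes while waiting for the
 * reset bit to clear.
 */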
192 #define VR_TIMEOUT 1000
193
194 static int vr_newbuf __P((struct vr_softc *,
195 struct vr_chain_onefrag *));
196 static int vr_encap __P((struct vr_softc *, struct vr_chain *,
197 struct mbuf *));
198
199 static void vr_rxeof __P((struct vr_softc *));
200 static void vr_rxeoc __P((struct vr_softc *));
201 static void vr_txeof __P((struct vr_softc *));
202 static void vr_txeoc __P((struct vr_softc *));
203 static void vr_intr __P((void *));
204 static void vr_start __P((struct ifnet *));
205 static int vr_ioctl __P((struct ifnet *, u_long, caddr_t));
206 static void vr_init __P((void *));
207 static void vr_stop __P((struct vr_softc *));
208 static void vr_watchdog __P((struct ifnet *));
209 static void vr_tick __P((void *));
210
211 static int vr_ifmedia_upd __P((struct ifnet *));
212 static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
213
214 static void vr_mii_sync __P((struct vr_softc *));
215 static void vr_mii_send __P((struct vr_softc *, u_int32_t, int));
216 static int vr_mii_readreg __P((struct device *, int, int));
217 static void vr_mii_writereg __P((struct device *, int, int, int));
218 static void vr_mii_statchg __P((struct device *));
219
220 static u_int8_t vr_calchash __P((u_int8_t *));
221 static void vr_setmulti __P((struct vr_softc *));
222 static void vr_reset __P((struct vr_softc *));
223 static int vr_list_rx_init __P((struct vr_softc *));
224 static int vr_list_tx_init __P((struct vr_softc *));
225
226 #define VR_SETBIT(sc, reg, x) \
227 CSR_WRITE_1(sc, reg, \
228 CSR_READ_1(sc, reg) | x)
229
230 #define VR_CLRBIT(sc, reg, x) \
231 CSR_WRITE_1(sc, reg, \
232 CSR_READ_1(sc, reg) & ~x)
233
234 #define VR_SETBIT16(sc, reg, x) \
235 CSR_WRITE_2(sc, reg, \
236 CSR_READ_2(sc, reg) | x)
237
238 #define VR_CLRBIT16(sc, reg, x) \
239 CSR_WRITE_2(sc, reg, \
240 CSR_READ_2(sc, reg) & ~x)
241
242 #define VR_SETBIT32(sc, reg, x) \
243 CSR_WRITE_4(sc, reg, \
244 CSR_READ_4(sc, reg) | x)
245
246 #define VR_CLRBIT32(sc, reg, x) \
247 CSR_WRITE_4(sc, reg, \
248 CSR_READ_4(sc, reg) & ~x)
249
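/*
 * SIO_SET and SIO_CLR set and clear individual bits in the MII command
 * register; the bit-bang MII access routines below are built on them.
 */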
250 #define SIO_SET(x) \
251 CSR_WRITE_1(sc, VR_MIICMD, \
252 CSR_READ_1(sc, VR_MIICMD) | x)
253
254 #define SIO_CLR(x) \
255 CSR_WRITE_1(sc, VR_MIICMD, \
256 CSR_READ_1(sc, VR_MIICMD) & ~x)
257
258 /*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
260 */
261 static void vr_mii_sync(sc)
262 struct vr_softc *sc;
263 {
264 register int i;
265
266 SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAOUT);
267
268 for (i = 0; i < 32; i++) {
269 SIO_SET(VR_MIICMD_CLK);
270 DELAY(1);
271 SIO_CLR(VR_MIICMD_CLK);
272 DELAY(1);
273 }
274
275 return;
276 }
277
278 /*
279 * Clock a series of bits through the MII.
280 */
281 static void vr_mii_send(sc, bits, cnt)
282 struct vr_softc *sc;
283 u_int32_t bits;
284 int cnt;
285 {
286 int i;
287
288 SIO_CLR(VR_MIICMD_CLK);
289
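	/* Shift the bits out MSB first, one bit per clock cycle. */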
290 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
291 if (bits & i) {
292 SIO_SET(VR_MIICMD_DATAOUT);
293 } else {
294 SIO_CLR(VR_MIICMD_DATAOUT);
295 }
296 DELAY(1);
297 SIO_CLR(VR_MIICMD_CLK);
298 DELAY(1);
299 SIO_SET(VR_MIICMD_CLK);
300 }
301 }
302
303 /*
 * Read a PHY register through the MII.
305 */
306 static int vr_mii_readreg(self, phy, reg)
307 struct device *self;
308 int phy, reg;
309 {
310 struct vr_softc *sc = (struct vr_softc *)self;
311 int i, ack, val = 0;
312
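	/*
	 * VR_MIICMD_DIRECTPGM presumably selects direct (bit-bang)
	 * programming of the MII management interface, which the
	 * code below drives by hand.
	 */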
313 CSR_WRITE_1(sc, VR_MIICMD, 0);
314 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
315
316 /*
317 * Turn on data xmit.
318 */
319 SIO_SET(VR_MIICMD_DIR);
320
321 vr_mii_sync(sc);
322
323 /*
324 * Send command/address info.
325 */
326 vr_mii_send(sc, MII_COMMAND_START, 2);
327 vr_mii_send(sc, MII_COMMAND_READ, 2);
328 vr_mii_send(sc, phy, 5);
329 vr_mii_send(sc, reg, 5);
330
331 /* Idle bit */
332 SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAOUT));
333 DELAY(1);
334 SIO_SET(VR_MIICMD_CLK);
335 DELAY(1);
336
337 /* Turn off xmit. */
338 SIO_CLR(VR_MIICMD_DIR);
339
340 /* Check for ack */
341 SIO_CLR(VR_MIICMD_CLK);
342 DELAY(1);
343 SIO_SET(VR_MIICMD_CLK);
344 DELAY(1);
345 ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN;
346
347 /*
348 * Now try reading data bits. If the ack failed, we still
349 * need to clock through 16 cycles to keep the PHY(s) in sync.
350 */
351 if (ack) {
352 for (i = 0; i < 16; i++) {
353 SIO_CLR(VR_MIICMD_CLK);
354 DELAY(1);
355 SIO_SET(VR_MIICMD_CLK);
356 DELAY(1);
357 }
358 goto fail;
359 }
360
361 for (i = 0x8000; i; i >>= 1) {
362 SIO_CLR(VR_MIICMD_CLK);
363 DELAY(1);
364 if (!ack) {
365 if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN)
366 val |= i;
367 DELAY(1);
368 }
369 SIO_SET(VR_MIICMD_CLK);
370 DELAY(1);
371 }
372
373 fail:
374
375 SIO_CLR(VR_MIICMD_CLK);
376 DELAY(1);
377 SIO_SET(VR_MIICMD_CLK);
378 DELAY(1);
379
380 return (val);
381 }
382
383 /*
384 * Write to a PHY register through the MII.
385 */
386 static void vr_mii_writereg(self, phy, reg, val)
387 struct device *self;
388 int phy, reg, val;
389 {
390 struct vr_softc *sc = (struct vr_softc *)self;
391
392 CSR_WRITE_1(sc, VR_MIICMD, 0);
393 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
394
395 /*
396 * Turn on data output.
397 */
398 SIO_SET(VR_MIICMD_DIR);
399
400 vr_mii_sync(sc);
401
402 vr_mii_send(sc, MII_COMMAND_START, 2);
403 vr_mii_send(sc, MII_COMMAND_WRITE, 2);
404 vr_mii_send(sc, phy, 5);
405 vr_mii_send(sc, reg, 5);
406 vr_mii_send(sc, MII_COMMAND_ACK, 2);
407 vr_mii_send(sc, val, 16);
408
409 /* Idle bit. */
410 SIO_SET(VR_MIICMD_CLK);
411 DELAY(1);
412 SIO_CLR(VR_MIICMD_CLK);
413 DELAY(1);
414
415 /*
416 * Turn off xmit.
417 */
418 SIO_CLR(VR_MIICMD_DIR);
419 }
420
421 static void vr_mii_statchg(self)
422 struct device *self;
423 {
424 struct vr_softc *sc = (struct vr_softc *)self;
425 int restart = 0;
426
427 /*
428 * In order to fiddle with the 'full-duplex' bit in the netconfig
429 * register, we first have to put the transmit and/or receive logic
430 * in the idle state.
431 */
432 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
433 restart = 1;
434 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
435 }
436
437 if (sc->vr_mii.mii_media_active & IFM_FDX)
438 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
439 else
440 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
441
442 if (restart)
443 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
444
445 /* XXX Update ifp->if_baudrate */
446 }
447
448 /*
 * Calculate CRC of a multicast group address, return the upper 6 bits.
450 */
451 static u_int8_t vr_calchash(addr)
452 u_int8_t *addr;
453 {
454 u_int32_t crc, carry;
455 int i, j;
456 u_int8_t c;
457
458 /* Compute CRC for the address value. */
459 crc = 0xFFFFFFFF; /* initial value */
460
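	/*
	 * Bit-serial CRC-32 as used for Ethernet multicast hashing.
	 * Shifting first and then OR-ing the carry back into bit 0
	 * makes the XOR constant 0x04c11db6 below equivalent to the
	 * usual polynomial 0x04c11db7.
	 */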
461 for (i = 0; i < 6; i++) {
462 c = *(addr + i);
463 for (j = 0; j < 8; j++) {
464 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
465 crc <<= 1;
466 c >>= 1;
467 if (carry)
468 crc = (crc ^ 0x04c11db6) | carry;
469 }
470 }
471
472 /* return the filter bit position */
473 return ((crc >> 26) & 0x0000003F);
474 }
475
476 /*
477 * Program the 64-bit multicast hash filter.
478 */
479 static void vr_setmulti(sc)
480 struct vr_softc *sc;
481 {
482 struct ifnet *ifp;
483 int h = 0;
484 u_int32_t hashes[2] = { 0, 0 };
485 struct ether_multistep step;
486 struct ether_multi *enm;
487 int mcnt = 0;
488 u_int8_t rxfilt;
489
490 ifp = &sc->vr_ec.ec_if;
491
492 rxfilt = CSR_READ_1(sc, VR_RXCFG);
493
494 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
495 rxfilt |= VR_RXCFG_RX_MULTI;
496 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
497 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
498 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
499 return;
500 }
501
502 /* first, zot all the existing hash bits */
503 CSR_WRITE_4(sc, VR_MAR0, 0);
504 CSR_WRITE_4(sc, VR_MAR1, 0);
505
506 /* now program new ones */
507 ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
508 while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * A range of multicast addresses can't be
			 * represented in the hash filter; fall back
			 * to receiving all multicast frames.
			 */
			rxfilt |= VR_RXCFG_RX_MULTI;
			CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
			CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
			CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
			return;
		}
511
512 h = vr_calchash(enm->enm_addrlo);
513
514 if (h < 32)
515 hashes[0] |= (1 << h);
516 else
517 hashes[1] |= (1 << (h - 32));
518 ETHER_NEXT_MULTI(step, enm);
519 mcnt++;
520 }
521
522 if (mcnt)
523 rxfilt |= VR_RXCFG_RX_MULTI;
524 else
525 rxfilt &= ~VR_RXCFG_RX_MULTI;
526
527 CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
528 CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
529 CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
530
531 return;
532 }
533
534 static void vr_reset(sc)
535 struct vr_softc *sc;
536 {
537 register int i;
538
539 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
540
541 for (i = 0; i < VR_TIMEOUT; i++) {
542 DELAY(10);
543 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
544 break;
545 }
546 if (i == VR_TIMEOUT)
547 printf("%s: reset never completed!\n",
548 sc->vr_dev.dv_xname);
549
550 /* Wait a little while for the chip to get its brains in order. */
551 DELAY(1000);
552
553 return;
554 }
555
556 /*
557 * Initialize the transmit descriptors.
558 */
559 static int vr_list_tx_init(sc)
560 struct vr_softc *sc;
561 {
562 struct vr_chain_data *cd;
563 struct vr_list_data *ld;
564 int i;
565
566 cd = &sc->vr_cdata;
567 ld = sc->vr_ldata;
568 for (i = 0; i < VR_TX_LIST_CNT; i++) {
569 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
570 if (i == (VR_TX_LIST_CNT - 1))
571 cd->vr_tx_chain[i].vr_nextdesc =
572 &cd->vr_tx_chain[0];
573 else
574 cd->vr_tx_chain[i].vr_nextdesc =
575 &cd->vr_tx_chain[i + 1];
576 }
577
578 cd->vr_tx_free = &cd->vr_tx_chain[0];
579 cd->vr_tx_tail = cd->vr_tx_head = NULL;
580
581 return (0);
582 }
583
584
585 /*
586 * Initialize the RX descriptors and allocate mbufs for them. Note that
587 * we arrange the descriptors in a closed ring, so that the last descriptor
588 * points back to the first.
589 */
590 static int vr_list_rx_init(sc)
591 struct vr_softc *sc;
592 {
593 struct vr_chain_data *cd;
594 struct vr_list_data *ld;
595 int i;
596
597 cd = &sc->vr_cdata;
598 ld = sc->vr_ldata;
599
600 for (i = 0; i < VR_RX_LIST_CNT; i++) {
601 cd->vr_rx_chain[i].vr_ptr =
602 (struct vr_desc *)&ld->vr_rx_list[i];
603 if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
604 return (ENOBUFS);
605 if (i == (VR_RX_LIST_CNT - 1)) {
606 cd->vr_rx_chain[i].vr_nextdesc =
607 &cd->vr_rx_chain[0];
608 ld->vr_rx_list[i].vr_next =
609 vtophys(&ld->vr_rx_list[0]);
610 } else {
611 cd->vr_rx_chain[i].vr_nextdesc =
612 &cd->vr_rx_chain[i + 1];
613 ld->vr_rx_list[i].vr_next =
614 vtophys(&ld->vr_rx_list[i + 1]);
615 }
616 }
617
618 cd->vr_rx_head = &cd->vr_rx_chain[0];
619
620 return (0);
621 }
622
623 /*
624 * Initialize an RX descriptor and attach an MBUF cluster.
625 * Note: the length fields are only 11 bits wide, which means the
626 * largest size we can specify is 2047. This is important because
627 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
628 * overflow the field and make a mess.
629 */
630 static int vr_newbuf(sc, c)
631 struct vr_softc *sc;
632 struct vr_chain_onefrag *c;
633 {
634 struct mbuf *m_new = NULL;
635
636 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
637 if (m_new == NULL) {
638 printf("%s: no memory for rx list -- packet dropped!\n",
639 sc->vr_dev.dv_xname);
640 return (ENOBUFS);
641 }
642
643 MCLGET(m_new, M_DONTWAIT);
644 if (!(m_new->m_flags & M_EXT)) {
645 printf("%s: no memory for rx list -- packet dropped!\n",
646 sc->vr_dev.dv_xname);
647 m_freem(m_new);
648 return (ENOBUFS);
649 }
650
651 c->vr_mbuf = m_new;
652 c->vr_ptr->vr_status = VR_RXSTAT;
653 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
654 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
655
656 return (0);
657 }
658
659 /*
660 * A frame has been uploaded: pass the resulting mbuf chain up to
661 * the higher level protocols.
662 */
663 static void vr_rxeof(sc)
664 struct vr_softc *sc;
665 {
666 struct ether_header *eh;
667 struct mbuf *m;
668 struct ifnet *ifp;
669 struct vr_chain_onefrag *cur_rx;
670 int total_len = 0;
671 u_int32_t rxstat;
672
673 ifp = &sc->vr_ec.ec_if;
674
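	/*
	 * Walk the RX ring, handling each descriptor the chip has
	 * given back to the host (i.e. whose OWN bit is clear).
	 */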
675 while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
676 VR_RXSTAT_OWN)) {
677 cur_rx = sc->vr_cdata.vr_rx_head;
678 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
679
680 /*
681 * If an error occurs, update stats, clear the
682 * status word and leave the mbuf cluster in place:
683 * it should simply get re-used next time this descriptor
684 * comes up in the ring.
685 */
686 if (rxstat & VR_RXSTAT_RXERR) {
687 ifp->if_ierrors++;
688 printf("%s: rx error: ", sc->vr_dev.dv_xname);
689 switch (rxstat & 0x000000FF) {
690 case VR_RXSTAT_CRCERR:
691 printf("crc error\n");
692 break;
693 case VR_RXSTAT_FRAMEALIGNERR:
694 printf("frame alignment error\n");
695 break;
696 case VR_RXSTAT_FIFOOFLOW:
697 printf("FIFO overflow\n");
698 break;
699 case VR_RXSTAT_GIANT:
700 printf("received giant packet\n");
701 break;
702 case VR_RXSTAT_RUNT:
703 printf("received runt packet\n");
704 break;
705 case VR_RXSTAT_BUSERR:
706 printf("system bus error\n");
707 break;
708 case VR_RXSTAT_BUFFERR:
709 printf("rx buffer error\n");
710 break;
711 default:
712 printf("unknown rx error\n");
713 break;
714 }
715 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
716 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
717 continue;
718 }
719
720 /* No errors; receive the packet. */
721 m = cur_rx->vr_mbuf;
722 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
723
724 /*
725 * XXX The VIA Rhine chip includes the CRC with every
726 * received frame, and there's no way to turn this
727 * behavior off (at least, I can't find anything in
728 * the manual that explains how to do it) so we have
729 * to trim off the CRC manually.
730 */
731 total_len -= ETHER_CRC_LEN;
732
733 /*
734 * Try to conjure up a new mbuf cluster. If that
735 * fails, it means we have an out of memory condition and
736 * should leave the buffer in place and continue. This will
737 * result in a lost packet, but there's little else we
738 * can do in this situation.
739 */
740 if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
741 ifp->if_ierrors++;
742 cur_rx->vr_ptr->vr_status = VR_RXSTAT;
743 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
744 continue;
745 }
746
747 ifp->if_ipackets++;
748 eh = mtod(m, struct ether_header *);
749 m->m_pkthdr.rcvif = ifp;
750 m->m_pkthdr.len = m->m_len = total_len;
751 #if NBPFILTER > 0
752 /*
753 * Handle BPF listeners. Let the BPF user see the packet, but
754 * don't pass it up to the ether_input() layer unless it's
755 * a broadcast packet, multicast packet, matches our ethernet
756 * address or the interface is in promiscuous mode.
757 */
758 if (ifp->if_bpf) {
759 bpf_mtap(ifp->if_bpf, m);
760 if (ifp->if_flags & IFF_PROMISC &&
761 (memcmp(eh->ether_dhost, sc->vr_enaddr,
762 ETHER_ADDR_LEN) &&
763 (eh->ether_dhost[0] & 1) == 0)) {
764 m_freem(m);
765 continue;
766 }
767 }
768 #endif
769 /* Remove header from mbuf and pass it on. */
770 m_adj(m, sizeof (struct ether_header));
771 ether_input(ifp, eh, m);
772 }
773
774 return;
775 }
776
static void vr_rxeoc(sc)
778 struct vr_softc *sc;
779 {
780
781 vr_rxeof(sc);
782 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
783 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
784 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
785 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
786
787 return;
788 }
789
790 /*
791 * A frame was downloaded to the chip. It's safe for us to clean up
792 * the list buffers.
793 */
794
795 static void vr_txeof(sc)
796 struct vr_softc *sc;
797 {
798 struct vr_chain *cur_tx;
799 struct ifnet *ifp;
800 register struct mbuf *n;
801
802 ifp = &sc->vr_ec.ec_if;
803
804 /* Clear the timeout timer. */
805 ifp->if_timer = 0;
806
807 /* Sanity check. */
808 if (sc->vr_cdata.vr_tx_head == NULL)
809 return;
810
811 /*
812 * Go through our tx list and free mbufs for those
813 * frames that have been transmitted.
814 */
815 while (sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
816 u_int32_t txstat;
817
818 cur_tx = sc->vr_cdata.vr_tx_head;
819 txstat = cur_tx->vr_ptr->vr_status;
820
821 if (txstat & VR_TXSTAT_OWN)
822 break;
823
824 if (txstat & VR_TXSTAT_ERRSUM) {
825 ifp->if_oerrors++;
826 if (txstat & VR_TXSTAT_DEFER)
827 ifp->if_collisions++;
828 if (txstat & VR_TXSTAT_LATECOLL)
829 ifp->if_collisions++;
830 }
831
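		/* Accumulate the collision count reported in the status word. */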
		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
833
834 ifp->if_opackets++;
835 MFREE(cur_tx->vr_mbuf, n);
836 cur_tx->vr_mbuf = NULL;
837
838 if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
839 sc->vr_cdata.vr_tx_head = NULL;
840 sc->vr_cdata.vr_tx_tail = NULL;
841 break;
842 }
843
844 sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
845 }
846
847 return;
848 }
849
850 /*
851 * TX 'end of channel' interrupt handler.
852 */
853 static void vr_txeoc(sc)
854 struct vr_softc *sc;
855 {
856 struct ifnet *ifp;
857
858 ifp = &sc->vr_ec.ec_if;
859
860 ifp->if_timer = 0;
861
862 if (sc->vr_cdata.vr_tx_head == NULL) {
863 ifp->if_flags &= ~IFF_OACTIVE;
864 sc->vr_cdata.vr_tx_tail = NULL;
865 }
866
867 return;
868 }
869
870 static void vr_intr(arg)
871 void *arg;
872 {
873 struct vr_softc *sc;
874 struct ifnet *ifp;
875 u_int16_t status;
876
877 sc = arg;
878 ifp = &sc->vr_ec.ec_if;
879
	/* Suppress unwanted interrupts. */
881 if (!(ifp->if_flags & IFF_UP)) {
882 vr_stop(sc);
883 return;
884 }
885
886 /* Disable interrupts. */
887 CSR_WRITE_2(sc, VR_IMR, 0x0000);
888
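	/*
	 * Read and acknowledge the interrupt status until no
	 * interesting bits remain set, handling each cause in turn.
	 */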
889 for (;;) {
890
891 status = CSR_READ_2(sc, VR_ISR);
892 if (status)
893 CSR_WRITE_2(sc, VR_ISR, status);
894
895 if ((status & VR_INTRS) == 0)
896 break;
897
898 if (status & VR_ISR_RX_OK)
899 vr_rxeof(sc);
900
		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW) ||
		    (status & VR_ISR_RX_DROPPED)) {
904 vr_rxeof(sc);
905 vr_rxeoc(sc);
906 }
907
908 if (status & VR_ISR_TX_OK) {
909 vr_txeof(sc);
910 vr_txeoc(sc);
911 }
912
913 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)) {
914 ifp->if_oerrors++;
915 vr_txeof(sc);
916 if (sc->vr_cdata.vr_tx_head != NULL) {
917 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
918 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
919 }
920 }
921
922 if (status & VR_ISR_BUSERR) {
923 vr_reset(sc);
924 vr_init(sc);
925 }
926 }
927
928 /* Re-enable interrupts. */
929 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
930
931 if (ifp->if_snd.ifq_head != NULL) {
932 vr_start(ifp);
933 }
934
935 return;
936 }
937
938 /*
939 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
940 * pointers to the fragment pointers.
941 */
942 static int vr_encap(sc, c, m_head)
943 struct vr_softc *sc;
944 struct vr_chain *c;
945 struct mbuf *m_head;
946 {
947 int frag = 0;
948 struct vr_desc *f = NULL;
949 int total_len;
950 struct mbuf *m;
951
952 m = m_head;
953 total_len = 0;
954
955 /*
956 * The VIA Rhine wants packet buffers to be longword
957 * aligned, but very often our mbufs aren't. Rather than
958 * waste time trying to decide when to copy and when not
959 * to copy, just do it all the time.
960 */
961 if (m != NULL) {
962 struct mbuf *m_new = NULL;
963
964 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
965 if (m_new == NULL) {
			printf("%s: no memory for tx list\n",
967 sc->vr_dev.dv_xname);
968 return (1);
969 }
970 if (m_head->m_pkthdr.len > MHLEN) {
971 MCLGET(m_new, M_DONTWAIT);
972 if (!(m_new->m_flags & M_EXT)) {
973 m_freem(m_new);
				printf("%s: no memory for tx list\n",
975 sc->vr_dev.dv_xname);
976 return (1);
977 }
978 }
979 m_copydata(m_head, 0, m_head->m_pkthdr.len,
980 mtod(m_new, caddr_t));
981 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
982 m_freem(m_head);
983 m_head = m_new;
984 /*
985 * The Rhine chip doesn't auto-pad, so we have to make
986 * sure to pad short frames out to the minimum frame length
987 * ourselves.
988 */
989 if (m_head->m_len < VR_MIN_FRAMELEN) {
990 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
991 m_new->m_len = m_new->m_pkthdr.len;
992 }
993 f = c->vr_ptr;
994 f->vr_data = vtophys(mtod(m_new, caddr_t));
995 f->vr_ctl = total_len = m_new->m_len;
996 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
997 f->vr_status = 0;
998 frag = 1;
999 }
1000
1001 c->vr_mbuf = m_head;
1002 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1003 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1004
1005 return (0);
1006 }
1007
1008 /*
 * Main transmit routine. Since the Rhine requires transmit buffers to be
 * longword aligned, vr_encap() copies each outgoing packet into a freshly
 * allocated, aligned mbuf; we keep a pointer to that mbuf in the software
 * descriptor, since the hardware descriptor only holds physical addresses.
1013 */
1014
1015 static void vr_start(ifp)
1016 struct ifnet *ifp;
1017 {
1018 struct vr_softc *sc;
1019 struct mbuf *m_head = NULL;
1020 struct vr_chain *cur_tx = NULL, *start_tx;
1021
1022 sc = ifp->if_softc;
1023
1024 /*
1025 * Check for an available queue slot. If there are none,
1026 * punt.
1027 */
1028 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1029 ifp->if_flags |= IFF_OACTIVE;
1030 return;
1031 }
1032
1033 start_tx = sc->vr_cdata.vr_tx_free;
1034
1035 while (sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1036 IF_DEQUEUE(&ifp->if_snd, m_head);
1037 if (m_head == NULL)
1038 break;
1039
1040 /* Pick a descriptor off the free list. */
1041 cur_tx = sc->vr_cdata.vr_tx_free;
1042 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1043
1044 /* Pack the data into the descriptor. */
1045 vr_encap(sc, cur_tx, m_head);
1046
1047 if (cur_tx != start_tx)
1048 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1049
1050 #if NBPFILTER > 0
1051 /*
1052 * If there's a BPF listener, bounce a copy of this frame
1053 * to him.
1054 */
1055 if (ifp->if_bpf)
1056 bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf);
1057 #endif
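		/*
		 * Hand the descriptor to the chip by setting its OWN
		 * bit, then kick the transmitter.
		 */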
1058 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1059 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1060 }
1061
1062 /*
1063 * If there are no frames queued, bail.
1064 */
1065 if (cur_tx == NULL)
1066 return;
1067
1068 sc->vr_cdata.vr_tx_tail = cur_tx;
1069
1070 if (sc->vr_cdata.vr_tx_head == NULL)
1071 sc->vr_cdata.vr_tx_head = start_tx;
1072
1073 /*
1074 * Set a timeout in case the chip goes out to lunch.
1075 */
1076 ifp->if_timer = 5;
1077
1078 return;
1079 }
1080
1081 /*
1082 * Initialize the interface. Must be called at splnet.
1083 */
1084 static void vr_init(xsc)
1085 void *xsc;
1086 {
1087 struct vr_softc *sc = xsc;
1088 struct ifnet *ifp = &sc->vr_ec.ec_if;
1089
1090 /*
1091 * Cancel pending I/O and free all RX/TX buffers.
1092 */
1093 vr_stop(sc);
1094 vr_reset(sc);
1095
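	/*
	 * Use store-and-forward thresholds on both receive and transmit,
	 * so a complete frame is buffered before it is forwarded; this
	 * trades a little latency for immunity to FIFO underruns and
	 * overflows.
	 */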
1096 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1097 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1098
1099 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1100 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1101
1102 /* Init circular RX list. */
1103 if (vr_list_rx_init(sc) == ENOBUFS) {
1104 printf("%s: initialization failed: no "
1105 "memory for rx buffers\n", sc->vr_dev.dv_xname);
1106 vr_stop(sc);
1107 return;
1108 }
1109
1110 /*
1111 * Init tx descriptors.
1112 */
1113 vr_list_tx_init(sc);
1114
1115 /* If we want promiscuous mode, set the allframes bit. */
1116 if (ifp->if_flags & IFF_PROMISC)
1117 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1118 else
1119 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1120
1121 /* Set capture broadcast bit to capture broadcast frames. */
1122 if (ifp->if_flags & IFF_BROADCAST)
1123 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1124 else
1125 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1126
1127 /*
1128 * Program the multicast filter, if necessary.
1129 */
1130 vr_setmulti(sc);
1131
1132 /*
1133 * Load the address of the RX list.
1134 */
1135 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1136
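	/*
	 * VR_CMD_TX_NOPOLL appears to disable the chip's automatic
	 * transmit-descriptor polling; vr_start() kicks the
	 * transmitter explicitly with VR_CMD_TX_GO instead.
	 */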
1137 /* Enable receiver and transmitter. */
1138 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1139 VR_CMD_TX_ON|VR_CMD_RX_ON|
1140 VR_CMD_RX_GO);
1141
1142 /* Set current media. */
1143 mii_mediachg(&sc->vr_mii);
1144
1145 CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1146
1147 /*
1148 * Enable interrupts.
1149 */
1150 CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1151 CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1152
1153 ifp->if_flags |= IFF_RUNNING;
1154 ifp->if_flags &= ~IFF_OACTIVE;
1155
1156 /* Start one second timer. */
1157 timeout(vr_tick, sc, hz);
1158
1159 return;
1160 }
1161
1162 /*
1163 * Set media options.
1164 */
1165 static int vr_ifmedia_upd(ifp)
1166 struct ifnet *ifp;
1167 {
1168 struct vr_softc *sc = ifp->if_softc;
1169
1170 if (ifp->if_flags & IFF_UP)
1171 mii_mediachg(&sc->vr_mii);
1172 return (0);
1173 }
1174
1175 /*
1176 * Report current media status.
1177 */
1178 static void vr_ifmedia_sts(ifp, ifmr)
1179 struct ifnet *ifp;
1180 struct ifmediareq *ifmr;
1181 {
1182 struct vr_softc *sc = ifp->if_softc;
1183
1184 mii_pollstat(&sc->vr_mii);
1185 ifmr->ifm_status = sc->vr_mii.mii_media_status;
1186 ifmr->ifm_active = sc->vr_mii.mii_media_active;
1187 }
1188
1189 static int vr_ioctl(ifp, command, data)
1190 struct ifnet *ifp;
1191 u_long command;
1192 caddr_t data;
1193 {
1194 struct vr_softc *sc = ifp->if_softc;
1195 struct ifreq *ifr = (struct ifreq *)data;
1196 struct ifaddr *ifa = (struct ifaddr *)data;
1197 int s, error = 0;
1198
1199 s = splnet();
1200
1201 switch (command) {
1202 case SIOCSIFADDR:
1203 ifp->if_flags |= IFF_UP;
1204
1205 switch (ifa->ifa_addr->sa_family) {
1206 #ifdef INET
1207 case AF_INET:
1208 vr_init(sc);
1209 arp_ifinit(ifp, ifa);
1210 break;
1211 #endif /* INET */
1212 default:
1213 vr_init(sc);
1214 break;
1215 }
1216 break;
1217
1218 case SIOCGIFADDR:
1219 bcopy((caddr_t) sc->vr_enaddr,
1220 (caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
1221 ETHER_ADDR_LEN);
1222 break;
1223
1224 case SIOCSIFMTU:
1225 if (ifr->ifr_mtu > ETHERMTU)
1226 error = EINVAL;
1227 else
1228 ifp->if_mtu = ifr->ifr_mtu;
1229 break;
1230
1231 case SIOCSIFFLAGS:
1232 if (ifp->if_flags & IFF_UP) {
1233 vr_init(sc);
1234 } else {
1235 if (ifp->if_flags & IFF_RUNNING)
1236 vr_stop(sc);
1237 }
1238 error = 0;
1239 break;
1240 case SIOCADDMULTI:
1241 case SIOCDELMULTI:
1242 if (command == SIOCADDMULTI)
1243 error = ether_addmulti(ifr, &sc->vr_ec);
1244 else
1245 error = ether_delmulti(ifr, &sc->vr_ec);
1246
1247 if (error == ENETRESET) {
1248 vr_setmulti(sc);
1249 error = 0;
1250 }
1251 break;
1252 case SIOCGIFMEDIA:
1253 case SIOCSIFMEDIA:
1254 error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
1255 break;
1256 default:
1257 error = EINVAL;
1258 break;
1259 }
1260
1261 splx(s);
1262
1263 return (error);
1264 }
1265
1266 static void vr_watchdog(ifp)
1267 struct ifnet *ifp;
1268 {
1269 struct vr_softc *sc;
1270
1271 sc = ifp->if_softc;
1272
1273 ifp->if_oerrors++;
1274 printf("%s: watchdog timeout\n", sc->vr_dev.dv_xname);
1275
1276 vr_stop(sc);
1277 vr_reset(sc);
1278 vr_init(sc);
1279
1280 if (ifp->if_snd.ifq_head != NULL)
1281 vr_start(ifp);
1282
1283 return;
1284 }
1285
1286 /*
1287 * One second timer, used to tick MII.
1288 */
1289 static void
1290 vr_tick(arg)
1291 void *arg;
1292 {
1293 struct vr_softc *sc = arg;
1294 int s;
1295
1296 s = splnet();
1297 mii_tick(&sc->vr_mii);
1298 splx(s);
1299
1300 timeout(vr_tick, sc, hz);
1301 }
1302
1303 /*
1304 * Stop the adapter and free any mbufs allocated to the
1305 * RX and TX lists.
1306 */
1307 static void vr_stop(sc)
1308 struct vr_softc *sc;
1309 {
1310 register int i;
1311 struct ifnet *ifp;
1312
1313 /* Cancel one second timer. */
1314 untimeout(vr_tick, sc);
1315
1316 ifp = &sc->vr_ec.ec_if;
1317 ifp->if_timer = 0;
1318
1319 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1320 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1321 CSR_WRITE_2(sc, VR_IMR, 0x0000);
1322 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1323 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1324
1325 /*
1326 * Free data in the RX lists.
1327 */
1328 for (i = 0; i < VR_RX_LIST_CNT; i++) {
1329 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1330 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1331 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1332 }
1333 }
1334 bzero((char *)&sc->vr_ldata->vr_rx_list,
1335 sizeof (sc->vr_ldata->vr_rx_list));
1336
1337 /*
1338 * Free the TX list buffers.
1339 */
1340 for (i = 0; i < VR_TX_LIST_CNT; i++) {
1341 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1342 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1343 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1344 }
1345 }
1346
1347 bzero((char *)&sc->vr_ldata->vr_tx_list,
1348 sizeof (sc->vr_ldata->vr_tx_list));
1349
1350 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1351
1352 return;
1353 }
1354
1355 static struct vr_type *vr_lookup __P((struct pci_attach_args *));
1356 static int vr_probe __P((struct device *, struct cfdata *, void *));
1357 static void vr_attach __P((struct device *, struct device *, void *));
1358 static void vr_shutdown __P((void *));
1359
1360 struct cfattach vr_ca = {
1361 sizeof (struct vr_softc), vr_probe, vr_attach
1362 };
1363
1364 static struct vr_type *
1365 vr_lookup(pa)
1366 struct pci_attach_args *pa;
1367 {
1368 struct vr_type *vrt;
1369
1370 for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
1371 if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1372 PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1373 return (vrt);
1374 }
1375 return (NULL);
1376 }
1377
1378 static int
1379 vr_probe(parent, match, aux)
1380 struct device *parent;
1381 struct cfdata *match;
1382 void *aux;
1383 {
1384 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1385
1386 if (vr_lookup(pa) != NULL)
1387 return (1);
1388
1389 return (0);
1390 }
1391
1392 /*
1393 * Stop all chip I/O so that the kernel's probe routines don't
1394 * get confused by errant DMAs when rebooting.
1395 */
1396 static void vr_shutdown(arg)
1397 void *arg;
1398 {
1399 struct vr_softc *sc = (struct vr_softc *)arg;
1400
1401 vr_stop(sc);
1402
1403 return;
1404 }
1405
1406 /*
1407 * Attach the interface. Allocate softc structures, do ifmedia
1408 * setup and ethernet/BPF attach.
1409 */
1410 static void
1411 vr_attach(parent, self, aux)
1412 struct device * const parent;
1413 struct device * const self;
1414 void * const aux;
1415 {
1416 #define PCI_CONF_WRITE(r, v) pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
1417 #define PCI_CONF_READ(r) pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
1418 struct vr_softc * const sc = (struct vr_softc *) self;
1419 struct pci_attach_args * const pa = (struct pci_attach_args *) aux;
1420 struct vr_type *vrt;
1421 int i;
1422 u_int32_t command;
1423 struct ifnet *ifp;
1424 unsigned int round;
1425 caddr_t roundptr;
1426 u_char eaddr[ETHER_ADDR_LEN];
1427
1428 vrt = vr_lookup(pa);
1429 if (vrt == NULL) {
1430 printf("\n");
1431 panic("vr_attach: impossible");
1432 }
1433
1434 printf(": %s Ethernet\n", vrt->vr_name);
1435
1436 /*
1437 * Handle power management nonsense.
1438 */
1439
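	/*
	 * A PCI capability ID of 0x01 denotes power management. If the
	 * chip was left in a low-power state, force it back to D0 and
	 * restore the I/O, memory, and interrupt line registers, which
	 * may have been lost while powered down.
	 */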
1440 command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
1441 if (command == 0x01) {
1442
1443 command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
1444 if (command & VR_PSTATE_MASK) {
1445 u_int32_t iobase, membase, irq;
1446
1447 /* Save important PCI config data. */
1448 iobase = PCI_CONF_READ(VR_PCI_LOIO);
1449 membase = PCI_CONF_READ(VR_PCI_LOMEM);
1450 irq = PCI_CONF_READ(VR_PCI_INTLINE);
1451
1452 /* Reset the power state. */
1453 printf("%s: chip is in D%d power mode "
1454 "-- setting to D0\n",
1455 sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
1456 command &= 0xFFFFFFFC;
1457 PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
1458
1459 /* Restore PCI config data. */
1460 PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
1461 PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
1462 PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
1463 }
1464 }
1465
1466 /*
1467 * Map control/status registers.
1468 */
1469 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1470 command |= (PCI_COMMAND_IO_ENABLE |
1471 PCI_COMMAND_MEM_ENABLE |
1472 PCI_COMMAND_MASTER_ENABLE);
1473 PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
1474 command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1475
1476 {
1477 bus_space_tag_t iot, memt;
1478 bus_space_handle_t ioh, memh;
1479 int ioh_valid, memh_valid;
1480 pci_intr_handle_t intrhandle;
1481 const char *intrstr;
1482
1483 ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1484 PCI_MAPREG_TYPE_IO, 0,
1485 &iot, &ioh, NULL, NULL) == 0);
1486 memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1487 PCI_MAPREG_TYPE_MEM |
1488 PCI_MAPREG_MEM_TYPE_32BIT,
1489 0, &memt, &memh, NULL, NULL) == 0);
1490 #if defined(VR_USEIOSPACE)
1491 if (ioh_valid) {
1492 sc->vr_btag = iot;
1493 sc->vr_bhandle = ioh;
1494 } else if (memh_valid) {
1495 sc->vr_btag = memt;
1496 sc->vr_bhandle = memh;
1497 }
1498 #else
1499 if (memh_valid) {
1500 sc->vr_btag = memt;
1501 sc->vr_bhandle = memh;
1502 } else if (ioh_valid) {
1503 sc->vr_btag = iot;
1504 sc->vr_bhandle = ioh;
1505 }
1506 #endif
1507 else {
1508 printf(": unable to map device registers\n");
1509 return;
1510 }
1511
1512 /* Allocate interrupt */
1513 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
1514 pa->pa_intrline, &intrhandle)) {
1515 printf("%s: couldn't map interrupt\n",
1516 sc->vr_dev.dv_xname);
1517 goto fail;
1518 }
1519 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
1520 sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1521 (void *)vr_intr, sc);
1522 if (sc->vr_ih == NULL) {
1523 printf("%s: couldn't establish interrupt",
1524 sc->vr_dev.dv_xname);
1525 if (intrstr != NULL)
1526 printf(" at %s", intrstr);
			printf("\n");
			goto fail;
		}
1529 printf("%s: interrupting at %s\n",
1530 sc->vr_dev.dv_xname, intrstr);
1531 }
1536
1537 /* Reset the adapter. */
1538 vr_reset(sc);
1539
1540 /*
1541 * Get station address. The way the Rhine chips work,
1542 * you're not allowed to directly access the EEPROM once
1543 * they've been programmed a special way. Consequently,
1544 * we need to read the node address from the PAR0 and PAR1
1545 * registers.
1546 */
1547 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1548 DELAY(200);
1549 for (i = 0; i < ETHER_ADDR_LEN; i++)
1550 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1551
1552 /*
1553 * A Rhine chip was detected. Inform the world.
1554 */
1555 printf("%s: Ethernet address: %s\n",
1556 sc->vr_dev.dv_xname, ether_sprintf(eaddr));
1557
1558 bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
1559
1560 sc->vr_ldata_ptr = malloc(sizeof (struct vr_list_data) + 8,
1561 M_DEVBUF, M_NOWAIT);
	if (sc->vr_ldata_ptr == NULL) {
		printf("%s: no memory for list buffers!\n",
		    sc->vr_dev.dv_xname);
		return;
	}
1568
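	/*
	 * Round the descriptor list up to an 8-byte boundary by hand;
	 * the 8 extra bytes allocated above leave room for the
	 * adjustment.
	 */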
1569 sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
1570 round = (unsigned long)sc->vr_ldata_ptr & 0xF;
1571 roundptr = sc->vr_ldata_ptr;
1572 for (i = 0; i < 8; i++) {
1573 if (round % 8) {
1574 round++;
1575 roundptr++;
1576 } else
1577 break;
1578 }
1579 sc->vr_ldata = (struct vr_list_data *)roundptr;
1580 bzero(sc->vr_ldata, sizeof (struct vr_list_data));
1581
1582 ifp = &sc->vr_ec.ec_if;
1583 ifp->if_softc = sc;
1584 ifp->if_mtu = ETHERMTU;
1585 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1586 ifp->if_ioctl = vr_ioctl;
1587 ifp->if_output = ether_output;
1588 ifp->if_start = vr_start;
1589 ifp->if_watchdog = vr_watchdog;
1590 ifp->if_baudrate = 10000000;
1591 bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1592
1593 /*
1594 * Initialize MII/media info.
1595 */
1596 sc->vr_mii.mii_ifp = ifp;
1597 sc->vr_mii.mii_readreg = vr_mii_readreg;
1598 sc->vr_mii.mii_writereg = vr_mii_writereg;
1599 sc->vr_mii.mii_statchg = vr_mii_statchg;
1600 ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1601 mii_phy_probe(&sc->vr_dev, &sc->vr_mii, 0xffffffff);
1602 if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
1603 ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1604 ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
1605 } else
1606 ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
1607
1608 /*
1609 * Call MI attach routines.
1610 */
1611 if_attach(ifp);
1612 ether_ifattach(ifp, sc->vr_enaddr);
1613
1614 #if NBPFILTER > 0
1615 bpfattach(&sc->vr_ec.ec_if.if_bpf,
1616 ifp, DLT_EN10MB, sizeof (struct ether_header));
1617 #endif
1618
1619 sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1620 if (sc->vr_ats == NULL)
1621 printf("%s: warning: couldn't establish shutdown hook\n",
1622 sc->vr_dev.dv_xname);
1623
1624 fail:
1625 return;
1626 }
1627