if_vr.c revision 1.15
      1 /*	$NetBSD: if_vr.c,v 1.15 1999/02/05 08:42:03 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997, 1998
      5  *	Bill Paul <wpaul (at) ctr.columbia.edu>.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. All advertising materials mentioning features or use of this software
     16  *    must display the following acknowledgement:
     17  *	This product includes software developed by Bill Paul.
     18  * 4. Neither the name of the author nor the names of any co-contributors
     19  *    may be used to endorse or promote products derived from this software
     20  *    without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
     23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
     26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     32  * THE POSSIBILITY OF SUCH DAMAGE.
     33  *
     34  *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
     35  */
     36 
     37 /*
     38  * VIA Rhine fast ethernet PCI NIC driver
     39  *
     40  * Supports various network adapters based on the VIA Rhine
     41  * and Rhine II PCI controllers, including the D-Link DFE530TX.
     42  * Datasheets are available at http://www.via.com.tw.
     43  *
     44  * Written by Bill Paul <wpaul (at) ctr.columbia.edu>
     45  * Electrical Engineering Department
     46  * Columbia University, New York City
     47  */
     48 
     49 /*
     50  * The VIA Rhine controllers are similar in some respects to
     51  * the DEC tulip chips, except less complicated. The controller
     52  * uses an MII bus and an external physical layer interface. The
     53  * receiver has a one entry perfect filter and a 64-bit hash table
     54  * multicast filter. Transmit and receive descriptors are similar
     55  * to the tulip.
     56  *
     57  * The Rhine has a serious flaw in its transmit DMA mechanism:
     58  * transmit buffers must be longword aligned. Unfortunately,
     59  * FreeBSD doesn't guarantee that mbufs will be filled in starting
     60  * at longword boundaries, so we have to do a buffer copy before
     61  * transmission.
     62  */
     63 
     64 #include "opt_inet.h"
     65 
     66 #include <sys/param.h>
     67 #include <sys/systm.h>
     68 #include <sys/sockio.h>
     69 #include <sys/mbuf.h>
     70 #include <sys/malloc.h>
     71 #include <sys/kernel.h>
     72 #include <sys/socket.h>
     73 #include <sys/device.h>
     74 
     75 #include <net/if.h>
     76 #include <net/if_arp.h>
     77 #include <net/if_dl.h>
     78 #include <net/if_media.h>
     79 #include <net/if_ether.h>
     80 
     81 #if defined(INET)
     82 #include <netinet/in.h>
     83 #include <netinet/if_inarp.h>
     84 #endif
     85 
     86 #include "bpfilter.h"
     87 #if NBPFILTER > 0
     88 #include <net/bpf.h>
     89 #endif
     90 
     91 #include <vm/vm.h>		/* for vtophys */
     92 
     93 #include <machine/bus.h>
     94 #include <machine/intr.h>
     95 
     96 #include <dev/mii/mii.h>
     97 #include <dev/mii/miivar.h>
     98 
     99 #include <dev/pci/pcireg.h>
    100 #include <dev/pci/pcivar.h>
    101 #include <dev/pci/pcidevs.h>
    102 
    103 #include <dev/pci/if_vrreg.h>
    104 
    105 #if defined(__NetBSD__) && defined(__alpha__)
    106 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
    107 #undef vtophys
    108 #define	vtophys(va)	alpha_XXX_dmamap((vaddr_t)(va))
    109 #endif
    110 
    111 #define	VR_USEIOSPACE
    112 
    113 #define	ETHER_CRC_LEN	4	/* XXX Should be in a common header. */
    114 
    115 /*
    116  * Various supported device vendors/types and their names.
    117  */
    118 static struct vr_type {
    119 	pci_vendor_id_t		vr_vid;
    120 	pci_product_id_t	vr_did;
    121 	const char		*vr_name;
    122 } vr_devs[] = {
    123 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
    124 		"VIA VT3043 Rhine I 10/100BaseTX" },
    125 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
    126 		"VIA VT86C100A Rhine II 10/100BaseTX" },
    127 	{ 0, 0, NULL }
    128 };
    129 
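        /*
         * Software state for the receive and transmit descriptor rings:
         * vr_list_data holds the descriptors themselves, while the
         * vr_chain structures track the mbuf attached to each descriptor
         * and link the descriptors into rings.
         */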
    130 struct vr_list_data {
    131 	struct vr_desc		vr_rx_list[VR_RX_LIST_CNT];
    132 	struct vr_desc		vr_tx_list[VR_TX_LIST_CNT];
    133 };
    134 
    135 struct vr_chain {
    136 	struct vr_desc		*vr_ptr;
    137 	struct mbuf		*vr_mbuf;
    138 	struct vr_chain		*vr_nextdesc;
    139 };
    140 
    141 struct vr_chain_onefrag {
    142 	struct vr_desc		*vr_ptr;
    143 	struct mbuf		*vr_mbuf;
    144 	struct vr_chain_onefrag	*vr_nextdesc;
    145 };
    146 
    147 struct vr_chain_data {
    148 	struct vr_chain_onefrag	vr_rx_chain[VR_RX_LIST_CNT];
    149 	struct vr_chain		vr_tx_chain[VR_TX_LIST_CNT];
    150 
    151 	struct vr_chain_onefrag	*vr_rx_head;
    152 
    153 	struct vr_chain		*vr_tx_head;
    154 	struct vr_chain		*vr_tx_tail;
    155 	struct vr_chain		*vr_tx_free;
    156 };
    157 
    158 struct vr_softc {
    159 	struct device		vr_dev;		/* generic device glue */
    160 	void			*vr_ih;		/* interrupt cookie */
    161 	void			*vr_ats;	/* shutdown hook */
    162 	bus_space_tag_t		vr_bst;		/* bus space tag */
    163 	bus_space_handle_t	vr_bsh;		/* bus space handle */
    164 	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
    165 	struct ethercom		vr_ec;		/* Ethernet common info */
    166 	u_int8_t 		vr_enaddr[ETHER_ADDR_LEN];
    167 	struct mii_data		vr_mii;		/* MII/media info */
    168 	caddr_t			vr_ldata_ptr;
    169 	struct vr_list_data	*vr_ldata;
    170 	struct vr_chain_data	vr_cdata;
    171 };
    172 
    173 /*
    174  * register space access macros
    175  */
    176 #define	CSR_WRITE_4(sc, reg, val)	\
    177 	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
    178 #define	CSR_WRITE_2(sc, reg, val)	\
    179 	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
    180 #define	CSR_WRITE_1(sc, reg, val)	\
    181 	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)
    182 
    183 #define	CSR_READ_4(sc, reg)		\
    184 	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
    185 #define	CSR_READ_2(sc, reg)		\
    186 	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
    187 #define	CSR_READ_1(sc, reg)		\
    188 	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)
    189 
    190 #define	VR_TIMEOUT		1000
    191 
    192 static int vr_newbuf		__P((struct vr_softc *,
    193 						struct vr_chain_onefrag *));
    194 static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
    195 						struct mbuf *));
    196 
    197 static void vr_rxeof		__P((struct vr_softc *));
    198 static void vr_rxeoc		__P((struct vr_softc *));
    199 static void vr_txeof		__P((struct vr_softc *));
    200 static void vr_txeoc		__P((struct vr_softc *));
    201 static void vr_intr		__P((void *));
    202 static void vr_start		__P((struct ifnet *));
    203 static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
    204 static void vr_init		__P((void *));
    205 static void vr_stop		__P((struct vr_softc *));
    206 static void vr_watchdog		__P((struct ifnet *));
    207 static void vr_tick		__P((void *));
    208 
    209 static int vr_ifmedia_upd	__P((struct ifnet *));
    210 static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
    211 
    212 static void vr_mii_sync		__P((struct vr_softc *));
    213 static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
    214 static int vr_mii_readreg	__P((struct device *, int, int));
    215 static void vr_mii_writereg	__P((struct device *, int, int, int));
    216 static void vr_mii_statchg	__P((struct device *));
    217 
    218 static u_int8_t vr_calchash	__P((u_int8_t *));
    219 static void vr_setmulti		__P((struct vr_softc *));
    220 static void vr_reset		__P((struct vr_softc *));
    221 static int vr_list_rx_init	__P((struct vr_softc *));
    222 static int vr_list_tx_init	__P((struct vr_softc *));
    223 
    224 #define	VR_SETBIT(sc, reg, x)				\
    225 	CSR_WRITE_1(sc, reg,				\
    226 		CSR_READ_1(sc, reg) | x)
    227 
    228 #define	VR_CLRBIT(sc, reg, x)				\
    229 	CSR_WRITE_1(sc, reg,				\
    230 		CSR_READ_1(sc, reg) & ~x)
    231 
    232 #define	VR_SETBIT16(sc, reg, x)				\
    233 	CSR_WRITE_2(sc, reg,				\
    234 		CSR_READ_2(sc, reg) | x)
    235 
    236 #define	VR_CLRBIT16(sc, reg, x)				\
    237 	CSR_WRITE_2(sc, reg,				\
    238 		CSR_READ_2(sc, reg) & ~x)
    239 
    240 #define	VR_SETBIT32(sc, reg, x)				\
    241 	CSR_WRITE_4(sc, reg,				\
    242 		CSR_READ_4(sc, reg) | x)
    243 
    244 #define	VR_CLRBIT32(sc, reg, x)				\
    245 	CSR_WRITE_4(sc, reg,				\
    246 		CSR_READ_4(sc, reg) & ~x)
    247 
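        /*
         * Shorthand for setting and clearing bits in the MII command
         * register, used by the bit-banged MII access routines below.
         */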
    248 #define	SIO_SET(x)					\
    249 	CSR_WRITE_1(sc, VR_MIICMD,			\
    250 		CSR_READ_1(sc, VR_MIICMD) | x)
    251 
    252 #define	SIO_CLR(x)					\
    253 	CSR_WRITE_1(sc, VR_MIICMD,			\
    254 		CSR_READ_1(sc, VR_MIICMD) & ~x)
    255 
    256 /*
     257  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
    258  */
    259 static void
    260 vr_mii_sync(sc)
    261 	struct vr_softc *sc;
    262 {
    263 	int i;
    264 
    265 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAOUT);
    266 
    267 	for (i = 0; i < 32; i++) {
    268 		SIO_SET(VR_MIICMD_CLK);
    269 		DELAY(1);
    270 		SIO_CLR(VR_MIICMD_CLK);
    271 		DELAY(1);
    272 	}
    273 
    274 	return;
    275 }
    276 
    277 /*
    278  * Clock a series of bits through the MII.
    279  */
    280 static void
    281 vr_mii_send(sc, bits, cnt)
    282 	struct vr_softc *sc;
    283 	u_int32_t bits;
    284 	int cnt;
    285 {
    286 	int i;
    287 
    288 	SIO_CLR(VR_MIICMD_CLK);
    289 
    290 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
    291 		if (bits & i) {
    292 			SIO_SET(VR_MIICMD_DATAOUT);
    293 		} else {
    294 			SIO_CLR(VR_MIICMD_DATAOUT);
    295 		}
    296 		DELAY(1);
    297 		SIO_CLR(VR_MIICMD_CLK);
    298 		DELAY(1);
    299 		SIO_SET(VR_MIICMD_CLK);
    300 	}
    301 }
    302 
    303 /*
     304  * Read a PHY register through the MII.
    305  */
    306 static int
    307 vr_mii_readreg(self, phy, reg)
    308 	struct device *self;
    309 	int phy, reg;
    310 {
    311 	struct vr_softc *sc = (struct vr_softc *)self;
    312 	int i, ack, val = 0;
    313 
    314 	CSR_WRITE_1(sc, VR_MIICMD, 0);
    315 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
    316 
    317 	/*
    318 	 * Turn on data xmit.
    319 	 */
    320 	SIO_SET(VR_MIICMD_DIR);
    321 
    322 	vr_mii_sync(sc);
    323 
    324 	/*
    325 	 * Send command/address info.
    326 	 */
    327 	vr_mii_send(sc, MII_COMMAND_START, 2);
    328 	vr_mii_send(sc, MII_COMMAND_READ, 2);
    329 	vr_mii_send(sc, phy, 5);
    330 	vr_mii_send(sc, reg, 5);
    331 
    332 	/* Idle bit */
    333 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAOUT));
    334 	DELAY(1);
    335 	SIO_SET(VR_MIICMD_CLK);
    336 	DELAY(1);
    337 
    338 	/* Turn off xmit. */
    339 	SIO_CLR(VR_MIICMD_DIR);
    340 
    341 	/* Check for ack */
    342 	SIO_CLR(VR_MIICMD_CLK);
    343 	DELAY(1);
    344 	SIO_SET(VR_MIICMD_CLK);
    345 	DELAY(1);
    346 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN;
    347 
    348 	/*
    349 	 * Now try reading data bits. If the ack failed, we still
    350 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
    351 	 */
    352 	if (ack) {
    353 		for (i = 0; i < 16; i++) {
    354 			SIO_CLR(VR_MIICMD_CLK);
    355 			DELAY(1);
    356 			SIO_SET(VR_MIICMD_CLK);
    357 			DELAY(1);
    358 		}
    359 		goto fail;
    360 	}
    361 
    362 	for (i = 0x8000; i; i >>= 1) {
    363 		SIO_CLR(VR_MIICMD_CLK);
    364 		DELAY(1);
    365 		if (!ack) {
    366 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN)
    367 				val |= i;
    368 			DELAY(1);
    369 		}
    370 		SIO_SET(VR_MIICMD_CLK);
    371 		DELAY(1);
    372 	}
    373 
    374  fail:
    375 
    376 	SIO_CLR(VR_MIICMD_CLK);
    377 	DELAY(1);
    378 	SIO_SET(VR_MIICMD_CLK);
    379 	DELAY(1);
    380 
    381 	return (val);
    382 }
    383 
    384 /*
    385  * Write to a PHY register through the MII.
    386  */
    387 static void
    388 vr_mii_writereg(self, phy, reg, val)
    389 	struct device *self;
    390 	int phy, reg, val;
    391 {
    392 	struct vr_softc *sc = (struct vr_softc *)self;
    393 
    394 	CSR_WRITE_1(sc, VR_MIICMD, 0);
    395 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
    396 
    397 	/*
    398 	 * Turn on data output.
    399 	 */
    400 	SIO_SET(VR_MIICMD_DIR);
    401 
    402 	vr_mii_sync(sc);
    403 
    404 	vr_mii_send(sc, MII_COMMAND_START, 2);
    405 	vr_mii_send(sc, MII_COMMAND_WRITE, 2);
    406 	vr_mii_send(sc, phy, 5);
    407 	vr_mii_send(sc, reg, 5);
    408 	vr_mii_send(sc, MII_COMMAND_ACK, 2);
    409 	vr_mii_send(sc, val, 16);
    410 
    411 	/* Idle bit. */
    412 	SIO_SET(VR_MIICMD_CLK);
    413 	DELAY(1);
    414 	SIO_CLR(VR_MIICMD_CLK);
    415 	DELAY(1);
    416 
    417 	/*
    418 	 * Turn off xmit.
    419 	 */
    420 	SIO_CLR(VR_MIICMD_DIR);
    421 }
    422 
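        /*
         * Handle a change in media status reported by the MII layer:
         * update the chip's full-duplex bit to match, idling the
         * transmitter and receiver around the change if necessary.
         */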
    423 static void
    424 vr_mii_statchg(self)
    425 	struct device *self;
    426 {
    427 	struct vr_softc *sc = (struct vr_softc *)self;
    428 	int restart = 0;
    429 
    430 	/*
    431 	 * In order to fiddle with the 'full-duplex' bit in the netconfig
    432 	 * register, we first have to put the transmit and/or receive logic
    433 	 * in the idle state.
    434 	 */
    435 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
    436 		restart = 1;
    437 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
    438 	}
    439 
    440 	if (sc->vr_mii.mii_media_active & IFM_FDX)
    441 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
    442 	else
    443 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
    444 
    445 	if (restart)
    446 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
    447 
    448 	/* XXX Update ifp->if_baudrate */
    449 }
    450 
    451 /*
     452  * Calculate the CRC of a multicast group address; return the upper 6 bits.
    453  */
    454 static u_int8_t
    455 vr_calchash(addr)
    456 	u_int8_t *addr;
    457 {
    458 	u_int32_t crc, carry;
    459 	int i, j;
    460 	u_int8_t c;
    461 
    462 	/* Compute CRC for the address value. */
    463 	crc = 0xFFFFFFFF; /* initial value */
    464 
    465 	for (i = 0; i < 6; i++) {
    466 		c = *(addr + i);
    467 		for (j = 0; j < 8; j++) {
    468 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
    469 			crc <<= 1;
    470 			c >>= 1;
    471 			if (carry)
    472 				crc = (crc ^ 0x04c11db6) | carry;
    473 		}
    474 	}
    475 
    476 	/* return the filter bit position */
    477 	return ((crc >> 26) & 0x0000003F);
    478 }
    479 
    480 /*
    481  * Program the 64-bit multicast hash filter.
    482  */
    483 static void
    484 vr_setmulti(sc)
    485 	struct vr_softc *sc;
    486 {
    487 	struct ifnet *ifp;
    488 	int h = 0;
    489 	u_int32_t hashes[2] = { 0, 0 };
    490 	struct ether_multistep step;
    491 	struct ether_multi *enm;
    492 	int mcnt = 0;
    493 	u_int8_t rxfilt;
    494 
    495 	ifp = &sc->vr_ec.ec_if;
    496 
    497 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
    498 
    499 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
    500 		rxfilt |= VR_RXCFG_RX_MULTI;
    501 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
    502 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
    503 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
    504 		return;
    505 	}
    506 
    507 	/* first, zot all the existing hash bits */
    508 	CSR_WRITE_4(sc, VR_MAR0, 0);
    509 	CSR_WRITE_4(sc, VR_MAR1, 0);
    510 
    511 	/* now program new ones */
    512 	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
    513 	while (enm != NULL) {
    514 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
        			/*
        			 * A range of multicast addresses can't be matched
        			 * by the hash filter; fall back to receiving all
        			 * multicast frames.
        			 */
        			rxfilt |= VR_RXCFG_RX_MULTI;
        			CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
        			CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
        			CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
        			return;
    515 		}
    516 
    517 		h = vr_calchash(enm->enm_addrlo);
    518 
    519 		if (h < 32)
    520 			hashes[0] |= (1 << h);
    521 		else
    522 			hashes[1] |= (1 << (h - 32));
    523 		ETHER_NEXT_MULTI(step, enm);
    524 		mcnt++;
    525 	}
    526 
    527 	if (mcnt)
    528 		rxfilt |= VR_RXCFG_RX_MULTI;
    529 	else
    530 		rxfilt &= ~VR_RXCFG_RX_MULTI;
    531 
    532 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
    533 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
    534 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
    535 
    536 	return;
    537 }
    538 
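        /*
         * Issue a software reset and wait for the chip to report that
         * the reset has completed.
         */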
    539 static void
    540 vr_reset(sc)
    541 	struct vr_softc *sc;
    542 {
    543 	int i;
    544 
    545 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
    546 
    547 	for (i = 0; i < VR_TIMEOUT; i++) {
    548 		DELAY(10);
    549 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
    550 			break;
    551 	}
    552 	if (i == VR_TIMEOUT)
    553 		printf("%s: reset never completed!\n",
    554 			sc->vr_dev.dv_xname);
    555 
    556 	/* Wait a little while for the chip to get its brains in order. */
    557 	DELAY(1000);
    558 
    559 	return;
    560 }
    561 
    562 /*
    563  * Initialize the transmit descriptors.
    564  */
    565 static int
    566 vr_list_tx_init(sc)
    567 	struct vr_softc *sc;
    568 {
    569 	struct vr_chain_data *cd;
    570 	struct vr_list_data *ld;
    571 	int i;
    572 
    573 	cd = &sc->vr_cdata;
    574 	ld = sc->vr_ldata;
    575 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
    576 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
    577 		if (i == (VR_TX_LIST_CNT - 1))
    578 			cd->vr_tx_chain[i].vr_nextdesc =
    579 				&cd->vr_tx_chain[0];
    580 		else
    581 			cd->vr_tx_chain[i].vr_nextdesc =
    582 				&cd->vr_tx_chain[i + 1];
    583 	}
    584 
    585 	cd->vr_tx_free = &cd->vr_tx_chain[0];
    586 	cd->vr_tx_tail = cd->vr_tx_head = NULL;
    587 
    588 	return (0);
    589 }
    590 
    591 
    592 /*
    593  * Initialize the RX descriptors and allocate mbufs for them. Note that
    594  * we arrange the descriptors in a closed ring, so that the last descriptor
    595  * points back to the first.
    596  */
    597 static int
    598 vr_list_rx_init(sc)
    599 	struct vr_softc *sc;
    600 {
    601 	struct vr_chain_data *cd;
    602 	struct vr_list_data *ld;
    603 	int i;
    604 
    605 	cd = &sc->vr_cdata;
    606 	ld = sc->vr_ldata;
    607 
    608 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
    609 		cd->vr_rx_chain[i].vr_ptr =
    610 			(struct vr_desc *)&ld->vr_rx_list[i];
    611 		if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
    612 			return (ENOBUFS);
    613 		if (i == (VR_RX_LIST_CNT - 1)) {
    614 			cd->vr_rx_chain[i].vr_nextdesc =
    615 					&cd->vr_rx_chain[0];
    616 			ld->vr_rx_list[i].vr_next =
    617 					vtophys(&ld->vr_rx_list[0]);
    618 		} else {
    619 			cd->vr_rx_chain[i].vr_nextdesc =
    620 					&cd->vr_rx_chain[i + 1];
    621 			ld->vr_rx_list[i].vr_next =
    622 					vtophys(&ld->vr_rx_list[i + 1]);
    623 		}
    624 	}
    625 
    626 	cd->vr_rx_head = &cd->vr_rx_chain[0];
    627 
    628 	return (0);
    629 }
    630 
    631 /*
    632  * Initialize an RX descriptor and attach an MBUF cluster.
    633  * Note: the length fields are only 11 bits wide, which means the
    634  * largest size we can specify is 2047. This is important because
    635  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
    636  * overflow the field and make a mess.
    637  */
    638 static int
    639 vr_newbuf(sc, c)
    640 	struct vr_softc *sc;
    641 	struct vr_chain_onefrag *c;
    642 {
    643 	struct mbuf *m_new = NULL;
    644 
    645 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
    646 	if (m_new == NULL) {
    647 		printf("%s: no memory for rx list -- packet dropped!\n",
    648 			sc->vr_dev.dv_xname);
    649 		return (ENOBUFS);
    650 	}
    651 
    652 	MCLGET(m_new, M_DONTWAIT);
    653 	if (!(m_new->m_flags & M_EXT)) {
    654 		printf("%s: no memory for rx list -- packet dropped!\n",
    655 			sc->vr_dev.dv_xname);
    656 		m_freem(m_new);
    657 		return (ENOBUFS);
    658 	}
    659 
    660 	c->vr_mbuf = m_new;
    661 	c->vr_ptr->vr_status = VR_RXSTAT;
    662 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
    663 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
    664 
    665 	return (0);
    666 }
    667 
    668 /*
    669  * A frame has been uploaded: pass the resulting mbuf chain up to
    670  * the higher level protocols.
    671  */
    672 static void
    673 vr_rxeof(sc)
    674 	struct vr_softc *sc;
    675 {
    676 	struct ether_header *eh;
    677 	struct mbuf *m;
    678 	struct ifnet *ifp;
    679 	struct vr_chain_onefrag *cur_rx;
    680 	int total_len = 0;
    681 	u_int32_t rxstat;
    682 
    683 	ifp = &sc->vr_ec.ec_if;
    684 
    685 	while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
    686 							VR_RXSTAT_OWN)) {
    687 		cur_rx = sc->vr_cdata.vr_rx_head;
    688 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
    689 
    690 		/*
    691 		 * If an error occurs, update stats, clear the
    692 		 * status word and leave the mbuf cluster in place:
    693 		 * it should simply get re-used next time this descriptor
    694 		 * comes up in the ring.
    695 		 */
    696 		if (rxstat & VR_RXSTAT_RXERR) {
    697 			ifp->if_ierrors++;
    698 			printf("%s: rx error: ", sc->vr_dev.dv_xname);
    699 			switch (rxstat & 0x000000FF) {
    700 			case VR_RXSTAT_CRCERR:
    701 				printf("crc error\n");
    702 				break;
    703 			case VR_RXSTAT_FRAMEALIGNERR:
    704 				printf("frame alignment error\n");
    705 				break;
    706 			case VR_RXSTAT_FIFOOFLOW:
    707 				printf("FIFO overflow\n");
    708 				break;
    709 			case VR_RXSTAT_GIANT:
    710 				printf("received giant packet\n");
    711 				break;
    712 			case VR_RXSTAT_RUNT:
    713 				printf("received runt packet\n");
    714 				break;
    715 			case VR_RXSTAT_BUSERR:
    716 				printf("system bus error\n");
    717 				break;
    718 			case VR_RXSTAT_BUFFERR:
    719 				printf("rx buffer error\n");
    720 				break;
    721 			default:
    722 				printf("unknown rx error\n");
    723 				break;
    724 			}
    725 			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
    726 			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
    727 			continue;
    728 		}
    729 
    730 		/* No errors; receive the packet. */
    731 		m = cur_rx->vr_mbuf;
    732 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
    733 
    734 		/*
    735 		 * XXX The VIA Rhine chip includes the CRC with every
    736 		 * received frame, and there's no way to turn this
    737 		 * behavior off (at least, I can't find anything in
    738 		 * the manual that explains how to do it) so we have
    739 		 * to trim off the CRC manually.
    740 		 */
    741 		total_len -= ETHER_CRC_LEN;
    742 
    743 		/*
    744 		 * Try to conjure up a new mbuf cluster. If that
    745 		 * fails, it means we have an out of memory condition and
    746 		 * should leave the buffer in place and continue. This will
    747 		 * result in a lost packet, but there's little else we
    748 		 * can do in this situation.
    749 		 */
    750 		if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
    751 			ifp->if_ierrors++;
    752 			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
    753 			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
    754 			continue;
    755 		}
    756 
    757 		ifp->if_ipackets++;
    758 		eh = mtod(m, struct ether_header *);
    759 		m->m_pkthdr.rcvif = ifp;
    760 		m->m_pkthdr.len = m->m_len = total_len;
    761 #if NBPFILTER > 0
    762 		/*
    763 		 * Handle BPF listeners. Let the BPF user see the packet, but
    764 		 * don't pass it up to the ether_input() layer unless it's
    765 		 * a broadcast packet, multicast packet, matches our ethernet
    766 		 * address or the interface is in promiscuous mode.
    767 		 */
    768 		if (ifp->if_bpf) {
    769 			bpf_mtap(ifp->if_bpf, m);
    770 			if (ifp->if_flags & IFF_PROMISC &&
    771 				(memcmp(eh->ether_dhost, sc->vr_enaddr,
    772 						ETHER_ADDR_LEN) &&
    773 					(eh->ether_dhost[0] & 1) == 0)) {
    774 				m_freem(m);
    775 				continue;
    776 			}
    777 		}
    778 #endif
    779 		/* Remove header from mbuf and pass it on. */
    780 		m_adj(m, sizeof (struct ether_header));
    781 		ether_input(ifp, eh, m);
    782 	}
    783 }
    784 
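        /*
         * RX 'end of channel' handler: drain any completed frames, then
         * restart the receiver at the current head of the RX ring.
         */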
    785 static void
    786 vr_rxeoc(sc)
    787 	struct vr_softc *sc;
    788 {
    789 
    790 	vr_rxeof(sc);
    791 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
    792 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
    793 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
    794 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
    795 }
    796 
    797 /*
    798  * A frame was downloaded to the chip. It's safe for us to clean up
    799  * the list buffers.
    800  */
    801 
    802 static void
    803 vr_txeof(sc)
    804 	struct vr_softc *sc;
    805 {
    806 	struct vr_chain *cur_tx;
    807 	struct ifnet *ifp;
    808 	register struct mbuf *n;
    809 
    810 	ifp = &sc->vr_ec.ec_if;
    811 
    812 	/* Clear the timeout timer. */
    813 	ifp->if_timer = 0;
    814 
    815 	/* Sanity check. */
    816 	if (sc->vr_cdata.vr_tx_head == NULL)
    817 		return;
    818 
    819 	/*
    820 	 * Go through our tx list and free mbufs for those
    821 	 * frames that have been transmitted.
    822 	 */
    823 	while (sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
    824 		u_int32_t txstat;
    825 
    826 		cur_tx = sc->vr_cdata.vr_tx_head;
    827 		txstat = cur_tx->vr_ptr->vr_status;
    828 
    829 		if (txstat & VR_TXSTAT_OWN)
    830 			break;
    831 
    832 		if (txstat & VR_TXSTAT_ERRSUM) {
    833 			ifp->if_oerrors++;
    834 			if (txstat & VR_TXSTAT_DEFER)
    835 				ifp->if_collisions++;
    836 			if (txstat & VR_TXSTAT_LATECOLL)
    837 				ifp->if_collisions++;
    838 		}
    839 
    840 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
    841 
    842 		ifp->if_opackets++;
    843 		MFREE(cur_tx->vr_mbuf, n);
    844 		cur_tx->vr_mbuf = NULL;
    845 
    846 		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
    847 			sc->vr_cdata.vr_tx_head = NULL;
    848 			sc->vr_cdata.vr_tx_tail = NULL;
    849 			break;
    850 		}
    851 
    852 		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
    853 	}
    854 }
    855 
    856 /*
    857  * TX 'end of channel' interrupt handler.
    858  */
    859 static void
    860 vr_txeoc(sc)
    861 	struct vr_softc *sc;
    862 {
    863 	struct ifnet *ifp;
    864 
    865 	ifp = &sc->vr_ec.ec_if;
    866 
    867 	ifp->if_timer = 0;
    868 
    869 	if (sc->vr_cdata.vr_tx_head == NULL) {
    870 		ifp->if_flags &= ~IFF_OACTIVE;
    871 		sc->vr_cdata.vr_tx_tail = NULL;
    872 	}
    873 }
    874 
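        /*
         * Interrupt handler: service receive, transmit and error
         * conditions until the chip reports no further pending causes.
         */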
    875 static void
    876 vr_intr(arg)
    877 	void *arg;
    878 {
    879 	struct vr_softc *sc;
    880 	struct ifnet *ifp;
    881 	u_int16_t status;
    882 
    883 	sc = arg;
    884 	ifp = &sc->vr_ec.ec_if;
    885 
    886 	/* Suppress unwanted interrupts. */
    887 	if (!(ifp->if_flags & IFF_UP)) {
    888 		vr_stop(sc);
    889 		return;
    890 	}
    891 
    892 	/* Disable interrupts. */
    893 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
    894 
    895 	for (;;) {
    896 		status = CSR_READ_2(sc, VR_ISR);
    897 		if (status)
    898 			CSR_WRITE_2(sc, VR_ISR, status);
    899 
    900 		if ((status & VR_INTRS) == 0)
    901 			break;
    902 
    903 		if (status & VR_ISR_RX_OK)
    904 			vr_rxeof(sc);
    905 
    906 		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
    907 		    (status & VR_ISR_RX_OFLOW) ||
    908 		    (status & VR_ISR_RX_DROPPED)) {
    909 			vr_rxeof(sc);
    910 			vr_rxeoc(sc);
    911 		}
    912 
    913 		if (status & VR_ISR_TX_OK) {
    914 			vr_txeof(sc);
    915 			vr_txeoc(sc);
    916 		}
    917 
    918 		if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)) {
    919 			ifp->if_oerrors++;
    920 			vr_txeof(sc);
    921 			if (sc->vr_cdata.vr_tx_head != NULL) {
    922 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
    923 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
    924 			}
    925 		}
    926 
    927 		if (status & VR_ISR_BUSERR) {
    928 			vr_reset(sc);
    929 			vr_init(sc);
    930 		}
    931 	}
    932 
    933 	/* Re-enable interrupts. */
    934 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
    935 
    936 	if (ifp->if_snd.ifq_head != NULL) {
    937 		vr_start(ifp);
    938 	}
    939 }
    940 
    941 /*
    942  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
    943  * pointers to the fragment pointers.
    944  */
    945 static int
    946 vr_encap(sc, c, m_head)
    947 	struct vr_softc *sc;
    948 	struct vr_chain *c;
    949 	struct mbuf *m_head;
    950 {
    951 	int frag = 0;
    952 	struct vr_desc *f = NULL;
    953 	int total_len;
    954 	struct mbuf *m;
    955 
    956 	m = m_head;
    957 	total_len = 0;
    958 
    959 	/*
    960 	 * The VIA Rhine wants packet buffers to be longword
    961 	 * aligned, but very often our mbufs aren't. Rather than
    962 	 * waste time trying to decide when to copy and when not
    963 	 * to copy, just do it all the time.
    964 	 */
    965 	if (m != NULL) {
    966 		struct mbuf		*m_new = NULL;
    967 
    968 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
    969 		if (m_new == NULL) {
    970 			printf("%s: no memory for tx list\n",
    971 				sc->vr_dev.dv_xname);
    972 			return (1);
    973 		}
    974 		if (m_head->m_pkthdr.len > MHLEN) {
    975 			MCLGET(m_new, M_DONTWAIT);
    976 			if (!(m_new->m_flags & M_EXT)) {
    977 				m_freem(m_new);
    978 				printf("%s: no memory for tx list\n",
    979 					sc->vr_dev.dv_xname);
    980 				return (1);
    981 			}
    982 		}
    983 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
    984 					mtod(m_new, caddr_t));
    985 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
    986 		m_freem(m_head);
    987 		m_head = m_new;
    988 		/*
    989 		 * The Rhine chip doesn't auto-pad, so we have to make
    990 		 * sure to pad short frames out to the minimum frame length
    991 		 * ourselves.
    992 		 */
    993 		if (m_head->m_len < VR_MIN_FRAMELEN) {
    994 			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
    995 			m_new->m_len = m_new->m_pkthdr.len;
    996 		}
    997 		f = c->vr_ptr;
    998 		f->vr_data = vtophys(mtod(m_new, caddr_t));
    999 		f->vr_ctl = total_len = m_new->m_len;
   1000 		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
   1001 		f->vr_status = 0;
   1002 		frag = 1;
   1003 	}
   1004 
   1005 	c->vr_mbuf = m_head;
   1006 	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
   1007 	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
   1008 
   1009 	return (0);
   1010 }
   1011 
   1012 /*
   1013  * Main transmit routine. Because the Rhine requires longword-aligned
   1014  * transmit buffers, vr_encap() copies each outgoing packet into a
   1015  * freshly allocated mbuf before handing its physical address to the
   1016  * chip; the mbuf pointer is saved so it can be freed once transmitted.
   1017  */
   1018 static void
   1019 vr_start(ifp)
   1020 	struct ifnet *ifp;
   1021 {
   1022 	struct vr_softc *sc;
   1023 	struct mbuf *m_head = NULL;
   1024 	struct vr_chain *cur_tx = NULL, *start_tx;
   1025 
   1026 	sc = ifp->if_softc;
   1027 
   1028 	/*
   1029 	 * Check for an available queue slot. If there are none,
   1030 	 * punt.
   1031 	 */
   1032 	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
   1033 		ifp->if_flags |= IFF_OACTIVE;
   1034 		return;
   1035 	}
   1036 
   1037 	start_tx = sc->vr_cdata.vr_tx_free;
   1038 
   1039 	while (sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
   1040 		IF_DEQUEUE(&ifp->if_snd, m_head);
   1041 		if (m_head == NULL)
   1042 			break;
   1043 
   1044 		/* Pick a descriptor off the free list. */
   1045 		cur_tx = sc->vr_cdata.vr_tx_free;
   1046 		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
   1047 
   1048 		/* Pack the data into the descriptor. */
   1049 		vr_encap(sc, cur_tx, m_head);
   1050 
   1051 		if (cur_tx != start_tx)
   1052 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
   1053 
   1054 #if NBPFILTER > 0
   1055 		/*
   1056 		 * If there's a BPF listener, bounce a copy of this frame
   1057 		 * to him.
   1058 		 */
   1059 		if (ifp->if_bpf)
   1060 			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf);
   1061 #endif
   1062 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
   1063 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
   1064 	}
   1065 
   1066 	/*
   1067 	 * If there are no frames queued, bail.
   1068 	 */
   1069 	if (cur_tx == NULL)
   1070 		return;
   1071 
   1072 	sc->vr_cdata.vr_tx_tail = cur_tx;
   1073 
   1074 	if (sc->vr_cdata.vr_tx_head == NULL)
   1075 		sc->vr_cdata.vr_tx_head = start_tx;
   1076 
   1077 	/*
   1078 	 * Set a timeout in case the chip goes out to lunch.
   1079 	 */
   1080 	ifp->if_timer = 5;
   1081 }
   1082 
   1083 /*
   1084  * Initialize the interface.  Must be called at splnet.
   1085  */
   1086 static void
   1087 vr_init(xsc)
   1088 	void *xsc;
   1089 {
   1090 	struct vr_softc *sc = xsc;
   1091 	struct ifnet *ifp = &sc->vr_ec.ec_if;
   1092 
   1093 	/*
   1094 	 * Cancel pending I/O and free all RX/TX buffers.
   1095 	 */
   1096 	vr_stop(sc);
   1097 	vr_reset(sc);
   1098 
   1099 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
   1100 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
   1101 
   1102 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
   1103 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
   1104 
   1105 	/* Init circular RX list. */
   1106 	if (vr_list_rx_init(sc) == ENOBUFS) {
   1107 		printf("%s: initialization failed: no "
   1108 			"memory for rx buffers\n", sc->vr_dev.dv_xname);
   1109 		vr_stop(sc);
   1110 		return;
   1111 	}
   1112 
   1113 	/*
   1114 	 * Init tx descriptors.
   1115 	 */
   1116 	vr_list_tx_init(sc);
   1117 
   1118 	/* If we want promiscuous mode, set the promiscuous receive bit. */
   1119 	if (ifp->if_flags & IFF_PROMISC)
   1120 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
   1121 	else
   1122 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
   1123 
   1124 	/* Set capture broadcast bit to capture broadcast frames. */
   1125 	if (ifp->if_flags & IFF_BROADCAST)
   1126 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
   1127 	else
   1128 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
   1129 
   1130 	/*
   1131 	 * Program the multicast filter, if necessary.
   1132 	 */
   1133 	vr_setmulti(sc);
   1134 
   1135 	/*
   1136 	 * Load the address of the RX list.
   1137 	 */
   1138 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
   1139 
   1140 	/* Enable receiver and transmitter. */
   1141 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
   1142 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
   1143 				    VR_CMD_RX_GO);
   1144 
   1145 	/* Set current media. */
   1146 	mii_mediachg(&sc->vr_mii);
   1147 
   1148 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
   1149 
   1150 	/*
   1151 	 * Enable interrupts.
   1152 	 */
   1153 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
   1154 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
   1155 
   1156 	ifp->if_flags |= IFF_RUNNING;
   1157 	ifp->if_flags &= ~IFF_OACTIVE;
   1158 
   1159 	/* Start one second timer. */
   1160 	timeout(vr_tick, sc, hz);
   1161 }
   1162 
   1163 /*
   1164  * Set media options.
   1165  */
   1166 static int
   1167 vr_ifmedia_upd(ifp)
   1168 	struct ifnet *ifp;
   1169 {
   1170 	struct vr_softc *sc = ifp->if_softc;
   1171 
   1172 	if (ifp->if_flags & IFF_UP)
   1173 		mii_mediachg(&sc->vr_mii);
   1174 	return (0);
   1175 }
   1176 
   1177 /*
   1178  * Report current media status.
   1179  */
   1180 static void
   1181 vr_ifmedia_sts(ifp, ifmr)
   1182 	struct ifnet *ifp;
   1183 	struct ifmediareq *ifmr;
   1184 {
   1185 	struct vr_softc *sc = ifp->if_softc;
   1186 
   1187 	mii_pollstat(&sc->vr_mii);
   1188 	ifmr->ifm_status = sc->vr_mii.mii_media_status;
   1189 	ifmr->ifm_active = sc->vr_mii.mii_media_active;
   1190 }
   1191 
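        /*
         * Handle interface ioctls: address assignment, interface flags,
         * MTU, multicast list changes and media selection.
         */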
   1192 static int
   1193 vr_ioctl(ifp, command, data)
   1194 	struct ifnet *ifp;
   1195 	u_long command;
   1196 	caddr_t data;
   1197 {
   1198 	struct vr_softc *sc = ifp->if_softc;
   1199 	struct ifreq *ifr = (struct ifreq *)data;
   1200 	struct ifaddr *ifa = (struct ifaddr *)data;
   1201 	int s, error = 0;
   1202 
   1203 	s = splnet();
   1204 
   1205 	switch (command) {
   1206 	case SIOCSIFADDR:
   1207 		ifp->if_flags |= IFF_UP;
   1208 
   1209 		switch (ifa->ifa_addr->sa_family) {
   1210 #ifdef INET
   1211 		case AF_INET:
   1212 			vr_init(sc);
   1213 			arp_ifinit(ifp, ifa);
   1214 			break;
   1215 #endif /* INET */
   1216 		default:
   1217 			vr_init(sc);
   1218 			break;
   1219 		}
   1220 		break;
   1221 
   1222 	case SIOCGIFADDR:
   1223 		bcopy((caddr_t) sc->vr_enaddr,
   1224 			(caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
   1225 			ETHER_ADDR_LEN);
   1226 		break;
   1227 
   1228 	case SIOCSIFMTU:
   1229 		if (ifr->ifr_mtu > ETHERMTU)
   1230 			error = EINVAL;
   1231 		else
   1232 			ifp->if_mtu = ifr->ifr_mtu;
   1233 		break;
   1234 
   1235 	case SIOCSIFFLAGS:
   1236 		if (ifp->if_flags & IFF_UP) {
   1237 			vr_init(sc);
   1238 		} else {
   1239 			if (ifp->if_flags & IFF_RUNNING)
   1240 				vr_stop(sc);
   1241 		}
   1242 		error = 0;
   1243 		break;
   1244 	case SIOCADDMULTI:
   1245 	case SIOCDELMULTI:
   1246 		if (command == SIOCADDMULTI)
   1247 			error = ether_addmulti(ifr, &sc->vr_ec);
   1248 		else
   1249 			error = ether_delmulti(ifr, &sc->vr_ec);
   1250 
   1251 		if (error == ENETRESET) {
   1252 			vr_setmulti(sc);
   1253 			error = 0;
   1254 		}
   1255 		break;
   1256 	case SIOCGIFMEDIA:
   1257 	case SIOCSIFMEDIA:
   1258 		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
   1259 		break;
   1260 	default:
   1261 		error = EINVAL;
   1262 		break;
   1263 	}
   1264 
   1265 	splx(s);
   1266 
   1267 	return (error);
   1268 }
   1269 
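        /*
         * Watchdog timer handler: a transmission has taken too long to
         * complete, so assume the chip has wedged and reset it.
         */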
   1270 static void
   1271 vr_watchdog(ifp)
   1272 	struct ifnet *ifp;
   1273 {
   1274 	struct vr_softc *sc;
   1275 
   1276 	sc = ifp->if_softc;
   1277 
   1278 	ifp->if_oerrors++;
   1279 	printf("%s: watchdog timeout\n", sc->vr_dev.dv_xname);
   1280 
   1281 	vr_stop(sc);
   1282 	vr_reset(sc);
   1283 	vr_init(sc);
   1284 
   1285 	if (ifp->if_snd.ifq_head != NULL)
   1286 		vr_start(ifp);
   1287 
   1288 	return;
   1289 }
   1290 
   1291 /*
   1292  * One second timer, used to tick MII.
   1293  */
   1294 static void
   1295 vr_tick(arg)
   1296 	void *arg;
   1297 {
   1298 	struct vr_softc *sc = arg;
   1299 	int s;
   1300 
   1301 	s = splnet();
   1302 	mii_tick(&sc->vr_mii);
   1303 	splx(s);
   1304 
   1305 	timeout(vr_tick, sc, hz);
   1306 }
   1307 
   1308 /*
   1309  * Stop the adapter and free any mbufs allocated to the
   1310  * RX and TX lists.
   1311  */
   1312 static void
   1313 vr_stop(sc)
   1314 	struct vr_softc *sc;
   1315 {
   1316 	struct ifnet *ifp;
   1317 	int i;
   1318 
   1319 	/* Cancel one second timer. */
   1320 	untimeout(vr_tick, sc);
   1321 
   1322 	ifp = &sc->vr_ec.ec_if;
   1323 	ifp->if_timer = 0;
   1324 
   1325 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
   1326 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
   1327 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
   1328 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
   1329 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
   1330 
   1331 	/*
   1332 	 * Free data in the RX lists.
   1333 	 */
   1334 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
   1335 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
   1336 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
   1337 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
   1338 		}
   1339 	}
   1340 	bzero((char *)&sc->vr_ldata->vr_rx_list,
   1341 		sizeof (sc->vr_ldata->vr_rx_list));
   1342 
   1343 	/*
   1344 	 * Free the TX list buffers.
   1345 	 */
   1346 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
   1347 		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
   1348 			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
   1349 			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
   1350 		}
   1351 	}
   1352 
   1353 	bzero((char *)&sc->vr_ldata->vr_tx_list,
   1354 		sizeof (sc->vr_ldata->vr_tx_list));
   1355 
   1356 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1357 }
   1358 
   1359 static struct vr_type *vr_lookup __P((struct pci_attach_args *));
   1360 static int vr_probe __P((struct device *, struct cfdata *, void *));
   1361 static void vr_attach __P((struct device *, struct device *, void *));
   1362 static void vr_shutdown __P((void *));
   1363 
   1364 struct cfattach vr_ca = {
   1365 	sizeof (struct vr_softc), vr_probe, vr_attach
   1366 };
   1367 
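        /*
         * Look up a device in the supported-device table by PCI vendor
         * and product ID; returns NULL if the device isn't one of ours.
         */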
   1368 static struct vr_type *
   1369 vr_lookup(pa)
   1370 	struct pci_attach_args *pa;
   1371 {
   1372 	struct vr_type *vrt;
   1373 
   1374 	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
   1375 		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
   1376 		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
   1377 			return (vrt);
   1378 	}
   1379 	return (NULL);
   1380 }
   1381 
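        /*
         * Match routine: succeed if the PCI device is a supported Rhine.
         */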
   1382 static int
   1383 vr_probe(parent, match, aux)
   1384 	struct device *parent;
   1385 	struct cfdata *match;
   1386 	void *aux;
   1387 {
   1388 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
   1389 
   1390 	if (vr_lookup(pa) != NULL)
   1391 		return (1);
   1392 
   1393 	return (0);
   1394 }
   1395 
   1396 /*
   1397  * Stop all chip I/O so that the kernel's probe routines don't
   1398  * get confused by errant DMAs when rebooting.
   1399  */
   1400 static void
   1401 vr_shutdown(arg)
   1402 	void *arg;
   1403 {
   1404 	struct vr_softc *sc = (struct vr_softc *)arg;
   1405 
   1406 	vr_stop(sc);
   1407 }
   1408 
   1409 /*
   1410  * Attach the interface. Allocate softc structures, do ifmedia
   1411  * setup and ethernet/BPF attach.
   1412  */
   1413 static void
   1414 vr_attach(parent, self, aux)
   1415 	struct device *parent;
   1416 	struct device *self;
   1417 	void *aux;
   1418 {
   1419 	struct vr_softc *sc = (struct vr_softc *) self;
   1420 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
   1421 	struct vr_type *vrt;
   1422 	int i;
   1423 	u_int32_t command;
   1424 	struct ifnet *ifp;
   1425 	unsigned int round;
   1426 	caddr_t roundptr;
   1427 	u_char eaddr[ETHER_ADDR_LEN];
   1428 
   1429 #define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
   1430 #define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
   1431 
   1432 	vrt = vr_lookup(pa);
   1433 	if (vrt == NULL) {
   1434 		printf("\n");
   1435 		panic("vr_attach: impossible");
   1436 	}
   1437 
   1438 	printf(": %s Ethernet\n", vrt->vr_name);
   1439 
   1440 	/*
   1441 	 * Handle power management nonsense.
   1442 	 */
   1443 
   1444 	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
   1445 	if (command == 0x01) {
   1446 		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
   1447 		if (command & VR_PSTATE_MASK) {
   1448 			u_int32_t iobase, membase, irq;
   1449 
   1450 			/* Save important PCI config data. */
   1451 			iobase = PCI_CONF_READ(VR_PCI_LOIO);
   1452 			membase = PCI_CONF_READ(VR_PCI_LOMEM);
   1453 			irq = PCI_CONF_READ(VR_PCI_INTLINE);
   1454 
   1455 			/* Reset the power state. */
   1456 			printf("%s: chip is in D%d power mode "
   1457 				"-- setting to D0\n",
   1458 				sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
   1459 			command &= 0xFFFFFFFC;
   1460 			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
   1461 
   1462 			/* Restore PCI config data. */
   1463 			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
   1464 			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
   1465 			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
   1466 		}
   1467 	}
   1468 
   1469 	/*
   1470 	 * Map control/status registers.
   1471 	 */
   1472 	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
   1473 	command |= (PCI_COMMAND_IO_ENABLE |
   1474 		    PCI_COMMAND_MEM_ENABLE |
   1475 		    PCI_COMMAND_MASTER_ENABLE);
   1476 	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
   1477 	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
   1478 
   1479 	{
   1480 		bus_space_tag_t iot, memt;
   1481 		bus_space_handle_t ioh, memh;
   1482 		int ioh_valid, memh_valid;
   1483 		pci_intr_handle_t intrhandle;
   1484 		const char *intrstr;
   1485 
   1486 		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
   1487 			PCI_MAPREG_TYPE_IO, 0,
   1488 			&iot, &ioh, NULL, NULL) == 0);
   1489 		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
   1490 			PCI_MAPREG_TYPE_MEM |
   1491 			PCI_MAPREG_MEM_TYPE_32BIT,
   1492 			0, &memt, &memh, NULL, NULL) == 0);
   1493 #if defined(VR_USEIOSPACE)
   1494 		if (ioh_valid) {
   1495 			sc->vr_bst = iot;
   1496 			sc->vr_bsh = ioh;
   1497 		} else if (memh_valid) {
   1498 			sc->vr_bst = memt;
   1499 			sc->vr_bsh = memh;
   1500 		}
   1501 #else
   1502 		if (memh_valid) {
   1503 			sc->vr_bst = memt;
   1504 			sc->vr_bsh = memh;
   1505 		} else if (ioh_valid) {
   1506 			sc->vr_bst = iot;
   1507 			sc->vr_bsh = ioh;
   1508 		}
   1509 #endif
   1510 		else {
   1511 			printf(": unable to map device registers\n");
   1512 			return;
   1513 		}
   1514 
   1515 		/* Allocate interrupt */
   1516 		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
   1517 				pa->pa_intrline, &intrhandle)) {
   1518 			printf("%s: couldn't map interrupt\n",
   1519 				sc->vr_dev.dv_xname);
   1520 			return;
   1521 		}
   1522 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
   1523 		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
   1524 						(void *)vr_intr, sc);
   1525 		if (sc->vr_ih == NULL) {
   1526 			printf("%s: couldn't establish interrupt",
   1527 				sc->vr_dev.dv_xname);
   1528 			if (intrstr != NULL)
   1529 				printf(" at %s", intrstr);
   1530 			printf("\n");
        			return;
   1531 		}
   1532 		printf("%s: interrupting at %s\n",
   1533 			sc->vr_dev.dv_xname, intrstr);
   1534 	}
   1539 
   1540 	/* Reset the adapter. */
   1541 	vr_reset(sc);
   1542 
   1543 	/*
   1544 	 * Get station address. The way the Rhine chips work,
   1545 	 * you're not allowed to directly access the EEPROM once
   1546 	 * they've been programmed a special way. Consequently,
   1547 	 * we need to read the node address from the PAR0 and PAR1
   1548 	 * registers.
   1549 	 */
   1550 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
   1551 	DELAY(200);
   1552 	for (i = 0; i < ETHER_ADDR_LEN; i++)
   1553 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
   1554 
   1555 	/*
   1556 	 * A Rhine chip was detected. Inform the world.
   1557 	 */
   1558 	printf("%s: Ethernet address: %s\n",
   1559 		sc->vr_dev.dv_xname, ether_sprintf(eaddr));
   1560 
   1561 	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
   1562 
   1563 	sc->vr_ldata_ptr = malloc(sizeof (struct vr_list_data) + 8,
   1564 				M_DEVBUF, M_NOWAIT);
   1565 	if (sc->vr_ldata_ptr == NULL) {
   1567 		printf("%s: no memory for list buffers!\n",
   1568 			sc->vr_dev.dv_xname);
   1569 		return;
   1570 	}
   1571 
   1572 	sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
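        	/*
        	 * The list data was allocated with 8 bytes of slack; round
        	 * the pointer up to an 8-byte boundary before using it.
        	 */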
   1573 	round = (unsigned long)sc->vr_ldata_ptr & 0xF;
   1574 	roundptr = sc->vr_ldata_ptr;
   1575 	for (i = 0; i < 8; i++) {
   1576 		if (round % 8) {
   1577 			round++;
   1578 			roundptr++;
   1579 		} else
   1580 			break;
   1581 	}
   1582 	sc->vr_ldata = (struct vr_list_data *)roundptr;
   1583 	bzero(sc->vr_ldata, sizeof (struct vr_list_data));
   1584 
   1585 	ifp = &sc->vr_ec.ec_if;
   1586 	ifp->if_softc = sc;
   1587 	ifp->if_mtu = ETHERMTU;
   1588 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1589 	ifp->if_ioctl = vr_ioctl;
   1590 	ifp->if_output = ether_output;
   1591 	ifp->if_start = vr_start;
   1592 	ifp->if_watchdog = vr_watchdog;
   1593 	ifp->if_baudrate = 10000000;
   1594 	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
   1595 
   1596 	/*
   1597 	 * Initialize MII/media info.
   1598 	 */
   1599 	sc->vr_mii.mii_ifp = ifp;
   1600 	sc->vr_mii.mii_readreg = vr_mii_readreg;
   1601 	sc->vr_mii.mii_writereg = vr_mii_writereg;
   1602 	sc->vr_mii.mii_statchg = vr_mii_statchg;
   1603 	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
   1604 	mii_phy_probe(&sc->vr_dev, &sc->vr_mii, 0xffffffff);
   1605 	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
   1606 		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   1607 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
   1608 	} else
   1609 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
   1610 
   1611 	/*
   1612 	 * Call MI attach routines.
   1613 	 */
   1614 	if_attach(ifp);
   1615 	ether_ifattach(ifp, sc->vr_enaddr);
   1616 
   1617 #if NBPFILTER > 0
   1618 	bpfattach(&sc->vr_ec.ec_if.if_bpf,
   1619 		ifp, DLT_EN10MB, sizeof (struct ether_header));
   1620 #endif
   1621 
   1622 	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
   1623 	if (sc->vr_ats == NULL)
   1624 		printf("%s: warning: couldn't establish shutdown hook\n",
   1625 			sc->vr_dev.dv_xname);
   1626 }
   1627