      1 /*	$NetBSD: if_vr.c,v 1.14 1999/02/05 08:27:46 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997, 1998
      5  *	Bill Paul <wpaul (at) ctr.columbia.edu>.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. All advertising materials mentioning features or use of this software
     16  *    must display the following acknowledgement:
     17  *	This product includes software developed by Bill Paul.
     18  * 4. Neither the name of the author nor the names of any co-contributors
     19  *    may be used to endorse or promote products derived from this software
     20  *    without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
     23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
     26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     32  * THE POSSIBILITY OF SUCH DAMAGE.
     33  *
     34  *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
     35  */
     36 
     37 /*
     38  * VIA Rhine fast ethernet PCI NIC driver
     39  *
     40  * Supports various network adapters based on the VIA Rhine
     41  * and Rhine II PCI controllers, including the D-Link DFE530TX.
     42  * Datasheets are available at http://www.via.com.tw.
     43  *
     44  * Written by Bill Paul <wpaul (at) ctr.columbia.edu>
     45  * Electrical Engineering Department
     46  * Columbia University, New York City
     47  */
     48 
     49 /*
     50  * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
     52  * uses an MII bus and an external physical layer interface. The
     53  * receiver has a one entry perfect filter and a 64-bit hash table
     54  * multicast filter. Transmit and receive descriptors are similar
     55  * to the tulip.
     56  *
     57  * The Rhine has a serious flaw in its transmit DMA mechanism:
     58  * transmit buffers must be longword aligned. Unfortunately,
     59  * FreeBSD doesn't guarantee that mbufs will be filled in starting
     60  * at longword boundaries, so we have to do a buffer copy before
     61  * transmission.
     62  */
     63 
     64 #include "opt_inet.h"
     65 
     66 #include <sys/param.h>
     67 #include <sys/systm.h>
     68 #include <sys/sockio.h>
     69 #include <sys/mbuf.h>
     70 #include <sys/malloc.h>
     71 #include <sys/kernel.h>
     72 #include <sys/socket.h>
     73 #include <sys/device.h>
     74 
     75 #include <net/if.h>
     76 #include <net/if_arp.h>
     77 #include <net/if_dl.h>
     78 #include <net/if_media.h>
     79 #include <net/if_ether.h>
     80 
     81 #if defined(INET)
     82 #include <netinet/in.h>
     83 #include <netinet/if_inarp.h>
     84 #endif
     85 
     86 #include "bpfilter.h"
     87 #if NBPFILTER > 0
     88 #include <net/bpf.h>
     89 #endif
     90 
     91 #include <vm/vm.h>		/* for vtophys */
     92 
     93 #include <machine/bus.h>
     94 #include <machine/intr.h>
     95 
     96 #include <dev/mii/mii.h>
     97 #include <dev/mii/miivar.h>
     98 
     99 #include <dev/pci/pcireg.h>
    100 #include <dev/pci/pcivar.h>
    101 #include <dev/pci/pcidevs.h>
    102 
    103 #include <dev/pci/if_vrreg.h>
    104 
    105 #if defined(__NetBSD__) && defined(__alpha__)
    106 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
    107 #undef vtophys
    108 #define	vtophys(va)	alpha_XXX_dmamap((vaddr_t)(va))
    109 #endif
    110 
#define	VR_USEIOSPACE		/* prefer I/O space over memory space mapping */
    112 
    113 #define	ETHER_CRC_LEN	4	/* XXX Should be in a common header. */
    114 
    115 /*
    116  * Various supported device vendors/types and their names.
    117  */
    118 static struct vr_type {
    119 	pci_vendor_id_t		vr_vid;
    120 	pci_product_id_t	vr_did;
    121 	const char		*vr_name;
    122 } vr_devs[] = {
    123 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
    124 		"VIA VT3043 Rhine I 10/100BaseTX" },
    125 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
    126 		"VIA VT86C100A Rhine II 10/100BaseTX" },
    127 	{ 0, 0, NULL }
    128 };
    129 
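/*
 * Software state for the descriptor rings.  vr_list_data holds the
 * hardware descriptors themselves; the vr_chain structures shadow them
 * with the kernel virtual addresses and mbufs the driver needs.
 */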
    130 struct vr_list_data {
    131 	struct vr_desc		vr_rx_list[VR_RX_LIST_CNT];
    132 	struct vr_desc		vr_tx_list[VR_TX_LIST_CNT];
    133 };
    134 
    135 struct vr_chain {
    136 	struct vr_desc		*vr_ptr;
    137 	struct mbuf		*vr_mbuf;
    138 	struct vr_chain		*vr_nextdesc;
    139 };
    140 
    141 struct vr_chain_onefrag {
    142 	struct vr_desc		*vr_ptr;
    143 	struct mbuf		*vr_mbuf;
    144 	struct vr_chain_onefrag	*vr_nextdesc;
    145 };
    146 
    147 struct vr_chain_data {
    148 	struct vr_chain_onefrag	vr_rx_chain[VR_RX_LIST_CNT];
    149 	struct vr_chain		vr_tx_chain[VR_TX_LIST_CNT];
    150 
    151 	struct vr_chain_onefrag	*vr_rx_head;
    152 
    153 	struct vr_chain		*vr_tx_head;
    154 	struct vr_chain		*vr_tx_tail;
    155 	struct vr_chain		*vr_tx_free;
    156 };
    157 
    158 struct vr_softc {
    159 	struct device		vr_dev;		/* generic device glue */
    160 	void			*vr_ih;		/* interrupt cookie */
    161 	void			*vr_ats;	/* shutdown hook */
    162 	bus_space_tag_t		vr_bst;		/* bus space tag */
    163 	bus_space_handle_t	vr_bsh;		/* bus space handle */
    164 	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
    165 	struct ethercom		vr_ec;		/* Ethernet common info */
    166 	u_int8_t 		vr_enaddr[ETHER_ADDR_LEN];
    167 	struct mii_data		vr_mii;		/* MII/media info */
	caddr_t			vr_ldata_ptr;	/* unaligned descriptor memory */
	struct vr_list_data	*vr_ldata;	/* aligned descriptor lists */
	struct vr_chain_data	vr_cdata;	/* descriptor ring software state */
    171 };
    172 
    173 /*
    174  * register space access macros
    175  */
    176 #define	CSR_WRITE_4(sc, reg, val)	\
    177 	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
    178 #define	CSR_WRITE_2(sc, reg, val)	\
    179 	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
    180 #define	CSR_WRITE_1(sc, reg, val)	\
    181 	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)
    182 
    183 #define	CSR_READ_4(sc, reg)		\
    184 	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
    185 #define	CSR_READ_2(sc, reg)		\
    186 	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
    187 #define	CSR_READ_1(sc, reg)		\
    188 	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)
    189 
#define	VR_TIMEOUT		1000	/* reset timeout, in units of DELAY(10) */
    191 
    192 static int vr_newbuf		__P((struct vr_softc *,
    193 						struct vr_chain_onefrag *));
    194 static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
    195 						struct mbuf *));
    196 
    197 static void vr_rxeof		__P((struct vr_softc *));
    198 static void vr_rxeoc		__P((struct vr_softc *));
    199 static void vr_txeof		__P((struct vr_softc *));
    200 static void vr_txeoc		__P((struct vr_softc *));
    201 static void vr_intr		__P((void *));
    202 static void vr_start		__P((struct ifnet *));
    203 static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
    204 static void vr_init		__P((void *));
    205 static void vr_stop		__P((struct vr_softc *));
    206 static void vr_watchdog		__P((struct ifnet *));
    207 static void vr_tick		__P((void *));
    208 
    209 static int vr_ifmedia_upd	__P((struct ifnet *));
    210 static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
    211 
    212 static void vr_mii_sync		__P((struct vr_softc *));
    213 static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
    214 static int vr_mii_readreg	__P((struct device *, int, int));
    215 static void vr_mii_writereg	__P((struct device *, int, int, int));
    216 static void vr_mii_statchg	__P((struct device *));
    217 
    218 static u_int8_t vr_calchash	__P((u_int8_t *));
    219 static void vr_setmulti		__P((struct vr_softc *));
    220 static void vr_reset		__P((struct vr_softc *));
    221 static int vr_list_rx_init	__P((struct vr_softc *));
    222 static int vr_list_tx_init	__P((struct vr_softc *));
    223 
    224 #define	VR_SETBIT(sc, reg, x)				\
    225 	CSR_WRITE_1(sc, reg,				\
    226 		CSR_READ_1(sc, reg) | x)
    227 
    228 #define	VR_CLRBIT(sc, reg, x)				\
    229 	CSR_WRITE_1(sc, reg,				\
    230 		CSR_READ_1(sc, reg) & ~x)
    231 
    232 #define	VR_SETBIT16(sc, reg, x)				\
    233 	CSR_WRITE_2(sc, reg,				\
    234 		CSR_READ_2(sc, reg) | x)
    235 
    236 #define	VR_CLRBIT16(sc, reg, x)				\
    237 	CSR_WRITE_2(sc, reg,				\
    238 		CSR_READ_2(sc, reg) & ~x)
    239 
    240 #define	VR_SETBIT32(sc, reg, x)				\
    241 	CSR_WRITE_4(sc, reg,				\
    242 		CSR_READ_4(sc, reg) | x)
    243 
    244 #define	VR_CLRBIT32(sc, reg, x)				\
    245 	CSR_WRITE_4(sc, reg,				\
    246 		CSR_READ_4(sc, reg) & ~x)
    247 
    248 #define	SIO_SET(x)					\
    249 	CSR_WRITE_1(sc, VR_MIICMD,			\
    250 		CSR_READ_1(sc, VR_MIICMD) | x)
    251 
    252 #define	SIO_CLR(x)					\
    253 	CSR_WRITE_1(sc, VR_MIICMD,			\
    254 		CSR_READ_1(sc, VR_MIICMD) & ~x)
    255 
    256 /*
    257  * Sync the PHYs by setting data bit and strobing the clock 32 times.
    258  */
    259 static void vr_mii_sync(sc)
    260 	struct vr_softc		*sc;
    261 {
    262 	register int		i;
    263 
    264 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAOUT);
    265 
    266 	for (i = 0; i < 32; i++) {
    267 		SIO_SET(VR_MIICMD_CLK);
    268 		DELAY(1);
    269 		SIO_CLR(VR_MIICMD_CLK);
    270 		DELAY(1);
    271 	}
    272 
    273 	return;
    274 }
    275 
    276 /*
    277  * Clock a series of bits through the MII.
    278  */
    279 static void vr_mii_send(sc, bits, cnt)
    280 	struct vr_softc		*sc;
    281 	u_int32_t		bits;
    282 	int			cnt;
    283 {
    284 	int			i;
    285 
    286 	SIO_CLR(VR_MIICMD_CLK);
    287 
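	/* Clock the bits out to the PHY, most significant bit first. */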
    288 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
    289 		if (bits & i) {
    290 			SIO_SET(VR_MIICMD_DATAOUT);
    291 		} else {
    292 			SIO_CLR(VR_MIICMD_DATAOUT);
    293 		}
    294 		DELAY(1);
    295 		SIO_CLR(VR_MIICMD_CLK);
    296 		DELAY(1);
    297 		SIO_SET(VR_MIICMD_CLK);
    298 	}
    299 }
    300 
    301 /*
 * Read a PHY register through the MII.
    303  */
    304 static int vr_mii_readreg(self, phy, reg)
    305 	struct device *self;
    306 	int phy, reg;
    307 {
    308 	struct vr_softc *sc = (struct vr_softc *)self;
    309 	int i, ack, val = 0;
    310 
    311 	CSR_WRITE_1(sc, VR_MIICMD, 0);
    312 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
    313 
    314 	/*
    315 	 * Turn on data xmit.
    316 	 */
    317 	SIO_SET(VR_MIICMD_DIR);
    318 
    319 	vr_mii_sync(sc);
    320 
    321 	/*
    322 	 * Send command/address info.
    323 	 */
    324 	vr_mii_send(sc, MII_COMMAND_START, 2);
    325 	vr_mii_send(sc, MII_COMMAND_READ, 2);
    326 	vr_mii_send(sc, phy, 5);
    327 	vr_mii_send(sc, reg, 5);
    328 
    329 	/* Idle bit */
    330 	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAOUT));
    331 	DELAY(1);
    332 	SIO_SET(VR_MIICMD_CLK);
    333 	DELAY(1);
    334 
    335 	/* Turn off xmit. */
    336 	SIO_CLR(VR_MIICMD_DIR);
    337 
    338 	/* Check for ack */
    339 	SIO_CLR(VR_MIICMD_CLK);
    340 	DELAY(1);
    341 	SIO_SET(VR_MIICMD_CLK);
    342 	DELAY(1);
    343 	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN;
    344 
    345 	/*
    346 	 * Now try reading data bits. If the ack failed, we still
    347 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
    348 	 */
    349 	if (ack) {
    350 		for (i = 0; i < 16; i++) {
    351 			SIO_CLR(VR_MIICMD_CLK);
    352 			DELAY(1);
    353 			SIO_SET(VR_MIICMD_CLK);
    354 			DELAY(1);
    355 		}
    356 		goto fail;
    357 	}
    358 
    359 	for (i = 0x8000; i; i >>= 1) {
    360 		SIO_CLR(VR_MIICMD_CLK);
    361 		DELAY(1);
    362 		if (!ack) {
    363 			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAIN)
    364 				val |= i;
    365 			DELAY(1);
    366 		}
    367 		SIO_SET(VR_MIICMD_CLK);
    368 		DELAY(1);
    369 	}
    370 
    371  fail:
    372 
    373 	SIO_CLR(VR_MIICMD_CLK);
    374 	DELAY(1);
    375 	SIO_SET(VR_MIICMD_CLK);
    376 	DELAY(1);
    377 
    378 	return (val);
    379 }
    380 
    381 /*
    382  * Write to a PHY register through the MII.
    383  */
    384 static void vr_mii_writereg(self, phy, reg, val)
    385 	struct device *self;
    386 	int phy, reg, val;
    387 {
    388 	struct vr_softc *sc = (struct vr_softc *)self;
    389 
    390 	CSR_WRITE_1(sc, VR_MIICMD, 0);
    391 	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
    392 
    393 	/*
    394 	 * Turn on data output.
    395 	 */
    396 	SIO_SET(VR_MIICMD_DIR);
    397 
    398 	vr_mii_sync(sc);
    399 
    400 	vr_mii_send(sc, MII_COMMAND_START, 2);
    401 	vr_mii_send(sc, MII_COMMAND_WRITE, 2);
    402 	vr_mii_send(sc, phy, 5);
    403 	vr_mii_send(sc, reg, 5);
    404 	vr_mii_send(sc, MII_COMMAND_ACK, 2);
    405 	vr_mii_send(sc, val, 16);
    406 
    407 	/* Idle bit. */
    408 	SIO_SET(VR_MIICMD_CLK);
    409 	DELAY(1);
    410 	SIO_CLR(VR_MIICMD_CLK);
    411 	DELAY(1);
    412 
    413 	/*
    414 	 * Turn off xmit.
    415 	 */
    416 	SIO_CLR(VR_MIICMD_DIR);
    417 }
    418 
    419 static void vr_mii_statchg(self)
    420 	struct device *self;
    421 {
    422 	struct vr_softc *sc = (struct vr_softc *)self;
    423 	int restart = 0;
    424 
    425 	/*
    426 	 * In order to fiddle with the 'full-duplex' bit in the netconfig
    427 	 * register, we first have to put the transmit and/or receive logic
    428 	 * in the idle state.
    429 	 */
    430 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
    431 		restart = 1;
    432 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
    433 	}
    434 
    435 	if (sc->vr_mii.mii_media_active & IFM_FDX)
    436 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
    437 	else
    438 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
    439 
    440 	if (restart)
    441 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
    442 
    443 	/* XXX Update ifp->if_baudrate */
    444 }
    445 
    446 /*
    447  * Calculate CRC of a multicast group address, return the lower 6 bits.
    448  */
    449 static u_int8_t vr_calchash(addr)
    450 	u_int8_t		*addr;
    451 {
    452 	u_int32_t		crc, carry;
    453 	int			i, j;
    454 	u_int8_t		c;
    455 
    456 	/* Compute CRC for the address value. */
    457 	crc = 0xFFFFFFFF; /* initial value */
    458 
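	/*
	 * Bit-serial CRC-32, LSB first.  Since `carry' is always 1 when
	 * the feedback term is applied, XORing with 0x04c11db6 and then
	 * ORing in the carry is equivalent to XORing with the standard
	 * CRC-32 polynomial 0x04c11db7.
	 */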
    459 	for (i = 0; i < 6; i++) {
    460 		c = *(addr + i);
    461 		for (j = 0; j < 8; j++) {
    462 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
    463 			crc <<= 1;
    464 			c >>= 1;
    465 			if (carry)
    466 				crc = (crc ^ 0x04c11db6) | carry;
    467 		}
    468 	}
    469 
    470 	/* return the filter bit position */
    471 	return ((crc >> 26) & 0x0000003F);
    472 }
    473 
    474 /*
    475  * Program the 64-bit multicast hash filter.
    476  */
    477 static void vr_setmulti(sc)
    478 	struct vr_softc		*sc;
    479 {
    480 	struct ifnet		*ifp;
    481 	int			h = 0;
    482 	u_int32_t		hashes[2] = { 0, 0 };
    483 	struct ether_multistep	step;
    484 	struct ether_multi	*enm;
    485 	int			mcnt = 0;
    486 	u_int8_t		rxfilt;
    487 
    488 	ifp = &sc->vr_ec.ec_if;
    489 
    490 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
    491 
    492 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
    493 		rxfilt |= VR_RXCFG_RX_MULTI;
    494 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
    495 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
    496 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
    497 		return;
    498 	}
    499 
    500 	/* first, zot all the existing hash bits */
    501 	CSR_WRITE_4(sc, VR_MAR0, 0);
    502 	CSR_WRITE_4(sc, VR_MAR1, 0);
    503 
    504 	/* now program new ones */
    505 	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
    506 	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * A range of multicast addresses cannot be
			 * expressed in the 64-bit hash filter; fall back
			 * to accepting all multicast frames.
			 */
			rxfilt |= VR_RXCFG_RX_MULTI;
			CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
			CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
			CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
			return;
		}
    509 
    510 		h = vr_calchash(enm->enm_addrlo);
    511 
    512 		if (h < 32)
    513 			hashes[0] |= (1 << h);
    514 		else
    515 			hashes[1] |= (1 << (h - 32));
    516 		ETHER_NEXT_MULTI(step, enm);
    517 		mcnt++;
    518 	}
    519 
    520 	if (mcnt)
    521 		rxfilt |= VR_RXCFG_RX_MULTI;
    522 	else
    523 		rxfilt &= ~VR_RXCFG_RX_MULTI;
    524 
    525 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
    526 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
    527 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
    528 
    529 	return;
    530 }
    531 
    532 static void vr_reset(sc)
    533 	struct vr_softc		*sc;
    534 {
    535 	register int		i;
    536 
    537 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
    538 
    539 	for (i = 0; i < VR_TIMEOUT; i++) {
    540 		DELAY(10);
    541 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
    542 			break;
    543 	}
    544 	if (i == VR_TIMEOUT)
    545 		printf("%s: reset never completed!\n",
    546 			sc->vr_dev.dv_xname);
    547 
    548 	/* Wait a little while for the chip to get its brains in order. */
    549 	DELAY(1000);
    550 
    551 	return;
    552 }
    553 
    554 /*
    555  * Initialize the transmit descriptors.
    556  */
    557 static int vr_list_tx_init(sc)
    558 	struct vr_softc		*sc;
    559 {
    560 	struct vr_chain_data	*cd;
    561 	struct vr_list_data	*ld;
    562 	int			i;
    563 
    564 	cd = &sc->vr_cdata;
    565 	ld = sc->vr_ldata;
    566 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
    567 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
    568 		if (i == (VR_TX_LIST_CNT - 1))
    569 			cd->vr_tx_chain[i].vr_nextdesc =
    570 				&cd->vr_tx_chain[0];
    571 		else
    572 			cd->vr_tx_chain[i].vr_nextdesc =
    573 				&cd->vr_tx_chain[i + 1];
    574 	}
    575 
    576 	cd->vr_tx_free = &cd->vr_tx_chain[0];
    577 	cd->vr_tx_tail = cd->vr_tx_head = NULL;
    578 
    579 	return (0);
    580 }
    581 
    582 
    583 /*
    584  * Initialize the RX descriptors and allocate mbufs for them. Note that
    585  * we arrange the descriptors in a closed ring, so that the last descriptor
    586  * points back to the first.
    587  */
    588 static int vr_list_rx_init(sc)
    589 	struct vr_softc		*sc;
    590 {
    591 	struct vr_chain_data	*cd;
    592 	struct vr_list_data	*ld;
    593 	int			i;
    594 
    595 	cd = &sc->vr_cdata;
    596 	ld = sc->vr_ldata;
    597 
    598 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
    599 		cd->vr_rx_chain[i].vr_ptr =
    600 			(struct vr_desc *)&ld->vr_rx_list[i];
    601 		if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
    602 			return (ENOBUFS);
    603 		if (i == (VR_RX_LIST_CNT - 1)) {
    604 			cd->vr_rx_chain[i].vr_nextdesc =
    605 					&cd->vr_rx_chain[0];
    606 			ld->vr_rx_list[i].vr_next =
    607 					vtophys(&ld->vr_rx_list[0]);
    608 		} else {
    609 			cd->vr_rx_chain[i].vr_nextdesc =
    610 					&cd->vr_rx_chain[i + 1];
    611 			ld->vr_rx_list[i].vr_next =
    612 					vtophys(&ld->vr_rx_list[i + 1]);
    613 		}
    614 	}
    615 
    616 	cd->vr_rx_head = &cd->vr_rx_chain[0];
    617 
    618 	return (0);
    619 }
    620 
    621 /*
    622  * Initialize an RX descriptor and attach an MBUF cluster.
    623  * Note: the length fields are only 11 bits wide, which means the
    624  * largest size we can specify is 2047. This is important because
    625  * MCLBYTES is 2048, so we have to subtract one otherwise we'll
    626  * overflow the field and make a mess.
    627  */
    628 static int vr_newbuf(sc, c)
    629 	struct vr_softc		*sc;
    630 	struct vr_chain_onefrag	*c;
    631 {
    632 	struct mbuf		*m_new = NULL;
    633 
    634 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
    635 	if (m_new == NULL) {
    636 		printf("%s: no memory for rx list -- packet dropped!\n",
    637 			sc->vr_dev.dv_xname);
    638 		return (ENOBUFS);
    639 	}
    640 
    641 	MCLGET(m_new, M_DONTWAIT);
    642 	if (!(m_new->m_flags & M_EXT)) {
    643 		printf("%s: no memory for rx list -- packet dropped!\n",
    644 			sc->vr_dev.dv_xname);
    645 		m_freem(m_new);
    646 		return (ENOBUFS);
    647 	}
    648 
    649 	c->vr_mbuf = m_new;
    650 	c->vr_ptr->vr_status = VR_RXSTAT;
    651 	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
    652 	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
    653 
    654 	return (0);
    655 }
    656 
    657 /*
    658  * A frame has been uploaded: pass the resulting mbuf chain up to
    659  * the higher level protocols.
    660  */
    661 static void vr_rxeof(sc)
    662 	struct vr_softc		*sc;
    663 {
    664 	struct ether_header	*eh;
    665 	struct mbuf		*m;
    666 	struct ifnet		*ifp;
    667 	struct vr_chain_onefrag	*cur_rx;
    668 	int			total_len = 0;
    669 	u_int32_t		rxstat;
    670 
    671 	ifp = &sc->vr_ec.ec_if;
    672 
    673 	while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
    674 							VR_RXSTAT_OWN)) {
    675 		cur_rx = sc->vr_cdata.vr_rx_head;
    676 		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
    677 
    678 		/*
    679 		 * If an error occurs, update stats, clear the
    680 		 * status word and leave the mbuf cluster in place:
    681 		 * it should simply get re-used next time this descriptor
    682 		 * comes up in the ring.
    683 		 */
    684 		if (rxstat & VR_RXSTAT_RXERR) {
    685 			ifp->if_ierrors++;
    686 			printf("%s: rx error: ", sc->vr_dev.dv_xname);
    687 			switch (rxstat & 0x000000FF) {
    688 			case VR_RXSTAT_CRCERR:
    689 				printf("crc error\n");
    690 				break;
    691 			case VR_RXSTAT_FRAMEALIGNERR:
    692 				printf("frame alignment error\n");
    693 				break;
    694 			case VR_RXSTAT_FIFOOFLOW:
    695 				printf("FIFO overflow\n");
    696 				break;
    697 			case VR_RXSTAT_GIANT:
    698 				printf("received giant packet\n");
    699 				break;
    700 			case VR_RXSTAT_RUNT:
    701 				printf("received runt packet\n");
    702 				break;
    703 			case VR_RXSTAT_BUSERR:
    704 				printf("system bus error\n");
    705 				break;
    706 			case VR_RXSTAT_BUFFERR:
    707 				printf("rx buffer error\n");
    708 				break;
    709 			default:
    710 				printf("unknown rx error\n");
    711 				break;
    712 			}
    713 			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
    714 			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
    715 			continue;
    716 		}
    717 
    718 		/* No errors; receive the packet. */
    719 		m = cur_rx->vr_mbuf;
    720 		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
    721 
    722 		/*
    723 		 * XXX The VIA Rhine chip includes the CRC with every
    724 		 * received frame, and there's no way to turn this
    725 		 * behavior off (at least, I can't find anything in
    726 		 * the manual that explains how to do it) so we have
    727 		 * to trim off the CRC manually.
    728 		 */
    729 		total_len -= ETHER_CRC_LEN;
    730 
    731 		/*
    732 		 * Try to conjure up a new mbuf cluster. If that
    733 		 * fails, it means we have an out of memory condition and
    734 		 * should leave the buffer in place and continue. This will
    735 		 * result in a lost packet, but there's little else we
    736 		 * can do in this situation.
    737 		 */
    738 		if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
    739 			ifp->if_ierrors++;
    740 			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
    741 			cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN;
    742 			continue;
    743 		}
    744 
    745 		ifp->if_ipackets++;
    746 		eh = mtod(m, struct ether_header *);
    747 		m->m_pkthdr.rcvif = ifp;
    748 		m->m_pkthdr.len = m->m_len = total_len;
    749 #if NBPFILTER > 0
    750 		/*
    751 		 * Handle BPF listeners. Let the BPF user see the packet, but
    752 		 * don't pass it up to the ether_input() layer unless it's
    753 		 * a broadcast packet, multicast packet, matches our ethernet
    754 		 * address or the interface is in promiscuous mode.
    755 		 */
    756 		if (ifp->if_bpf) {
    757 			bpf_mtap(ifp->if_bpf, m);
    758 			if (ifp->if_flags & IFF_PROMISC &&
    759 				(memcmp(eh->ether_dhost, sc->vr_enaddr,
    760 						ETHER_ADDR_LEN) &&
    761 					(eh->ether_dhost[0] & 1) == 0)) {
    762 				m_freem(m);
    763 				continue;
    764 			}
    765 		}
    766 #endif
    767 		/* Remove header from mbuf and pass it on. */
    768 		m_adj(m, sizeof (struct ether_header));
    769 		ether_input(ifp, eh, m);
    770 	}
    771 
    772 	return;
    773 }
    774 
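/*
 * RX error recovery: drain the RX ring, then restart the receiver
 * at the current ring head.
 */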
static void vr_rxeoc(sc)
    776 	struct vr_softc		*sc;
    777 {
    778 
    779 	vr_rxeof(sc);
    780 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
    781 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
    782 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
    783 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
    784 
    785 	return;
    786 }
    787 
    788 /*
    789  * A frame was downloaded to the chip. It's safe for us to clean up
    790  * the list buffers.
    791  */
    792 
    793 static void vr_txeof(sc)
    794 	struct vr_softc		*sc;
    795 {
    796 	struct vr_chain		*cur_tx;
    797 	struct ifnet		*ifp;
    798 	register struct mbuf	*n;
    799 
    800 	ifp = &sc->vr_ec.ec_if;
    801 
    802 	/* Clear the timeout timer. */
    803 	ifp->if_timer = 0;
    804 
    805 	/* Sanity check. */
    806 	if (sc->vr_cdata.vr_tx_head == NULL)
    807 		return;
    808 
    809 	/*
    810 	 * Go through our tx list and free mbufs for those
    811 	 * frames that have been transmitted.
    812 	 */
    813 	while (sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
    814 		u_int32_t		txstat;
    815 
    816 		cur_tx = sc->vr_cdata.vr_tx_head;
    817 		txstat = cur_tx->vr_ptr->vr_status;
    818 
    819 		if (txstat & VR_TXSTAT_OWN)
    820 			break;
    821 
    822 		if (txstat & VR_TXSTAT_ERRSUM) {
    823 			ifp->if_oerrors++;
    824 			if (txstat & VR_TXSTAT_DEFER)
    825 				ifp->if_collisions++;
    826 			if (txstat & VR_TXSTAT_LATECOLL)
    827 				ifp->if_collisions++;
    828 		}
    829 
		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
    831 
    832 		ifp->if_opackets++;
    833 		MFREE(cur_tx->vr_mbuf, n);
    834 		cur_tx->vr_mbuf = NULL;
    835 
    836 		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
    837 			sc->vr_cdata.vr_tx_head = NULL;
    838 			sc->vr_cdata.vr_tx_tail = NULL;
    839 			break;
    840 		}
    841 
    842 		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
    843 	}
    844 
    845 	return;
    846 }
    847 
    848 /*
    849  * TX 'end of channel' interrupt handler.
    850  */
    851 static void vr_txeoc(sc)
    852 	struct vr_softc		*sc;
    853 {
    854 	struct ifnet		*ifp;
    855 
    856 	ifp = &sc->vr_ec.ec_if;
    857 
    858 	ifp->if_timer = 0;
    859 
    860 	if (sc->vr_cdata.vr_tx_head == NULL) {
    861 		ifp->if_flags &= ~IFF_OACTIVE;
    862 		sc->vr_cdata.vr_tx_tail = NULL;
    863 	}
    864 
    865 	return;
    866 }
    867 
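/*
 * Interrupt handler: service RX and TX events until the interrupt
 * status register reads clear, then re-enable interrupts and kick
 * the transmit queue.
 */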
    868 static void vr_intr(arg)
    869 	void			*arg;
    870 {
    871 	struct vr_softc		*sc;
    872 	struct ifnet		*ifp;
    873 	u_int16_t		status;
    874 
    875 	sc = arg;
    876 	ifp = &sc->vr_ec.ec_if;
    877 
	/* Suppress unwanted interrupts. */
    879 	if (!(ifp->if_flags & IFF_UP)) {
    880 		vr_stop(sc);
    881 		return;
    882 	}
    883 
    884 	/* Disable interrupts. */
    885 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
    886 
    887 	for (;;) {
    888 
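		/*
		 * Read the interrupt status and write it back to
		 * acknowledge the events we are about to service.
		 */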
    889 		status = CSR_READ_2(sc, VR_ISR);
    890 		if (status)
    891 			CSR_WRITE_2(sc, VR_ISR, status);
    892 
    893 		if ((status & VR_INTRS) == 0)
    894 			break;
    895 
    896 		if (status & VR_ISR_RX_OK)
    897 			vr_rxeof(sc);
    898 
		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW) ||
		    (status & VR_ISR_RX_DROPPED)) {
    902 			vr_rxeof(sc);
    903 			vr_rxeoc(sc);
    904 		}
    905 
    906 		if (status & VR_ISR_TX_OK) {
    907 			vr_txeof(sc);
    908 			vr_txeoc(sc);
    909 		}
    910 
		if ((status & VR_ISR_TX_UNDERRUN) ||
		    (status & VR_ISR_TX_ABRT)) {
    912 			ifp->if_oerrors++;
    913 			vr_txeof(sc);
    914 			if (sc->vr_cdata.vr_tx_head != NULL) {
    915 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
    916 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
    917 			}
    918 		}
    919 
    920 		if (status & VR_ISR_BUSERR) {
    921 			vr_reset(sc);
    922 			vr_init(sc);
    923 		}
    924 	}
    925 
    926 	/* Re-enable interrupts. */
    927 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
    928 
    929 	if (ifp->if_snd.ifq_head != NULL) {
    930 		vr_start(ifp);
    931 	}
    932 
    933 	return;
    934 }
    935 
    936 /*
    937  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
    938  * pointers to the fragment pointers.
    939  */
    940 static int vr_encap(sc, c, m_head)
    941 	struct vr_softc		*sc;
    942 	struct vr_chain		*c;
    943 	struct mbuf		*m_head;
    944 {
    945 	int			frag = 0;
    946 	struct vr_desc		*f = NULL;
    947 	int			total_len;
    948 	struct mbuf		*m;
    949 
    950 	m = m_head;
    951 	total_len = 0;
    952 
    953 	/*
    954 	 * The VIA Rhine wants packet buffers to be longword
    955 	 * aligned, but very often our mbufs aren't. Rather than
    956 	 * waste time trying to decide when to copy and when not
    957 	 * to copy, just do it all the time.
    958 	 */
    959 	if (m != NULL) {
    960 		struct mbuf		*m_new = NULL;
    961 
    962 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
    963 		if (m_new == NULL) {
			printf("%s: no memory for tx list\n",
    965 				sc->vr_dev.dv_xname);
    966 			return (1);
    967 		}
    968 		if (m_head->m_pkthdr.len > MHLEN) {
    969 			MCLGET(m_new, M_DONTWAIT);
    970 			if (!(m_new->m_flags & M_EXT)) {
    971 				m_freem(m_new);
				printf("%s: no memory for tx list\n",
    973 					sc->vr_dev.dv_xname);
    974 				return (1);
    975 			}
    976 		}
    977 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
    978 					mtod(m_new, caddr_t));
    979 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
    980 		m_freem(m_head);
    981 		m_head = m_new;
    982 		/*
    983 		 * The Rhine chip doesn't auto-pad, so we have to make
    984 		 * sure to pad short frames out to the minimum frame length
    985 		 * ourselves.
    986 		 */
    987 		if (m_head->m_len < VR_MIN_FRAMELEN) {
    988 			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
    989 			m_new->m_len = m_new->m_pkthdr.len;
    990 		}
    991 		f = c->vr_ptr;
    992 		f->vr_data = vtophys(mtod(m_new, caddr_t));
    993 		f->vr_ctl = total_len = m_new->m_len;
    994 		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
    995 		f->vr_status = 0;
    996 		frag = 1;
    997 	}
    998 
    999 	c->vr_mbuf = m_head;
   1000 	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
   1001 	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
   1002 
   1003 	return (0);
   1004 }
   1005 
   1006 /*
 * Main transmit routine.  Dequeue frames from the send queue and hand
 * them to the chip.  vr_encap() copies each frame into a longword-
 * aligned buffer, and the vr_chain structures keep the mbuf pointers
 * (the descriptor fragment pointers are physical addresses) so the
 * buffers can be freed once transmission completes.
   1011  */
   1012 
   1013 static void vr_start(ifp)
   1014 	struct ifnet		*ifp;
   1015 {
   1016 	struct vr_softc		*sc;
   1017 	struct mbuf		*m_head = NULL;
   1018 	struct vr_chain		*cur_tx = NULL, *start_tx;
   1019 
   1020 	sc = ifp->if_softc;
   1021 
   1022 	/*
   1023 	 * Check for an available queue slot. If there are none,
   1024 	 * punt.
   1025 	 */
   1026 	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
   1027 		ifp->if_flags |= IFF_OACTIVE;
   1028 		return;
   1029 	}
   1030 
   1031 	start_tx = sc->vr_cdata.vr_tx_free;
   1032 
   1033 	while (sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
   1034 		IF_DEQUEUE(&ifp->if_snd, m_head);
   1035 		if (m_head == NULL)
   1036 			break;
   1037 
   1038 		/* Pick a descriptor off the free list. */
   1039 		cur_tx = sc->vr_cdata.vr_tx_free;
   1040 		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
   1041 
   1042 		/* Pack the data into the descriptor. */
   1043 		vr_encap(sc, cur_tx, m_head);
   1044 
   1045 		if (cur_tx != start_tx)
   1046 			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
   1047 
   1048 #if NBPFILTER > 0
   1049 		/*
   1050 		 * If there's a BPF listener, bounce a copy of this frame
   1051 		 * to him.
   1052 		 */
   1053 		if (ifp->if_bpf)
   1054 			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf);
   1055 #endif
   1056 		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
   1057 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
   1058 	}
   1059 
   1060 	/*
   1061 	 * If there are no frames queued, bail.
   1062 	 */
   1063 	if (cur_tx == NULL)
   1064 		return;
   1065 
   1066 	sc->vr_cdata.vr_tx_tail = cur_tx;
   1067 
   1068 	if (sc->vr_cdata.vr_tx_head == NULL)
   1069 		sc->vr_cdata.vr_tx_head = start_tx;
   1070 
   1071 	/*
   1072 	 * Set a timeout in case the chip goes out to lunch.
   1073 	 */
   1074 	ifp->if_timer = 5;
   1075 
   1076 	return;
   1077 }
   1078 
   1079 /*
   1080  * Initialize the interface.  Must be called at splnet.
   1081  */
   1082 static void vr_init(xsc)
   1083 	void			*xsc;
   1084 {
   1085 	struct vr_softc		*sc = xsc;
   1086 	struct ifnet		*ifp = &sc->vr_ec.ec_if;
   1087 
   1088 	/*
   1089 	 * Cancel pending I/O and free all RX/TX buffers.
   1090 	 */
   1091 	vr_stop(sc);
   1092 	vr_reset(sc);
   1093 
   1094 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
   1095 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
   1096 
   1097 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
   1098 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
   1099 
   1100 	/* Init circular RX list. */
   1101 	if (vr_list_rx_init(sc) == ENOBUFS) {
   1102 		printf("%s: initialization failed: no "
   1103 			"memory for rx buffers\n", sc->vr_dev.dv_xname);
   1104 		vr_stop(sc);
   1105 		return;
   1106 	}
   1107 
   1108 	/*
   1109 	 * Init tx descriptors.
   1110 	 */
   1111 	vr_list_tx_init(sc);
   1112 
   1113 	/* If we want promiscuous mode, set the allframes bit. */
   1114 	if (ifp->if_flags & IFF_PROMISC)
   1115 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
   1116 	else
   1117 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
   1118 
   1119 	/* Set capture broadcast bit to capture broadcast frames. */
   1120 	if (ifp->if_flags & IFF_BROADCAST)
   1121 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
   1122 	else
   1123 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
   1124 
   1125 	/*
   1126 	 * Program the multicast filter, if necessary.
   1127 	 */
   1128 	vr_setmulti(sc);
   1129 
   1130 	/*
   1131 	 * Load the address of the RX list.
   1132 	 */
   1133 	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
   1134 
   1135 	/* Enable receiver and transmitter. */
   1136 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
   1137 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
   1138 				    VR_CMD_RX_GO);
   1139 
   1140 	/* Set current media. */
   1141 	mii_mediachg(&sc->vr_mii);
   1142 
   1143 	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
   1144 
   1145 	/*
   1146 	 * Enable interrupts.
   1147 	 */
   1148 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
   1149 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
   1150 
   1151 	ifp->if_flags |= IFF_RUNNING;
   1152 	ifp->if_flags &= ~IFF_OACTIVE;
   1153 
   1154 	/* Start one second timer. */
   1155 	timeout(vr_tick, sc, hz);
   1156 
   1157 	return;
   1158 }
   1159 
   1160 /*
   1161  * Set media options.
   1162  */
   1163 static int vr_ifmedia_upd(ifp)
   1164 	struct ifnet		*ifp;
   1165 {
   1166 	struct vr_softc *sc = ifp->if_softc;
   1167 
   1168 	if (ifp->if_flags & IFF_UP)
   1169 		mii_mediachg(&sc->vr_mii);
   1170 	return (0);
   1171 }
   1172 
   1173 /*
   1174  * Report current media status.
   1175  */
   1176 static void vr_ifmedia_sts(ifp, ifmr)
   1177 	struct ifnet		*ifp;
   1178 	struct ifmediareq	*ifmr;
   1179 {
   1180 	struct vr_softc *sc = ifp->if_softc;
   1181 
   1182 	mii_pollstat(&sc->vr_mii);
   1183 	ifmr->ifm_status = sc->vr_mii.mii_media_status;
   1184 	ifmr->ifm_active = sc->vr_mii.mii_media_active;
   1185 }
   1186 
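/*
 * Handle socket ioctls: interface address, flags, MTU, multicast
 * filter and media changes.
 */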
   1187 static int vr_ioctl(ifp, command, data)
   1188 	struct ifnet		*ifp;
   1189 	u_long			command;
   1190 	caddr_t			data;
   1191 {
   1192 	struct vr_softc		*sc = ifp->if_softc;
   1193 	struct ifreq		*ifr = (struct ifreq *)data;
   1194 	struct ifaddr		*ifa = (struct ifaddr *)data;
   1195 	int			s, error = 0;
   1196 
   1197 	s = splnet();
   1198 
   1199 	switch (command) {
   1200 	case SIOCSIFADDR:
   1201 		ifp->if_flags |= IFF_UP;
   1202 
   1203 		switch (ifa->ifa_addr->sa_family) {
   1204 #ifdef INET
   1205 		case AF_INET:
   1206 			vr_init(sc);
   1207 			arp_ifinit(ifp, ifa);
   1208 			break;
   1209 #endif /* INET */
   1210 		default:
   1211 			vr_init(sc);
   1212 			break;
   1213 		}
   1214 		break;
   1215 
   1216 	case SIOCGIFADDR:
   1217 		bcopy((caddr_t) sc->vr_enaddr,
   1218 			(caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
   1219 			ETHER_ADDR_LEN);
   1220 		break;
   1221 
   1222 	case SIOCSIFMTU:
   1223 		if (ifr->ifr_mtu > ETHERMTU)
   1224 			error = EINVAL;
   1225 		else
   1226 			ifp->if_mtu = ifr->ifr_mtu;
   1227 		break;
   1228 
   1229 	case SIOCSIFFLAGS:
   1230 		if (ifp->if_flags & IFF_UP) {
   1231 			vr_init(sc);
   1232 		} else {
   1233 			if (ifp->if_flags & IFF_RUNNING)
   1234 				vr_stop(sc);
   1235 		}
   1236 		error = 0;
   1237 		break;
   1238 	case SIOCADDMULTI:
   1239 	case SIOCDELMULTI:
   1240 		if (command == SIOCADDMULTI)
   1241 			error = ether_addmulti(ifr, &sc->vr_ec);
   1242 		else
   1243 			error = ether_delmulti(ifr, &sc->vr_ec);
   1244 
   1245 		if (error == ENETRESET) {
   1246 			vr_setmulti(sc);
   1247 			error = 0;
   1248 		}
   1249 		break;
   1250 	case SIOCGIFMEDIA:
   1251 	case SIOCSIFMEDIA:
   1252 		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
   1253 		break;
   1254 	default:
   1255 		error = EINVAL;
   1256 		break;
   1257 	}
   1258 
   1259 	splx(s);
   1260 
   1261 	return (error);
   1262 }
   1263 
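/*
 * Transmit watchdog: a transmission failed to complete within the
 * timeout set in vr_start(), so reset and reinitialize the chip.
 */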
   1264 static void vr_watchdog(ifp)
   1265 	struct ifnet		*ifp;
   1266 {
   1267 	struct vr_softc		*sc;
   1268 
   1269 	sc = ifp->if_softc;
   1270 
   1271 	ifp->if_oerrors++;
   1272 	printf("%s: watchdog timeout\n", sc->vr_dev.dv_xname);
   1273 
   1274 	vr_stop(sc);
   1275 	vr_reset(sc);
   1276 	vr_init(sc);
   1277 
   1278 	if (ifp->if_snd.ifq_head != NULL)
   1279 		vr_start(ifp);
   1280 
   1281 	return;
   1282 }
   1283 
   1284 /*
   1285  * One second timer, used to tick MII.
   1286  */
   1287 static void
   1288 vr_tick(arg)
   1289 	void *arg;
   1290 {
   1291 	struct vr_softc *sc = arg;
   1292 	int s;
   1293 
   1294 	s = splnet();
   1295 	mii_tick(&sc->vr_mii);
   1296 	splx(s);
   1297 
   1298 	timeout(vr_tick, sc, hz);
   1299 }
   1300 
   1301 /*
   1302  * Stop the adapter and free any mbufs allocated to the
   1303  * RX and TX lists.
   1304  */
   1305 static void vr_stop(sc)
   1306 	struct vr_softc		*sc;
   1307 {
   1308 	register int		i;
   1309 	struct ifnet		*ifp;
   1310 
   1311 	/* Cancel one second timer. */
   1312 	untimeout(vr_tick, sc);
   1313 
   1314 	ifp = &sc->vr_ec.ec_if;
   1315 	ifp->if_timer = 0;
   1316 
   1317 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
   1318 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
   1319 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
   1320 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
   1321 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
   1322 
   1323 	/*
   1324 	 * Free data in the RX lists.
   1325 	 */
   1326 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
   1327 		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
   1328 			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
   1329 			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
   1330 		}
   1331 	}
   1332 	bzero((char *)&sc->vr_ldata->vr_rx_list,
   1333 		sizeof (sc->vr_ldata->vr_rx_list));
   1334 
   1335 	/*
   1336 	 * Free the TX list buffers.
   1337 	 */
   1338 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
   1339 		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
   1340 			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
   1341 			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
   1342 		}
   1343 	}
   1344 
   1345 	bzero((char *)&sc->vr_ldata->vr_tx_list,
   1346 		sizeof (sc->vr_ldata->vr_tx_list));
   1347 
   1348 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1349 
   1350 	return;
   1351 }
   1352 
   1353 static struct vr_type *vr_lookup __P((struct pci_attach_args *));
   1354 static int vr_probe __P((struct device *, struct cfdata *, void *));
   1355 static void vr_attach __P((struct device *, struct device *, void *));
   1356 static void vr_shutdown __P((void *));
   1357 
   1358 struct cfattach vr_ca = {
   1359 	sizeof (struct vr_softc), vr_probe, vr_attach
   1360 };
   1361 
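/*
 * Look up a device in the supported-device table by PCI vendor
 * and product ID.
 */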
   1362 static struct vr_type *
   1363 vr_lookup(pa)
   1364 	struct pci_attach_args *pa;
   1365 {
   1366 	struct vr_type *vrt;
   1367 
   1368 	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
   1369 		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
   1370 		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
   1371 			return (vrt);
   1372 	}
   1373 	return (NULL);
   1374 }
   1375 
   1376 static int
   1377 vr_probe(parent, match, aux)
   1378 	struct device *parent;
   1379 	struct cfdata *match;
   1380 	void *aux;
   1381 {
   1382 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
   1383 
   1384 	if (vr_lookup(pa) != NULL)
   1385 		return (1);
   1386 
   1387 	return (0);
   1388 }
   1389 
   1390 /*
   1391  * Stop all chip I/O so that the kernel's probe routines don't
   1392  * get confused by errant DMAs when rebooting.
   1393  */
   1394 static void vr_shutdown(arg)
   1395 	void *arg;
   1396 {
   1397 	struct vr_softc		*sc = (struct vr_softc *)arg;
   1398 
   1399 	vr_stop(sc);
   1400 
   1401 	return;
   1402 }
   1403 
   1404 /*
   1405  * Attach the interface. Allocate softc structures, do ifmedia
   1406  * setup and ethernet/BPF attach.
   1407  */
   1408 static void
   1409 vr_attach(parent, self, aux)
   1410 	struct device * const parent;
   1411 	struct device * const self;
   1412 	void * const aux;
   1413 {
   1414 #define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
   1415 #define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
   1416 	struct vr_softc * const sc = (struct vr_softc *) self;
   1417 	struct pci_attach_args * const pa = (struct pci_attach_args *) aux;
   1418 	struct vr_type *vrt;
   1419 	int			i;
   1420 	u_int32_t		command;
   1421 	struct ifnet		*ifp;
   1422 	unsigned int		round;
   1423 	caddr_t			roundptr;
   1424 	u_char			eaddr[ETHER_ADDR_LEN];
   1425 
   1426 	vrt = vr_lookup(pa);
   1427 	if (vrt == NULL) {
   1428 		printf("\n");
   1429 		panic("vr_attach: impossible");
   1430 	}
   1431 
   1432 	printf(": %s Ethernet\n", vrt->vr_name);
   1433 
   1434 	/*
   1435 	 * Handle power management nonsense.
   1436 	 */
   1437 
   1438 	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
   1439 	if (command == 0x01) {
   1440 
   1441 		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
   1442 		if (command & VR_PSTATE_MASK) {
   1443 			u_int32_t		iobase, membase, irq;
   1444 
   1445 			/* Save important PCI config data. */
   1446 			iobase = PCI_CONF_READ(VR_PCI_LOIO);
   1447 			membase = PCI_CONF_READ(VR_PCI_LOMEM);
   1448 			irq = PCI_CONF_READ(VR_PCI_INTLINE);
   1449 
   1450 			/* Reset the power state. */
   1451 			printf("%s: chip is in D%d power mode "
   1452 				"-- setting to D0\n",
   1453 				sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
   1454 			command &= 0xFFFFFFFC;
   1455 			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
   1456 
   1457 			/* Restore PCI config data. */
   1458 			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
   1459 			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
   1460 			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
   1461 		}
   1462 	}
   1463 
   1464 	/*
   1465 	 * Map control/status registers.
   1466 	 */
   1467 	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
   1468 	command |= (PCI_COMMAND_IO_ENABLE |
   1469 		    PCI_COMMAND_MEM_ENABLE |
   1470 		    PCI_COMMAND_MASTER_ENABLE);
   1471 	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
   1472 	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
   1473 
   1474 	{
   1475 		bus_space_tag_t iot, memt;
   1476 		bus_space_handle_t ioh, memh;
   1477 		int ioh_valid, memh_valid;
   1478 		pci_intr_handle_t intrhandle;
   1479 		const char *intrstr;
   1480 
   1481 		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
   1482 			PCI_MAPREG_TYPE_IO, 0,
   1483 			&iot, &ioh, NULL, NULL) == 0);
   1484 		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
   1485 			PCI_MAPREG_TYPE_MEM |
   1486 			PCI_MAPREG_MEM_TYPE_32BIT,
   1487 			0, &memt, &memh, NULL, NULL) == 0);
   1488 #if defined(VR_USEIOSPACE)
   1489 		if (ioh_valid) {
   1490 			sc->vr_bst = iot;
   1491 			sc->vr_bsh = ioh;
   1492 		} else if (memh_valid) {
   1493 			sc->vr_bst = memt;
   1494 			sc->vr_bsh = memh;
   1495 		}
   1496 #else
   1497 		if (memh_valid) {
   1498 			sc->vr_bst = memt;
   1499 			sc->vr_bsh = memh;
   1500 		} else if (ioh_valid) {
   1501 			sc->vr_bst = iot;
   1502 			sc->vr_bsh = ioh;
   1503 		}
   1504 #endif
   1505 		else {
   1506 			printf(": unable to map device registers\n");
   1507 			return;
   1508 		}
   1509 
   1510 		/* Allocate interrupt */
   1511 		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
   1512 				pa->pa_intrline, &intrhandle)) {
   1513 			printf("%s: couldn't map interrupt\n",
   1514 				sc->vr_dev.dv_xname);
   1515 			goto fail;
   1516 		}
   1517 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
   1518 		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
   1519 						(void *)vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
				sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			goto fail;
		}
   1527 		printf("%s: interrupting at %s\n",
   1528 			sc->vr_dev.dv_xname, intrstr);
   1529 	}
   1530 	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
   1531 	if (sc->vr_ats == NULL)
   1532 		printf("%s: warning: couldn't establish shutdown hook\n",
   1533 			sc->vr_dev.dv_xname);
   1534 
   1535 	/* Reset the adapter. */
   1536 	vr_reset(sc);
   1537 
   1538 	/*
   1539 	 * Get station address. The way the Rhine chips work,
   1540 	 * you're not allowed to directly access the EEPROM once
   1541 	 * they've been programmed a special way. Consequently,
   1542 	 * we need to read the node address from the PAR0 and PAR1
   1543 	 * registers.
   1544 	 */
   1545 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
   1546 	DELAY(200);
   1547 	for (i = 0; i < ETHER_ADDR_LEN; i++)
   1548 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
   1549 
   1550 	/*
   1551 	 * A Rhine chip was detected. Inform the world.
   1552 	 */
   1553 	printf("%s: Ethernet address: %s\n",
   1554 		sc->vr_dev.dv_xname, ether_sprintf(eaddr));
   1555 
   1556 	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
   1557 
   1558 	sc->vr_ldata_ptr = malloc(sizeof (struct vr_list_data) + 8,
   1559 				M_DEVBUF, M_NOWAIT);
	if (sc->vr_ldata_ptr == NULL) {
		printf("%s: no memory for list buffers!\n",
			sc->vr_dev.dv_xname);
		return;
	}
   1566 
   1567 	sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
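	/*
	 * The descriptor list area was allocated with 8 bytes of slack
	 * so that the pointer can be rounded up to the next 8-byte
	 * boundary here.
	 */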
   1568 	round = (unsigned long)sc->vr_ldata_ptr & 0xF;
   1569 	roundptr = sc->vr_ldata_ptr;
   1570 	for (i = 0; i < 8; i++) {
   1571 		if (round % 8) {
   1572 			round++;
   1573 			roundptr++;
   1574 		} else
   1575 			break;
   1576 	}
   1577 	sc->vr_ldata = (struct vr_list_data *)roundptr;
   1578 	bzero(sc->vr_ldata, sizeof (struct vr_list_data));
   1579 
   1580 	ifp = &sc->vr_ec.ec_if;
   1581 	ifp->if_softc = sc;
   1582 	ifp->if_mtu = ETHERMTU;
   1583 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1584 	ifp->if_ioctl = vr_ioctl;
   1585 	ifp->if_output = ether_output;
   1586 	ifp->if_start = vr_start;
   1587 	ifp->if_watchdog = vr_watchdog;
   1588 	ifp->if_baudrate = 10000000;
   1589 	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
   1590 
   1591 	/*
   1592 	 * Initialize MII/media info.
   1593 	 */
   1594 	sc->vr_mii.mii_ifp = ifp;
   1595 	sc->vr_mii.mii_readreg = vr_mii_readreg;
   1596 	sc->vr_mii.mii_writereg = vr_mii_writereg;
   1597 	sc->vr_mii.mii_statchg = vr_mii_statchg;
   1598 	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
   1599 	mii_phy_probe(&sc->vr_dev, &sc->vr_mii, 0xffffffff);
   1600 	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
   1601 		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   1602 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
   1603 	} else
   1604 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
   1605 
   1606 	/*
   1607 	 * Call MI attach routines.
   1608 	 */
   1609 	if_attach(ifp);
   1610 	ether_ifattach(ifp, sc->vr_enaddr);
   1611 
   1612 #if NBPFILTER > 0
   1613 	bpfattach(&sc->vr_ec.ec_if.if_bpf,
   1614 		ifp, DLT_EN10MB, sizeof (struct ether_header));
   1615 #endif
   1621 
   1622 fail:
   1623 	return;
   1624 }
   1625