/* $NetBSD: if_vge.c,v 1.13 2006/10/14 16:45:46 tsutsui Exp $ */

/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul (at) windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.13 2006/10/14 16:45:46 tsutsui Exp $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul (at) windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
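
/*
 * For illustration, the 48-bit split shows up below as the VGE_ADDR_LO()
 * and VGE_ADDR_HI() macros from if_vgereg.h. A rough sketch of the idea
 * (not the literal macro definitions):
 *
 *	VGE_ADDR_LO(y): bits 31..0 of y  -> descriptor's addrlo field
 *	VGE_ADDR_HI(y): bits 47..32 of y -> descriptor's addrhi field
 *
 * giving 48 bits of buffer address per descriptor.
 */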

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vgereg.h>
#include <dev/pci/if_vgevar.h>
static int vge_probe	(struct device *, struct cfdata *, void *);
static void vge_attach	(struct device *, struct device *, void *);

static int vge_encap		(struct vge_softc *, struct mbuf *, int);

static int vge_dma_map_rx_desc	(struct vge_softc *, int);
static void vge_dma_map_tx_desc	(struct vge_softc *, struct mbuf *, int, int);
static int vge_allocmem		(struct vge_softc *);
static int vge_newbuf		(struct vge_softc *, int, struct mbuf *);
static int vge_rx_list_init	(struct vge_softc *);
static int vge_tx_list_init	(struct vge_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static inline void vge_fixup_rx	(struct mbuf *);
#endif
static void vge_rxeof		(struct vge_softc *);
static void vge_txeof		(struct vge_softc *);
static int vge_intr		(void *);
static void vge_tick		(void *);
static void vge_start		(struct ifnet *);
static int vge_ioctl		(struct ifnet *, u_long, caddr_t);
static int vge_init		(struct ifnet *);
static void vge_stop		(struct vge_softc *);
static void vge_watchdog	(struct ifnet *);
#if VGE_POWER_MANAGEMENT
static int vge_suspend		(struct device *);
static int vge_resume		(struct device *);
#endif
static void vge_shutdown	(void *);
static int vge_ifmedia_upd	(struct ifnet *);
static void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static uint16_t vge_read_eeprom	(struct vge_softc *, int);

static void vge_miipoll_start	(struct vge_softc *);
static void vge_miipoll_stop	(struct vge_softc *);
static int vge_miibus_readreg	(struct device *, int, int);
static void vge_miibus_writereg	(struct device *, int, int, int);
static void vge_miibus_statchg	(struct device *);

static void vge_cam_clear	(struct vge_softc *);
static int vge_cam_set		(struct vge_softc *, uint8_t *);
static void vge_setmulti	(struct vge_softc *);
static void vge_reset		(struct vge_softc *);

#define VGE_PCI_LOIO             0x10
#define VGE_PCI_LOMEM            0x14

CFATTACH_DECL(vge, sizeof(struct vge_softc),
    vge_probe, vge_attach, NULL, NULL);

/*
 * Defragment mbuf chain contents to be as linear as possible.
 * Returns new mbuf chain on success, NULL on failure. The old mbuf
 * chain is not freed; that is left to the caller (see vge_start()).
 * XXX temporary until there is a generic function doing this.
 */
#define m_defrag	vge_m_defrag
struct mbuf * vge_m_defrag(struct mbuf *, int);

struct mbuf *
vge_m_defrag(struct mbuf *mold, int flags)
{
	struct mbuf *m0, *mn, *n;
	size_t sz = mold->m_pkthdr.len;

#ifdef DIAGNOSTIC
	if ((mold->m_flags & M_PKTHDR) == 0)
		panic("m_defrag: not a mbuf chain header");
#endif

	MGETHDR(m0, flags, MT_DATA);
	if (m0 == NULL)
		return NULL;
	m0->m_pkthdr.len = mold->m_pkthdr.len;
	mn = m0;

	do {
		if (sz > MHLEN) {
			MCLGET(mn, flags);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(m0);
				return NULL;
			}
		}

		mn->m_len = MIN(sz, MCLBYTES);

		m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
		     mtod(mn, caddr_t));

		sz -= mn->m_len;

		if (sz > 0) {
			/* need more mbufs; use the caller's wait flag */
			MGET(n, flags, MT_DATA);
			if (n == NULL) {
				m_freem(m0);
				return NULL;
			}

			mn->m_next = n;
			mn = n;
		}
	} while (sz > 0);

	return m0;
}
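
/*
 * Typical use, as in vge_encap() below: when bus_dmamap_load_mbuf()
 * fails with EFBIG, the chain is linearized and the load is retried:
 *
 *	m_new = m_defrag(m_head, M_DONTWAIT);
 */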

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static uint16_t
vge_read_eeprom(struct vge_softc *sc, int addr)
{
	int i;
	uint16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM read timed out\n", sc->sc_dev.dv_xname);
		return 0;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	return word;
}
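
/*
 * Usage example (see vge_attach() below): the station address is kept
 * as three consecutive little-endian words starting at VGE_EE_EADDR:
 *
 *	val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
 *	eaddr[0] = val & 0xff;
 *	eaddr[1] = val >> 8;
 */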

static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    sc->sc_dev.dv_xname);
	}

	return;
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to idle MII autopoll\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: failed to start MII autopoll\n",
		    sc->sc_dev.dv_xname);
	}
}

static int
vge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	int i;
	u_int16_t rval = 0;

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return (0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		printf("%s: MII read timed out\n", sc->sc_dev.dv_xname);
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

static void
vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vge_softc *sc = (struct vge_softc *)dev;
	int i;

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return;

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
	}

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;

	return;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: setting CAM filter failed\n", sc->sc_dev.dv_xname);
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
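
/*
 * A worked example of the hash fallback used below: the hash h is the
 * top 6 bits of the big-endian CRC32 of the address. Bit 5 of h picks
 * VGE_MAR0 vs. VGE_MAR1 and the low 5 bits pick the bit within that
 * register, so h == 0x25 would set bit 5 of VGE_MAR1.
 */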
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp;
	int error = 0;
	u_int32_t h, hashes[2] = { 0, 0 };
	struct ether_multi *enm;
	struct ether_multistep step;

	ifp = &sc->sc_ethercom.ec_if;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
    allmulti:
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/* Now program new ones */
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		/*
		 * If multicast range, fall back to ALLMULTI.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
				ETHER_ADDR_LEN) != 0)
			goto allmulti;

		error = vge_cam_set(sc, enm->enm_addrlo);
		if (error)
			break;

		ETHER_NEXT_MULTI(step, enm);
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			/*
			 * If multicast range, fall back to ALLMULTI.
			 */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
					ETHER_ADDR_LEN) != 0)
				goto allmulti;

			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;
			hashes[h >> 5] |= 1 << (h & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}

	return;
}

static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: soft reset timed out\n", sc->sc_dev.dv_xname);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		printf("%s: EEPROM reload timed out\n", sc->sc_dev.dv_xname);
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	return;
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against the ones we recognize and return a match if we find one.
 */
static int
vge_probe(struct device *parent __unused, struct cfdata *match __unused,
    void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
	    && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
		return 1;

	return (0);
}

static int
vge_dma_map_rx_desc(struct vge_softc *sc, int idx)
{
	struct vge_rx_desc *d = NULL;
	bus_dma_segment_t *segs;

	/*
	 * Map the segment array into descriptors.
	 */

	d = &sc->vge_ldata.vge_rx_list[idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		printf("%s: tried to map busy descriptor\n",
		    sc->sc_dev.dv_xname);
		return (EBUSY);
	}

	segs = sc->vge_ldata.vge_rx_dmamap[idx]->dm_segs;

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	return (0);
}

static void
vge_dma_map_tx_desc(struct vge_softc *sc, struct mbuf *m0, int idx, int flags)
{
	struct vge_tx_desc *d = &sc->vge_ldata.vge_tx_list[idx];
	struct vge_tx_frag *f;
	int i = 0;
	bus_dma_segment_t *segs;
	size_t sz;
	bus_dmamap_t map = sc->vge_ldata.vge_tx_dmamap[idx];

	/* Map the segment array into descriptors. */

	segs = map->dm_segs;
	for (i = 0; i < map->dm_nsegs; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames. */

	sz = m0->m_pkthdr.len;
	if (m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - sz));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		sz = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = htole32(sz << 16);
	d->vge_ctl = htole32(flags|(i << 28)|VGE_TD_LS_NORM);

	if (sz > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= htole32(VGE_TDCTL_JUMBO);
}

static int
vge_allocmem(struct vge_softc *sc)
{
	int error;
	int nseg;
	int i;
	bus_dma_segment_t seg;

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dmamap_create(sc->vge_dmat,
	    VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error) {
		printf("%s: could not allocate TX dma list map\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate memory for TX descriptor list.
	 */

	error = bus_dmamem_alloc(sc->vge_dmat, VGE_TX_LIST_SZ, VGE_RING_ALIGN,
	    0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate TX ring dma memory\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Map the memory to kernel VA space */

	error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_TX_LIST_SZ,
	     (caddr_t *)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not map TX ring dma memory\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_tx_list_map,
	    sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load TX ring dma memory\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_dmat, VGE_TX_MAXLEN,
		    VGE_TX_FRAGS, VGE_TX_MAXLEN, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			printf("%s: can't create DMA map for TX\n",
			    sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dmamap_create(sc->vge_dmat,
	    VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error) {
		printf("%s: could not allocate RX dma list map\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->vge_dmat, VGE_RX_LIST_SZ, VGE_RING_ALIGN,
	    0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	/* Map the memory to kernel VA space */

	error = bus_dmamem_map(sc->vge_dmat, &seg, nseg, VGE_RX_LIST_SZ,
	     (caddr_t *)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->vge_dmat, sc->vge_ldata.vge_rx_list_map,
	    sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load RX ring dma memory\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			printf("%s: can't create DMA map for RX\n",
			     sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vge_attach(struct device *parent __unused, struct device *self, void *aux)
{
	uint8_t *eaddr;
	struct vge_softc *sc = (struct vge_softc *)self;
	struct ifnet *ifp;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const char *intrstr;
	pci_intr_handle_t ih;
	uint16_t val;

	aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
		PCI_REVISION(pa->pa_class));

	/* Make sure bus-mastering is enabled */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
		PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, VGE_PCI_LOMEM,
	    PCI_MAPREG_TYPE_MEM, 0,
	    &sc->vge_btag, &sc->vge_bhandle, NULL, NULL) != 0) {
		aprint_error("%s: couldn't map memory\n",
			sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->vge_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
	if (sc->vge_intrhand == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	eaddr = sc->vge_eaddr;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	printf("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(eaddr));

	/*
	 * Use the 32-bit DMA tag. The hardware supports 48-bit physical
	 * addresses, but we don't use that for now.
	 */
	sc->vge_dmat = pa->pa_dmat;

	if (vge_allocmem(sc))
		return;

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(1);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_start = vge_start;

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
	    ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

#ifdef DEVICE_POLLING
#ifdef IFCAP_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
#endif
	ifp->if_watchdog = vge_watchdog;
	ifp->if_init = vge_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vge_miibus_readreg;
	sc->sc_mii.mii_writereg = vge_miibus_writereg;
	sc->sc_mii.mii_statchg = vge_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, vge_ifmedia_upd,
	    vge_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	callout_init(&sc->vge_timeout);
	callout_setfunc(&sc->vge_timeout, vge_tick, sc);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	}
}

static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *n = NULL;
	int i, error;

	if (m == NULL) {
		n = m_gethdr(M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return (ENOBUFS);

		m_clget(n, M_DONTWAIT);
		if ((n->m_flags & M_EXT) == 0) {
			m_freem(n);
			return (ENOBUFS);
		}

		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

#ifndef __NO_STRICT_ALIGNMENT
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	error = bus_dmamap_load_mbuf(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_dmamap[idx], m, BUS_DMA_NOWAIT);
	if (error || vge_dma_map_rx_desc(sc, idx)) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_dmamap[idx],
	    0, sc->vge_ldata.vge_rx_dmamap[idx]->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;

	return;
}
#endif
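
/*
 * Worked example of the fixup above: the chip DMAs each frame to a
 * 4-byte aligned buffer address. Shifting the payload back ETHER_ALIGN
 * (2) bytes puts the 14-byte Ethernet header at an offset of 2 mod 4,
 * which leaves the IP header 4-byte aligned for strict-alignment CPUs.
 */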

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	int lim = 0;
	struct vge_rx_desc *cur_rx;
	u_int32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);
	ifp = &sc->sc_ethercom.ec_if;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {

#ifdef DEVICE_POLLING
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->vge_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    0, sc->vge_ldata.vge_rx_dmamap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_dmat,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifndef __NO_STRICT_ALIGNMENT
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
			/* Check TCP checksum */
			if (rxctl & VGE_RDCTL_TCPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
			/* Check UDP checksum */
			if (rxctl & VGE_RDCTL_UDPPKT)
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;

			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		if (rxstat & VGE_RDSTS_VTAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxctl & VGE_RDCTL_VLANID)), continue);

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;

	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_rx_list_map,
	    0, sc->vge_ldata.vge_rx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);

	return;
}

static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = &sc->sc_ethercom.ec_if;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {

		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_dmat,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	}

	return;
}

static void
vge_tick(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();

	VGE_LOCK(sc);

	callout_schedule(&sc->vge_timeout, hz);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->vge_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				vge_start(ifp);
		}
	}

	VGE_UNLOCK(sc);

	splx(s);
}

#ifdef DEVICE_POLLING
static void
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;

	VGE_LOCK(sc);
#ifdef IFCAP_POLLING
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
#endif
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
		goto done;
	}

	sc->rxcycles = count;
	vge_rxeof(sc);
	vge_txeof(sc);

#if __FreeBSD_version < 502114
	if (ifp->if_snd.ifq_head != NULL)
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
#endif
		taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int32_t status;
		status = CSR_READ_4(sc, VGE_ISR);
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL)
			vge_init(ifp);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			ifp->if_ierrors++;
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

static int
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int32_t status;
	int claim = 0;

	if (sc->suspended) {
		return claim;
	}

	VGE_LOCK(sc);

	if (!(ifp->if_flags & IFF_UP)) {
		VGE_UNLOCK(sc);
		return claim;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if (
#ifdef IFCAP_POLLING
	    (ifp->if_capenable & IFCAP_POLLING) &&
#endif
	    ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
		vge_poll(ifp, 0, 1);
		goto done;
	}

#endif /* DEVICE_POLLING */

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {

		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status) {
			claim = 1;
			CSR_WRITE_4(sc, VGE_ISR, status);
		}

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(ifp);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

#ifdef DEVICE_POLLING
done:
#endif
	VGE_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vge_start(ifp);

	return claim;
}

static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;
	int error, flags;
	struct m_tag *mtag;

	/* If this descriptor is still owned by the chip, bail. */
	if (sc->vge_ldata.vge_tx_free <= 2
	    || le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts) & VGE_TDSTS_OWN)
		return (ENOBUFS);

	flags = 0;

	if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
		flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & M_CSUM_TCPv4)
		flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & M_CSUM_UDPv4)
		flags |= VGE_TDCTL_UDPCSUM;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	/* If too many segments to map, coalesce */
	if (error == EFBIG) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return (error);

		error = bus_dmamap_load_mbuf(sc->vge_dmat, map,
		    m_new, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_new);
			return (error);
		}

		m_head = m_new;
	} else if (error)
		return (error);

	vge_dma_map_tx_desc(sc, m_head, idx, flags);

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */

	mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
	if (mtag != NULL)
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

	return (0);
}
   1648 
   1649 /*
   1650  * Main transmit routine.
   1651  */
   1652 
   1653 static void
   1654 vge_start(ifp)
   1655 	struct ifnet		*ifp;
   1656 {
   1657 	struct vge_softc	*sc;
   1658 	struct mbuf		*m_head = NULL;
   1659 	int			idx, pidx = 0, error;
   1660 
   1661 	sc = ifp->if_softc;
   1662 	VGE_LOCK(sc);
   1663 
   1664 	if (!sc->vge_link
   1665 	    || (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
   1666 		VGE_UNLOCK(sc);
   1667 		return;
   1668 	}
   1669 
   1670 	idx = sc->vge_ldata.vge_tx_prodidx;
   1671 
   1672 	pidx = idx - 1;
   1673 	if (pidx < 0)
   1674 		pidx = VGE_TX_DESC_CNT - 1;
   1675 
   1676 	/*
   1677 	 * Loop through the send queue, setting up transmit descriptors
   1678 	 * until we drain the queue, or use up all available transmit
   1679 	 * descriptors.
   1680 	 */
   1681 	for(;;) {
   1682 		/* Grab a packet off the queue. */
   1683 		IFQ_POLL(&ifp->if_snd, m_head);
   1684 		if (m_head == NULL)
   1685 			break;
   1686 
   1687 		if (sc->vge_ldata.vge_tx_mbuf[idx] != NULL) {
   1688 			/*
   1689 			 * Slot already used, stop for now.
   1690 			 */
   1691 			ifp->if_flags |= IFF_OACTIVE;
   1692 			break;
   1693 		}
   1694 
   1695 		if ((error = vge_encap(sc, m_head, idx))) {
   1696 			if (error == EFBIG) {
   1697 				printf("%s: Tx packet consumes too many "
   1698 				    "DMA segments, dropping...\n",
   1699 				    sc->sc_dev.dv_xname);
   1700 				IFQ_DEQUEUE(&ifp->if_snd, m_head);
   1701 				m_freem(m_head);
   1702 				continue;
   1703 			}
   1704 
   1705 			/*
   1706 			 * Short on resources, just stop for now.
   1707 			 */
   1708 			if (error == ENOBUFS)
   1709 				ifp->if_flags |= IFF_OACTIVE;
   1710 			break;
   1711 		}
   1712 
   1713 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
   1714 
   1715 		/*
   1716 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
   1717 		 */
   1718 
		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		/*
		 * If vge_encap() coalesced the mbuf chain, the stored
		 * mbuf differs from the one we dequeued; free the
		 * original and use the stored copy for bpf_mtap() below.
		 */
		if (sc->vge_ldata.vge_tx_mbuf[idx] != m_head) {
			m_freem(m_head);
			m_head = sc->vge_ldata.vge_tx_mbuf[idx];
		}

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		VGE_UNLOCK(sc);
		return;
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->vge_dmat,
	    sc->vge_ldata.vge_tx_list_map,
	    0, sc->vge_ldata.vge_tx_list_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which is loaded from the SSTIMER register
	 * and counts down toward zero, triggering an interrupt when
	 * it expires. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter is idle
	 * will the timer hit 0 and an interrupt fire.
	 */
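	/*
	 * The SSTIMER value itself is loaded in vge_init(); with
	 * VGE_DIAGCTL_TIMER0_RES set there, it counts in microseconds.
	 */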
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	VGE_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

static int
vge_init(ifp)
	struct ifnet *ifp;
{
	struct vge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->sc_mii;
	int			i;

	VGE_LOCK(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */

	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->vge_eaddr[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_map->dm_segs[0].ds_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
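
	/*
	 * Only the low 32 bits of each ring address are programmed
	 * here; the descriptor rings are assumed to have been
	 * allocated below 4GB, so the high address registers can
	 * remain at their defaults.
	 */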

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

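	/* XXX 0x8B is a magic value carried over from the original driver. */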
	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;

	VGE_UNLOCK(sc);

	callout_schedule(&sc->vge_timeout, hz);

	return (0);
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->sc_mii;

	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct vge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

static void
vge_miibus_statchg(self)
	struct device	*self;
{
	struct vge_softc	*sc = (struct vge_softc *) self;
	struct mii_data		*mii = &sc->sc_mii;
	struct ifmedia_entry	*ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		printf("%s: unknown media type: %x\n",
		    sc->sc_dev.dv_xname,
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}

	return;
}

static int
vge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct vge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFMTU:
		/* Reject MTUs larger than the jumbo frame maximum. */
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->vge_if_flags & IFF_PROMISC) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else
				vge_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				vge_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	splx(s);
	return (error);
}

static void
vge_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc		*sc;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(ifp);

	VGE_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(sc)
	struct vge_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp = &sc->sc_ethercom.ec_if;

	VGE_LOCK(sc);
	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

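	/*
	 * Mask interrupts, stop the MAC, ack any pending interrupts
	 * and deactivate the TX and RX queues.
	 */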
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_dmat,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_dmat,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}

	VGE_UNLOCK(sc);

	return;
}

#if VGE_POWER_MANAGEMENT
/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(dev)
	struct device *		dev;
{
	struct vge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	vge_stop(sc);

	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->suspended = 1;

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(dev)
	struct device *		dev;
{
	struct vge_softc	*sc = (struct vge_softc *)dev;
	struct ifnet		*ifp = &sc->sc_ethercom.ec_if;
	int			i;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(ifp);

	sc->suspended = 0;

	return (0);
}
#endif

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(arg)
	void *arg;
{
	struct vge_softc *sc = (struct vge_softc *)arg;

	vge_stop(sc);
}
