      1 /*	$NetBSD: if_bge.c,v 1.35 2003/03/06 20:53:05 jonathan Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001 Wind River Systems
      5  * Copyright (c) 1997, 1998, 1999, 2001
      6  *	Bill Paul <wpaul (at) windriver.com>.  All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by Bill Paul.
     19  * 4. Neither the name of the author nor the names of any co-contributors
     20  *    may be used to endorse or promote products derived from this software
     21  *    without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
     24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
     27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     33  * THE POSSIBILITY OF SUCH DAMAGE.
     34  *
     35  * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
     36  */
     37 
     38 /*
     39  * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
     40  *
     41  * NetBSD version by:
     42  *
     43  *	Frank van der Linden <fvdl (at) wasabisystems.com>
     44  *	Jason Thorpe <thorpej (at) wasabisystems.com>
     45  *	Jonathan Stone <jonathan (at) dsg.stanford.edu>
     46  *
     47  * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com>
     48  * Senior Engineer, Wind River Systems
     49  */
     50 
     51 /*
     52  * The Broadcom BCM5700 is based on technology originally developed by
     53  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
      55  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
     55  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
     56  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
     57  * frames, highly configurable RX filtering, and 16 RX and TX queues
     58  * (which, along with RX filter rules, can be used for QOS applications).
     59  * Other features, such as TCP segmentation, may be available as part
     60  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
     61  * firmware images can be stored in hardware and need not be compiled
     62  * into the driver.
     63  *
     64  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
     65  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
     66  *
     67  * The BCM5701 is a single-chip solution incorporating both the BCM5700
     68  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
     69  * does not support external SSRAM.
     70  *
     71  * Broadcom also produces a variation of the BCM5700 under the "Altima"
     72  * brand name, which is functionally similar but lacks PCI-X support.
     73  *
      74  * Without external SSRAM, you can have at most 4 TX rings,
     75  * and the use of the mini RX ring is disabled. This seems to imply
     76  * that these features are simply not available on the BCM5701. As a
     77  * result, this driver does not implement any support for the mini RX
     78  * ring.
     79  */
     80 
     81 #include "bpfilter.h"
     82 #include "vlan.h"
     83 
     84 #include <sys/param.h>
     85 #include <sys/systm.h>
     86 #include <sys/callout.h>
     87 #include <sys/sockio.h>
     88 #include <sys/mbuf.h>
     89 #include <sys/malloc.h>
     90 #include <sys/kernel.h>
     91 #include <sys/device.h>
     92 #include <sys/socket.h>
     93 
     94 #include <net/if.h>
     95 #include <net/if_dl.h>
     96 #include <net/if_media.h>
     97 #include <net/if_ether.h>
     98 
     99 #ifdef INET
    100 #include <netinet/in.h>
    101 #include <netinet/in_systm.h>
    102 #include <netinet/in_var.h>
    103 #include <netinet/ip.h>
    104 #endif
    105 
    106 #if NBPFILTER > 0
    107 #include <net/bpf.h>
    108 #endif
    109 
    110 #include <dev/pci/pcireg.h>
    111 #include <dev/pci/pcivar.h>
    112 #include <dev/pci/pcidevs.h>
    113 
    114 #include <dev/mii/mii.h>
    115 #include <dev/mii/miivar.h>
    116 #include <dev/mii/miidevs.h>
    117 #include <dev/mii/brgphyreg.h>
    118 
    119 #include <dev/pci/if_bgereg.h>
    120 
    121 #include <uvm/uvm_extern.h>
    122 
    123 int bge_probe(struct device *, struct cfdata *, void *);
    124 void bge_attach(struct device *, struct device *, void *);
    125 void bge_release_resources(struct bge_softc *);
    126 void bge_txeof(struct bge_softc *);
    127 void bge_rxeof(struct bge_softc *);
    128 
    129 void bge_tick(void *);
    130 void bge_stats_update(struct bge_softc *);
    131 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
    132 
    133 int bge_intr(void *);
    134 void bge_start(struct ifnet *);
    135 int bge_ioctl(struct ifnet *, u_long, caddr_t);
    136 int bge_init(struct ifnet *);
    137 void bge_stop(struct bge_softc *);
    138 void bge_watchdog(struct ifnet *);
    139 void bge_shutdown(void *);
    140 int bge_ifmedia_upd(struct ifnet *);
    141 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    142 
    143 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
    144 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
    145 
    146 void bge_setmulti(struct bge_softc *);
    147 
    148 void bge_handle_events(struct bge_softc *);
    149 int bge_alloc_jumbo_mem(struct bge_softc *);
    150 void bge_free_jumbo_mem(struct bge_softc *);
    151 void *bge_jalloc(struct bge_softc *);
    152 void bge_jfree(struct mbuf *, caddr_t, size_t, void *);
    153 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
    154 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
    155 int bge_init_rx_ring_std(struct bge_softc *);
    156 void bge_free_rx_ring_std(struct bge_softc *);
    157 int bge_init_rx_ring_jumbo(struct bge_softc *);
    158 void bge_free_rx_ring_jumbo(struct bge_softc *);
    159 void bge_free_tx_ring(struct bge_softc *);
    160 int bge_init_tx_ring(struct bge_softc *);
    161 
    162 int bge_chipinit(struct bge_softc *);
    163 int bge_blockinit(struct bge_softc *);
    164 int bge_setpowerstate(struct bge_softc *, int);
    165 
    166 #ifdef notdef
    167 u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
    168 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
    169 void bge_vpd_read(struct bge_softc *);
    170 #endif
    171 
    172 u_int32_t bge_readmem_ind(struct bge_softc *, int);
    173 void bge_writemem_ind(struct bge_softc *, int, int);
    174 #ifdef notdef
    175 u_int32_t bge_readreg_ind(struct bge_softc *, int);
    176 #endif
    177 void bge_writereg_ind(struct bge_softc *, int, int);
    178 
    179 int bge_miibus_readreg(struct device *, int, int);
    180 void bge_miibus_writereg(struct device *, int, int, int);
    181 void bge_miibus_statchg(struct device *);
    182 
    183 void bge_reset(struct bge_softc *);
    184 
    185 void bge_dump_status(struct bge_softc *);
    186 void bge_dump_rxbd(struct bge_rx_bd *);
    187 
    188 #define BGE_DEBUG
    189 #ifdef BGE_DEBUG
    190 #define DPRINTF(x)	if (bgedebug) printf x
    191 #define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
    192 int	bgedebug = 0;
    193 #else
    194 #define DPRINTF(x)
    195 #define DPRINTFN(n,x)
    196 #endif
    197 
    198 /* Various chip quirks. */
    199 #define	BGE_QUIRK_LINK_STATE_BROKEN	0x00000001
    200 #define	BGE_QUIRK_CSUM_BROKEN		0x00000002
    201 #define	BGE_QUIRK_ONLY_PHY_1		0x00000004
    202 #define	BGE_QUIRK_5700_SMALLDMA		0x00000008
    203 #define	BGE_QUIRK_5700_PCIX_REG_BUG	0x00000010
     204 #define	BGE_QUIRK_PRODUCER_BUG		0x00000020
    205 
     206 /* The following quirks are common to all flavours of BCM5700 rev B. */
    207 #define BGE_QUIRK_5700_COMMON \
    208 	(BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)
    209 
    210 CFATTACH_DECL(bge, sizeof(struct bge_softc),
    211     bge_probe, bge_attach, NULL, NULL);
    212 
    213 u_int32_t
    214 bge_readmem_ind(sc, off)
    215 	struct bge_softc *sc;
    216 	int off;
    217 {
    218 	struct pci_attach_args	*pa = &(sc->bge_pa);
    219 	pcireg_t val;
    220 
    221 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
    222 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
    223 	return val;
    224 }
    225 
    226 void
    227 bge_writemem_ind(sc, off, val)
    228 	struct bge_softc *sc;
    229 	int off, val;
    230 {
    231 	struct pci_attach_args	*pa = &(sc->bge_pa);
    232 
    233 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
    234 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
    235 }
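         /*
          * Usage sketch for the indirect accessors above (illustrative
          * only, nothing here calls it): a word of NIC-internal memory
          * can be inspected or cleared through the PCI config window,
          * e.g.
          *
          *	u_int32_t word;
          *
          *	word = bge_readmem_ind(sc, BGE_STATS_BLOCK);
          *	bge_writemem_ind(sc, BGE_STATS_BLOCK, 0);
          *
          * BGE_STATS_BLOCK serves purely as an example offset here.
          */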
    236 
    237 #ifdef notdef
    238 u_int32_t
    239 bge_readreg_ind(sc, off)
    240 	struct bge_softc *sc;
    241 	int off;
    242 {
    243 	struct pci_attach_args	*pa = &(sc->bge_pa);
    244 
    245 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
    246 	return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
    247 }
    248 #endif
    249 
    250 void
    251 bge_writereg_ind(sc, off, val)
    252 	struct bge_softc *sc;
    253 	int off, val;
    254 {
    255 	struct pci_attach_args	*pa = &(sc->bge_pa);
    256 
    257 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
    258 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
    259 }
    260 
    261 #ifdef notdef
    262 u_int8_t
    263 bge_vpd_readbyte(sc, addr)
    264 	struct bge_softc *sc;
    265 	int addr;
    266 {
    267 	int i;
    268 	u_int32_t val;
    269 	struct pci_attach_args	*pa = &(sc->bge_pa);
    270 
    271 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
    272 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
    273 		DELAY(10);
    274 		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
    275 		    BGE_VPD_FLAG)
    276 			break;
    277 	}
    278 
     279 	if (i == BGE_TIMEOUT * 10) {
    280 		printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
    281 		return(0);
    282 	}
    283 
    284 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);
    285 
    286 	return((val >> ((addr % 4) * 8)) & 0xFF);
    287 }
    288 
    289 void
    290 bge_vpd_read_res(sc, res, addr)
    291 	struct bge_softc *sc;
    292 	struct vpd_res *res;
    293 	int addr;
    294 {
    295 	int i;
    296 	u_int8_t *ptr;
    297 
    298 	ptr = (u_int8_t *)res;
    299 	for (i = 0; i < sizeof(struct vpd_res); i++)
    300 		ptr[i] = bge_vpd_readbyte(sc, i + addr);
    301 }
    302 
    303 void
    304 bge_vpd_read(sc)
    305 	struct bge_softc *sc;
    306 {
    307 	int pos = 0, i;
    308 	struct vpd_res res;
    309 
    310 	if (sc->bge_vpd_prodname != NULL)
    311 		free(sc->bge_vpd_prodname, M_DEVBUF);
    312 	if (sc->bge_vpd_readonly != NULL)
    313 		free(sc->bge_vpd_readonly, M_DEVBUF);
    314 	sc->bge_vpd_prodname = NULL;
    315 	sc->bge_vpd_readonly = NULL;
    316 
    317 	bge_vpd_read_res(sc, &res, pos);
    318 
    319 	if (res.vr_id != VPD_RES_ID) {
    320 		printf("%s: bad VPD resource id: expected %x got %x\n",
    321 			sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
    322 		return;
    323 	}
    324 
    325 	pos += sizeof(res);
    326 	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
    327 	if (sc->bge_vpd_prodname == NULL)
    328 		panic("bge_vpd_read");
    329 	for (i = 0; i < res.vr_len; i++)
    330 		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
    331 	sc->bge_vpd_prodname[i] = '\0';
    332 	pos += i;
    333 
    334 	bge_vpd_read_res(sc, &res, pos);
    335 
    336 	if (res.vr_id != VPD_RES_READ) {
    337 		printf("%s: bad VPD resource id: expected %x got %x\n",
    338 		    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
    339 		return;
    340 	}
    341 
    342 	pos += sizeof(res);
    343 	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
    344 	if (sc->bge_vpd_readonly == NULL)
    345 		panic("bge_vpd_read");
    346 	for (i = 0; i < res.vr_len + 1; i++)
    347 		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
    348 }
    349 #endif
    350 
    351 /*
    352  * Read a byte of data stored in the EEPROM at address 'addr.' The
    353  * BCM570x supports both the traditional bitbang interface and an
    354  * auto access interface for reading the EEPROM. We use the auto
    355  * access method.
    356  */
    357 u_int8_t
    358 bge_eeprom_getbyte(sc, addr, dest)
    359 	struct bge_softc *sc;
    360 	int addr;
    361 	u_int8_t *dest;
    362 {
    363 	int i;
    364 	u_int32_t byte = 0;
    365 
    366 	/*
    367 	 * Enable use of auto EEPROM access so we can avoid
    368 	 * having to use the bitbang method.
    369 	 */
    370 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
    371 
    372 	/* Reset the EEPROM, load the clock period. */
    373 	CSR_WRITE_4(sc, BGE_EE_ADDR,
    374 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
    375 	DELAY(20);
    376 
    377 	/* Issue the read EEPROM command. */
    378 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
    379 
    380 	/* Wait for completion */
    381 	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
    382 		DELAY(10);
    383 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
    384 			break;
    385 	}
    386 
     387 	if (i == BGE_TIMEOUT * 10) {
    388 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
    389 		return(0);
    390 	}
    391 
    392 	/* Get result. */
    393 	byte = CSR_READ_4(sc, BGE_EE_DATA);
    394 
    395 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
    396 
    397 	return(0);
    398 }
    399 
    400 /*
    401  * Read a sequence of bytes from the EEPROM.
    402  */
    403 int
    404 bge_read_eeprom(sc, dest, off, cnt)
    405 	struct bge_softc *sc;
    406 	caddr_t dest;
    407 	int off;
    408 	int cnt;
    409 {
    410 	int err = 0, i;
    411 	u_int8_t byte = 0;
    412 
    413 	for (i = 0; i < cnt; i++) {
    414 		err = bge_eeprom_getbyte(sc, off + i, &byte);
    415 		if (err)
    416 			break;
    417 		*(dest + i) = byte;
    418 	}
    419 
    420 	return(err ? 1 : 0);
    421 }
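         /*
          * A typical caller reads a small, fixed-size object out of the
          * SEEPROM along these lines (an illustrative sketch only; the
          * offset macro is just an example):
          *
          *	u_char buf[ETHER_ADDR_LEN];
          *
          *	if (bge_read_eeprom(sc, (caddr_t)buf, BGE_EE_MAC_OFFSET + 2,
          *	    ETHER_ADDR_LEN))
          *		printf("%s: EEPROM read failed\n", sc->bge_dev.dv_xname);
          */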
    422 
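         /*
          * Read a PHY register through the MAC's MI communication
          * register.  Autopolling is parked for the duration of the
          * access, since touching the PHY while it is enabled may
          * trigger PCI errors.
          */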
    423 int
    424 bge_miibus_readreg(dev, phy, reg)
    425 	struct device *dev;
    426 	int phy, reg;
    427 {
    428 	struct bge_softc *sc = (struct bge_softc *)dev;
    429 	struct ifnet *ifp;
    430 	u_int32_t val;
    431 	u_int32_t saved_autopoll;
    432 	int i;
    433 
    434 	ifp = &sc->ethercom.ec_if;
    435 
    436 	/*
     437 	 * Several chips with built-in PHYs will incorrectly answer to
     438 	 * PHY instances other than the built-in PHY at id 1.
    439 	 */
    440 	if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
    441 		return(0);
    442 
    443 	/* Reading with autopolling on may trigger PCI errors */
    444 	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
    445 	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
    446 		CSR_WRITE_4(sc, BGE_MI_MODE,
    447 		    saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
    448 		DELAY(40);
    449 	}
    450 
    451 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
    452 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
    453 
    454 	for (i = 0; i < BGE_TIMEOUT; i++) {
    455 		val = CSR_READ_4(sc, BGE_MI_COMM);
    456 		if (!(val & BGE_MICOMM_BUSY))
    457 			break;
    458 		delay(10);
    459 	}
    460 
    461 	if (i == BGE_TIMEOUT) {
    462 		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
    463 		val = 0;
    464 		goto done;
    465 	}
    466 
    467 	val = CSR_READ_4(sc, BGE_MI_COMM);
    468 
    469 done:
    470 	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
    471 		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
    472 		DELAY(40);
    473 	}
    474 
    475 	if (val & BGE_MICOMM_READFAIL)
    476 		return(0);
    477 
    478 	return(val & 0xFFFF);
    479 }
    480 
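         /*
          * Write a PHY register through the MAC's MI communication
          * register, again with autopolling disabled around the access.
          */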
    481 void
    482 bge_miibus_writereg(dev, phy, reg, val)
    483 	struct device *dev;
    484 	int phy, reg, val;
    485 {
    486 	struct bge_softc *sc = (struct bge_softc *)dev;
    487 	u_int32_t saved_autopoll;
    488 	int i;
    489 
    490 	/* Touching the PHY while autopolling is on may trigger PCI errors */
    491 	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
    492 	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
    493 		delay(40);
    494 		CSR_WRITE_4(sc, BGE_MI_MODE,
    495 		    saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
    496 		delay(10); /* 40 usec is supposed to be adequate */
    497 	}
    498 
    499 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
    500 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
    501 
    502 	for (i = 0; i < BGE_TIMEOUT; i++) {
    503 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
    504 			break;
    505 		delay(10);
    506 	}
    507 
    508 	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
    509 		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
    510 		delay(40);
    511 	}
    512 
    513 	if (i == BGE_TIMEOUT) {
     514 		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
    515 	}
    516 }
    517 
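         /*
          * MII status change callback: reprogram the MAC's port mode
          * (GMII vs. MII) and duplex setting to match what the PHY
          * negotiated.
          */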
    518 void
    519 bge_miibus_statchg(dev)
    520 	struct device *dev;
    521 {
    522 	struct bge_softc *sc = (struct bge_softc *)dev;
    523 	struct mii_data *mii = &sc->bge_mii;
    524 
    525 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
    526 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
    527 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
    528 	} else {
    529 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
    530 	}
    531 
    532 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
    533 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
    534 	} else {
    535 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
    536 	}
    537 }
    538 
    539 /*
    540  * Handle events that have triggered interrupts.
    541  */
    542 void
    543 bge_handle_events(sc)
    544 	struct bge_softc		*sc;
    545 {
    546 
    547 	return;
    548 }
    549 
    550 /*
    551  * Memory management for jumbo frames.
    552  */
    553 
    554 int
    555 bge_alloc_jumbo_mem(sc)
    556 	struct bge_softc		*sc;
    557 {
    558 	caddr_t			ptr, kva;
    559 	bus_dma_segment_t	seg;
    560 	int		i, rseg, state, error;
    561 	struct bge_jpool_entry   *entry;
    562 
    563 	state = error = 0;
    564 
    565 	/* Grab a big chunk o' storage. */
    566 	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
    567 	     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
    568 		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
    569 		return ENOBUFS;
    570 	}
    571 
    572 	state = 1;
    573 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
    574 	    BUS_DMA_NOWAIT)) {
    575 		printf("%s: can't map dma buffers (%d bytes)\n",
    576 		    sc->bge_dev.dv_xname, (int)BGE_JMEM);
    577 		error = ENOBUFS;
    578 		goto out;
    579 	}
    580 
    581 	state = 2;
    582 	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
    583 	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
    584 		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
    585 		error = ENOBUFS;
    586 		goto out;
    587 	}
    588 
    589 	state = 3;
    590 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
    591 	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
    592 		printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
    593 		error = ENOBUFS;
    594 		goto out;
    595 	}
    596 
    597 	state = 4;
    598 	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
    599 	DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));
    600 
    601 	SLIST_INIT(&sc->bge_jfree_listhead);
    602 	SLIST_INIT(&sc->bge_jinuse_listhead);
    603 
    604 	/*
    605 	 * Now divide it up into 9K pieces and save the addresses
    606 	 * in an array.
    607 	 */
    608 	ptr = sc->bge_cdata.bge_jumbo_buf;
    609 	for (i = 0; i < BGE_JSLOTS; i++) {
    610 		sc->bge_cdata.bge_jslots[i] = ptr;
    611 		ptr += BGE_JLEN;
    612 		entry = malloc(sizeof(struct bge_jpool_entry),
    613 		    M_DEVBUF, M_NOWAIT);
    614 		if (entry == NULL) {
    615 			printf("%s: no memory for jumbo buffer queue!\n",
    616 			    sc->bge_dev.dv_xname);
    617 			error = ENOBUFS;
    618 			goto out;
    619 		}
    620 		entry->slot = i;
    621 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
    622 				 entry, jpool_entries);
    623 	}
    624 out:
    625 	if (error != 0) {
    626 		switch (state) {
    627 		case 4:
    628 			bus_dmamap_unload(sc->bge_dmatag,
    629 			    sc->bge_cdata.bge_rx_jumbo_map);
    630 		case 3:
    631 			bus_dmamap_destroy(sc->bge_dmatag,
    632 			    sc->bge_cdata.bge_rx_jumbo_map);
    633 		case 2:
    634 			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
    635 		case 1:
    636 			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
    637 			break;
    638 		default:
    639 			break;
    640 		}
    641 	}
    642 
    643 	return error;
    644 }
    645 
    646 /*
    647  * Allocate a jumbo buffer.
    648  */
    649 void *
    650 bge_jalloc(sc)
    651 	struct bge_softc		*sc;
    652 {
    653 	struct bge_jpool_entry   *entry;
    654 
    655 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
    656 
    657 	if (entry == NULL) {
    658 		printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
    659 		return(NULL);
    660 	}
    661 
    662 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
    663 	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
    664 	return(sc->bge_cdata.bge_jslots[entry->slot]);
    665 }
    666 
    667 /*
    668  * Release a jumbo buffer.
    669  */
    670 void
    671 bge_jfree(m, buf, size, arg)
    672 	struct mbuf	*m;
    673 	caddr_t		buf;
    674 	size_t		size;
    675 	void		*arg;
    676 {
    677 	struct bge_jpool_entry *entry;
    678 	struct bge_softc *sc;
    679 	int i, s;
    680 
    681 	/* Extract the softc struct pointer. */
    682 	sc = (struct bge_softc *)arg;
    683 
    684 	if (sc == NULL)
    685 		panic("bge_jfree: can't find softc pointer!");
    686 
    687 	/* calculate the slot this buffer belongs to */
    688 
    689 	i = ((caddr_t)buf
    690 	     - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
    691 
    692 	if ((i < 0) || (i >= BGE_JSLOTS))
    693 		panic("bge_jfree: asked to free buffer that we don't manage!");
    694 
    695 	s = splvm();
    696 	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
    697 	if (entry == NULL)
    698 		panic("bge_jfree: buffer not in use!");
    699 	entry->slot = i;
    700 	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
    701 	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
    702 
    703 	if (__predict_true(m != NULL))
    704   		pool_cache_put(&mbpool_cache, m);
    705 	splx(s);
    706 }
    707 
    708 
    709 /*
     710  * Initialize a standard receive ring descriptor.
    711  */
    712 int
    713 bge_newbuf_std(sc, i, m, dmamap)
    714 	struct bge_softc	*sc;
    715 	int			i;
    716 	struct mbuf		*m;
    717 	bus_dmamap_t dmamap;
    718 {
    719 	struct mbuf		*m_new = NULL;
    720 	struct bge_rx_bd	*r;
    721 	int			error;
    722 
    723 	if (dmamap == NULL) {
    724 		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
    725 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
    726 		if (error != 0)
    727 			return error;
    728 	}
    729 
    730 	sc->bge_cdata.bge_rx_std_map[i] = dmamap;
    731 
    732 	if (m == NULL) {
    733 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
    734 		if (m_new == NULL) {
    735 			return(ENOBUFS);
    736 		}
    737 
    738 		MCLGET(m_new, M_DONTWAIT);
    739 		if (!(m_new->m_flags & M_EXT)) {
    740 			m_freem(m_new);
    741 			return(ENOBUFS);
    742 		}
    743 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
    744 		m_adj(m_new, ETHER_ALIGN);
    745 
    746 		if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
    747 		    BUS_DMA_READ|BUS_DMA_NOWAIT))
    748 			return(ENOBUFS);
    749 	} else {
    750 		m_new = m;
    751 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
    752 		m_new->m_data = m_new->m_ext.ext_buf;
    753 		m_adj(m_new, ETHER_ALIGN);
    754 	}
    755 
    756 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
    757 	r = &sc->bge_rdata->bge_rx_std_ring[i];
    758 	bge_set_hostaddr(&r->bge_addr,
    759 	    dmamap->dm_segs[0].ds_addr);
    760 	r->bge_flags = BGE_RXBDFLAG_END;
    761 	r->bge_len = m_new->m_len;
    762 	r->bge_idx = i;
    763 
    764 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
    765 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
    766 		i * sizeof (struct bge_rx_bd),
    767 	    sizeof (struct bge_rx_bd),
    768 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
    769 
    770 	return(0);
    771 }
    772 
    773 /*
    774  * Initialize a jumbo receive ring descriptor. This allocates
    775  * a jumbo buffer from the pool managed internally by the driver.
    776  */
    777 int
    778 bge_newbuf_jumbo(sc, i, m)
    779 	struct bge_softc *sc;
    780 	int i;
    781 	struct mbuf *m;
    782 {
    783 	struct mbuf *m_new = NULL;
    784 	struct bge_rx_bd *r;
    785 
    786 	if (m == NULL) {
    787 		caddr_t			*buf = NULL;
    788 
    789 		/* Allocate the mbuf. */
    790 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
    791 		if (m_new == NULL) {
    792 			return(ENOBUFS);
    793 		}
    794 
    795 		/* Allocate the jumbo buffer */
    796 		buf = bge_jalloc(sc);
    797 		if (buf == NULL) {
    798 			m_freem(m_new);
    799 			printf("%s: jumbo allocation failed "
    800 			    "-- packet dropped!\n", sc->bge_dev.dv_xname);
    801 			return(ENOBUFS);
    802 		}
    803 
    804 		/* Attach the buffer to the mbuf. */
    805 		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
    806 		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
    807 		    bge_jfree, sc);
    808 	} else {
    809 		m_new = m;
    810 		m_new->m_data = m_new->m_ext.ext_buf;
    811 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
    812 	}
    813 
    814 	m_adj(m_new, ETHER_ALIGN);
    815 	/* Set up the descriptor. */
    816 	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
    817 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
    818 	bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
    819 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
    820 	r->bge_len = m_new->m_len;
    821 	r->bge_idx = i;
    822 
    823 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
    824 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
    825 		i * sizeof (struct bge_rx_bd),
    826 	    sizeof (struct bge_rx_bd),
    827 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
    828 
    829 	return(0);
    830 }
    831 
    832 /*
    833  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
     834  * that's 1MB of memory, which is a lot. For now, we fill only the first
    835  * 256 ring entries and hope that our CPU is fast enough to keep up with
    836  * the NIC.
    837  */
    838 int
    839 bge_init_rx_ring_std(sc)
    840 	struct bge_softc *sc;
    841 {
    842 	int i;
    843 
    844 	if (sc->bge_flags & BGE_RXRING_VALID)
    845 		return 0;
    846 
    847 	for (i = 0; i < BGE_SSLOTS; i++) {
    848 		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
    849 			return(ENOBUFS);
    850 	}
    851 
    852 	sc->bge_std = i - 1;
    853 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
    854 
    855 	sc->bge_flags |= BGE_RXRING_VALID;
    856 
    857 	return(0);
    858 }
    859 
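         /*
          * Free any mbufs left on the standard RX ring and release
          * their DMA maps.
          */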
    860 void
    861 bge_free_rx_ring_std(sc)
    862 	struct bge_softc *sc;
    863 {
    864 	int i;
    865 
    866 	if (!(sc->bge_flags & BGE_RXRING_VALID))
    867 		return;
    868 
    869 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
    870 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
    871 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
    872 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
    873 			bus_dmamap_destroy(sc->bge_dmatag,
    874 			    sc->bge_cdata.bge_rx_std_map[i]);
    875 		}
    876 		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
    877 		    sizeof(struct bge_rx_bd));
    878 	}
    879 
    880 	sc->bge_flags &= ~BGE_RXRING_VALID;
    881 }
    882 
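         /*
          * Fill the jumbo RX ring and enable it by clearing the
          * 'ring disabled' flag in its control block.
          */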
    883 int
    884 bge_init_rx_ring_jumbo(sc)
    885 	struct bge_softc *sc;
    886 {
    887 	int i;
    888 	volatile struct bge_rcb *rcb;
    889 
    890 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
    891 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
    892 			return(ENOBUFS);
     893 	}
    894 
    895 	sc->bge_jumbo = i - 1;
    896 
    897 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
    898 	rcb->bge_maxlen_flags = 0;
    899 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
    900 
     901 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
         	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
     902 
     903 	return(0);
    904 }
    905 
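         /*
          * Free any mbufs left on the jumbo RX ring; their backing
          * jumbo buffers are returned to the free list via bge_jfree().
          */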
    906 void
    907 bge_free_rx_ring_jumbo(sc)
    908 	struct bge_softc *sc;
    909 {
    910 	int i;
    911 
    912 	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
    913 		return;
    914 
    915 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
    916 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
    917 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
    918 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
    919 		}
    920 		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
    921 		    sizeof(struct bge_rx_bd));
    922 	}
    923 
    924 	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
    925 }
    926 
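         /*
          * Tear down the TX ring: free any mbufs still queued and
          * destroy the per-packet DMA maps.
          */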
    927 void
    928 bge_free_tx_ring(sc)
    929 	struct bge_softc *sc;
    930 {
    931 	int i, freed;
    932 	struct txdmamap_pool_entry *dma;
    933 
    934 	if (!(sc->bge_flags & BGE_TXRING_VALID))
    935 		return;
    936 
    937 	freed = 0;
    938 
    939 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
    940 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
    941 			freed++;
    942 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
    943 			sc->bge_cdata.bge_tx_chain[i] = NULL;
    944 			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
    945 					    link);
    946 			sc->txdma[i] = 0;
    947 		}
    948 		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
    949 		    sizeof(struct bge_tx_bd));
    950 	}
    951 
    952 	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
    953 		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
    954 		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
    955 		free(dma, M_DEVBUF);
    956 	}
    957 
    958 	sc->bge_flags &= ~BGE_TXRING_VALID;
    959 }
    960 
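         /*
          * Set up the TX ring: reset the host and NIC producer indices
          * (written twice on chips with the 5700 B2 producer erratum)
          * and pre-allocate a pool of DMA maps for outgoing packets.
          */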
    961 int
    962 bge_init_tx_ring(sc)
    963 	struct bge_softc *sc;
    964 {
    965 	int i;
    966 	bus_dmamap_t dmamap;
    967 	struct txdmamap_pool_entry *dma;
    968 
    969 	if (sc->bge_flags & BGE_TXRING_VALID)
    970 		return 0;
    971 
    972 	sc->bge_txcnt = 0;
    973 	sc->bge_tx_saved_considx = 0;
    974 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
    975 	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
    976 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
    977 
    978 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
    979 	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
     980 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
    981 
    982 	SLIST_INIT(&sc->txdma_list);
    983 	for (i = 0; i < BGE_RSLOTS; i++) {
    984 		if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
    985 		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
    986 		    &dmamap))
    987 			return(ENOBUFS);
    988 		if (dmamap == NULL)
    989 			panic("dmamap NULL in bge_init_tx_ring");
    990 		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
    991 		if (dma == NULL) {
    992 			printf("%s: can't alloc txdmamap_pool_entry\n",
    993 			    sc->bge_dev.dv_xname);
    994 			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
    995 			return (ENOMEM);
    996 		}
    997 		dma->dmamap = dmamap;
    998 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
    999 	}
   1000 
   1001 	sc->bge_flags |= BGE_TXRING_VALID;
   1002 
   1003 	return(0);
   1004 }
   1005 
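         /*
          * Program the multicast filter.  Each multicast address is
          * hashed with little-endian CRC-32; the low 7 bits of the hash
          * select one of 128 filter bits spread across the four BGE_MAR
          * registers.
          */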
   1006 void
   1007 bge_setmulti(sc)
   1008 	struct bge_softc *sc;
   1009 {
   1010 	struct ethercom		*ac = &sc->ethercom;
   1011 	struct ifnet		*ifp = &ac->ec_if;
   1012 	struct ether_multi	*enm;
   1013 	struct ether_multistep  step;
   1014 	u_int32_t		hashes[4] = { 0, 0, 0, 0 };
   1015 	u_int32_t		h;
   1016 	int			i;
   1017 
   1018 	if (ifp->if_flags & IFF_PROMISC)
   1019 		goto allmulti;
   1020 
   1021 	/* Now program new ones. */
   1022 	ETHER_FIRST_MULTI(step, ac, enm);
   1023 	while (enm != NULL) {
   1024 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   1025 			/*
   1026 			 * We must listen to a range of multicast addresses.
   1027 			 * For now, just accept all multicasts, rather than
   1028 			 * trying to set only those filter bits needed to match
   1029 			 * the range.  (At this time, the only use of address
   1030 			 * ranges is for IP multicast routing, for which the
   1031 			 * range is big enough to require all bits set.)
   1032 			 */
   1033 			goto allmulti;
   1034 		}
   1035 
   1036 		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
   1037 
   1038 		/* Just want the 7 least-significant bits. */
   1039 		h &= 0x7f;
   1040 
   1041 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
   1042 		ETHER_NEXT_MULTI(step, enm);
   1043 	}
   1044 
   1045 	ifp->if_flags &= ~IFF_ALLMULTI;
   1046 	goto setit;
   1047 
   1048  allmulti:
   1049 	ifp->if_flags |= IFF_ALLMULTI;
   1050 	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
   1051 
   1052  setit:
   1053 	for (i = 0; i < 4; i++)
   1054 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
   1055 }
   1056 
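         /*
          * All combinations of the byte/word swap mode-control bits,
          * apparently kept (along with bge_swapindex) as an aid for
          * endianness experiments during bring-up.
          */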
   1057 const int bge_swapbits[] = {
   1058 	0,
   1059 	BGE_MODECTL_BYTESWAP_DATA,
   1060 	BGE_MODECTL_WORDSWAP_DATA,
   1061 	BGE_MODECTL_BYTESWAP_NONFRAME,
   1062 	BGE_MODECTL_WORDSWAP_NONFRAME,
   1063 
   1064 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
   1065 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
   1066 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
   1067 
   1068 	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
   1069 	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
   1070 
   1071 	BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
   1072 
   1073 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
   1074 	    BGE_MODECTL_BYTESWAP_NONFRAME,
   1075 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
   1076 	    BGE_MODECTL_WORDSWAP_NONFRAME,
   1077 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
   1078 	    BGE_MODECTL_WORDSWAP_NONFRAME,
   1079 	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
   1080 	    BGE_MODECTL_WORDSWAP_NONFRAME,
   1081 
   1082 	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
   1083 	    BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
   1084 };
   1085 
   1086 int bge_swapindex = 0;
   1087 
   1088 /*
   1089  * Do endian, PCI and DMA initialization. Also check the on-board ROM
   1090  * self-test results.
   1091  */
   1092 int
   1093 bge_chipinit(sc)
   1094 	struct bge_softc *sc;
   1095 {
   1096 	u_int32_t		cachesize;
   1097 	int			i;
   1098 	u_int32_t		dma_rw_ctl;
   1099 	struct pci_attach_args	*pa = &(sc->bge_pa);
   1100 
   1101 
   1102 	/* Set endianness before we access any non-PCI registers. */
   1103 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
   1104 	    BGE_INIT);
   1105 
   1106 	/* Set power state to D0. */
   1107 	bge_setpowerstate(sc, 0);
   1108 
   1109 	/*
   1110 	 * Check the 'ROM failed' bit on the RX CPU to see if
   1111 	 * self-tests passed.
   1112 	 */
   1113 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
   1114 		printf("%s: RX CPU self-diagnostics failed!\n",
   1115 		    sc->bge_dev.dv_xname);
   1116 		return(ENODEV);
   1117 	}
   1118 
   1119 	/* Clear the MAC control register */
   1120 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
   1121 
   1122 	/*
   1123 	 * Clear the MAC statistics block in the NIC's
   1124 	 * internal memory.
   1125 	 */
   1126 	for (i = BGE_STATS_BLOCK;
   1127 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
   1128 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
   1129 
   1130 	for (i = BGE_STATUS_BLOCK;
   1131 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
   1132 		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
   1133 
   1134 	/* Set up the PCI DMA control register. */
   1135 	if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) &
   1136 	    BGE_PCISTATE_PCI_BUSMODE) {
   1137 		/* Conventional PCI bus */
   1138 	  	DPRINTFN(4, ("(%s: PCI 2.2 dma setting)\n", sc->bge_dev.dv_xname));
   1139 		dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
   1140 		   (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
   1141 		   (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
   1142 		   (0x0F));
   1143 	} else {
    1144 	  	DPRINTFN(4, ("(%s: PCI-X dma setting)\n", sc->bge_dev.dv_xname));
   1145 		/* PCI-X bus */
   1146 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
   1147 		    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
   1148 		    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
   1149 		    (0x0F);
   1150 		/*
   1151 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
   1152 		 * for hardware bugs, which means we should also clear
   1153 		 * the low-order MINDMA bits.  In addition, the 5704
   1154 		 * uses a different encoding of read/write watermarks.
   1155 		 */
   1156 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704_A0) {
   1157 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
   1158 			  /* should be 0x1f0000 */
   1159 			  (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
   1160 			  (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
   1161 			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
   1162 		}
   1163 		else if ((sc->bge_asicrev >> 28) ==
   1164 			 (BGE_ASICREV_BCM5703_A0 >> 28)) {
   1165 			dma_rw_ctl &=  0xfffffff0;
   1166 			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
   1167 		}
   1168 	}
   1169 
   1170 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
   1171 
   1172 	/*
   1173 	 * Set up general mode register.
   1174 	 */
   1175 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
   1176 		    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
   1177 		    BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM|
   1178 		    BGE_MODECTL_RX_NO_PHDR_CSUM);
   1179 
   1180 	/* Get cache line size. */
   1181 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
   1182 
   1183 	/*
   1184 	 * Avoid violating PCI spec on certain chip revs.
   1185 	 */
   1186 	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
   1187 	    PCIM_CMD_MWIEN) {
   1188 		switch(cachesize) {
   1189 		case 1:
   1190 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
   1191 				   BGE_PCI_WRITE_BNDRY_16BYTES);
   1192 			break;
   1193 		case 2:
   1194 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
   1195 				   BGE_PCI_WRITE_BNDRY_32BYTES);
   1196 			break;
   1197 		case 4:
   1198 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
   1199 				   BGE_PCI_WRITE_BNDRY_64BYTES);
   1200 			break;
   1201 		case 8:
   1202 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
   1203 				   BGE_PCI_WRITE_BNDRY_128BYTES);
   1204 			break;
   1205 		case 16:
   1206 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
   1207 				   BGE_PCI_WRITE_BNDRY_256BYTES);
   1208 			break;
   1209 		case 32:
   1210 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
   1211 				   BGE_PCI_WRITE_BNDRY_512BYTES);
   1212 			break;
   1213 		case 64:
   1214 			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
   1215 				   BGE_PCI_WRITE_BNDRY_1024BYTES);
   1216 			break;
   1217 		default:
   1218 		/* Disable PCI memory write and invalidate. */
   1219 #if 0
   1220 			if (bootverbose)
   1221 				printf("%s: cache line size %d not "
   1222 				    "supported; disabling PCI MWI\n",
   1223 				    sc->bge_dev.dv_xname, cachesize);
   1224 #endif
   1225 			PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
   1226 			    PCIM_CMD_MWIEN);
   1227 			break;
   1228 		}
   1229 	}
   1230 
   1231 	/*
   1232 	 * Disable memory write invalidate.  Apparently it is not supported
   1233 	 * properly by these devices.
   1234 	 */
   1235 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);
   1236 
   1237 
   1238 #ifdef __brokenalpha__
   1239 	/*
    1240 	 * Must ensure that we do not cross an 8K (byte) boundary
   1241 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
   1242 	 * restriction on some ALPHA platforms with early revision
   1243 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
   1244 	 */
   1245 	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
   1246 #endif
   1247 
   1248 	/* Set the timer prescaler (always 66MHz) */
   1249 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
   1250 
   1251 	return(0);
   1252 }
   1253 
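         /*
          * Bring up the chip's internal blocks: the buffer manager and
          * its memory-pool watermarks, the RX/TX ring control blocks,
          * the host coalescing engine, and the individual DMA and BD
          * state machines.
          */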
   1254 int
   1255 bge_blockinit(sc)
   1256 	struct bge_softc *sc;
   1257 {
   1258 	volatile struct bge_rcb		*rcb;
   1259 	bus_size_t		rcb_addr;
   1260 	int			i;
   1261 	struct ifnet		*ifp = &sc->ethercom.ec_if;
   1262 	bge_hostaddr		taddr;
   1263 
   1264 	/*
   1265 	 * Initialize the memory window pointer register so that
   1266 	 * we can access the first 32K of internal NIC RAM. This will
   1267 	 * allow us to set up the TX send ring RCBs and the RX return
   1268 	 * ring RCBs, plus other things which live in NIC memory.
   1269 	 */
   1270 
   1271 	pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
   1272 	    BGE_PCI_MEMWIN_BASEADDR, 0);
   1273 
   1274 	/* Configure mbuf memory pool */
   1275 	if (sc->bge_extram) {
   1276 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
   1277 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
   1278 	} else {
   1279 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
   1280 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
   1281 	}
   1282 
   1283 	/* Configure DMA resource pool */
   1284 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
   1285 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
   1286 
   1287 	/* Configure mbuf pool watermarks */
   1288 #ifdef ORIG_WPAUL_VALUES
   1289 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
   1290 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
   1291 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
   1292 #else
    1293 	/* Newer Broadcom documentation strongly recommends these values: */
   1294 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
   1295 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
   1296 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
   1297 #endif
   1298 
   1299 	/* Configure DMA resource watermarks */
   1300 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
   1301 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
   1302 
   1303 	/* Enable buffer manager */
   1304 	CSR_WRITE_4(sc, BGE_BMAN_MODE,
   1305 	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
   1306 
   1307 	/* Poll for buffer manager start indication */
   1308 	for (i = 0; i < BGE_TIMEOUT; i++) {
   1309 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
   1310 			break;
   1311 		DELAY(10);
   1312 	}
   1313 
   1314 	if (i == BGE_TIMEOUT) {
   1315 		printf("%s: buffer manager failed to start\n",
   1316 		    sc->bge_dev.dv_xname);
   1317 		return(ENXIO);
   1318 	}
   1319 
   1320 	/* Enable flow-through queues */
   1321 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
   1322 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
   1323 
   1324 	/* Wait until queue initialization is complete */
   1325 	for (i = 0; i < BGE_TIMEOUT; i++) {
   1326 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
   1327 			break;
   1328 		DELAY(10);
   1329 	}
   1330 
   1331 	if (i == BGE_TIMEOUT) {
   1332 		printf("%s: flow-through queue init failed\n",
   1333 		    sc->bge_dev.dv_xname);
   1334 		return(ENXIO);
   1335 	}
   1336 
   1337 	/* Initialize the standard RX ring control block */
   1338 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
   1339 	bge_set_hostaddr(&rcb->bge_hostaddr,
   1340 	    BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
   1341 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
   1342 	if (sc->bge_extram)
   1343 		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
   1344 	else
   1345 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
   1346 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
   1347 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
   1348 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
   1349 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
   1350 
   1351 	/*
    1352 	 * Initialize the jumbo RX ring control block.
   1353 	 * We set the 'ring disabled' bit in the flags
   1354 	 * field until we're actually ready to start
   1355 	 * using this ring (i.e. once we set the MTU
   1356 	 * high enough to require it).
   1357 	 */
   1358 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
   1359 	bge_set_hostaddr(&rcb->bge_hostaddr,
   1360 	    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
   1361 	rcb->bge_maxlen_flags =
   1362 	   BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED);
   1363 	if (sc->bge_extram)
   1364 		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
   1365 	else
   1366 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
   1367 
   1368 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
   1369 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
   1370 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
   1371 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
   1372 
   1373 	/* Set up dummy disabled mini ring RCB */
   1374 	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
   1375 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
   1376 	    BGE_RCB_FLAG_RING_DISABLED);
   1377 	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
   1378 
   1379 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   1380 	    offsetof(struct bge_ring_data, bge_info), sizeof (struct bge_gib),
   1381 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1382 
   1383 	/*
    1384 	 * Set the BD ring replenish thresholds. The recommended
   1385 	 * values are 1/8th the number of descriptors allocated to
   1386 	 * each ring.
   1387 	 */
   1388 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
   1389 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
   1390 
   1391 	/*
   1392 	 * Disable all unused send rings by setting the 'ring disabled'
   1393 	 * bit in the flags field of all the TX send ring control blocks.
   1394 	 * These are located in NIC memory.
   1395 	 */
   1396 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
   1397 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
   1398 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   1399 		    BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
   1400 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
   1401 		rcb_addr += sizeof(struct bge_rcb);
   1402 	}
   1403 
   1404 	/* Configure TX RCB 0 (we use only the first ring) */
   1405 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
   1406 	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
   1407 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
   1408 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
   1409 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
   1410 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
   1411 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   1412 	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
   1413 
   1414 	/* Disable all unused RX return rings */
   1415 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
   1416 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
   1417 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
   1418 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
   1419 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   1420 			    BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,
   1421                                      BGE_RCB_FLAG_RING_DISABLED));
   1422 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
   1423 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
   1424 		    (i * (sizeof(u_int64_t))), 0);
   1425 		rcb_addr += sizeof(struct bge_rcb);
   1426 	}
   1427 
   1428 	/* Initialize RX ring indexes */
   1429 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
   1430 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
   1431 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
   1432 
   1433 	/*
    1434 	 * Set up RX return ring 0.
   1435 	 * Note that the NIC address for RX return rings is 0x00000000.
   1436 	 * The return rings live entirely within the host, so the
   1437 	 * nicaddr field in the RCB isn't used.
   1438 	 */
   1439 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
   1440 	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
   1441 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
   1442 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
   1443 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
   1444 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   1445 	    BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,0));
   1446 
   1447 	/* Set random backoff seed for TX */
   1448 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
   1449 	    LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
   1450 	    LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
   1451 	    LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
   1452 	    BGE_TX_BACKOFF_SEED_MASK);
   1453 
   1454 	/* Set inter-packet gap */
   1455 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
   1456 
   1457 	/*
   1458 	 * Specify which ring to use for packets that don't match
   1459 	 * any RX rules.
   1460 	 */
   1461 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
   1462 
   1463 	/*
   1464 	 * Configure number of RX lists. One interrupt distribution
   1465 	 * list, sixteen active lists, one bad frames class.
   1466 	 */
   1467 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
   1468 
    1469 	/* Initialize RX list placement stats mask. */
   1470 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
   1471 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
   1472 
   1473 	/* Disable host coalescing until we get it set up */
   1474 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
   1475 
   1476 	/* Poll to make sure it's shut down. */
   1477 	for (i = 0; i < BGE_TIMEOUT; i++) {
   1478 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
   1479 			break;
   1480 		DELAY(10);
   1481 	}
   1482 
   1483 	if (i == BGE_TIMEOUT) {
   1484 		printf("%s: host coalescing engine failed to idle\n",
   1485 		    sc->bge_dev.dv_xname);
   1486 		return(ENXIO);
   1487 	}
   1488 
   1489 	/* Set up host coalescing defaults */
   1490 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
   1491 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
   1492 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
   1493 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
   1494 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
   1495 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
   1496 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
   1497 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
   1498 	CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
   1499 
   1500 	/* Set up address of statistics block */
   1501 	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
   1502 	CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
   1503 	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
   1504 	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
   1505 
   1506 	/* Set up address of status block */
   1507 	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
   1508 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
   1509 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
   1510 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
   1511 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
   1512 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
   1513 
   1514 	/* Turn on host coalescing state machine */
   1515 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
   1516 
   1517 	/* Turn on RX BD completion state machine and enable attentions */
   1518 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
   1519 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
   1520 
   1521 	/* Turn on RX list placement state machine */
   1522 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
   1523 
   1524 	/* Turn on RX list selector state machine. */
   1525 	CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
   1526 
   1527 	/* Turn on DMA, clear stats */
   1528 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
   1529 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
   1530 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
   1531 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
   1532 	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
   1533 
   1534 	/* Set misc. local control, enable interrupts on attentions */
   1535 	sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
   1536 
   1537 #ifdef notdef
   1538 	/* Assert GPIO pins for PHY reset */
   1539 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
   1540 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
   1541 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
   1542 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
   1543 #endif
   1544 
   1545 #if defined(not_quite_yet)
    1546 	/* The Linux driver enables GPIO pin #1 on 5700s */
   1547 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
   1548 		sc->bge_local_ctrl_reg |=
   1549 		  (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
   1550 	}
   1551 #endif
   1552 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
   1553 
   1554 	/* Turn on DMA completion state machine */
   1555 	CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
   1556 
   1557 	/* Turn on write DMA state machine */
   1558 	CSR_WRITE_4(sc, BGE_WDMA_MODE,
   1559 	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
   1560 
   1561 	/* Turn on read DMA state machine */
   1562 	CSR_WRITE_4(sc, BGE_RDMA_MODE,
   1563 	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
   1564 
   1565 	/* Turn on RX data completion state machine */
   1566 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
   1567 
   1568 	/* Turn on RX BD initiator state machine */
   1569 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
   1570 
   1571 	/* Turn on RX data and RX BD initiator state machine */
   1572 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
   1573 
   1574 	/* Turn on Mbuf cluster free state machine */
   1575 	CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
   1576 
   1577 	/* Turn on send BD completion state machine */
   1578 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
   1579 
   1580 	/* Turn on send data completion state machine */
   1581 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
   1582 
   1583 	/* Turn on send data initiator state machine */
   1584 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
   1585 
   1586 	/* Turn on send BD initiator state machine */
   1587 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
   1588 
   1589 	/* Turn on send BD selector state machine */
   1590 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
   1591 
   1592 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
   1593 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
   1594 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
   1595 
   1596 	/* init LED register */
   1597 	CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000);
   1598 
   1599 	/* ack/clear link change events */
   1600 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
   1601 	    BGE_MACSTAT_CFG_CHANGED);
   1602 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
   1603 
   1604 	/* Enable PHY auto polling (for MII/GMII only) */
   1605 	if (sc->bge_tbi) {
   1606 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
   1607  	} else {
   1608 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
   1609 		if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
   1610 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
   1611 			    BGE_EVTENB_MI_INTERRUPT);
   1612 	}
   1613 
   1614 	/* Enable link state change attentions. */
   1615 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
   1616 
   1617 	return(0);
   1618 }
   1619 
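/*
 * Known ASIC revisions and the quirk flags that go with them.
 * bge_attach() looks the chip up here (via bge_lookup_rev()) to
 * decide which hardware workarounds to enable.
 */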
   1620 static const struct bge_revision {
   1621 	uint32_t		br_asicrev;
   1622 	uint32_t		br_quirks;
   1623 	const char		*br_name;
   1624 } bge_revisions[] = {
   1625 	{ BGE_ASICREV_BCM5700_A0,
   1626 	  BGE_QUIRK_LINK_STATE_BROKEN,
   1627 	  "BCM5700 A0" },
   1628 
   1629 	{ BGE_ASICREV_BCM5700_A1,
   1630 	  BGE_QUIRK_LINK_STATE_BROKEN,
   1631 	  "BCM5700 A1" },
   1632 
   1633 	{ BGE_ASICREV_BCM5700_B0,
   1634 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
   1635 	  "BCM5700 B0" },
   1636 
   1637 	{ BGE_ASICREV_BCM5700_B1,
   1638 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
   1639 	  "BCM5700 B1" },
   1640 
   1641 	{ BGE_ASICREV_BCM5700_B2,
   1642 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
   1643 	  "BCM5700 B2" },
   1644 
   1645 	/* This is treated like a BCM5700 Bx */
   1646 	{ BGE_ASICREV_BCM5700_ALTIMA,
   1647 	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
   1648 	  "BCM5700 Altima" },
   1649 
   1650 	{ BGE_ASICREV_BCM5700_C0,
   1651 	  0,
   1652 	  "BCM5700 C0" },
   1653 
   1654 	{ BGE_ASICREV_BCM5701_A0,
   1655 	  0,
   1656 	  "BCM5701 A0" },
   1657 
   1658 	{ BGE_ASICREV_BCM5701_B0,
   1659 	  0,
   1660 	  "BCM5701 B0" },
   1661 
   1662 	{ BGE_ASICREV_BCM5701_B2,
   1663 	  0,
   1664 	  "BCM5701 B2" },
   1665 
   1666 	{ BGE_ASICREV_BCM5701_B5,
   1667 	  BGE_QUIRK_ONLY_PHY_1,
   1668 	  "BCM5701 B5" },
   1669 
   1670 	{ BGE_ASICREV_BCM5703_A0,
   1671 	  0,
   1672 	  "BCM5703 A0" },
   1673 
   1674 	{ BGE_ASICREV_BCM5703_A1,
   1675 	  0,
   1676 	  "BCM5703 A1" },
   1677 
   1678 	{ BGE_ASICREV_BCM5703_A2,
   1679 	  BGE_QUIRK_ONLY_PHY_1,
   1680 	  "BCM5703 A2" },
   1681 
   1682 	{ BGE_ASICREV_BCM5704_A0,
   1683   	  BGE_QUIRK_ONLY_PHY_1,
   1684 	  "BCM5704 A0" },
   1685 
   1686 	{ 0, 0, NULL }
   1687 };
   1688 
   1689 static const struct bge_revision *
   1690 bge_lookup_rev(uint32_t asicrev)
   1691 {
   1692 	const struct bge_revision *br;
   1693 
   1694 	for (br = bge_revisions; br->br_name != NULL; br++) {
   1695 		if (br->br_asicrev == asicrev)
   1696 			return (br);
   1697 	}
   1698 
   1699 	return (NULL);
   1700 }
   1701 
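/*
 * PCI vendor/product IDs of supported boards.  bge_probe() and
 * bge_attach() match candidate devices against this table via
 * bge_lookup().
 */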
   1702 static const struct bge_product {
   1703 	pci_vendor_id_t		bp_vendor;
   1704 	pci_product_id_t	bp_product;
   1705 	const char		*bp_name;
   1706 } bge_products[] = {
   1707 	/*
   1708 	 * The BCM5700 documentation seems to indicate that the hardware
   1709 	 * still has the Alteon vendor ID burned into it, though it
   1710 	 * should always be overridden by the value in the EEPROM.  We'll
   1711 	 * check for it anyway.
   1712 	 */
   1713 	{ PCI_VENDOR_ALTEON,
   1714 	  PCI_PRODUCT_ALTEON_BCM5700,
   1715 	  "Broadcom BCM5700 Gigabit Ethernet" },
   1716 	{ PCI_VENDOR_ALTEON,
   1717 	  PCI_PRODUCT_ALTEON_BCM5701,
   1718 	  "Broadcom BCM5701 Gigabit Ethernet" },
   1719 
   1720 	{ PCI_VENDOR_ALTIMA,
   1721 	  PCI_PRODUCT_ALTIMA_AC1000,
   1722 	  "Altima AC1000 Gigabit Ethernet" },
   1723 	{ PCI_VENDOR_ALTIMA,
   1724 	  PCI_PRODUCT_ALTIMA_AC1001,
   1725 	  "Altima AC1001 Gigabit Ethernet" },
   1726 	{ PCI_VENDOR_ALTIMA,
   1727 	  PCI_PRODUCT_ALTIMA_AC9100,
   1728 	  "Altima AC9100 Gigabit Ethernet" },
   1729 
   1730 	{ PCI_VENDOR_BROADCOM,
   1731 	  PCI_PRODUCT_BROADCOM_BCM5700,
   1732 	  "Broadcom BCM5700 Gigabit Ethernet" },
   1733 	{ PCI_VENDOR_BROADCOM,
   1734 	  PCI_PRODUCT_BROADCOM_BCM5701,
   1735 	  "Broadcom BCM5701 Gigabit Ethernet" },
   1736 	{ PCI_VENDOR_BROADCOM,
   1737 	  PCI_PRODUCT_BROADCOM_BCM5702,
   1738 	  "Broadcom BCM5702 Gigabit Ethernet" },
   1739 	{ PCI_VENDOR_BROADCOM,
   1740 	  PCI_PRODUCT_BROADCOM_BCM5702X,
   1741 	  "Broadcom BCM5702X Gigabit Ethernet" },
   1742 	{ PCI_VENDOR_BROADCOM,
   1743 	  PCI_PRODUCT_BROADCOM_BCM5703,
   1744 	  "Broadcom BCM5703 Gigabit Ethernet" },
   1745 	{ PCI_VENDOR_BROADCOM,
   1746 	  PCI_PRODUCT_BROADCOM_BCM5703X,
   1747 	  "Broadcom BCM5703X Gigabit Ethernet" },
   1748    	{ PCI_VENDOR_BROADCOM,
   1749 	  PCI_PRODUCT_BROADCOM_BCM5704C,
   1750 	  "Broadcom BCM5704C Dual Gigabit Ethernet" },
   1751    	{ PCI_VENDOR_BROADCOM,
   1752 	  PCI_PRODUCT_BROADCOM_BCM5704S,
   1753 	  "Broadcom BCM5704S Dual Gigabit Ethernet" },
   1754 
   1755 
   1756 	{ PCI_VENDOR_SCHNEIDERKOCH,
   1757 	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
   1758 	  "SysKonnect SK-9Dx1 Gigabit Ethernet" },
   1759 
   1760 	{ PCI_VENDOR_3COM,
   1761 	  PCI_PRODUCT_3COM_3C996,
   1762 	  "3Com 3c996 Gigabit Ethernet" },
   1763 
   1764 	{ 0,
   1765 	  0,
   1766 	  NULL },
   1767 };
   1768 
   1769 static const struct bge_product *
   1770 bge_lookup(const struct pci_attach_args *pa)
   1771 {
   1772 	const struct bge_product *bp;
   1773 
   1774 	for (bp = bge_products; bp->bp_name != NULL; bp++) {
   1775 		if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
   1776 		    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
   1777 			return (bp);
   1778 	}
   1779 
   1780 	return (NULL);
   1781 }
   1782 
   1783 int
   1784 bge_setpowerstate(sc, powerlevel)
   1785 	struct bge_softc *sc;
   1786 	int powerlevel;
   1787 {
   1788 #ifdef NOTYET
   1789 	u_int32_t pm_ctl = 0;
   1790 
   1791 	/* XXX FIXME: make sure indirect accesses enabled? */
   1792 	pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
   1793 	pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
   1794 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
   1795 
   1796 	/* clear the PME_assert bit and power state bits, enable PME */
   1797 	pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
   1798 	pm_ctl &= ~PCIM_PSTAT_DMASK;
   1799 	pm_ctl |= (1 << 8);
   1800 
   1801 	if (powerlevel == 0) {
   1802 		pm_ctl |= PCIM_PSTAT_D0;
   1803 		pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
   1804 		    pm_ctl, 2);
   1805 		DELAY(10000);
   1806 		CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
   1807 		DELAY(10000);
   1808 
   1809 #ifdef NOTYET
   1810 		/* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
   1811 		bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
   1812 #endif
   1813 		DELAY(40); DELAY(40); DELAY(40);
   1814 		DELAY(10000);	/* above not quite adequate on 5700 */
   1815 		return 0;
   1816 	}
   1817 
   1818 
   1819 	/*
   1820 	 * Entering ACPI power states D1-D3 is achieved by wiggling
    1821 	 * GMII GPIO pins. Example code assumes all hardware vendors
    1822 	 * followed Broadcom's sample PCB layout. Until we verify that
    1823 	 * for all supported OEM cards, states D1-D3 are unsupported.
   1824 	 */
   1825 	printf("%s: power state %d unimplemented; check GPIO pins\n",
   1826 	       sc->bge_dev.dv_xname, powerlevel);
   1827 #endif
   1828 	return EOPNOTSUPP;
   1829 }
   1830 
   1831 
   1832 /*
   1833  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
   1834  * against our list and return its name if we find a match. Note
   1835  * that since the Broadcom controller contains VPD support, we
   1836  * can get the device name string from the controller itself instead
   1837  * of the compiled-in string. This is a little slow, but it guarantees
   1838  * we'll always announce the right product name.
   1839  */
   1840 int
   1841 bge_probe(parent, match, aux)
   1842 	struct device *parent;
   1843 	struct cfdata *match;
   1844 	void *aux;
   1845 {
   1846 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
   1847 
   1848 	if (bge_lookup(pa) != NULL)
   1849 		return (1);
   1850 
   1851 	return (0);
   1852 }
   1853 
   1854 void
   1855 bge_attach(parent, self, aux)
   1856 	struct device *parent, *self;
   1857 	void *aux;
   1858 {
   1859 	struct bge_softc	*sc = (struct bge_softc *)self;
   1860 	struct pci_attach_args	*pa = aux;
   1861 	const struct bge_product *bp;
   1862 	const struct bge_revision *br;
   1863 	pci_chipset_tag_t	pc = pa->pa_pc;
   1864 	pci_intr_handle_t	ih;
   1865 	const char		*intrstr = NULL;
   1866 	bus_dma_segment_t	seg;
   1867 	int			rseg;
   1868 	u_int32_t		hwcfg = 0;
   1869 	u_int32_t		mac_addr = 0;
   1870 	u_int32_t		command;
   1871 	struct ifnet		*ifp;
   1872 	caddr_t			kva;
   1873 	u_char			eaddr[ETHER_ADDR_LEN];
   1874 	pcireg_t		memtype;
   1875 	bus_addr_t		memaddr;
   1876 	bus_size_t		memsize;
   1877 	u_int32_t		pm_ctl;
   1878 
   1879 	bp = bge_lookup(pa);
   1880 	KASSERT(bp != NULL);
   1881 
   1882 	sc->bge_pa = *pa;
   1883 
   1884 	aprint_naive(": Ethernet controller\n");
   1885 	aprint_normal(": %s\n", bp->bp_name);
   1886 
   1887 	/*
   1888 	 * Map control/status registers.
   1889 	 */
   1890 	DPRINTFN(5, ("Map control/status regs\n"));
   1891 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1892 	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
   1893 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
   1894 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1895 
   1896 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
   1897 		aprint_error("%s: failed to enable memory mapping!\n",
   1898 		    sc->bge_dev.dv_xname);
   1899 		return;
   1900 	}
   1901 
   1902 	DPRINTFN(5, ("pci_mem_find\n"));
   1903 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
   1904  	switch (memtype) {
   1905 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1906 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1907 		if (pci_mapreg_map(pa, BGE_PCI_BAR0,
   1908 		    memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
   1909 		    &memaddr, &memsize) == 0)
   1910 			break;
   1911 	default:
   1912 		aprint_error("%s: can't find mem space\n",
   1913 		    sc->bge_dev.dv_xname);
   1914 		return;
   1915 	}
   1916 
   1917 	DPRINTFN(5, ("pci_intr_map\n"));
   1918 	if (pci_intr_map(pa, &ih)) {
   1919 		aprint_error("%s: couldn't map interrupt\n",
   1920 		    sc->bge_dev.dv_xname);
   1921 		return;
   1922 	}
   1923 
   1924 	DPRINTFN(5, ("pci_intr_string\n"));
   1925 	intrstr = pci_intr_string(pc, ih);
   1926 
   1927 	DPRINTFN(5, ("pci_intr_establish\n"));
   1928 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
   1929 
   1930 	if (sc->bge_intrhand == NULL) {
   1931 		aprint_error("%s: couldn't establish interrupt",
   1932 		    sc->bge_dev.dv_xname);
   1933 		if (intrstr != NULL)
   1934 			aprint_normal(" at %s", intrstr);
   1935 		aprint_normal("\n");
   1936 		return;
   1937 	}
   1938 	aprint_normal("%s: interrupting at %s\n",
   1939 	    sc->bge_dev.dv_xname, intrstr);
   1940 
   1941 	/*
   1942 	 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
   1943 	 * can clobber the chip's PCI config-space power control registers,
   1944 	 * leaving the card in D3 powersave state.
   1945 	 * We do not have memory-mapped registers in this state,
   1946 	 * so force device into D0 state before starting initialization.
   1947 	 */
   1948 	pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
   1949 	pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
   1950 	pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
   1951 	pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
    1952 	DELAY(1000);	/* 27 usec is allegedly sufficient */
   1953 
   1954 	/* Try to reset the chip. */
   1955 	DPRINTFN(5, ("bge_reset\n"));
   1956 	bge_reset(sc);
   1957 
   1958 	if (bge_chipinit(sc)) {
   1959 		aprint_error("%s: chip initialization failed\n",
   1960 		    sc->bge_dev.dv_xname);
   1961 		bge_release_resources(sc);
   1962 		return;
   1963 	}
   1964 
   1965 	/*
    1966 	 * Get the station address from NIC memory, or failing that, the EEPROM.
   1967 	 */
   1968 	mac_addr = bge_readmem_ind(sc, 0x0c14);
   1969 	if ((mac_addr >> 16) == 0x484b) {
   1970 		eaddr[0] = (u_char)(mac_addr >> 8);
   1971 		eaddr[1] = (u_char)(mac_addr >> 0);
   1972 		mac_addr = bge_readmem_ind(sc, 0x0c18);
   1973 		eaddr[2] = (u_char)(mac_addr >> 24);
   1974 		eaddr[3] = (u_char)(mac_addr >> 16);
   1975 		eaddr[4] = (u_char)(mac_addr >> 8);
   1976 		eaddr[5] = (u_char)(mac_addr >> 0);
   1977 	} else if (bge_read_eeprom(sc, (caddr_t)eaddr,
   1978 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
   1979 		aprint_error("%s: failed to read station address\n",
   1980 		    sc->bge_dev.dv_xname);
   1981 		bge_release_resources(sc);
   1982 		return;
   1983 	}
   1984 
   1985 	/*
   1986 	 * Save ASIC rev.  Look up any quirks associated with this
   1987 	 * ASIC.
   1988 	 */
   1989 	sc->bge_asicrev =
   1990 	    pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
   1991 	    BGE_PCIMISCCTL_ASICREV;
   1992 	br = bge_lookup_rev(sc->bge_asicrev);
   1993 
   1994 	aprint_normal("%s: ", sc->bge_dev.dv_xname);
   1995 	if (br == NULL) {
   1996 		aprint_normal("unknown ASIC 0x%08x", sc->bge_asicrev);
   1997 		sc->bge_quirks = 0;
   1998 	} else {
   1999 		aprint_normal("ASIC %s", br->br_name);
   2000 		sc->bge_quirks = br->br_quirks;
   2001 	}
   2002 	aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
   2003 
   2004 	/* Allocate the general information block and ring buffers. */
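	/*
	 * Usual bus_dma(9) sequence: allocate DMA-safe memory for the
	 * ring data, map it into kernel virtual address space, create a
	 * DMA map and load it so we know the bus addresses to program
	 * into the chip.
	 */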
   2005 	sc->bge_dmatag = pa->pa_dmat;
   2006 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
   2007 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
   2008 			     PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
   2009 		aprint_error("%s: can't alloc rx buffers\n",
   2010 		    sc->bge_dev.dv_xname);
   2011 		return;
   2012 	}
   2013 	DPRINTFN(5, ("bus_dmamem_map\n"));
   2014 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
   2015 			   sizeof(struct bge_ring_data), &kva,
   2016 			   BUS_DMA_NOWAIT)) {
   2017 		aprint_error("%s: can't map dma buffers (%d bytes)\n",
   2018 		    sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
   2019 		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
   2020 		return;
   2021 	}
   2022 	DPRINTFN(5, ("bus_dmamem_create\n"));
   2023 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
   2024 	    sizeof(struct bge_ring_data), 0,
   2025 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
   2026 		aprint_error("%s: can't create dma map\n",
   2027 		    sc->bge_dev.dv_xname);
   2028 		bus_dmamem_unmap(sc->bge_dmatag, kva,
   2029 				 sizeof(struct bge_ring_data));
   2030 		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
   2031 		return;
   2032 	}
   2033 	DPRINTFN(5, ("bus_dmamem_load\n"));
   2034 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
   2035 			    sizeof(struct bge_ring_data), NULL,
   2036 			    BUS_DMA_NOWAIT)) {
   2037 		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
   2038 		bus_dmamem_unmap(sc->bge_dmatag, kva,
   2039 				 sizeof(struct bge_ring_data));
   2040 		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
   2041 		return;
   2042 	}
   2043 
   2044 	DPRINTFN(5, ("bzero\n"));
   2045 	sc->bge_rdata = (struct bge_ring_data *)kva;
   2046 
   2047 	memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
   2048 
   2049 	/* Try to allocate memory for jumbo buffers. */
   2050 	if (bge_alloc_jumbo_mem(sc)) {
   2051 		aprint_error("%s: jumbo buffer allocation failed\n",
   2052 		    sc->bge_dev.dv_xname);
   2053 	} else
   2054 		sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2055 
   2056 	/* Set default tuneable values. */
   2057 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
   2058 	sc->bge_rx_coal_ticks = 150;
   2059 	sc->bge_rx_max_coal_bds = 64;
   2060 #ifdef ORIG_WPAUL_VALUES
   2061 	sc->bge_tx_coal_ticks = 150;
   2062 	sc->bge_tx_max_coal_bds = 128;
   2063 #else
   2064 	sc->bge_tx_coal_ticks = 300;
   2065 	sc->bge_tx_max_coal_bds = 400;
   2066 #endif
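	/*
	 * The coalescing tunables above bound how many ticks and how many
	 * buffer descriptors the chip may accumulate before raising an
	 * interrupt; they are programmed into the host coalescing (HCC)
	 * registers when the chip is initialized.
	 */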
   2067 
   2068 	/* Set up ifnet structure */
   2069 	ifp = &sc->ethercom.ec_if;
   2070 	ifp->if_softc = sc;
   2071 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2072 	ifp->if_ioctl = bge_ioctl;
   2073 	ifp->if_start = bge_start;
   2074 	ifp->if_init = bge_init;
   2075 	ifp->if_watchdog = bge_watchdog;
   2076 	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
   2077 	IFQ_SET_READY(&ifp->if_snd);
   2078 	DPRINTFN(5, ("bcopy\n"));
   2079 	strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
   2080 
   2081 	if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
   2082 		sc->ethercom.ec_if.if_capabilities |=
   2083 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
   2084 	sc->ethercom.ec_capabilities |=
   2085 	    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
   2086 
   2087 	/*
   2088 	 * Do MII setup.
   2089 	 */
   2090 	DPRINTFN(5, ("mii setup\n"));
   2091 	sc->bge_mii.mii_ifp = ifp;
   2092 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
   2093 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
   2094 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
   2095 
   2096 	/*
   2097 	 * Figure out what sort of media we have by checking the
   2098 	 * hardware config word in the first 32k of NIC internal memory,
   2099 	 * or fall back to the config word in the EEPROM. Note: on some BCM5700
   2100 	 * cards, this value appears to be unset. If that's the
   2101 	 * case, we have to rely on identifying the NIC by its PCI
   2102 	 * subsystem ID, as we do below for the SysKonnect SK-9D41.
   2103 	 */
   2104 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
   2105 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
   2106 	} else {
   2107 		bge_read_eeprom(sc, (caddr_t)&hwcfg,
   2108 		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
   2109 		hwcfg = be32toh(hwcfg);
   2110 	}
   2111 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
   2112 		sc->bge_tbi = 1;
   2113 
   2114 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
   2115 	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
   2116 	    SK_SUBSYSID_9D41)
   2117 		sc->bge_tbi = 1;
   2118 
   2119 	if (sc->bge_tbi) {
   2120 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
   2121 		    bge_ifmedia_sts);
   2122 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
   2123 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
   2124 			    0, NULL);
   2125 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
   2126 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
   2127 	} else {
   2128 		/*
   2129 		 * Do transceiver setup.
   2130 		 */
   2131 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
   2132 			     bge_ifmedia_sts);
   2133 		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
   2134 			   MII_PHY_ANY, MII_OFFSET_ANY, 0);
   2135 
   2136 		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
   2137 			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
   2138 			ifmedia_add(&sc->bge_mii.mii_media,
   2139 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
   2140 			ifmedia_set(&sc->bge_mii.mii_media,
   2141 				    IFM_ETHER|IFM_MANUAL);
   2142 		} else
   2143 			ifmedia_set(&sc->bge_mii.mii_media,
   2144 				    IFM_ETHER|IFM_AUTO);
   2145 	}
   2146 
   2147 	/*
   2148 	 * Call MI attach routine.
   2149 	 */
   2150 	DPRINTFN(5, ("if_attach\n"));
   2151 	if_attach(ifp);
   2152 	DPRINTFN(5, ("ether_ifattach\n"));
   2153 	ether_ifattach(ifp, eaddr);
   2154 	DPRINTFN(5, ("callout_init\n"));
   2155 	callout_init(&sc->bge_timeout);
   2156 }
   2157 
   2158 void
   2159 bge_release_resources(sc)
   2160 	struct bge_softc *sc;
   2161 {
   2162 	if (sc->bge_vpd_prodname != NULL)
   2163 		free(sc->bge_vpd_prodname, M_DEVBUF);
   2164 
   2165 	if (sc->bge_vpd_readonly != NULL)
   2166 		free(sc->bge_vpd_readonly, M_DEVBUF);
   2167 }
   2168 
   2169 void
   2170 bge_reset(sc)
   2171 	struct bge_softc *sc;
   2172 {
   2173 	struct pci_attach_args *pa = &sc->bge_pa;
   2174 	u_int32_t cachesize, command, pcistate;
   2175 	int i, val = 0;
   2176 
   2177 	/* Save some important PCI state. */
   2178 	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
   2179 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
   2180 	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
   2181 
   2182 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
   2183 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
   2184 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
   2185 
   2186 	/* Issue global reset */
   2187 	bge_writereg_ind(sc, BGE_MISC_CFG,
   2188 	    BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
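	/*
	 * The (65 << 1) value appears to program the misc-config timer
	 * prescaler for a 66MHz core clock; it is written back below once
	 * the reset bit has been dropped.
	 */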
   2189 
   2190 	DELAY(1000);
   2191 
   2192 	/* Reset some of the PCI state that got zapped by reset */
   2193 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
   2194 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
   2195 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
   2196 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
   2197 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
   2198 	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
   2199 
   2200 	/* Enable memory arbiter. */
   2201 	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
   2202 
   2203 	/*
   2204 	 * Prevent PXE restart: write a magic number to the
   2205 	 * general communications memory at 0xB50.
   2206 	 */
   2207 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
   2208 
   2209 	/*
   2210 	 * Poll the value location we just wrote until
   2211 	 * we see the 1's complement of the magic number.
   2212 	 * This indicates that the firmware initialization
   2213 	 * is complete.
   2214 	 */
   2215 	for (i = 0; i < 750; i++) {
   2216 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
   2217 		if (val == ~BGE_MAGIC_NUMBER)
   2218 			break;
   2219 		DELAY(1000);
   2220 	}
   2221 
   2222 	if (i == 750) {
   2223 		printf("%s: firmware handshake timed out, val = %x\n",
   2224 		    sc->bge_dev.dv_xname, val);
   2225 		return;
   2226 	}
   2227 
   2228 	/*
   2229 	 * XXX Wait for the value of the PCISTATE register to
   2230 	 * return to its original pre-reset state. This is a
   2231 	 * fairly good indicator of reset completion. If we don't
   2232 	 * wait for the reset to fully complete, trying to read
   2233 	 * from the device's non-PCI registers may yield garbage
   2234 	 * results.
   2235 	 */
   2236 	for (i = 0; i < BGE_TIMEOUT; i++) {
   2237 		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
   2238 		    pcistate)
   2239 			break;
   2240 		DELAY(10);
   2241 	}
   2242 
   2243 	/* Enable memory arbiter. */
   2244 	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
   2245 
   2246 	/* Fix up byte swapping */
   2247 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
   2248 
   2249 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
   2250 
   2251 	DELAY(10000);
   2252 }
   2253 
   2254 /*
   2255  * Frame reception handling. This is called if there's a frame
   2256  * on the receive return list.
   2257  *
   2258  * Note: we have to be able to handle two possibilities here:
    2259  * 1) the frame is from the jumbo receive ring
   2260  * 2) the frame is from the standard receive ring
   2261  */
   2262 
   2263 void
   2264 bge_rxeof(sc)
   2265 	struct bge_softc *sc;
   2266 {
   2267 	struct ifnet *ifp;
   2268 	int stdcnt = 0, jumbocnt = 0;
   2269 	int have_tag = 0;
   2270 	u_int16_t vlan_tag = 0;
   2271 	bus_dmamap_t dmamap;
   2272 	bus_addr_t offset, toff;
   2273 	bus_size_t tlen;
   2274 	int tosync;
   2275 
   2276 	ifp = &sc->ethercom.ec_if;
   2277 
   2278 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   2279 	    offsetof(struct bge_ring_data, bge_status_block),
   2280 	    sizeof (struct bge_status_block),
   2281 	    BUS_DMASYNC_POSTREAD);
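	/*
	 * The status block is now up to date; bge_rx_prod_idx tells us
	 * how far the chip has advanced the RX return ring.
	 */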
   2282 
   2283 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
   2284 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
   2285 	    sc->bge_rx_saved_considx;
   2286 
   2287 	toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
   2288 
   2289 	if (tosync < 0) {
   2290 		tlen = (BGE_RETURN_RING_CNT - sc->bge_rx_saved_considx) *
   2291 		    sizeof (struct bge_rx_bd);
   2292 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   2293 		    toff, tlen, BUS_DMASYNC_POSTREAD);
   2294 		tosync = -tosync;
   2295 	}
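	/*
	 * The return ring is circular: when the producer index has wrapped
	 * below our saved consumer index, the tail of the ring was synced
	 * above and the rest is synced from the start of the ring below.
	 */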
   2296 
   2297 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   2298 	    offset, tosync * sizeof (struct bge_rx_bd),
   2299 	    BUS_DMASYNC_POSTREAD);
   2300 
   2301 	while(sc->bge_rx_saved_considx !=
   2302 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
   2303 		struct bge_rx_bd	*cur_rx;
   2304 		u_int32_t		rxidx;
   2305 		struct mbuf		*m = NULL;
   2306 
   2307 		cur_rx = &sc->bge_rdata->
   2308 			bge_rx_return_ring[sc->bge_rx_saved_considx];
   2309 
   2310 		rxidx = cur_rx->bge_idx;
   2311 		BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);
   2312 
   2313 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
   2314 			have_tag = 1;
   2315 			vlan_tag = cur_rx->bge_vlan_tag;
   2316 		}
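		/*
		 * The frame came from either the jumbo or the standard RX
		 * ring.  Pull the mbuf out of the ring; on a receive error
		 * or an mbuf shortage, recycle the old mbuf and count an
		 * input error instead of passing the frame up.
		 */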
   2317 
   2318 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
   2319 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
   2320 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
   2321 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
   2322 			jumbocnt++;
   2323 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
   2324 				ifp->if_ierrors++;
   2325 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
   2326 				continue;
   2327 			}
   2328 			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
   2329 					     NULL)== ENOBUFS) {
   2330 				ifp->if_ierrors++;
   2331 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
   2332 				continue;
   2333 			}
   2334 		} else {
   2335 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
   2336 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
   2337 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
   2338 			stdcnt++;
   2339 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
   2340 			sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
   2341 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
   2342 				ifp->if_ierrors++;
   2343 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
   2344 				continue;
   2345 			}
   2346 			if (bge_newbuf_std(sc, sc->bge_std,
   2347 			    NULL, dmamap) == ENOBUFS) {
   2348 				ifp->if_ierrors++;
   2349 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
   2350 				continue;
   2351 			}
   2352 		}
   2353 
   2354 		ifp->if_ipackets++;
   2355 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
   2356 		m->m_pkthdr.rcvif = ifp;
   2357 
   2358 #if NBPFILTER > 0
   2359 		/*
   2360 		 * Handle BPF listeners. Let the BPF user see the packet.
   2361 		 */
   2362 		if (ifp->if_bpf)
   2363 			bpf_mtap(ifp->if_bpf, m);
   2364 #endif
   2365 
   2366 		if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0) {
   2367 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   2368 			if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
   2369 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   2370 #if 0	/* XXX appears to be broken */
   2371 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
   2372 				m->m_pkthdr.csum_data =
   2373 				    cur_rx->bge_tcp_udp_csum;
   2374 				m->m_pkthdr.csum_flags |=
   2375 				    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_DATA);
   2376 			}
   2377 #endif
   2378 		}
   2379 
   2380 		/*
   2381 		 * If we received a packet with a vlan tag, pass it
   2382 		 * to vlan_input() instead of ether_input().
   2383 		 */
   2384 		if (have_tag) {
   2385 			struct m_tag *mtag;
   2386 
   2387 			mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
   2388 			    M_NOWAIT);
   2389 			if (mtag != NULL) {
   2390 				*(u_int *)(mtag + 1) = vlan_tag;
   2391 				m_tag_prepend(m, mtag);
   2392 				have_tag = vlan_tag = 0;
   2393 			} else {
   2394 				printf("%s: no mbuf for tag\n", ifp->if_xname);
   2395 				m_freem(m);
   2396 				have_tag = vlan_tag = 0;
   2397 				continue;
   2398 			}
   2399 		}
   2400 		(*ifp->if_input)(ifp, m);
   2401 	}
   2402 
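	/*
	 * Tell the chip how far we have consumed from the return ring and
	 * advertise any standard/jumbo receive buffers we replenished.
	 */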
   2403 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
   2404 	if (stdcnt)
   2405 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
   2406 	if (jumbocnt)
   2407 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
   2408 }
   2409 
   2410 void
   2411 bge_txeof(sc)
   2412 	struct bge_softc *sc;
   2413 {
   2414 	struct bge_tx_bd *cur_tx = NULL;
   2415 	struct ifnet *ifp;
   2416 	struct txdmamap_pool_entry *dma;
   2417 	bus_addr_t offset, toff;
   2418 	bus_size_t tlen;
   2419 	int tosync;
   2420 	struct mbuf *m;
   2421 
   2422 	ifp = &sc->ethercom.ec_if;
   2423 
   2424 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   2425 	    offsetof(struct bge_ring_data, bge_status_block),
   2426 	    sizeof (struct bge_status_block),
   2427 	    BUS_DMASYNC_POSTREAD);
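	/*
	 * Work out how many TX descriptors the chip has finished with
	 * since we last looked, and sync only that slice of the ring.
	 */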
   2428 
   2429 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
   2430 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
   2431 	    sc->bge_tx_saved_considx;
   2432 
   2433 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
   2434 
   2435 	if (tosync < 0) {
   2436 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
   2437 		    sizeof (struct bge_tx_bd);
   2438 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   2439 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   2440 		tosync = -tosync;
   2441 	}
   2442 
   2443 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   2444 	    offset, tosync * sizeof (struct bge_tx_bd),
   2445 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   2446 
   2447 	/*
   2448 	 * Go through our tx ring and free mbufs for those
   2449 	 * frames that have been sent.
   2450 	 */
   2451 	while (sc->bge_tx_saved_considx !=
   2452 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
   2453 		u_int32_t		idx = 0;
   2454 
   2455 		idx = sc->bge_tx_saved_considx;
   2456 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
   2457 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
   2458 			ifp->if_opackets++;
   2459 		m = sc->bge_cdata.bge_tx_chain[idx];
   2460 		if (m != NULL) {
   2461 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
   2462 			dma = sc->txdma[idx];
   2463 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
   2464 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   2465 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
   2466 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
   2467 			sc->txdma[idx] = NULL;
   2468 
   2469 			m_freem(m);
   2470 		}
   2471 		sc->bge_txcnt--;
   2472 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
   2473 		ifp->if_timer = 0;
   2474 	}
   2475 
   2476 	if (cur_tx != NULL)
   2477 		ifp->if_flags &= ~IFF_OACTIVE;
   2478 }
   2479 
   2480 int
   2481 bge_intr(xsc)
   2482 	void *xsc;
   2483 {
   2484 	struct bge_softc *sc;
   2485 	struct ifnet *ifp;
   2486 
   2487 	sc = xsc;
   2488 	ifp = &sc->ethercom.ec_if;
   2489 
   2490 #ifdef notdef
   2491 	/* Avoid this for now -- checking this register is expensive. */
   2492 	/* Make sure this is really our interrupt. */
   2493 	if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
   2494 		return (0);
   2495 #endif
    2496 	/* Ack interrupt and stop others from occurring. */
   2497 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
   2498 
   2499 	/*
   2500 	 * Process link state changes.
   2501 	 * Grrr. The link status word in the status block does
   2502 	 * not work correctly on the BCM5700 rev AX and BX chips,
    2503  * according to all available information. Hence, we have
   2504 	 * to enable MII interrupts in order to properly obtain
   2505 	 * async link changes. Unfortunately, this also means that
   2506 	 * we have to read the MAC status register to detect link
   2507 	 * changes, thereby adding an additional register access to
   2508 	 * the interrupt handler.
   2509 	 */
   2510 
   2511 	if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
   2512 		u_int32_t		status;
   2513 
   2514 		status = CSR_READ_4(sc, BGE_MAC_STS);
   2515 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
   2516 			sc->bge_link = 0;
   2517 			callout_stop(&sc->bge_timeout);
   2518 			bge_tick(sc);
   2519 			/* Clear the interrupt */
   2520 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
   2521 			    BGE_EVTENB_MI_INTERRUPT);
   2522 			bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
   2523 			bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
   2524 			    BRGPHY_INTRS);
   2525 		}
   2526 	} else {
   2527 		if (sc->bge_rdata->bge_status_block.bge_status &
   2528 		    BGE_STATFLAG_LINKSTATE_CHANGED) {
   2529 			sc->bge_link = 0;
   2530 			callout_stop(&sc->bge_timeout);
   2531 			bge_tick(sc);
   2532 			/* Clear the interrupt */
   2533 			CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
   2534 			    BGE_MACSTAT_CFG_CHANGED);
   2535 		}
   2536 	}
   2537 
   2538 	if (ifp->if_flags & IFF_RUNNING) {
   2539 		/* Check RX return ring producer/consumer */
   2540 		bge_rxeof(sc);
   2541 
   2542 		/* Check TX ring producer/consumer */
   2543 		bge_txeof(sc);
   2544 	}
   2545 
   2546 	bge_handle_events(sc);
   2547 
   2548 	/* Re-enable interrupts. */
   2549 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
   2550 
   2551 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
   2552 		bge_start(ifp);
   2553 
   2554 	return (1);
   2555 }
   2556 
   2557 void
   2558 bge_tick(xsc)
   2559 	void *xsc;
   2560 {
   2561 	struct bge_softc *sc = xsc;
   2562 	struct mii_data *mii = &sc->bge_mii;
   2563 	struct ifmedia *ifm = NULL;
   2564 	struct ifnet *ifp = &sc->ethercom.ec_if;
   2565 	int s;
   2566 
   2567 	s = splnet();
   2568 
   2569 	bge_stats_update(sc);
   2570 	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
   2571 	if (sc->bge_link) {
   2572 		splx(s);
   2573 		return;
   2574 	}
   2575 
   2576 	if (sc->bge_tbi) {
   2577 		ifm = &sc->bge_ifmedia;
   2578 		if (CSR_READ_4(sc, BGE_MAC_STS) &
   2579 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
   2580 			sc->bge_link++;
   2581 			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
   2582 			printf("%s: gigabit link up\n", sc->bge_dev.dv_xname);
   2583 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
   2584 				bge_start(ifp);
   2585 		}
   2586 		splx(s);
   2587 		return;
   2588 	}
   2589 
   2590 	mii_tick(mii);
   2591 
   2592 	if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
   2593 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
   2594 		sc->bge_link++;
   2595 		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
   2596 		    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
   2597 			printf("%s: gigabit link up\n", sc->bge_dev.dv_xname);
   2598 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   2599 			bge_start(ifp);
   2600 	}
   2601 
   2602 	splx(s);
   2603 }
   2604 
   2605 void
   2606 bge_stats_update(sc)
   2607 	struct bge_softc *sc;
   2608 {
   2609 	struct ifnet *ifp = &sc->ethercom.ec_if;
   2610 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
   2611 
   2612 #define READ_STAT(sc, stats, stat) \
   2613 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
   2614 
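	/*
	 * The chip keeps absolute counts in its statistics window; adding
	 * (chip total - if_collisions) keeps if_collisions tracking the
	 * chip's running total.
	 */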
   2615 	ifp->if_collisions +=
   2616 	  (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
   2617 	   READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
   2618 	   READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
   2619 	   READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
   2620 	  ifp->if_collisions;
   2621 
   2622 #undef READ_STAT
   2623 
   2624 #ifdef notdef
   2625 	ifp->if_collisions +=
   2626 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
   2627 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
   2628 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
   2629 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
   2630 	   ifp->if_collisions;
   2631 #endif
   2632 }
   2633 
   2634 /*
    2635  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
   2636  * pointers to descriptors.
   2637  */
   2638 int
   2639 bge_encap(sc, m_head, txidx)
   2640 	struct bge_softc *sc;
   2641 	struct mbuf *m_head;
   2642 	u_int32_t *txidx;
   2643 {
   2644 	struct bge_tx_bd	*f = NULL;
   2645 	u_int32_t		frag, cur, cnt = 0;
   2646 	u_int16_t		csum_flags = 0;
   2647 	struct txdmamap_pool_entry *dma;
   2648 	bus_dmamap_t dmamap;
   2649 	int			i = 0;
   2650 	struct m_tag		*mtag;
   2651 	struct mbuf		*prev, *m;
   2652 	int			totlen, prevlen;
   2653 
   2654 	cur = frag = *txidx;
   2655 
   2656 	if (m_head->m_pkthdr.csum_flags) {
   2657 		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
   2658 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
   2659 		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
   2660 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
   2661 	}
   2662 
   2663 	if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
   2664 		goto doit;
   2665 	/*
    2666 	 * BCM5700 Revision B silicon cannot handle DMA descriptors
    2667 	 * shorter than eight bytes.  If we encounter a teeny mbuf
   2668 	 * at the end of a chain, we can pad.  Otherwise, copy.
   2669 	 */
   2670 	prev = NULL;
   2671 	totlen = 0;
    2672 	for (m = m_head; m != NULL; prev = m, m = m->m_next) {
   2673 		int mlen = m->m_len;
   2674 
   2675 		totlen += mlen;
   2676 		if (mlen == 0) {
   2677 			/* print a warning? */
   2678 			continue;
   2679 		}
   2680 		if (mlen >= 8)
   2681 			continue;
   2682 
   2683 		/* If we get here, mbuf data is too small for DMA engine. */
   2684 		if (m->m_next != 0) {
   2685 			  /* Internal frag. If fits in prev, copy it there. */
   2686 			  if (prev && M_TRAILINGSPACE(prev) >= m->m_len &&
   2687 			      !M_READONLY(prev)) {
   2688 			  	bcopy(m->m_data,
   2689 				      prev->m_data+prev->m_len,
   2690 				      mlen);
   2691 				prev->m_len += mlen;
   2692 				m->m_len = 0;
   2693 				MFREE(m, prev->m_next); /* XXX stitch chain */
   2694 				m = prev;
   2695 				continue;
   2696 			  } else {
   2697 				struct mbuf *n;
   2698 				/* slow copy */
   2699 slowcopy:
   2700 			  	n = m_dup(m_head, 0, M_COPYALL, M_DONTWAIT);
   2701 				m_freem(m_head);
   2702 				if (n == 0)
   2703 					return 0;
   2704 				m_head  = n;
   2705 				goto doit;
   2706 			  }
    2707 		} else if ((totlen - mlen + 8) >= 1500) {
   2708 			goto slowcopy;
   2709 		}
   2710 		prevlen = m->m_len;
   2711 	}
   2712 
   2713 doit:
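	/*
	 * Grab a preallocated TX DMA map from the free list; bge_txeof()
	 * returns it to the list once the frame has been transmitted.
	 */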
   2714 	dma = SLIST_FIRST(&sc->txdma_list);
   2715 	if (dma == NULL)
   2716 		return ENOBUFS;
   2717 	dmamap = dma->dmamap;
   2718 
   2719 	/*
   2720 	 * Start packing the mbufs in this chain into
   2721 	 * the fragment pointers. Stop when we run out
   2722 	 * of fragments or hit the end of the mbuf chain.
   2723 	 */
   2724 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
   2725 	    BUS_DMA_NOWAIT))
   2726 		return(ENOBUFS);
   2727 
   2728 	mtag = sc->ethercom.ec_nvlans ?
   2729 	    m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
   2730 
   2731 	for (i = 0; i < dmamap->dm_nsegs; i++) {
   2732 		f = &sc->bge_rdata->bge_tx_ring[frag];
   2733 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
   2734 			break;
   2735 		bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
   2736 		f->bge_len = dmamap->dm_segs[i].ds_len;
   2737 		f->bge_flags = csum_flags;
   2738 
   2739 		if (mtag != NULL) {
   2740 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
   2741 			f->bge_vlan_tag = *(u_int *)(mtag + 1);
   2742 		} else {
   2743 			f->bge_vlan_tag = 0;
   2744 		}
   2745 		/*
   2746 		 * Sanity check: avoid coming within 16 descriptors
   2747 		 * of the end of the ring.
   2748 		 */
   2749 		if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
   2750 			return(ENOBUFS);
   2751 		cur = frag;
   2752 		BGE_INC(frag, BGE_TX_RING_CNT);
   2753 		cnt++;
   2754 	}
   2755 
   2756 	if (i < dmamap->dm_nsegs)
   2757 		return ENOBUFS;
   2758 
   2759 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
   2760 	    BUS_DMASYNC_PREWRITE);
   2761 
   2762 	if (frag == sc->bge_tx_saved_considx)
   2763 		return(ENOBUFS);
   2764 
   2765 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
   2766 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
   2767 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
   2768 	sc->txdma[cur] = dma;
   2769 	sc->bge_txcnt += cnt;
   2770 
   2771 	*txidx = frag;
   2772 
   2773 	return(0);
   2774 }
   2775 
   2776 /*
   2777  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
   2778  * to the mbuf data regions directly in the transmit descriptors.
   2779  */
   2780 void
   2781 bge_start(ifp)
   2782 	struct ifnet *ifp;
   2783 {
   2784 	struct bge_softc *sc;
   2785 	struct mbuf *m_head = NULL;
   2786 	u_int32_t prodidx = 0;
   2787 	int pkts = 0;
   2788 
   2789 	sc = ifp->if_softc;
   2790 
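	/*
	 * Defer transmission until the link comes up, unless a sizable
	 * backlog (10 or more packets) has already built up.
	 */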
   2791 	if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
   2792 		return;
   2793 
   2794 	prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
   2795 
   2796 	while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
   2797 		IFQ_POLL(&ifp->if_snd, m_head);
   2798 		if (m_head == NULL)
   2799 			break;
   2800 
   2801 #if 0
   2802 		/*
   2803 		 * XXX
   2804 		 * safety overkill.  If this is a fragmented packet chain
   2805 		 * with delayed TCP/UDP checksums, then only encapsulate
   2806 		 * it if we have enough descriptors to handle the entire
   2807 		 * chain at once.
   2808 		 * (paranoia -- may not actually be needed)
   2809 		 */
   2810 		if (m_head->m_flags & M_FIRSTFRAG &&
   2811 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
   2812 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
   2813 			    m_head->m_pkthdr.csum_data + 16) {
   2814 				ifp->if_flags |= IFF_OACTIVE;
   2815 				break;
   2816 			}
   2817 		}
   2818 #endif
   2819 
   2820 		/*
   2821 		 * Pack the data into the transmit ring. If we
   2822 		 * don't have room, set the OACTIVE flag and wait
   2823 		 * for the NIC to drain the ring.
   2824 		 */
   2825 		if (bge_encap(sc, m_head, &prodidx)) {
   2826 			ifp->if_flags |= IFF_OACTIVE;
   2827 			break;
   2828 		}
   2829 
   2830 		/* now we are committed to transmit the packet */
   2831 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
   2832 		pkts++;
   2833 
   2834 #if NBPFILTER > 0
   2835 		/*
   2836 		 * If there's a BPF listener, bounce a copy of this frame
   2837 		 * to him.
   2838 		 */
   2839 		if (ifp->if_bpf)
   2840 			bpf_mtap(ifp->if_bpf, m_head);
   2841 #endif
   2842 	}
   2843 	if (pkts == 0)
   2844 		return;
   2845 
   2846 	/* Transmit */
   2847 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
   2848 	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
   2849 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
   2850 
   2851 	/*
   2852 	 * Set a timeout in case the chip goes out to lunch.
   2853 	 */
   2854 	ifp->if_timer = 5;
   2855 }
   2856 
   2857 int
   2858 bge_init(ifp)
   2859 	struct ifnet *ifp;
   2860 {
   2861 	struct bge_softc *sc = ifp->if_softc;
   2862 	u_int16_t *m;
   2863 	int s, error;
   2864 
   2865 	s = splnet();
   2866 
   2867 	ifp = &sc->ethercom.ec_if;
   2868 
   2869 	/* Cancel pending I/O and flush buffers. */
   2870 	bge_stop(sc);
   2871 	bge_reset(sc);
   2872 	bge_chipinit(sc);
   2873 
   2874 	/*
   2875 	 * Init the various state machines, ring
   2876 	 * control blocks and firmware.
   2877 	 */
   2878 	error = bge_blockinit(sc);
   2879 	if (error != 0) {
   2880 		printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
   2881 		    error);
   2882 		splx(s);
   2883 		return error;
   2884 	}
   2885 
   2886 	ifp = &sc->ethercom.ec_if;
   2887 
   2888 	/* Specify MTU. */
   2889 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
   2890 	    ETHER_HDR_LEN + ETHER_CRC_LEN);
   2891 
   2892 	/* Load our MAC address. */
   2893 	m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
   2894 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
   2895 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
   2896 
   2897 	/* Enable or disable promiscuous mode as needed. */
   2898 	if (ifp->if_flags & IFF_PROMISC) {
   2899 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   2900 	} else {
   2901 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   2902 	}
   2903 
   2904 	/* Program multicast filter. */
   2905 	bge_setmulti(sc);
   2906 
   2907 	/* Init RX ring. */
   2908 	bge_init_rx_ring_std(sc);
   2909 
   2910 	/* Init jumbo RX ring. */
   2911 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
   2912 		bge_init_rx_ring_jumbo(sc);
   2913 
   2914 	/* Init our RX return ring index */
   2915 	sc->bge_rx_saved_considx = 0;
   2916 
   2917 	/* Init TX ring. */
   2918 	bge_init_tx_ring(sc);
   2919 
   2920 	/* Turn on transmitter */
   2921 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
   2922 
   2923 	/* Turn on receiver */
   2924 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
   2925 
   2926 	/* Tell firmware we're alive. */
   2927 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   2928 
   2929 	/* Enable host interrupts. */
   2930 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
   2931 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
   2932 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
   2933 
   2934 	bge_ifmedia_upd(ifp);
   2935 
   2936 	ifp->if_flags |= IFF_RUNNING;
   2937 	ifp->if_flags &= ~IFF_OACTIVE;
   2938 
   2939 	splx(s);
   2940 
   2941 	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
   2942 
   2943 	return 0;
   2944 }
   2945 
   2946 /*
   2947  * Set media options.
   2948  */
   2949 int
   2950 bge_ifmedia_upd(ifp)
   2951 	struct ifnet *ifp;
   2952 {
   2953 	struct bge_softc *sc = ifp->if_softc;
   2954 	struct mii_data *mii = &sc->bge_mii;
   2955 	struct ifmedia *ifm = &sc->bge_ifmedia;
   2956 
   2957 	/* If this is a 1000baseX NIC, enable the TBI port. */
   2958 	if (sc->bge_tbi) {
   2959 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2960 			return(EINVAL);
   2961 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
   2962 		case IFM_AUTO:
   2963 			break;
   2964 		case IFM_1000_SX:
   2965 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
   2966 				BGE_CLRBIT(sc, BGE_MAC_MODE,
   2967 				    BGE_MACMODE_HALF_DUPLEX);
   2968 			} else {
   2969 				BGE_SETBIT(sc, BGE_MAC_MODE,
   2970 				    BGE_MACMODE_HALF_DUPLEX);
   2971 			}
   2972 			break;
   2973 		default:
   2974 			return(EINVAL);
   2975 		}
   2976 		return(0);
   2977 	}
   2978 
   2979 	sc->bge_link = 0;
   2980 	mii_mediachg(mii);
   2981 
   2982 	return(0);
   2983 }
   2984 
   2985 /*
   2986  * Report current media status.
   2987  */
   2988 void
   2989 bge_ifmedia_sts(ifp, ifmr)
   2990 	struct ifnet *ifp;
   2991 	struct ifmediareq *ifmr;
   2992 {
   2993 	struct bge_softc *sc = ifp->if_softc;
   2994 	struct mii_data *mii = &sc->bge_mii;
   2995 
   2996 	if (sc->bge_tbi) {
   2997 		ifmr->ifm_status = IFM_AVALID;
   2998 		ifmr->ifm_active = IFM_ETHER;
   2999 		if (CSR_READ_4(sc, BGE_MAC_STS) &
   3000 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
   3001 			ifmr->ifm_status |= IFM_ACTIVE;
   3002 		ifmr->ifm_active |= IFM_1000_SX;
   3003 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
   3004 			ifmr->ifm_active |= IFM_HDX;
   3005 		else
   3006 			ifmr->ifm_active |= IFM_FDX;
   3007 		return;
   3008 	}
   3009 
   3010 	mii_pollstat(mii);
   3011 	ifmr->ifm_active = mii->mii_media_active;
   3012 	ifmr->ifm_status = mii->mii_media_status;
   3013 }
   3014 
   3015 int
   3016 bge_ioctl(ifp, command, data)
   3017 	struct ifnet *ifp;
   3018 	u_long command;
   3019 	caddr_t data;
   3020 {
   3021 	struct bge_softc *sc = ifp->if_softc;
   3022 	struct ifreq *ifr = (struct ifreq *) data;
   3023 	int s, error = 0;
   3024 	struct mii_data *mii;
   3025 
   3026 	s = splnet();
   3027 
   3028 	switch(command) {
   3029 	case SIOCSIFFLAGS:
   3030 		if (ifp->if_flags & IFF_UP) {
   3031 			/*
   3032 			 * If only the state of the PROMISC flag changed,
   3033 			 * then just use the 'set promisc mode' command
   3034 			 * instead of reinitializing the entire NIC. Doing
   3035 			 * a full re-init means reloading the firmware and
   3036 			 * waiting for it to start up, which may take a
   3037 			 * second or two.
   3038 			 */
   3039 			if (ifp->if_flags & IFF_RUNNING &&
   3040 			    ifp->if_flags & IFF_PROMISC &&
   3041 			    !(sc->bge_if_flags & IFF_PROMISC)) {
   3042 				BGE_SETBIT(sc, BGE_RX_MODE,
   3043 				    BGE_RXMODE_RX_PROMISC);
   3044 			} else if (ifp->if_flags & IFF_RUNNING &&
   3045 			    !(ifp->if_flags & IFF_PROMISC) &&
   3046 			    sc->bge_if_flags & IFF_PROMISC) {
   3047 				BGE_CLRBIT(sc, BGE_RX_MODE,
   3048 				    BGE_RXMODE_RX_PROMISC);
   3049 			} else
   3050 				bge_init(ifp);
   3051 		} else {
   3052 			if (ifp->if_flags & IFF_RUNNING) {
   3053 				bge_stop(sc);
   3054 			}
   3055 		}
   3056 		sc->bge_if_flags = ifp->if_flags;
   3057 		error = 0;
   3058 		break;
   3059 	case SIOCSIFMEDIA:
   3060 	case SIOCGIFMEDIA:
   3061 		if (sc->bge_tbi) {
   3062 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
   3063 			    command);
   3064 		} else {
   3065 			mii = &sc->bge_mii;
   3066 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
   3067 			    command);
   3068 		}
   3069 		error = 0;
   3070 		break;
   3071 	default:
   3072 		error = ether_ioctl(ifp, command, data);
   3073 		if (error == ENETRESET) {
   3074 			bge_setmulti(sc);
   3075 			error = 0;
   3076 		}
   3077 		break;
   3078 	}
   3079 
   3080 	splx(s);
   3081 
   3082 	return(error);
   3083 }
   3084 
   3085 void
   3086 bge_watchdog(ifp)
   3087 	struct ifnet *ifp;
   3088 {
   3089 	struct bge_softc *sc;
   3090 
   3091 	sc = ifp->if_softc;
   3092 
   3093 	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
   3094 
   3095 	ifp->if_flags &= ~IFF_RUNNING;
   3096 	bge_init(ifp);
   3097 
   3098 	ifp->if_oerrors++;
   3099 }
   3100 
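/*
 * Clear the enable bit in a state-machine block's mode register and
 * spin (up to BGE_TIMEOUT * 100us) waiting for the block to report
 * that it has stopped.
 */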
   3101 static void
   3102 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
   3103 {
   3104 	int i;
   3105 
   3106 	BGE_CLRBIT(sc, reg, bit);
   3107 
   3108 	for (i = 0; i < BGE_TIMEOUT; i++) {
   3109 		if ((CSR_READ_4(sc, reg) & bit) == 0)
   3110 			return;
   3111 		delay(100);
   3112 	}
   3113 
   3114 	printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
   3115 	    sc->bge_dev.dv_xname, (u_long) reg, bit);
   3116 }
   3117 
   3118 /*
   3119  * Stop the adapter and free any mbufs allocated to the
   3120  * RX and TX lists.
   3121  */
   3122 void
   3123 bge_stop(sc)
   3124 	struct bge_softc *sc;
   3125 {
   3126 	struct ifnet *ifp = &sc->ethercom.ec_if;
   3127 
   3128 	callout_stop(&sc->bge_timeout);
   3129 
   3130 	/*
   3131 	 * Disable all of the receiver blocks
   3132 	 */
   3133 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
   3134 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
   3135 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
   3136 	bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
   3137 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
   3138 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
   3139 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
   3140 
   3141 	/*
   3142 	 * Disable all of the transmit blocks
   3143 	 */
   3144 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
   3145 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
   3146 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
   3147 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
   3148 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
   3149 	bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
   3150 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
   3151 
   3152 	/*
   3153 	 * Shut down all of the memory managers and related
   3154 	 * state machines.
   3155 	 */
   3156 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
   3157 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
   3158 	bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
   3159 
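	/*
	 * Resetting the FTQ register is intended to flush the chip's
	 * internal flow-through queues.
	 */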
   3160 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
   3161 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
   3162 
   3163 	bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
   3164 	bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
   3165 
   3166 	/* Disable host interrupts. */
   3167 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
   3168 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
   3169 
   3170 	/*
   3171 	 * Tell firmware we're shutting down.
   3172 	 */
   3173 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   3174 
   3175 	/* Free the RX lists. */
   3176 	bge_free_rx_ring_std(sc);
   3177 
   3178 	/* Free jumbo RX list. */
   3179 	bge_free_rx_ring_jumbo(sc);
   3180 
   3181 	/* Free TX buffers. */
   3182 	bge_free_tx_ring(sc);
   3183 
   3184 	/*
   3185 	 * Isolate/power down the PHY.
   3186 	 */
   3187 	if (!sc->bge_tbi)
   3188 		mii_down(&sc->bge_mii);
   3189 
   3190 	sc->bge_link = 0;
   3191 
   3192 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
   3193 
   3194 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   3195 }
   3196 
   3197 /*
   3198  * Stop all chip I/O so that the kernel's probe routines don't
   3199  * get confused by errant DMAs when rebooting.
   3200  */
   3201 void
   3202 bge_shutdown(xsc)
   3203 	void *xsc;
   3204 {
   3205 	struct bge_softc *sc = (struct bge_softc *)xsc;
   3206 
   3207 	bge_stop(sc);
   3208 	bge_reset(sc);
   3209 }
   3210