      1 /*	$NetBSD: if_bge.c,v 1.275 2014/07/24 13:22:49 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001 Wind River Systems
      5  * Copyright (c) 1997, 1998, 1999, 2001
      6  *	Bill Paul <wpaul (at) windriver.com>.  All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by Bill Paul.
     19  * 4. Neither the name of the author nor the names of any co-contributors
     20  *    may be used to endorse or promote products derived from this software
     21  *    without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
     24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
     27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     33  * THE POSSIBILITY OF SUCH DAMAGE.
     34  *
     35  * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
     36  */
     37 
     38 /*
     39  * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
     40  *
     41  * NetBSD version by:
     42  *
     43  *	Frank van der Linden <fvdl (at) wasabisystems.com>
     44  *	Jason Thorpe <thorpej (at) wasabisystems.com>
     45  *	Jonathan Stone <jonathan (at) dsg.stanford.edu>
     46  *
     47  * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com>
     48  * Senior Engineer, Wind River Systems
     49  */
     50 
     51 /*
     52  * The Broadcom BCM5700 is based on technology originally developed by
     53  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
     54  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
     55  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
     56  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
     57  * frames, highly configurable RX filtering, and 16 RX and TX queues
     58  * (which, along with RX filter rules, can be used for QOS applications).
     59  * Other features, such as TCP segmentation, may be available as part
     60  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
     61  * firmware images can be stored in hardware and need not be compiled
     62  * into the driver.
     63  *
     64  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
     65  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
     66  *
     67  * The BCM5701 is a single-chip solution incorporating both the BCM5700
     68  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
     69  * does not support external SSRAM.
     70  *
     71  * Broadcom also produces a variation of the BCM5700 under the "Altima"
     72  * brand name, which is functionally similar but lacks PCI-X support.
     73  *
      74  * Without external SSRAM, you can have at most 4 TX rings,
     75  * and the use of the mini RX ring is disabled. This seems to imply
     76  * that these features are simply not available on the BCM5701. As a
     77  * result, this driver does not implement any support for the mini RX
     78  * ring.
     79  */
     80 
     81 #include <sys/cdefs.h>
     82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.275 2014/07/24 13:22:49 msaitoh Exp $");
     83 
     84 #include <sys/param.h>
     85 #include <sys/systm.h>
     86 #include <sys/callout.h>
     87 #include <sys/sockio.h>
     88 #include <sys/mbuf.h>
     89 #include <sys/malloc.h>
     90 #include <sys/kernel.h>
     91 #include <sys/device.h>
     92 #include <sys/socket.h>
     93 #include <sys/sysctl.h>
     94 
     95 #include <net/if.h>
     96 #include <net/if_dl.h>
     97 #include <net/if_media.h>
     98 #include <net/if_ether.h>
     99 
    100 #include <sys/rnd.h>
    101 
    102 #ifdef INET
    103 #include <netinet/in.h>
    104 #include <netinet/in_systm.h>
    105 #include <netinet/in_var.h>
    106 #include <netinet/ip.h>
    107 #endif
    108 
    109 /* Headers for TCP Segmentation Offload (TSO) */
    110 #include <netinet/in_systm.h>		/* n_time for <netinet/ip.h>... */
    111 #include <netinet/in.h>			/* ip_{src,dst}, for <netinet/ip.h> */
    112 #include <netinet/ip.h>			/* for struct ip */
    113 #include <netinet/tcp.h>		/* for struct tcphdr */
    114 
    115 
    116 #include <net/bpf.h>
    117 
    118 #include <dev/pci/pcireg.h>
    119 #include <dev/pci/pcivar.h>
    120 #include <dev/pci/pcidevs.h>
    121 
    122 #include <dev/mii/mii.h>
    123 #include <dev/mii/miivar.h>
    124 #include <dev/mii/miidevs.h>
    125 #include <dev/mii/brgphyreg.h>
    126 
    127 #include <dev/pci/if_bgereg.h>
    128 #include <dev/pci/if_bgevar.h>
    129 
    130 #include <prop/proplib.h>
    131 
    132 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
    133 
    134 
    135 /*
    136  * Tunable thresholds for rx-side bge interrupt mitigation.
    137  */
    138 
    139 /*
    140  * The pairs of values below were obtained from empirical measurement
     141  * on bcm5700 rev B2; they are designed to give roughly 1 receive
    142  * interrupt for every N packets received, where N is, approximately,
    143  * the second value (rx_max_bds) in each pair.  The values are chosen
    144  * such that moving from one pair to the succeeding pair was observed
     145  * to roughly halve the interrupt rate under sustained input packet load.
    146  * The values were empirically chosen to avoid overflowing internal
     147  * limits on the bcm5700: increasing rx_ticks much beyond 600
    148  * results in internal wrapping and higher interrupt rates.
    149  * The limit of 46 frames was chosen to match NFS workloads.
    150  *
    151  * These values also work well on bcm5701, bcm5704C, and (less
    152  * tested) bcm5703.  On other chipsets, (including the Altima chip
    153  * family), the larger values may overflow internal chip limits,
    154  * leading to increasing interrupt rates rather than lower interrupt
    155  * rates.
    156  *
    157  * Applications using heavy interrupt mitigation (interrupting every
    158  * 32 or 46 frames) in both directions may need to increase the TCP
     159  * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
    160  * full link bandwidth, due to ACKs and window updates lingering
    161  * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
    162  */
    163 static const struct bge_load_rx_thresh {
    164 	int rx_ticks;
     165 	int rx_max_bds;
     166 } bge_rx_threshes[] = {
    167 	{ 16,   1 },	/* rx_max_bds = 1 disables interrupt mitigation */
    168 	{ 32,   2 },
    169 	{ 50,   4 },
    170 	{ 100,  8 },
    171 	{ 192, 16 },
    172 	{ 416, 32 },
    173 	{ 598, 46 }
    174 };
    175 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
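
         /*
          * A rough worked example (editorial note, figures approximate): a
          * saturated gigabit link carries on the order of 81,000 full-size
          * frames per second, so the most aggressive pair above, { 598, 46 },
          * caps the receive interrupt rate near 81000 / 46, i.e. roughly
          * 1800 interrupts per second.  Assuming the coalescing ticks are in
          * microseconds, as on most BCM570x parts, the 598-tick timer also
          * bounds interrupt latency to about 0.6 ms when traffic is light.
          */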
    176 
    177 /* XXX patchable; should be sysctl'able */
    178 static int bge_auto_thresh = 1;
    179 static int bge_rx_thresh_lvl;
    180 
    181 static int bge_rxthresh_nodenum;
    182 
    183 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
    184 
    185 static uint32_t bge_chipid(const struct pci_attach_args *);
    186 static int bge_probe(device_t, cfdata_t, void *);
    187 static void bge_attach(device_t, device_t, void *);
    188 static int bge_detach(device_t, int);
    189 static void bge_release_resources(struct bge_softc *);
    190 
    191 static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
    192 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
    193 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
    194 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
    195 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
    196 
    197 static void bge_txeof(struct bge_softc *);
    198 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
    199 static void bge_rxeof(struct bge_softc *);
    200 
    201 static void bge_asf_driver_up (struct bge_softc *);
    202 static void bge_tick(void *);
    203 static void bge_stats_update(struct bge_softc *);
    204 static void bge_stats_update_regs(struct bge_softc *);
    205 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
    206 
    207 static int bge_intr(void *);
    208 static void bge_start(struct ifnet *);
    209 static int bge_ifflags_cb(struct ethercom *);
    210 static int bge_ioctl(struct ifnet *, u_long, void *);
    211 static int bge_init(struct ifnet *);
    212 static void bge_stop(struct ifnet *, int);
    213 static void bge_watchdog(struct ifnet *);
    214 static int bge_ifmedia_upd(struct ifnet *);
    215 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    216 
    217 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
    218 static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);
    219 
    220 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
    221 static int bge_read_eeprom(struct bge_softc *, void *, int, int);
    222 static void bge_setmulti(struct bge_softc *);
    223 
    224 static void bge_handle_events(struct bge_softc *);
    225 static int bge_alloc_jumbo_mem(struct bge_softc *);
    226 #if 0 /* XXX */
    227 static void bge_free_jumbo_mem(struct bge_softc *);
    228 #endif
    229 static void *bge_jalloc(struct bge_softc *);
    230 static void bge_jfree(struct mbuf *, void *, size_t, void *);
    231 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
    232 			       bus_dmamap_t);
    233 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
    234 static int bge_init_rx_ring_std(struct bge_softc *);
    235 static void bge_free_rx_ring_std(struct bge_softc *);
    236 static int bge_init_rx_ring_jumbo(struct bge_softc *);
    237 static void bge_free_rx_ring_jumbo(struct bge_softc *);
    238 static void bge_free_tx_ring(struct bge_softc *);
    239 static int bge_init_tx_ring(struct bge_softc *);
    240 
    241 static int bge_chipinit(struct bge_softc *);
    242 static int bge_blockinit(struct bge_softc *);
    243 static int bge_phy_addr(struct bge_softc *);
    244 static uint32_t bge_readmem_ind(struct bge_softc *, int);
    245 static void bge_writemem_ind(struct bge_softc *, int, int);
    246 static void bge_writembx(struct bge_softc *, int, int);
    247 static void bge_writembx_flush(struct bge_softc *, int, int);
    248 static void bge_writemem_direct(struct bge_softc *, int, int);
    249 static void bge_writereg_ind(struct bge_softc *, int, int);
    250 static void bge_set_max_readrq(struct bge_softc *);
    251 
    252 static int bge_miibus_readreg(device_t, int, int);
    253 static void bge_miibus_writereg(device_t, int, int, int);
    254 static void bge_miibus_statchg(struct ifnet *);
    255 
    256 #define BGE_RESET_SHUTDOWN	0
    257 #define	BGE_RESET_START		1
    258 #define	BGE_RESET_SUSPEND	2
    259 static void bge_sig_post_reset(struct bge_softc *, int);
    260 static void bge_sig_legacy(struct bge_softc *, int);
    261 static void bge_sig_pre_reset(struct bge_softc *, int);
    262 static void bge_wait_for_event_ack(struct bge_softc *);
    263 static void bge_stop_fw(struct bge_softc *);
    264 static int bge_reset(struct bge_softc *);
    265 static void bge_link_upd(struct bge_softc *);
    266 static void bge_sysctl_init(struct bge_softc *);
    267 static int bge_sysctl_verify(SYSCTLFN_PROTO);
    268 
    269 static void bge_ape_lock_init(struct bge_softc *);
    270 static void bge_ape_read_fw_ver(struct bge_softc *);
    271 static int bge_ape_lock(struct bge_softc *, int);
    272 static void bge_ape_unlock(struct bge_softc *, int);
    273 static void bge_ape_send_event(struct bge_softc *, uint32_t);
    274 static void bge_ape_driver_state_change(struct bge_softc *, int);
    275 
    276 #ifdef BGE_DEBUG
    277 #define DPRINTF(x)	if (bgedebug) printf x
    278 #define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
    279 #define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x ;} while (0)
    280 int	bgedebug = 0;
    281 int	bge_tso_debug = 0;
    282 void		bge_debug_info(struct bge_softc *);
    283 #else
    284 #define DPRINTF(x)
    285 #define DPRINTFN(n,x)
    286 #define BGE_TSO_PRINTF(x)
    287 #endif
    288 
    289 #ifdef BGE_EVENT_COUNTERS
    290 #define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
    291 #define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
    292 #define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
    293 #else
    294 #define	BGE_EVCNT_INCR(ev)	/* nothing */
    295 #define	BGE_EVCNT_ADD(ev, val)	/* nothing */
    296 #define	BGE_EVCNT_UPD(ev, val)	/* nothing */
    297 #endif
    298 
    299 static const struct bge_product {
    300 	pci_vendor_id_t		bp_vendor;
    301 	pci_product_id_t	bp_product;
    302 	const char		*bp_name;
    303 } bge_products[] = {
    304 	/*
    305 	 * The BCM5700 documentation seems to indicate that the hardware
    306 	 * still has the Alteon vendor ID burned into it, though it
    307 	 * should always be overridden by the value in the EEPROM.  We'll
    308 	 * check for it anyway.
    309 	 */
    310 	{ PCI_VENDOR_ALTEON,
    311 	  PCI_PRODUCT_ALTEON_BCM5700,
    312 	  "Broadcom BCM5700 Gigabit Ethernet",
    313 	  },
    314 	{ PCI_VENDOR_ALTEON,
    315 	  PCI_PRODUCT_ALTEON_BCM5701,
    316 	  "Broadcom BCM5701 Gigabit Ethernet",
    317 	  },
    318 	{ PCI_VENDOR_ALTIMA,
    319 	  PCI_PRODUCT_ALTIMA_AC1000,
    320 	  "Altima AC1000 Gigabit Ethernet",
    321 	  },
    322 	{ PCI_VENDOR_ALTIMA,
    323 	  PCI_PRODUCT_ALTIMA_AC1001,
    324 	  "Altima AC1001 Gigabit Ethernet",
    325 	   },
    326 	{ PCI_VENDOR_ALTIMA,
    327 	  PCI_PRODUCT_ALTIMA_AC1003,
    328 	  "Altima AC1003 Gigabit Ethernet",
    329 	   },
    330 	{ PCI_VENDOR_ALTIMA,
    331 	  PCI_PRODUCT_ALTIMA_AC9100,
    332 	  "Altima AC9100 Gigabit Ethernet",
    333 	  },
    334 	{ PCI_VENDOR_APPLE,
    335 	  PCI_PRODUCT_APPLE_BCM5701,
     336 	  "Apple BCM5701 Gigabit Ethernet",
    337 	  },
    338 	{ PCI_VENDOR_BROADCOM,
    339 	  PCI_PRODUCT_BROADCOM_BCM5700,
    340 	  "Broadcom BCM5700 Gigabit Ethernet",
    341 	  },
    342 	{ PCI_VENDOR_BROADCOM,
    343 	  PCI_PRODUCT_BROADCOM_BCM5701,
    344 	  "Broadcom BCM5701 Gigabit Ethernet",
    345 	  },
    346 	{ PCI_VENDOR_BROADCOM,
    347 	  PCI_PRODUCT_BROADCOM_BCM5702,
    348 	  "Broadcom BCM5702 Gigabit Ethernet",
    349 	  },
    350 	{ PCI_VENDOR_BROADCOM,
    351 	  PCI_PRODUCT_BROADCOM_BCM5702X,
    352 	  "Broadcom BCM5702X Gigabit Ethernet" },
    353 	{ PCI_VENDOR_BROADCOM,
    354 	  PCI_PRODUCT_BROADCOM_BCM5703,
    355 	  "Broadcom BCM5703 Gigabit Ethernet",
    356 	  },
    357 	{ PCI_VENDOR_BROADCOM,
    358 	  PCI_PRODUCT_BROADCOM_BCM5703X,
    359 	  "Broadcom BCM5703X Gigabit Ethernet",
    360 	  },
    361 	{ PCI_VENDOR_BROADCOM,
    362 	  PCI_PRODUCT_BROADCOM_BCM5703_ALT,
    363 	  "Broadcom BCM5703 Gigabit Ethernet",
    364 	  },
    365 	{ PCI_VENDOR_BROADCOM,
    366 	  PCI_PRODUCT_BROADCOM_BCM5704C,
    367 	  "Broadcom BCM5704C Dual Gigabit Ethernet",
    368 	  },
    369 	{ PCI_VENDOR_BROADCOM,
    370 	  PCI_PRODUCT_BROADCOM_BCM5704S,
    371 	  "Broadcom BCM5704S Dual Gigabit Ethernet",
    372 	  },
    373 	{ PCI_VENDOR_BROADCOM,
    374 	  PCI_PRODUCT_BROADCOM_BCM5705,
    375 	  "Broadcom BCM5705 Gigabit Ethernet",
    376 	  },
    377 	{ PCI_VENDOR_BROADCOM,
    378 	  PCI_PRODUCT_BROADCOM_BCM5705F,
    379 	  "Broadcom BCM5705F Gigabit Ethernet",
    380 	  },
    381 	{ PCI_VENDOR_BROADCOM,
    382 	  PCI_PRODUCT_BROADCOM_BCM5705K,
    383 	  "Broadcom BCM5705K Gigabit Ethernet",
    384 	  },
    385 	{ PCI_VENDOR_BROADCOM,
    386 	  PCI_PRODUCT_BROADCOM_BCM5705M,
    387 	  "Broadcom BCM5705M Gigabit Ethernet",
    388 	  },
    389 	{ PCI_VENDOR_BROADCOM,
    390 	  PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
    391 	  "Broadcom BCM5705M Gigabit Ethernet",
    392 	  },
    393 	{ PCI_VENDOR_BROADCOM,
    394 	  PCI_PRODUCT_BROADCOM_BCM5714,
    395 	  "Broadcom BCM5714 Gigabit Ethernet",
    396 	  },
    397 	{ PCI_VENDOR_BROADCOM,
    398 	  PCI_PRODUCT_BROADCOM_BCM5714S,
    399 	  "Broadcom BCM5714S Gigabit Ethernet",
    400 	  },
    401 	{ PCI_VENDOR_BROADCOM,
    402 	  PCI_PRODUCT_BROADCOM_BCM5715,
    403 	  "Broadcom BCM5715 Gigabit Ethernet",
    404 	  },
    405 	{ PCI_VENDOR_BROADCOM,
    406 	  PCI_PRODUCT_BROADCOM_BCM5715S,
    407 	  "Broadcom BCM5715S Gigabit Ethernet",
    408 	  },
    409 	{ PCI_VENDOR_BROADCOM,
    410 	  PCI_PRODUCT_BROADCOM_BCM5717,
    411 	  "Broadcom BCM5717 Gigabit Ethernet",
    412 	  },
    413 	{ PCI_VENDOR_BROADCOM,
    414 	  PCI_PRODUCT_BROADCOM_BCM5718,
    415 	  "Broadcom BCM5718 Gigabit Ethernet",
    416 	  },
    417 	{ PCI_VENDOR_BROADCOM,
    418 	  PCI_PRODUCT_BROADCOM_BCM5719,
    419 	  "Broadcom BCM5719 Gigabit Ethernet",
    420 	  },
    421 	{ PCI_VENDOR_BROADCOM,
    422 	  PCI_PRODUCT_BROADCOM_BCM5720,
    423 	  "Broadcom BCM5720 Gigabit Ethernet",
    424 	  },
    425 	{ PCI_VENDOR_BROADCOM,
    426 	  PCI_PRODUCT_BROADCOM_BCM5721,
    427 	  "Broadcom BCM5721 Gigabit Ethernet",
    428 	  },
    429 	{ PCI_VENDOR_BROADCOM,
    430 	  PCI_PRODUCT_BROADCOM_BCM5722,
    431 	  "Broadcom BCM5722 Gigabit Ethernet",
    432 	  },
    433 	{ PCI_VENDOR_BROADCOM,
    434 	  PCI_PRODUCT_BROADCOM_BCM5723,
    435 	  "Broadcom BCM5723 Gigabit Ethernet",
    436 	  },
    437 	{ PCI_VENDOR_BROADCOM,
    438 	  PCI_PRODUCT_BROADCOM_BCM5724,
    439 	  "Broadcom BCM5724 Gigabit Ethernet",
    440 	  },
    441 	{ PCI_VENDOR_BROADCOM,
    442 	  PCI_PRODUCT_BROADCOM_BCM5750,
    443 	  "Broadcom BCM5750 Gigabit Ethernet",
    444 	  },
    445 	{ PCI_VENDOR_BROADCOM,
    446 	  PCI_PRODUCT_BROADCOM_BCM5750M,
    447 	  "Broadcom BCM5750M Gigabit Ethernet",
    448 	  },
    449 	{ PCI_VENDOR_BROADCOM,
    450 	  PCI_PRODUCT_BROADCOM_BCM5751,
    451 	  "Broadcom BCM5751 Gigabit Ethernet",
    452 	  },
    453 	{ PCI_VENDOR_BROADCOM,
    454 	  PCI_PRODUCT_BROADCOM_BCM5751F,
    455 	  "Broadcom BCM5751F Gigabit Ethernet",
    456 	  },
    457 	{ PCI_VENDOR_BROADCOM,
    458 	  PCI_PRODUCT_BROADCOM_BCM5751M,
    459 	  "Broadcom BCM5751M Gigabit Ethernet",
    460 	  },
    461 	{ PCI_VENDOR_BROADCOM,
    462 	  PCI_PRODUCT_BROADCOM_BCM5752,
    463 	  "Broadcom BCM5752 Gigabit Ethernet",
    464 	  },
    465 	{ PCI_VENDOR_BROADCOM,
    466 	  PCI_PRODUCT_BROADCOM_BCM5752M,
    467 	  "Broadcom BCM5752M Gigabit Ethernet",
    468 	  },
    469 	{ PCI_VENDOR_BROADCOM,
    470 	  PCI_PRODUCT_BROADCOM_BCM5753,
    471 	  "Broadcom BCM5753 Gigabit Ethernet",
    472 	  },
    473 	{ PCI_VENDOR_BROADCOM,
    474 	  PCI_PRODUCT_BROADCOM_BCM5753F,
    475 	  "Broadcom BCM5753F Gigabit Ethernet",
    476 	  },
    477 	{ PCI_VENDOR_BROADCOM,
    478 	  PCI_PRODUCT_BROADCOM_BCM5753M,
    479 	  "Broadcom BCM5753M Gigabit Ethernet",
    480 	  },
    481 	{ PCI_VENDOR_BROADCOM,
    482 	  PCI_PRODUCT_BROADCOM_BCM5754,
    483 	  "Broadcom BCM5754 Gigabit Ethernet",
    484 	},
    485 	{ PCI_VENDOR_BROADCOM,
    486 	  PCI_PRODUCT_BROADCOM_BCM5754M,
    487 	  "Broadcom BCM5754M Gigabit Ethernet",
    488 	},
    489 	{ PCI_VENDOR_BROADCOM,
    490 	  PCI_PRODUCT_BROADCOM_BCM5755,
    491 	  "Broadcom BCM5755 Gigabit Ethernet",
    492 	},
    493 	{ PCI_VENDOR_BROADCOM,
    494 	  PCI_PRODUCT_BROADCOM_BCM5755M,
    495 	  "Broadcom BCM5755M Gigabit Ethernet",
    496 	},
    497 	{ PCI_VENDOR_BROADCOM,
    498 	  PCI_PRODUCT_BROADCOM_BCM5756,
    499 	  "Broadcom BCM5756 Gigabit Ethernet",
    500 	},
    501 	{ PCI_VENDOR_BROADCOM,
    502 	  PCI_PRODUCT_BROADCOM_BCM5761,
    503 	  "Broadcom BCM5761 Gigabit Ethernet",
    504 	},
    505 	{ PCI_VENDOR_BROADCOM,
    506 	  PCI_PRODUCT_BROADCOM_BCM5761E,
    507 	  "Broadcom BCM5761E Gigabit Ethernet",
    508 	},
    509 	{ PCI_VENDOR_BROADCOM,
    510 	  PCI_PRODUCT_BROADCOM_BCM5761S,
    511 	  "Broadcom BCM5761S Gigabit Ethernet",
    512 	},
    513 	{ PCI_VENDOR_BROADCOM,
    514 	  PCI_PRODUCT_BROADCOM_BCM5761SE,
    515 	  "Broadcom BCM5761SE Gigabit Ethernet",
    516 	},
    517 	{ PCI_VENDOR_BROADCOM,
    518 	  PCI_PRODUCT_BROADCOM_BCM5764,
    519 	  "Broadcom BCM5764 Gigabit Ethernet",
    520 	  },
    521 	{ PCI_VENDOR_BROADCOM,
    522 	  PCI_PRODUCT_BROADCOM_BCM5780,
    523 	  "Broadcom BCM5780 Gigabit Ethernet",
    524 	  },
    525 	{ PCI_VENDOR_BROADCOM,
    526 	  PCI_PRODUCT_BROADCOM_BCM5780S,
    527 	  "Broadcom BCM5780S Gigabit Ethernet",
    528 	  },
    529 	{ PCI_VENDOR_BROADCOM,
    530 	  PCI_PRODUCT_BROADCOM_BCM5781,
    531 	  "Broadcom BCM5781 Gigabit Ethernet",
    532 	  },
    533 	{ PCI_VENDOR_BROADCOM,
    534 	  PCI_PRODUCT_BROADCOM_BCM5782,
    535 	  "Broadcom BCM5782 Gigabit Ethernet",
    536 	},
    537 	{ PCI_VENDOR_BROADCOM,
    538 	  PCI_PRODUCT_BROADCOM_BCM5784M,
    539 	  "BCM5784M NetLink 1000baseT Ethernet",
    540 	},
    541 	{ PCI_VENDOR_BROADCOM,
    542 	  PCI_PRODUCT_BROADCOM_BCM5785F,
    543 	  "BCM5785F NetLink 10/100 Ethernet",
    544 	},
    545 	{ PCI_VENDOR_BROADCOM,
    546 	  PCI_PRODUCT_BROADCOM_BCM5785G,
    547 	  "BCM5785G NetLink 1000baseT Ethernet",
    548 	},
    549 	{ PCI_VENDOR_BROADCOM,
    550 	  PCI_PRODUCT_BROADCOM_BCM5786,
    551 	  "Broadcom BCM5786 Gigabit Ethernet",
    552 	},
    553 	{ PCI_VENDOR_BROADCOM,
    554 	  PCI_PRODUCT_BROADCOM_BCM5787,
    555 	  "Broadcom BCM5787 Gigabit Ethernet",
    556 	},
    557 	{ PCI_VENDOR_BROADCOM,
    558 	  PCI_PRODUCT_BROADCOM_BCM5787F,
    559 	  "Broadcom BCM5787F 10/100 Ethernet",
    560 	},
    561 	{ PCI_VENDOR_BROADCOM,
    562 	  PCI_PRODUCT_BROADCOM_BCM5787M,
    563 	  "Broadcom BCM5787M Gigabit Ethernet",
    564 	},
    565 	{ PCI_VENDOR_BROADCOM,
    566 	  PCI_PRODUCT_BROADCOM_BCM5788,
    567 	  "Broadcom BCM5788 Gigabit Ethernet",
    568 	  },
    569 	{ PCI_VENDOR_BROADCOM,
    570 	  PCI_PRODUCT_BROADCOM_BCM5789,
    571 	  "Broadcom BCM5789 Gigabit Ethernet",
    572 	  },
    573 	{ PCI_VENDOR_BROADCOM,
    574 	  PCI_PRODUCT_BROADCOM_BCM5901,
    575 	  "Broadcom BCM5901 Fast Ethernet",
    576 	  },
    577 	{ PCI_VENDOR_BROADCOM,
    578 	  PCI_PRODUCT_BROADCOM_BCM5901A2,
    579 	  "Broadcom BCM5901A2 Fast Ethernet",
    580 	  },
    581 	{ PCI_VENDOR_BROADCOM,
    582 	  PCI_PRODUCT_BROADCOM_BCM5903M,
    583 	  "Broadcom BCM5903M Fast Ethernet",
    584 	  },
    585 	{ PCI_VENDOR_BROADCOM,
    586 	  PCI_PRODUCT_BROADCOM_BCM5906,
    587 	  "Broadcom BCM5906 Fast Ethernet",
    588 	  },
    589 	{ PCI_VENDOR_BROADCOM,
    590 	  PCI_PRODUCT_BROADCOM_BCM5906M,
    591 	  "Broadcom BCM5906M Fast Ethernet",
    592 	  },
    593 	{ PCI_VENDOR_BROADCOM,
    594 	  PCI_PRODUCT_BROADCOM_BCM57760,
    595 	  "Broadcom BCM57760 Fast Ethernet",
    596 	  },
    597 	{ PCI_VENDOR_BROADCOM,
    598 	  PCI_PRODUCT_BROADCOM_BCM57761,
    599 	  "Broadcom BCM57761 Fast Ethernet",
    600 	  },
    601 	{ PCI_VENDOR_BROADCOM,
    602 	  PCI_PRODUCT_BROADCOM_BCM57762,
    603 	  "Broadcom BCM57762 Gigabit Ethernet",
    604 	  },
    605 	{ PCI_VENDOR_BROADCOM,
    606 	  PCI_PRODUCT_BROADCOM_BCM57765,
    607 	  "Broadcom BCM57765 Fast Ethernet",
    608 	  },
    609 	{ PCI_VENDOR_BROADCOM,
    610 	  PCI_PRODUCT_BROADCOM_BCM57766,
    611 	  "Broadcom BCM57766 Fast Ethernet",
    612 	  },
    613 	{ PCI_VENDOR_BROADCOM,
    614 	  PCI_PRODUCT_BROADCOM_BCM57780,
    615 	  "Broadcom BCM57780 Fast Ethernet",
    616 	  },
    617 	{ PCI_VENDOR_BROADCOM,
    618 	  PCI_PRODUCT_BROADCOM_BCM57781,
    619 	  "Broadcom BCM57781 Fast Ethernet",
    620 	  },
    621 	{ PCI_VENDOR_BROADCOM,
    622 	  PCI_PRODUCT_BROADCOM_BCM57782,
    623 	  "Broadcom BCM57782 Fast Ethernet",
    624 	  },
    625 	{ PCI_VENDOR_BROADCOM,
    626 	  PCI_PRODUCT_BROADCOM_BCM57785,
    627 	  "Broadcom BCM57785 Fast Ethernet",
    628 	  },
    629 	{ PCI_VENDOR_BROADCOM,
    630 	  PCI_PRODUCT_BROADCOM_BCM57786,
    631 	  "Broadcom BCM57786 Fast Ethernet",
    632 	  },
    633 	{ PCI_VENDOR_BROADCOM,
    634 	  PCI_PRODUCT_BROADCOM_BCM57788,
    635 	  "Broadcom BCM57788 Fast Ethernet",
    636 	  },
    637 	{ PCI_VENDOR_BROADCOM,
    638 	  PCI_PRODUCT_BROADCOM_BCM57790,
    639 	  "Broadcom BCM57790 Fast Ethernet",
    640 	  },
    641 	{ PCI_VENDOR_BROADCOM,
    642 	  PCI_PRODUCT_BROADCOM_BCM57791,
    643 	  "Broadcom BCM57791 Fast Ethernet",
    644 	  },
    645 	{ PCI_VENDOR_BROADCOM,
    646 	  PCI_PRODUCT_BROADCOM_BCM57795,
    647 	  "Broadcom BCM57795 Fast Ethernet",
    648 	  },
    649 	{ PCI_VENDOR_SCHNEIDERKOCH,
    650 	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
    651 	  "SysKonnect SK-9Dx1 Gigabit Ethernet",
    652 	  },
    653 	{ PCI_VENDOR_3COM,
    654 	  PCI_PRODUCT_3COM_3C996,
    655 	  "3Com 3c996 Gigabit Ethernet",
    656 	  },
    657 	{ PCI_VENDOR_FUJITSU4,
    658 	  PCI_PRODUCT_FUJITSU4_PW008GE4,
    659 	  "Fujitsu PW008GE4 Gigabit Ethernet",
    660 	  },
    661 	{ PCI_VENDOR_FUJITSU4,
    662 	  PCI_PRODUCT_FUJITSU4_PW008GE5,
    663 	  "Fujitsu PW008GE5 Gigabit Ethernet",
    664 	  },
    665 	{ PCI_VENDOR_FUJITSU4,
    666 	  PCI_PRODUCT_FUJITSU4_PP250_450_LAN,
    667 	  "Fujitsu Primepower 250/450 Gigabit Ethernet",
    668 	  },
    669 	{ 0,
    670 	  0,
    671 	  NULL },
    672 };
    673 
    674 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGEF_JUMBO_CAPABLE)
    675 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGEF_5700_FAMILY)
    676 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGEF_5705_PLUS)
    677 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGEF_5714_FAMILY)
    678 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGEF_575X_PLUS)
    679 #define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGEF_5755_PLUS)
    680 #define BGE_IS_57765_FAMILY(sc)		((sc)->bge_flags & BGEF_57765_FAMILY)
    681 #define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGEF_57765_PLUS)
    682 #define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGEF_5717_PLUS)
    683 
    684 static const struct bge_revision {
    685 	uint32_t		br_chipid;
    686 	const char		*br_name;
    687 } bge_revisions[] = {
    688 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
    689 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
    690 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
    691 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
    692 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
    693 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
    694 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
    695 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
    696 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
    697 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
    698 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
    699 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
    700 	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
    701 	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
    702 	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
    703 	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
    704 	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
    705 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
    706 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
    707 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
    708 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
    709 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
    710 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
    711 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
    712 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
    713 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
    714 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
    715 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
    716 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
    717 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
    718 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
    719 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
    720 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
    721 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
    722 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
    723 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
    724 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
    725 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
    726 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
    727 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
    728 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
    729 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
    730 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
    731 	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
    732 	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
    733 	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
    734 	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
    735 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
    736 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
    737 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
    738 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
    739 	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
    740 	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
    741 	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
    742 	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
    743 	/* 5754 and 5787 share the same ASIC ID */
    744 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
    745 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
    746 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
    747 	{ BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
    748 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
    749 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
    750 	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
    751 	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
    752 	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
    753 	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
    754 
    755 	{ 0, NULL }
    756 };
    757 
    758 /*
    759  * Some defaults for major revisions, so that newer steppings
    760  * that we don't know about have a shot at working.
    761  */
    762 static const struct bge_revision bge_majorrevs[] = {
    763 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
    764 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
    765 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
    766 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
    767 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
    768 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
    769 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
    770 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
    771 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
    772 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
    773 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
    774 	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
    775 	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
    776 	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
    777 	/* 5754 and 5787 share the same ASIC ID */
    778 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
    779 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
    780 	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
    781 	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
    782 	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
    783 	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
    784 	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
    785 	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
    786 
    787 	{ 0, NULL }
    788 };
    789 
    790 static int bge_allow_asf = 1;
    791 
    792 CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc),
    793     bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    794 
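         /*
          * Editorial note: the two helpers below implement indirect access to
          * the chip's internal memory.  The target offset is written to the
          * PCI memory-window base register, the data is transferred through
          * the memory-window data register, and the window is then pointed
          * back at offset 0.
          */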
    795 static uint32_t
    796 bge_readmem_ind(struct bge_softc *sc, int off)
    797 {
    798 	pcireg_t val;
    799 
    800 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
    801 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
    802 		return 0;
    803 
    804 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
    805 	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
    806 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
    807 	return val;
    808 }
    809 
    810 static void
    811 bge_writemem_ind(struct bge_softc *sc, int off, int val)
    812 {
    813 
    814 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
    815 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
    816 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
    817 }
    818 
    819 /*
    820  * PCI Express only
    821  */
    822 static void
    823 bge_set_max_readrq(struct bge_softc *sc)
    824 {
    825 	pcireg_t val;
    826 
    827 	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
    828 	    + PCIE_DCSR);
    829 	val &= ~PCIE_DCSR_MAX_READ_REQ;
    830 	switch (sc->bge_expmrq) {
    831 	case 2048:
    832 		val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048;
    833 		break;
    834 	case 4096:
    835 		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
    836 		break;
    837 	default:
     838 		panic("incorrect expmrq value (%d)", sc->bge_expmrq);
    839 		break;
    840 	}
    841 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
    842 	    + PCIE_DCSR, val);
    843 }
    844 
    845 #ifdef notdef
    846 static uint32_t
    847 bge_readreg_ind(struct bge_softc *sc, int off)
    848 {
    849 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
    850 	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
    851 }
    852 #endif
    853 
    854 static void
    855 bge_writereg_ind(struct bge_softc *sc, int off, int val)
    856 {
    857 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
    858 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
    859 }
    860 
    861 static void
    862 bge_writemem_direct(struct bge_softc *sc, int off, int val)
    863 {
    864 	CSR_WRITE_4(sc, off, val);
    865 }
    866 
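         /*
          * Mailbox writes (editorial note).  On the BCM5906 the driver uses
          * the low-priority mailbox range, so the offset is remapped before
          * the write; the _flush variant below additionally flushes the
          * posted write before returning.
          */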
    867 static void
    868 bge_writembx(struct bge_softc *sc, int off, int val)
    869 {
    870 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
    871 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
    872 
    873 	CSR_WRITE_4(sc, off, val);
    874 }
    875 
    876 static void
    877 bge_writembx_flush(struct bge_softc *sc, int off, int val)
    878 {
    879 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
    880 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
    881 
    882 	CSR_WRITE_4_FLUSH(sc, off, val);
    883 }
    884 
    885 /*
    886  * Clear all stale locks and select the lock for this driver instance.
    887  */
    888 void
    889 bge_ape_lock_init(struct bge_softc *sc)
    890 {
    891 	struct pci_attach_args *pa = &(sc->bge_pa);
    892 	uint32_t bit, regbase;
    893 	int i;
    894 
    895 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
    896 		regbase = BGE_APE_LOCK_GRANT;
    897 	else
    898 		regbase = BGE_APE_PER_LOCK_GRANT;
    899 
    900 	/* Clear any stale locks. */
    901 	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
    902 		switch (i) {
    903 		case BGE_APE_LOCK_PHY0:
    904 		case BGE_APE_LOCK_PHY1:
    905 		case BGE_APE_LOCK_PHY2:
    906 		case BGE_APE_LOCK_PHY3:
    907 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
    908 			break;
    909 		default:
    910 			if (pa->pa_function == 0)
    911 				bit = BGE_APE_LOCK_GRANT_DRIVER0;
    912 			else
    913 				bit = (1 << pa->pa_function);
    914 		}
    915 		APE_WRITE_4(sc, regbase + 4 * i, bit);
    916 	}
    917 
    918 	/* Select the PHY lock based on the device's function number. */
    919 	switch (pa->pa_function) {
    920 	case 0:
    921 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
    922 		break;
    923 	case 1:
    924 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
    925 		break;
    926 	case 2:
    927 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
    928 		break;
    929 	case 3:
    930 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
    931 		break;
    932 	default:
     933 		printf("%s: PHY lock not supported on this function\n",
    934 		    device_xname(sc->bge_dev));
    935 		break;
    936 	}
    937 }
    938 
    939 /*
    940  * Check for APE firmware, set flags, and print version info.
    941  */
    942 void
    943 bge_ape_read_fw_ver(struct bge_softc *sc)
    944 {
    945 	const char *fwtype;
    946 	uint32_t apedata, features;
    947 
    948 	/* Check for a valid APE signature in shared memory. */
    949 	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
    950 	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
     951 		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
    952 		return;
    953 	}
    954 
    955 	/* Check if APE firmware is running. */
    956 	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
    957 	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
    958 		printf("%s: APE signature found but FW status not ready! "
    959 		    "0x%08x\n", device_xname(sc->bge_dev), apedata);
    960 		return;
    961 	}
    962 
    963 	sc->bge_mfw_flags |= BGE_MFW_ON_APE;
    964 
     965 	/* Fetch the APE firmware type and version. */
    966 	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
    967 	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
    968 	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
    969 		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
    970 		fwtype = "NCSI";
    971 	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
    972 		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
    973 		fwtype = "DASH";
    974 	} else
    975 		fwtype = "UNKN";
    976 
    977 	/* Print the APE firmware version. */
    978 	aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype,
    979 	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
    980 	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
    981 	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
    982 	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
    983 }
    984 
    985 int
    986 bge_ape_lock(struct bge_softc *sc, int locknum)
    987 {
    988 	struct pci_attach_args *pa = &(sc->bge_pa);
    989 	uint32_t bit, gnt, req, status;
    990 	int i, off;
    991 
    992 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
    993 		return (0);
    994 
    995 	/* Lock request/grant registers have different bases. */
    996 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
    997 		req = BGE_APE_LOCK_REQ;
    998 		gnt = BGE_APE_LOCK_GRANT;
    999 	} else {
   1000 		req = BGE_APE_PER_LOCK_REQ;
   1001 		gnt = BGE_APE_PER_LOCK_GRANT;
   1002 	}
   1003 
   1004 	off = 4 * locknum;
   1005 
   1006 	switch (locknum) {
   1007 	case BGE_APE_LOCK_GPIO:
   1008 		/* Lock required when using GPIO. */
   1009 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
   1010 			return (0);
   1011 		if (pa->pa_function == 0)
   1012 			bit = BGE_APE_LOCK_REQ_DRIVER0;
   1013 		else
   1014 			bit = (1 << pa->pa_function);
   1015 		break;
   1016 	case BGE_APE_LOCK_GRC:
   1017 		/* Lock required to reset the device. */
   1018 		if (pa->pa_function == 0)
   1019 			bit = BGE_APE_LOCK_REQ_DRIVER0;
   1020 		else
   1021 			bit = (1 << pa->pa_function);
   1022 		break;
   1023 	case BGE_APE_LOCK_MEM:
   1024 		/* Lock required when accessing certain APE memory. */
   1025 		if (pa->pa_function == 0)
   1026 			bit = BGE_APE_LOCK_REQ_DRIVER0;
   1027 		else
   1028 			bit = (1 << pa->pa_function);
   1029 		break;
   1030 	case BGE_APE_LOCK_PHY0:
   1031 	case BGE_APE_LOCK_PHY1:
   1032 	case BGE_APE_LOCK_PHY2:
   1033 	case BGE_APE_LOCK_PHY3:
   1034 		/* Lock required when accessing PHYs. */
   1035 		bit = BGE_APE_LOCK_REQ_DRIVER0;
   1036 		break;
   1037 	default:
   1038 		return (EINVAL);
   1039 	}
   1040 
   1041 	/* Request a lock. */
   1042 	APE_WRITE_4_FLUSH(sc, req + off, bit);
   1043 
   1044 	/* Wait up to 1 second to acquire lock. */
   1045 	for (i = 0; i < 20000; i++) {
   1046 		status = APE_READ_4(sc, gnt + off);
   1047 		if (status == bit)
   1048 			break;
   1049 		DELAY(50);
   1050 	}
   1051 
   1052 	/* Handle any errors. */
   1053 	if (status != bit) {
   1054 		printf("%s: APE lock %d request failed! "
   1055 		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
   1056 		    device_xname(sc->bge_dev),
   1057 		    locknum, req + off, bit & 0xFFFF, gnt + off,
   1058 		    status & 0xFFFF);
   1059 		/* Revoke the lock request. */
   1060 		APE_WRITE_4(sc, gnt + off, bit);
   1061 		return (EBUSY);
   1062 	}
   1063 
   1064 	return (0);
   1065 }
   1066 
   1067 void
   1068 bge_ape_unlock(struct bge_softc *sc, int locknum)
   1069 {
   1070 	struct pci_attach_args *pa = &(sc->bge_pa);
   1071 	uint32_t bit, gnt;
   1072 	int off;
   1073 
   1074 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
   1075 		return;
   1076 
   1077 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
   1078 		gnt = BGE_APE_LOCK_GRANT;
   1079 	else
   1080 		gnt = BGE_APE_PER_LOCK_GRANT;
   1081 
   1082 	off = 4 * locknum;
   1083 
   1084 	switch (locknum) {
   1085 	case BGE_APE_LOCK_GPIO:
   1086 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
   1087 			return;
   1088 		if (pa->pa_function == 0)
   1089 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
   1090 		else
   1091 			bit = (1 << pa->pa_function);
   1092 		break;
   1093 	case BGE_APE_LOCK_GRC:
   1094 		if (pa->pa_function == 0)
   1095 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
   1096 		else
   1097 			bit = (1 << pa->pa_function);
   1098 		break;
   1099 	case BGE_APE_LOCK_MEM:
   1100 		if (pa->pa_function == 0)
   1101 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
   1102 		else
   1103 			bit = (1 << pa->pa_function);
   1104 		break;
   1105 	case BGE_APE_LOCK_PHY0:
   1106 	case BGE_APE_LOCK_PHY1:
   1107 	case BGE_APE_LOCK_PHY2:
   1108 	case BGE_APE_LOCK_PHY3:
   1109 		bit = BGE_APE_LOCK_GRANT_DRIVER0;
   1110 		break;
   1111 	default:
   1112 		return;
   1113 	}
   1114 
   1115 	/* Write and flush for consecutive bge_ape_lock() */
   1116 	APE_WRITE_4_FLUSH(sc, gnt + off, bit);
   1117 }
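         /*
          * Typical usage, as in bge_miibus_readreg() below: take the
          * per-function PHY lock, perform the MDIO access, then release it,
          * bailing out if the APE firmware refuses the grant:
          *
          *	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
          *		return 0;
          *	... access BGE_MI_COMM ...
          *	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
          */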
   1118 
   1119 /*
   1120  * Send an event to the APE firmware.
   1121  */
   1122 void
   1123 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
   1124 {
   1125 	uint32_t apedata;
   1126 	int i;
   1127 
   1128 	/* NCSI does not support APE events. */
   1129 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
   1130 		return;
   1131 
   1132 	/* Wait up to 1ms for APE to service previous event. */
   1133 	for (i = 10; i > 0; i--) {
   1134 		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
   1135 			break;
   1136 		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
   1137 		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
   1138 			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
   1139 			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
   1140 			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
   1141 			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
   1142 			break;
   1143 		}
   1144 		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
   1145 		DELAY(100);
   1146 	}
   1147 	if (i == 0) {
   1148 		printf("%s: APE event 0x%08x send timed out\n",
   1149 		    device_xname(sc->bge_dev), event);
   1150 	}
   1151 }
   1152 
   1153 void
   1154 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
   1155 {
   1156 	uint32_t apedata, event;
   1157 
   1158 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
   1159 		return;
   1160 
   1161 	switch (kind) {
   1162 	case BGE_RESET_START:
   1163 		/* If this is the first load, clear the load counter. */
   1164 		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
   1165 		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
   1166 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
   1167 		else {
   1168 			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
   1169 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
   1170 		}
   1171 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
   1172 		    BGE_APE_HOST_SEG_SIG_MAGIC);
   1173 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
   1174 		    BGE_APE_HOST_SEG_LEN_MAGIC);
   1175 
   1176 		/* Add some version info if bge(4) supports it. */
   1177 		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
   1178 		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
   1179 		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
   1180 		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
   1181 		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
   1182 		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
   1183 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
   1184 		    BGE_APE_HOST_DRVR_STATE_START);
   1185 		event = BGE_APE_EVENT_STATUS_STATE_START;
   1186 		break;
   1187 	case BGE_RESET_SHUTDOWN:
   1188 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
   1189 		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
   1190 		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
   1191 		break;
   1192 	case BGE_RESET_SUSPEND:
   1193 		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
   1194 		break;
   1195 	default:
   1196 		return;
   1197 	}
   1198 
   1199 	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
   1200 	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
   1201 }
   1202 
   1203 static uint8_t
   1204 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
   1205 {
   1206 	uint32_t access, byte = 0;
   1207 	int i;
   1208 
   1209 	/* Lock. */
   1210 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
   1211 	for (i = 0; i < 8000; i++) {
   1212 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
   1213 			break;
   1214 		DELAY(20);
   1215 	}
   1216 	if (i == 8000)
   1217 		return 1;
   1218 
   1219 	/* Enable access. */
   1220 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
   1221 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
   1222 
   1223 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
   1224 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
   1225 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
   1226 		DELAY(10);
   1227 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
   1228 			DELAY(10);
   1229 			break;
   1230 		}
   1231 	}
   1232 
   1233 	if (i == BGE_TIMEOUT * 10) {
   1234 		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
   1235 		return 1;
   1236 	}
   1237 
   1238 	/* Get result. */
   1239 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
   1240 
   1241 	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
   1242 
   1243 	/* Disable access. */
   1244 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
   1245 
   1246 	/* Unlock. */
   1247 	CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
   1248 
   1249 	return 0;
   1250 }
   1251 
   1252 /*
   1253  * Read a sequence of bytes from NVRAM.
   1254  */
   1255 static int
   1256 bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
   1257 {
   1258 	int error = 0, i;
   1259 	uint8_t byte = 0;
   1260 
   1261 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
   1262 		return 1;
   1263 
   1264 	for (i = 0; i < cnt; i++) {
   1265 		error = bge_nvram_getbyte(sc, off + i, &byte);
   1266 		if (error)
   1267 			break;
   1268 		*(dest + i) = byte;
   1269 	}
   1270 
   1271 	return (error ? 1 : 0);
   1272 }
   1273 
   1274 /*
   1275  * Read a byte of data stored in the EEPROM at address 'addr.' The
   1276  * BCM570x supports both the traditional bitbang interface and an
   1277  * auto access interface for reading the EEPROM. We use the auto
   1278  * access method.
   1279  */
   1280 static uint8_t
   1281 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
   1282 {
   1283 	int i;
   1284 	uint32_t byte = 0;
   1285 
   1286 	/*
   1287 	 * Enable use of auto EEPROM access so we can avoid
   1288 	 * having to use the bitbang method.
   1289 	 */
   1290 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
   1291 
   1292 	/* Reset the EEPROM, load the clock period. */
   1293 	CSR_WRITE_4(sc, BGE_EE_ADDR,
   1294 	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
   1295 	DELAY(20);
   1296 
   1297 	/* Issue the read EEPROM command. */
   1298 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
   1299 
   1300 	/* Wait for completion */
   1301 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
   1302 		DELAY(10);
   1303 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
   1304 			break;
   1305 	}
   1306 
   1307 	if (i == BGE_TIMEOUT * 10) {
   1308 		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
   1309 		return 1;
   1310 	}
   1311 
   1312 	/* Get result. */
   1313 	byte = CSR_READ_4(sc, BGE_EE_DATA);
   1314 
   1315 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
   1316 
   1317 	return 0;
   1318 }
   1319 
   1320 /*
   1321  * Read a sequence of bytes from the EEPROM.
   1322  */
   1323 static int
   1324 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
   1325 {
   1326 	int error = 0, i;
   1327 	uint8_t byte = 0;
   1328 	char *dest = destv;
   1329 
   1330 	for (i = 0; i < cnt; i++) {
   1331 		error = bge_eeprom_getbyte(sc, off + i, &byte);
   1332 		if (error)
   1333 			break;
   1334 		*(dest + i) = byte;
   1335 	}
   1336 
   1337 	return (error ? 1 : 0);
   1338 }
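         /*
          * Editorial note: the bge_get_eaddr_*() helpers declared near the
          * top of this file recover the station address, presumably through
          * the NVRAM and EEPROM byte readers above; see those helpers for
          * the exact offsets used.
          */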
   1339 
   1340 static int
   1341 bge_miibus_readreg(device_t dev, int phy, int reg)
   1342 {
   1343 	struct bge_softc *sc = device_private(dev);
   1344 	uint32_t val;
   1345 	uint32_t autopoll;
   1346 	int i;
   1347 
   1348 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
   1349 		return 0;
   1350 
   1351 	/* Reading with autopolling on may trigger PCI errors */
   1352 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
   1353 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
   1354 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
   1355 		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
   1356 		DELAY(80);
   1357 	}
   1358 
   1359 	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
   1360 	    BGE_MIPHY(phy) | BGE_MIREG(reg));
   1361 
   1362 	for (i = 0; i < BGE_TIMEOUT; i++) {
   1363 		delay(10);
   1364 		val = CSR_READ_4(sc, BGE_MI_COMM);
   1365 		if (!(val & BGE_MICOMM_BUSY)) {
   1366 			DELAY(5);
   1367 			val = CSR_READ_4(sc, BGE_MI_COMM);
   1368 			break;
   1369 		}
   1370 	}
   1371 
   1372 	if (i == BGE_TIMEOUT) {
   1373 		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
   1374 		val = 0;
   1375 		goto done;
   1376 	}
   1377 
   1378 done:
   1379 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
   1380 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
   1381 		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
   1382 		DELAY(80);
   1383 	}
   1384 
   1385 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
   1386 
   1387 	if (val & BGE_MICOMM_READFAIL)
   1388 		return 0;
   1389 
   1390 	return (val & 0xFFFF);
   1391 }
   1392 
   1393 static void
   1394 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
   1395 {
   1396 	struct bge_softc *sc = device_private(dev);
   1397 	uint32_t autopoll;
   1398 	int i;
   1399 
   1400 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
   1401 		return;
   1402 
   1403 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
   1404 	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
   1405 		return;
   1406 
    1407 	/* Writing with autopolling on may trigger PCI errors */
   1408 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
   1409 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
   1410 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
   1411 		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
   1412 		DELAY(80);
   1413 	}
   1414 
   1415 	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
   1416 	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
   1417 
   1418 	for (i = 0; i < BGE_TIMEOUT; i++) {
   1419 		delay(10);
   1420 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
   1421 			delay(5);
   1422 			CSR_READ_4(sc, BGE_MI_COMM);
   1423 			break;
   1424 		}
   1425 	}
   1426 
   1427 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
   1428 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
   1429 		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
   1430 		delay(80);
   1431 	}
   1432 
   1433 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
   1434 
   1435 	if (i == BGE_TIMEOUT)
    1436 		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
   1437 }
   1438 
   1439 static void
   1440 bge_miibus_statchg(struct ifnet *ifp)
   1441 {
   1442 	struct bge_softc *sc = ifp->if_softc;
   1443 	struct mii_data *mii = &sc->bge_mii;
   1444 	uint32_t mac_mode, rx_mode, tx_mode;
   1445 
   1446 	/*
   1447 	 * Get flow control negotiation result.
   1448 	 */
   1449 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   1450 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
   1451 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   1452 
   1453 	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
   1454 	    mii->mii_media_status & IFM_ACTIVE &&
   1455 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
   1456 		BGE_STS_SETBIT(sc, BGE_STS_LINK);
   1457 	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
   1458 	    (!(mii->mii_media_status & IFM_ACTIVE) ||
   1459 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
   1460 		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   1461 
   1462 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
   1463 		return;
   1464 
   1465 	/* Set the port mode (MII/GMII) to match the link speed. */
   1466 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
   1467 	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
   1468 	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
   1469 	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
   1470 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
   1471 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
   1472 		mac_mode |= BGE_PORTMODE_GMII;
   1473 	else
   1474 		mac_mode |= BGE_PORTMODE_MII;
   1475 
   1476 	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
   1477 	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
   1478 	if ((mii->mii_media_active & IFM_FDX) != 0) {
   1479 		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
   1480 			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
   1481 		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
   1482 			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
   1483 	} else
   1484 		mac_mode |= BGE_MACMODE_HALF_DUPLEX;
   1485 
   1486 	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode);
   1487 	DELAY(40);
   1488 	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
   1489 	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
   1490 }
   1491 
   1492 /*
   1493  * Update rx threshold levels to values in a particular slot
   1494  * of the interrupt-mitigation table bge_rx_threshes.
   1495  */
   1496 static void
   1497 bge_set_thresh(struct ifnet *ifp, int lvl)
   1498 {
   1499 	struct bge_softc *sc = ifp->if_softc;
   1500 	int s;
   1501 
   1502 	/* For now, just save the new Rx-intr thresholds and record
   1503 	 * that a threshold update is pending.  Updating the hardware
   1504 	 * registers here (even at splhigh()) is observed to
    1505 	 * occasionally cause glitches where Rx-interrupts are not
   1506 	 * honoured for up to 10 seconds. jonathan (at) NetBSD.org, 2003-04-05
   1507 	 */
   1508 	s = splnet();
   1509 	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
   1510 	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
   1511 	sc->bge_pending_rxintr_change = 1;
   1512 	splx(s);
   1513 }
   1514 
   1515 
   1516 /*
   1517  * Update Rx thresholds of all bge devices
   1518  */
   1519 static void
   1520 bge_update_all_threshes(int lvl)
   1521 {
   1522 	struct ifnet *ifp;
   1523 	const char * const namebuf = "bge";
   1524 	int namelen;
   1525 
   1526 	if (lvl < 0)
   1527 		lvl = 0;
   1528 	else if (lvl >= NBGE_RX_THRESH)
   1529 		lvl = NBGE_RX_THRESH - 1;
   1530 
   1531 	namelen = strlen(namebuf);
   1532 	/*
   1533 	 * Now search all the interfaces for this name/number
   1534 	 */
   1535 	IFNET_FOREACH(ifp) {
   1536 		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
   1537 		      continue;
   1538 		/* We got a match: update if doing auto-threshold-tuning */
   1539 		if (bge_auto_thresh)
   1540 			bge_set_thresh(ifp, lvl);
   1541 	}
   1542 }
   1543 
   1544 /*
    1545  * Handle events that have triggered interrupts.  (Currently a no-op.)
   1546  */
   1547 static void
   1548 bge_handle_events(struct bge_softc *sc)
   1549 {
   1550 
   1551 	return;
   1552 }
   1553 
   1554 /*
   1555  * Memory management for jumbo frames.
   1556  */
   1557 
   1558 static int
   1559 bge_alloc_jumbo_mem(struct bge_softc *sc)
   1560 {
   1561 	char *ptr, *kva;
   1562 	bus_dma_segment_t	seg;
   1563 	int		i, rseg, state, error;
   1564 	struct bge_jpool_entry   *entry;
   1565 
   1566 	state = error = 0;
   1567 
   1568 	/* Grab a big chunk o' storage. */
   1569 	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
   1570 	     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
   1571 		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
   1572 		return ENOBUFS;
   1573 	}
   1574 
   1575 	state = 1;
   1576 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
   1577 	    BUS_DMA_NOWAIT)) {
   1578 		aprint_error_dev(sc->bge_dev,
   1579 		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
   1580 		error = ENOBUFS;
   1581 		goto out;
   1582 	}
   1583 
   1584 	state = 2;
   1585 	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
   1586 	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
   1587 		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
   1588 		error = ENOBUFS;
   1589 		goto out;
   1590 	}
   1591 
   1592 	state = 3;
   1593 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
   1594 	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
   1595 		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
   1596 		error = ENOBUFS;
   1597 		goto out;
   1598 	}
   1599 
   1600 	state = 4;
   1601 	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
   1602 	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));
   1603 
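         	/* Track which jumbo slots are free and which are lent out to mbufs. */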
   1604 	SLIST_INIT(&sc->bge_jfree_listhead);
   1605 	SLIST_INIT(&sc->bge_jinuse_listhead);
   1606 
   1607 	/*
   1608 	 * Now divide it up into 9K pieces and save the addresses
   1609 	 * in an array.
   1610 	 */
   1611 	ptr = sc->bge_cdata.bge_jumbo_buf;
   1612 	for (i = 0; i < BGE_JSLOTS; i++) {
   1613 		sc->bge_cdata.bge_jslots[i] = ptr;
   1614 		ptr += BGE_JLEN;
   1615 		entry = malloc(sizeof(struct bge_jpool_entry),
   1616 		    M_DEVBUF, M_NOWAIT);
   1617 		if (entry == NULL) {
   1618 			aprint_error_dev(sc->bge_dev,
   1619 			    "no memory for jumbo buffer queue!\n");
   1620 			error = ENOBUFS;
   1621 			goto out;
   1622 		}
   1623 		entry->slot = i;
   1624 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
   1625 				 entry, jpool_entries);
   1626 	}
   1627 out:
   1628 	if (error != 0) {
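         		/*
         		 * Unwind in reverse order of setup; each case deliberately
         		 * falls through to release the resources acquired in the
         		 * earlier states as well.
         		 */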
   1629 		switch (state) {
   1630 		case 4:
   1631 			bus_dmamap_unload(sc->bge_dmatag,
   1632 			    sc->bge_cdata.bge_rx_jumbo_map);
   1633 		case 3:
   1634 			bus_dmamap_destroy(sc->bge_dmatag,
   1635 			    sc->bge_cdata.bge_rx_jumbo_map);
   1636 		case 2:
   1637 			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
   1638 		case 1:
   1639 			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
   1640 			break;
   1641 		default:
   1642 			break;
   1643 		}
   1644 	}
   1645 
   1646 	return error;
   1647 }
   1648 
   1649 /*
   1650  * Allocate a jumbo buffer.
   1651  */
   1652 static void *
   1653 bge_jalloc(struct bge_softc *sc)
   1654 {
   1655 	struct bge_jpool_entry   *entry;
   1656 
   1657 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
   1658 
   1659 	if (entry == NULL) {
   1660 		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
   1661 		return NULL;
   1662 	}
   1663 
   1664 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
   1665 	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
   1666 	return (sc->bge_cdata.bge_jslots[entry->slot]);
   1667 }
   1668 
   1669 /*
   1670  * Release a jumbo buffer.
   1671  */
   1672 static void
   1673 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
   1674 {
   1675 	struct bge_jpool_entry *entry;
   1676 	struct bge_softc *sc;
   1677 	int i, s;
   1678 
   1679 	/* Extract the softc struct pointer. */
   1680 	sc = (struct bge_softc *)arg;
   1681 
   1682 	if (sc == NULL)
   1683 		panic("bge_jfree: can't find softc pointer!");
   1684 
    1685 	/* Calculate the slot this buffer belongs to. */
   1686 
   1687 	i = ((char *)buf
   1688 	     - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
   1689 
   1690 	if ((i < 0) || (i >= BGE_JSLOTS))
   1691 		panic("bge_jfree: asked to free buffer that we don't manage!");
   1692 
   1693 	s = splvm();
   1694 	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
   1695 	if (entry == NULL)
   1696 		panic("bge_jfree: buffer not in use!");
   1697 	entry->slot = i;
   1698 	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
   1699 	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
   1700 
   1701 	if (__predict_true(m != NULL))
   1702   		pool_cache_put(mb_cache, m);
   1703 	splx(s);
   1704 }
   1705 
   1706 
   1707 /*
   1708  * Initialize a standard receive ring descriptor.
   1709  */
   1710 static int
   1711 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
   1712     bus_dmamap_t dmamap)
   1713 {
   1714 	struct mbuf		*m_new = NULL;
   1715 	struct bge_rx_bd	*r;
   1716 	int			error;
   1717 
   1718 	if (dmamap == NULL) {
   1719 		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
   1720 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
   1721 		if (error != 0)
   1722 			return error;
   1723 	}
   1724 
   1725 	sc->bge_cdata.bge_rx_std_map[i] = dmamap;
   1726 
   1727 	if (m == NULL) {
   1728 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
   1729 		if (m_new == NULL)
   1730 			return ENOBUFS;
   1731 
   1732 		MCLGET(m_new, M_DONTWAIT);
   1733 		if (!(m_new->m_flags & M_EXT)) {
   1734 			m_freem(m_new);
   1735 			return ENOBUFS;
   1736 		}
   1737 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
   1738 
   1739 	} else {
   1740 		m_new = m;
   1741 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
   1742 		m_new->m_data = m_new->m_ext.ext_buf;
   1743 	}
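         	/*
         	 * Shift the payload by ETHER_ALIGN bytes so the IP header ends
         	 * up 32-bit aligned, except on chips with the RX alignment bug,
         	 * which need the buffer left at its natural alignment.
         	 */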
   1744 	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
   1745 	    m_adj(m_new, ETHER_ALIGN);
   1746 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
   1747 	    BUS_DMA_READ|BUS_DMA_NOWAIT))
   1748 		return ENOBUFS;
   1749 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
   1750 	    BUS_DMASYNC_PREREAD);
   1751 
   1752 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
   1753 	r = &sc->bge_rdata->bge_rx_std_ring[i];
   1754 	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
   1755 	r->bge_flags = BGE_RXBDFLAG_END;
   1756 	r->bge_len = m_new->m_len;
   1757 	r->bge_idx = i;
   1758 
   1759 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   1760 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
   1761 		i * sizeof (struct bge_rx_bd),
   1762 	    sizeof (struct bge_rx_bd),
   1763 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
   1764 
   1765 	return 0;
   1766 }
   1767 
   1768 /*
   1769  * Initialize a jumbo receive ring descriptor. This allocates
   1770  * a jumbo buffer from the pool managed internally by the driver.
   1771  */
   1772 static int
   1773 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
   1774 {
   1775 	struct mbuf *m_new = NULL;
   1776 	struct bge_rx_bd *r;
   1777 	void *buf = NULL;
   1778 
   1779 	if (m == NULL) {
   1780 
   1781 		/* Allocate the mbuf. */
   1782 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
   1783 		if (m_new == NULL)
   1784 			return ENOBUFS;
   1785 
   1786 		/* Allocate the jumbo buffer */
   1787 		buf = bge_jalloc(sc);
   1788 		if (buf == NULL) {
   1789 			m_freem(m_new);
   1790 			aprint_error_dev(sc->bge_dev,
   1791 			    "jumbo allocation failed -- packet dropped!\n");
   1792 			return ENOBUFS;
   1793 		}
   1794 
   1795 		/* Attach the buffer to the mbuf. */
   1796 		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
   1797 		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
   1798 		    bge_jfree, sc);
   1799 		m_new->m_flags |= M_EXT_RW;
   1800 	} else {
   1801 		m_new = m;
   1802 		buf = m_new->m_data = m_new->m_ext.ext_buf;
   1803 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
   1804 	}
   1805 	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
   1806 	    m_adj(m_new, ETHER_ALIGN);
   1807 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
   1808 	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
   1809 	    BUS_DMASYNC_PREREAD);
   1810 	/* Set up the descriptor. */
   1811 	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
   1812 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
   1813 	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
   1814 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
   1815 	r->bge_len = m_new->m_len;
   1816 	r->bge_idx = i;
   1817 
   1818 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   1819 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
   1820 		i * sizeof (struct bge_rx_bd),
   1821 	    sizeof (struct bge_rx_bd),
   1822 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
   1823 
   1824 	return 0;
   1825 }
   1826 
   1827 /*
   1828  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
    1829  * that's 1MB of memory, which is a lot. For now, we fill only the first
   1830  * 256 ring entries and hope that our CPU is fast enough to keep up with
   1831  * the NIC.
   1832  */
   1833 static int
   1834 bge_init_rx_ring_std(struct bge_softc *sc)
   1835 {
   1836 	int i;
   1837 
   1838 	if (sc->bge_flags & BGEF_RXRING_VALID)
   1839 		return 0;
   1840 
   1841 	for (i = 0; i < BGE_SSLOTS; i++) {
   1842 		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
   1843 			return ENOBUFS;
   1844 	}
   1845 
   1846 	sc->bge_std = i - 1;
   1847 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
   1848 
   1849 	sc->bge_flags |= BGEF_RXRING_VALID;
   1850 
   1851 	return 0;
   1852 }
   1853 
   1854 static void
   1855 bge_free_rx_ring_std(struct bge_softc *sc)
   1856 {
   1857 	int i;
   1858 
   1859 	if (!(sc->bge_flags & BGEF_RXRING_VALID))
   1860 		return;
   1861 
   1862 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
   1863 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
   1864 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
   1865 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
   1866 			bus_dmamap_destroy(sc->bge_dmatag,
   1867 			    sc->bge_cdata.bge_rx_std_map[i]);
   1868 		}
   1869 		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
   1870 		    sizeof(struct bge_rx_bd));
   1871 	}
   1872 
   1873 	sc->bge_flags &= ~BGEF_RXRING_VALID;
   1874 }
   1875 
   1876 static int
   1877 bge_init_rx_ring_jumbo(struct bge_softc *sc)
   1878 {
   1879 	int i;
   1880 	volatile struct bge_rcb *rcb;
   1881 
   1882 	if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID)
   1883 		return 0;
   1884 
   1885 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
   1886 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
   1887 			return ENOBUFS;
   1888 	}
   1889 
   1890 	sc->bge_jumbo = i - 1;
   1891 	sc->bge_flags |= BGEF_JUMBO_RXRING_VALID;
   1892 
   1893 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
   1894 	rcb->bge_maxlen_flags = 0;
   1895 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
   1896 
   1897 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
   1898 
   1899 	return 0;
   1900 }
   1901 
   1902 static void
   1903 bge_free_rx_ring_jumbo(struct bge_softc *sc)
   1904 {
   1905 	int i;
   1906 
   1907 	if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID))
   1908 		return;
   1909 
   1910 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
   1911 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
   1912 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
   1913 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
   1914 		}
   1915 		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
   1916 		    sizeof(struct bge_rx_bd));
   1917 	}
   1918 
   1919 	sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID;
   1920 }
   1921 
   1922 static void
   1923 bge_free_tx_ring(struct bge_softc *sc)
   1924 {
   1925 	int i;
   1926 	struct txdmamap_pool_entry *dma;
   1927 
   1928 	if (!(sc->bge_flags & BGEF_TXRING_VALID))
   1929 		return;
   1930 
   1931 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
   1932 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
   1933 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
   1934 			sc->bge_cdata.bge_tx_chain[i] = NULL;
   1935 			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
   1936 					    link);
   1937 			sc->txdma[i] = 0;
   1938 		}
   1939 		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
   1940 		    sizeof(struct bge_tx_bd));
   1941 	}
   1942 
   1943 	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
   1944 		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
   1945 		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
   1946 		free(dma, M_DEVBUF);
   1947 	}
   1948 
   1949 	sc->bge_flags &= ~BGEF_TXRING_VALID;
   1950 }
   1951 
   1952 static int
   1953 bge_init_tx_ring(struct bge_softc *sc)
   1954 {
   1955 	struct ifnet *ifp = &sc->ethercom.ec_if;
   1956 	int i;
   1957 	bus_dmamap_t dmamap;
   1958 	bus_size_t maxsegsz;
   1959 	struct txdmamap_pool_entry *dma;
   1960 
   1961 	if (sc->bge_flags & BGEF_TXRING_VALID)
   1962 		return 0;
   1963 
   1964 	sc->bge_txcnt = 0;
   1965 	sc->bge_tx_saved_considx = 0;
   1966 
   1967 	/* Initialize transmit producer index for host-memory send ring. */
   1968 	sc->bge_tx_prodidx = 0;
   1969 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
   1970 	/* 5700 b2 errata */
    1971 	/* 5700 B2 errata: repeat the mailbox write. */
   1972 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
   1973 
   1974 	/* NIC-memory send ring not used; initialize to zero. */
   1975 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
   1976 	/* 5700 b2 errata */
    1977 	/* 5700 B2 errata: repeat the mailbox write. */
   1978 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
   1979 
   1980 	/* Limit DMA segment size for some chips */
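         	/*
         	 * BCM57766 at a standard MTU is limited to 2048-byte segments
         	 * (matching the BGE_RDMAMODE_JMB_2K_MMRR setting applied in
         	 * bge_blockinit()); BCM5719 is limited to 4096 bytes.
         	 */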
   1981 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) &&
   1982 	    (ifp->if_mtu <= ETHERMTU))
   1983 		maxsegsz = 2048;
   1984 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
   1985 		maxsegsz = 4096;
   1986 	else
   1987 		maxsegsz = ETHER_MAX_LEN_JUMBO;
   1988 	SLIST_INIT(&sc->txdma_list);
   1989 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
   1990 		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
   1991 		    BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT,
   1992 		    &dmamap))
   1993 			return ENOBUFS;
   1994 		if (dmamap == NULL)
   1995 			panic("dmamap NULL in bge_init_tx_ring");
   1996 		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
   1997 		if (dma == NULL) {
   1998 			aprint_error_dev(sc->bge_dev,
   1999 			    "can't alloc txdmamap_pool_entry\n");
   2000 			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
   2001 			return ENOMEM;
   2002 		}
   2003 		dma->dmamap = dmamap;
   2004 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
   2005 	}
   2006 
   2007 	sc->bge_flags |= BGEF_TXRING_VALID;
   2008 
   2009 	return 0;
   2010 }
   2011 
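         /*
          * Program the hardware multicast filter.  Each multicast address is
          * hashed to one of 128 bits spread across the four BGE_MAR registers;
          * address ranges and promiscuous mode fall back to all-multicast.
          */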
   2012 static void
   2013 bge_setmulti(struct bge_softc *sc)
   2014 {
   2015 	struct ethercom		*ac = &sc->ethercom;
   2016 	struct ifnet		*ifp = &ac->ec_if;
   2017 	struct ether_multi	*enm;
   2018 	struct ether_multistep  step;
   2019 	uint32_t		hashes[4] = { 0, 0, 0, 0 };
   2020 	uint32_t		h;
   2021 	int			i;
   2022 
   2023 	if (ifp->if_flags & IFF_PROMISC)
   2024 		goto allmulti;
   2025 
   2026 	/* Now program new ones. */
   2027 	ETHER_FIRST_MULTI(step, ac, enm);
   2028 	while (enm != NULL) {
   2029 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2030 			/*
   2031 			 * We must listen to a range of multicast addresses.
   2032 			 * For now, just accept all multicasts, rather than
   2033 			 * trying to set only those filter bits needed to match
   2034 			 * the range.  (At this time, the only use of address
   2035 			 * ranges is for IP multicast routing, for which the
   2036 			 * range is big enough to require all bits set.)
   2037 			 */
   2038 			goto allmulti;
   2039 		}
   2040 
   2041 		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
   2042 
   2043 		/* Just want the 7 least-significant bits. */
   2044 		h &= 0x7f;
   2045 
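         		/* Bits 6:5 select the MAR register, bits 4:0 the bit within it. */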
   2046 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
   2047 		ETHER_NEXT_MULTI(step, enm);
   2048 	}
   2049 
   2050 	ifp->if_flags &= ~IFF_ALLMULTI;
   2051 	goto setit;
   2052 
   2053  allmulti:
   2054 	ifp->if_flags |= IFF_ALLMULTI;
   2055 	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
   2056 
   2057  setit:
   2058 	for (i = 0; i < 4; i++)
   2059 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
   2060 }
   2061 
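         /*
          * Notify ASF firmware, via the NIC SRAM mailboxes, that a chip reset
          * is imminent; for start/suspend transitions the APE is told as well.
          */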
   2062 static void
   2063 bge_sig_pre_reset(struct bge_softc *sc, int type)
   2064 {
   2065 
   2066 	/*
   2067 	 * Some chips don't like this so only do this if ASF is enabled
   2068 	 */
   2069 	if (sc->bge_asf_mode)
   2070 		bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
   2071 
   2072 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
   2073 		switch (type) {
   2074 		case BGE_RESET_START:
   2075 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2076 			    BGE_FW_DRV_STATE_START);
   2077 			break;
   2078 		case BGE_RESET_SHUTDOWN:
   2079 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2080 			    BGE_FW_DRV_STATE_UNLOAD);
   2081 			break;
   2082 		case BGE_RESET_SUSPEND:
   2083 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2084 			    BGE_FW_DRV_STATE_SUSPEND);
   2085 			break;
   2086 		}
   2087 	}
   2088 
   2089 	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
   2090 		bge_ape_driver_state_change(sc, type);
   2091 }
   2092 
   2093 static void
   2094 bge_sig_post_reset(struct bge_softc *sc, int type)
   2095 {
   2096 
   2097 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
   2098 		switch (type) {
   2099 		case BGE_RESET_START:
   2100 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2101 			    BGE_FW_DRV_STATE_START_DONE);
   2102 			/* START DONE */
   2103 			break;
   2104 		case BGE_RESET_SHUTDOWN:
   2105 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2106 			    BGE_FW_DRV_STATE_UNLOAD_DONE);
   2107 			break;
   2108 		}
   2109 	}
   2110 
   2111 	if (type == BGE_RESET_SHUTDOWN)
   2112 		bge_ape_driver_state_change(sc, type);
   2113 }
   2114 
   2115 static void
   2116 bge_sig_legacy(struct bge_softc *sc, int type)
   2117 {
   2118 
   2119 	if (sc->bge_asf_mode) {
   2120 		switch (type) {
   2121 		case BGE_RESET_START:
   2122 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2123 			    BGE_FW_DRV_STATE_START);
   2124 			break;
   2125 		case BGE_RESET_SHUTDOWN:
   2126 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2127 			    BGE_FW_DRV_STATE_UNLOAD);
   2128 			break;
   2129 		}
   2130 	}
   2131 }
   2132 
   2133 static void
   2134 bge_wait_for_event_ack(struct bge_softc *sc)
   2135 {
   2136 	int i;
   2137 
    2138 	/* Wait up to 2500 usec (250 iterations of 10 usec). */
   2139 	for (i = 0; i < 250; i++) {
   2140 		if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
   2141 			BGE_RX_CPU_DRV_EVENT))
   2142 			break;
   2143 		DELAY(10);
   2144 	}
   2145 }
   2146 
   2147 static void
   2148 bge_stop_fw(struct bge_softc *sc)
   2149 {
   2150 
   2151 	if (sc->bge_asf_mode) {
   2152 		bge_wait_for_event_ack(sc);
   2153 
   2154 		bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
   2155 		CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
   2156 		    CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
   2157 
   2158 		bge_wait_for_event_ack(sc);
   2159 	}
   2160 }
   2161 
   2162 static int
   2163 bge_poll_fw(struct bge_softc *sc)
   2164 {
   2165 	uint32_t val;
   2166 	int i;
   2167 
   2168 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   2169 		for (i = 0; i < BGE_TIMEOUT; i++) {
   2170 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
   2171 			if (val & BGE_VCPU_STATUS_INIT_DONE)
   2172 				break;
   2173 			DELAY(100);
   2174 		}
   2175 		if (i >= BGE_TIMEOUT) {
   2176 			aprint_error_dev(sc->bge_dev, "reset timed out\n");
   2177 			return -1;
   2178 		}
   2179 	} else {
   2180 		/*
   2181 		 * Poll the value location we just wrote until
   2182 		 * we see the 1's complement of the magic number.
   2183 		 * This indicates that the firmware initialization
   2184 		 * is complete.
   2185 		 * XXX 1000ms for Flash and 10000ms for SEEPROM.
   2186 		 */
   2187 		for (i = 0; i < BGE_TIMEOUT; i++) {
   2188 			val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
   2189 			if (val == ~BGE_SRAM_FW_MB_MAGIC)
   2190 				break;
   2191 			DELAY(10);
   2192 		}
   2193 
   2194 		if ((i >= BGE_TIMEOUT)
   2195 		    && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) {
   2196 			aprint_error_dev(sc->bge_dev,
   2197 			    "firmware handshake timed out, val = %x\n", val);
   2198 			return -1;
   2199 		}
   2200 	}
   2201 
   2202 	if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
   2203 		/* tg3 says we have to wait extra time */
   2204 		delay(10 * 1000);
   2205 	}
   2206 
   2207 	return 0;
   2208 }
   2209 
   2210 int
   2211 bge_phy_addr(struct bge_softc *sc)
   2212 {
   2213 	struct pci_attach_args *pa = &(sc->bge_pa);
   2214 	int phy_addr = 1;
   2215 
   2216 	/*
   2217 	 * PHY address mapping for various devices.
   2218 	 *
   2219 	 *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
   2220 	 * ---------+-------+-------+-------+-------+
   2221 	 * BCM57XX  |   1   |   X   |   X   |   X   |
   2222 	 * BCM5704  |   1   |   X   |   1   |   X   |
   2223 	 * BCM5717  |   1   |   8   |   2   |   9   |
   2224 	 * BCM5719  |   1   |   8   |   2   |   9   |
   2225 	 * BCM5720  |   1   |   8   |   2   |   9   |
   2226 	 *
   2227 	 *          | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
   2228 	 * ---------+-------+-------+-------+-------+
   2229 	 * BCM57XX  |   X   |   X   |   X   |   X   |
   2230 	 * BCM5704  |   X   |   X   |   X   |   X   |
   2231 	 * BCM5717  |   X   |   X   |   X   |   X   |
   2232 	 * BCM5719  |   3   |   10  |   4   |   11  |
   2233 	 * BCM5720  |   X   |   X   |   X   |   X   |
   2234 	 *
   2235 	 * Other addresses may respond but they are not
   2236 	 * IEEE compliant PHYs and should be ignored.
   2237 	 */
   2238 	switch (BGE_ASICREV(sc->bge_chipid)) {
   2239 	case BGE_ASICREV_BCM5717:
   2240 	case BGE_ASICREV_BCM5719:
   2241 	case BGE_ASICREV_BCM5720:
   2242 		phy_addr = pa->pa_function;
   2243 		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
   2244 			phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
   2245 			    BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
   2246 		} else {
   2247 			phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
   2248 			    BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
   2249 		}
   2250 	}
   2251 
   2252 	return phy_addr;
   2253 }
   2254 
   2255 /*
   2256  * Do endian, PCI and DMA initialization. Also check the on-board ROM
   2257  * self-test results.
   2258  */
   2259 static int
   2260 bge_chipinit(struct bge_softc *sc)
   2261 {
   2262 	uint32_t dma_rw_ctl, mode_ctl, reg;
   2263 	int i;
   2264 
   2265 	/* Set endianness before we access any non-PCI registers. */
   2266 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
   2267 	    BGE_INIT);
   2268 
   2269 	/*
   2270 	 * Clear the MAC statistics block in the NIC's
   2271 	 * internal memory.
   2272 	 */
   2273 	for (i = BGE_STATS_BLOCK;
   2274 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
   2275 		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
   2276 
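         	/* Likewise clear the status block region in NIC memory. */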
   2277 	for (i = BGE_STATUS_BLOCK;
   2278 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
   2279 		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
   2280 
   2281 	/* 5717 workaround from tg3 */
   2282 	if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
   2283 		/* Save */
   2284 		mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
   2285 
    2286 		/* Temporarily modify MODE_CTL to control TLP */
   2287 		reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
   2288 		CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1);
   2289 
   2290 		/* Control TLP */
   2291 		reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
   2292 		    BGE_TLP_PHYCTL1);
   2293 		CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1,
   2294 		    reg | BGE_TLP_PHYCTL1_EN_L1PLLPD);
   2295 
   2296 		/* Restore */
   2297 		CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
   2298 	}
   2299 
   2300 	if (BGE_IS_57765_FAMILY(sc)) {
   2301 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
   2302 			/* Save */
   2303 			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
   2304 
    2305 			/* Temporarily modify MODE_CTL to control TLP */
   2306 			reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
   2307 			CSR_WRITE_4(sc, BGE_MODE_CTL,
   2308 			    reg | BGE_MODECTL_PCIE_TLPADDR1);
   2309 
   2310 			/* Control TLP */
   2311 			reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
   2312 			    BGE_TLP_PHYCTL5);
   2313 			CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5,
   2314 			    reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ);
   2315 
   2316 			/* Restore */
   2317 			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
   2318 		}
   2319 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
   2320 			reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
   2321 			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
   2322 			    reg | BGE_CPMU_PADRNG_CTL_RDIV2);
   2323 
   2324 			/* Save */
   2325 			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
   2326 
    2327 			/* Temporarily modify MODE_CTL to control TLP */
   2328 			reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
   2329 			CSR_WRITE_4(sc, BGE_MODE_CTL,
   2330 			    reg | BGE_MODECTL_PCIE_TLPADDR0);
   2331 
   2332 			/* Control TLP */
   2333 			reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
   2334 			    BGE_TLP_FTSMAX);
   2335 			reg &= ~BGE_TLP_FTSMAX_MSK;
   2336 			CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX,
   2337 			    reg | BGE_TLP_FTSMAX_VAL);
   2338 
   2339 			/* Restore */
   2340 			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
   2341 		}
   2342 
   2343 		reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
   2344 		reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
   2345 		reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
   2346 		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
   2347 	}
   2348 
   2349 	/* Set up the PCI DMA control register. */
   2350 	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
   2351 	if (sc->bge_flags & BGEF_PCIE) {
   2352 		/* Read watermark not used, 128 bytes for write. */
   2353 		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
   2354 		    device_xname(sc->bge_dev)));
   2355 		if (sc->bge_mps >= 256)
   2356 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
   2357 		else
   2358 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
   2359 	} else if (sc->bge_flags & BGEF_PCIX) {
    2360 		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
   2361 		    device_xname(sc->bge_dev)));
   2362 		/* PCI-X bus */
   2363 		if (BGE_IS_5714_FAMILY(sc)) {
   2364 			/* 256 bytes for read and write. */
   2365 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
   2366 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
   2367 
   2368 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
   2369 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
   2370 			else
   2371 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
   2372 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
   2373 			/* 1536 bytes for read, 384 bytes for write. */
   2374 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
   2375 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
   2376 		} else {
   2377 			/* 384 bytes for read and write. */
   2378 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
   2379 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
   2380 			    (0x0F);
   2381 		}
   2382 
   2383 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
   2384 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
   2385 			uint32_t tmp;
   2386 
   2387 			/* Set ONEDMA_ATONCE for hardware workaround. */
   2388 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
   2389 			if (tmp == 6 || tmp == 7)
   2390 				dma_rw_ctl |=
   2391 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
   2392 
   2393 			/* Set PCI-X DMA write workaround. */
   2394 			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
   2395 		}
   2396 	} else {
   2397 		/* Conventional PCI bus: 256 bytes for read and write. */
   2398 	  	DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
   2399 		    device_xname(sc->bge_dev)));
   2400 		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
   2401 		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
   2402 
   2403 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
   2404 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
   2405 			dma_rw_ctl |= 0x0F;
   2406 	}
   2407 
   2408 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   2409 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
   2410 		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
   2411 		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
   2412 
   2413 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
   2414 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
   2415 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
   2416 
   2417 	if (BGE_IS_57765_PLUS(sc)) {
   2418 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
   2419 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
   2420 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
   2421 
   2422 		/*
   2423 		 * Enable HW workaround for controllers that misinterpret
   2424 		 * a status tag update and leave interrupts permanently
   2425 		 * disabled.
   2426 		 */
   2427 		if (!BGE_IS_57765_FAMILY(sc) &&
   2428 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717)
   2429 			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
   2430 	}
   2431 
   2432 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
   2433 	    dma_rw_ctl);
   2434 
   2435 	/*
   2436 	 * Set up general mode register.
   2437 	 */
   2438 	mode_ctl = BGE_DMA_SWAP_OPTIONS;
   2439 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   2440 		/* Retain Host-2-BMC settings written by APE firmware. */
   2441 		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
   2442 		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
   2443 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
   2444 		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
   2445 	}
   2446 	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
   2447 	    BGE_MODECTL_TX_NO_PHDR_CSUM;
   2448 
   2449 	/*
    2450 	 * BCM5701 B5 has a bug causing data corruption when using
   2451 	 * 64-bit DMA reads, which can be terminated early and then
   2452 	 * completed later as 32-bit accesses, in combination with
   2453 	 * certain bridges.
   2454 	 */
   2455 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
   2456 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
   2457 		mode_ctl |= BGE_MODECTL_FORCE_PCI32;
   2458 
   2459 	/*
   2460 	 * Tell the firmware the driver is running
   2461 	 */
   2462 	if (sc->bge_asf_mode & ASF_STACKUP)
   2463 		mode_ctl |= BGE_MODECTL_STACKUP;
   2464 
   2465 	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
   2466 
   2467 	/*
   2468 	 * Disable memory write invalidate.  Apparently it is not supported
   2469 	 * properly by these devices.
   2470 	 */
   2471 	PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
   2472 		   PCI_COMMAND_INVALIDATE_ENABLE);
   2473 
   2474 #ifdef __brokenalpha__
   2475 	/*
    2476 	 * Must ensure that we do not cross an 8K byte boundary
   2477 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
   2478 	 * restriction on some ALPHA platforms with early revision
   2479 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
   2480 	 */
   2481 	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
   2482 #endif
   2483 
   2484 	/* Set the timer prescaler (always 66MHz) */
   2485 	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
   2486 
   2487 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   2488 		DELAY(40);	/* XXX */
   2489 
   2490 		/* Put PHY into ready state */
   2491 		BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
   2492 		DELAY(40);
   2493 	}
   2494 
   2495 	return 0;
   2496 }
   2497 
   2498 static int
   2499 bge_blockinit(struct bge_softc *sc)
   2500 {
   2501 	volatile struct bge_rcb	 *rcb;
   2502 	bus_size_t rcb_addr;
   2503 	struct ifnet *ifp = &sc->ethercom.ec_if;
   2504 	bge_hostaddr taddr;
   2505 	uint32_t	dmactl, mimode, val;
   2506 	int		i, limit;
   2507 
   2508 	/*
   2509 	 * Initialize the memory window pointer register so that
   2510 	 * we can access the first 32K of internal NIC RAM. This will
   2511 	 * allow us to set up the TX send ring RCBs and the RX return
   2512 	 * ring RCBs, plus other things which live in NIC memory.
   2513 	 */
   2514 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
   2515 
   2516 	if (!BGE_IS_5705_PLUS(sc)) {
   2517 		/* 57XX step 33 */
   2518 		/* Configure mbuf memory pool */
   2519 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
   2520 		    BGE_BUFFPOOL_1);
   2521 
   2522 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
   2523 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
   2524 		else
   2525 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
   2526 
   2527 		/* 57XX step 34 */
   2528 		/* Configure DMA resource pool */
   2529 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
   2530 		    BGE_DMA_DESCRIPTORS);
   2531 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
   2532 	}
   2533 
   2534 	/* 5718 step 11, 57XX step 35 */
   2535 	/*
    2536 	 * Configure mbuf pool watermarks. New Broadcom docs strongly
   2537 	 * recommend these.
   2538 	 */
   2539 	if (BGE_IS_5717_PLUS(sc)) {
   2540 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
   2541 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
   2542 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
   2543 	} else if (BGE_IS_5705_PLUS(sc)) {
   2544 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
   2545 
   2546 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   2547 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
   2548 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
   2549 		} else {
   2550 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
   2551 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
   2552 		}
   2553 	} else {
   2554 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
   2555 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
   2556 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
   2557 	}
   2558 
   2559 	/* 57XX step 36 */
   2560 	/* Configure DMA resource watermarks */
   2561 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
   2562 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
   2563 
   2564 	/* 5718 step 13, 57XX step 38 */
   2565 	/* Enable buffer manager */
   2566 	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN;
   2567 	/*
    2568 	 * Change the arbitration algorithm of TXMBUF read requests to
    2569 	 * round-robin instead of priority-based for BCM5719.  When
   2570 	 * TXFIFO is almost empty, RDMA will hold its request until
   2571 	 * TXFIFO is not almost empty.
   2572 	 */
   2573 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
   2574 		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
   2575 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
   2576 		sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
   2577 		sc->bge_chipid == BGE_CHIPID_BCM5720_A0)
   2578 		val |= BGE_BMANMODE_LOMBUF_ATTN;
   2579 	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
   2580 
   2581 	/* 57XX step 39 */
   2582 	/* Poll for buffer manager start indication */
   2583 	for (i = 0; i < BGE_TIMEOUT * 2; i++) {
   2584 		DELAY(10);
   2585 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
   2586 			break;
   2587 	}
   2588 
   2589 	if (i == BGE_TIMEOUT * 2) {
   2590 		aprint_error_dev(sc->bge_dev,
   2591 		    "buffer manager failed to start\n");
   2592 		return ENXIO;
   2593 	}
   2594 
   2595 	/* 57XX step 40 */
   2596 	/* Enable flow-through queues */
   2597 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
   2598 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
   2599 
   2600 	/* Wait until queue initialization is complete */
   2601 	for (i = 0; i < BGE_TIMEOUT * 2; i++) {
   2602 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
   2603 			break;
   2604 		DELAY(10);
   2605 	}
   2606 
   2607 	if (i == BGE_TIMEOUT * 2) {
   2608 		aprint_error_dev(sc->bge_dev,
   2609 		    "flow-through queue init failed\n");
   2610 		return ENXIO;
   2611 	}
   2612 
   2613 	/*
   2614 	 * Summary of rings supported by the controller:
   2615 	 *
   2616 	 * Standard Receive Producer Ring
   2617 	 * - This ring is used to feed receive buffers for "standard"
   2618 	 *   sized frames (typically 1536 bytes) to the controller.
   2619 	 *
   2620 	 * Jumbo Receive Producer Ring
   2621 	 * - This ring is used to feed receive buffers for jumbo sized
   2622 	 *   frames (i.e. anything bigger than the "standard" frames)
   2623 	 *   to the controller.
   2624 	 *
   2625 	 * Mini Receive Producer Ring
   2626 	 * - This ring is used to feed receive buffers for "mini"
   2627 	 *   sized frames to the controller.
   2628 	 * - This feature required external memory for the controller
   2629 	 *   but was never used in a production system.  Should always
   2630 	 *   be disabled.
   2631 	 *
   2632 	 * Receive Return Ring
   2633 	 * - After the controller has placed an incoming frame into a
   2634 	 *   receive buffer that buffer is moved into a receive return
    2635 	 *   ring.  The driver is then responsible for passing the
   2636 	 *   buffer up to the stack.  Many versions of the controller
   2637 	 *   support multiple RR rings.
   2638 	 *
   2639 	 * Send Ring
   2640 	 * - This ring is used for outgoing frames.  Many versions of
   2641 	 *   the controller support multiple send rings.
   2642 	 */
   2643 
   2644 	/* 5718 step 15, 57XX step 41 */
   2645 	/* Initialize the standard RX ring control block */
   2646 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
   2647 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
   2648 	/* 5718 step 16 */
   2649 	if (BGE_IS_57765_PLUS(sc)) {
   2650 		/*
   2651 		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
   2652 		 * Bits 15-2 : Maximum RX frame size
    2653 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
   2654 		 * Bit 0     : Reserved
   2655 		 */
   2656 		rcb->bge_maxlen_flags =
   2657 		    BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
   2658 	} else if (BGE_IS_5705_PLUS(sc)) {
   2659 		/*
   2660 		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
   2661 		 * Bits 15-2 : Reserved (should be 0)
   2662 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
   2663 		 * Bit 0     : Reserved
   2664 		 */
   2665 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
   2666 	} else {
   2667 		/*
   2668 		 * Ring size is always XXX entries
   2669 		 * Bits 31-16: Maximum RX frame size
   2670 		 * Bits 15-2 : Reserved (should be 0)
   2671 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
   2672 		 * Bit 0     : Reserved
   2673 		 */
   2674 		rcb->bge_maxlen_flags =
   2675 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
   2676 	}
   2677 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
   2678 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   2679 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   2680 		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
   2681 	else
   2682 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
   2683 	/* Write the standard receive producer ring control block. */
   2684 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
   2685 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
   2686 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
   2687 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
   2688 
   2689 	/* Reset the standard receive producer ring producer index. */
   2690 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
   2691 
   2692 	/* 57XX step 42 */
   2693 	/*
   2694 	 * Initialize the jumbo RX ring control block
   2695 	 * We set the 'ring disabled' bit in the flags
   2696 	 * field until we're actually ready to start
   2697 	 * using this ring (i.e. once we set the MTU
   2698 	 * high enough to require it).
   2699 	 */
   2700 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
   2701 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
   2702 		BGE_HOSTADDR(rcb->bge_hostaddr,
   2703 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
   2704 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
   2705 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
   2706 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
   2707 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   2708 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   2709 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
   2710 		else
   2711 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
   2712 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
   2713 		    rcb->bge_hostaddr.bge_addr_hi);
   2714 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
   2715 		    rcb->bge_hostaddr.bge_addr_lo);
   2716 		/* Program the jumbo receive producer ring RCB parameters. */
   2717 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
   2718 		    rcb->bge_maxlen_flags);
   2719 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
   2720 		/* Reset the jumbo receive producer ring producer index. */
   2721 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
   2722 	}
   2723 
   2724 	/* 57XX step 43 */
   2725 	/* Disable the mini receive producer ring RCB. */
   2726 	if (BGE_IS_5700_FAMILY(sc)) {
   2727 		/* Set up dummy disabled mini ring RCB */
   2728 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
   2729 		rcb->bge_maxlen_flags =
   2730 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
   2731 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
   2732 		    rcb->bge_maxlen_flags);
   2733 		/* Reset the mini receive producer ring producer index. */
   2734 		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
   2735 
   2736 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   2737 		    offsetof(struct bge_ring_data, bge_info),
   2738 		    sizeof (struct bge_gib),
   2739 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   2740 	}
   2741 
   2742 	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
   2743 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   2744 		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
   2745 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
   2746 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
   2747 			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
   2748 			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
   2749 	}
   2750 	/* 5718 step 14, 57XX step 44 */
   2751 	/*
   2752 	 * The BD ring replenish thresholds control how often the
    2753 	 * hardware fetches new BDs from the producer rings in host
    2754 	 * memory.  Setting the value too low on a busy system can
    2755 	 * starve the hardware and reduce the throughput.
   2756 	 *
   2757 	 * Set the BD ring replenish thresholds. The recommended
   2758 	 * values are 1/8th the number of descriptors allocated to
   2759 	 * each ring, but since we try to avoid filling the entire
   2760 	 * ring we set these to the minimal value of 8.  This needs to
   2761 	 * be done on several of the supported chip revisions anyway,
   2762 	 * to work around HW bugs.
   2763 	 */
   2764 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
   2765 	if (BGE_IS_JUMBO_CAPABLE(sc))
   2766 		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
   2767 
   2768 	/* 5718 step 18 */
   2769 	if (BGE_IS_5717_PLUS(sc)) {
   2770 		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
   2771 		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
   2772 	}
   2773 
   2774 	/* 57XX step 45 */
   2775 	/*
   2776 	 * Disable all send rings by setting the 'ring disabled' bit
   2777 	 * in the flags field of all the TX send ring control blocks,
   2778 	 * located in NIC memory.
   2779 	 */
   2780 	if (BGE_IS_5700_FAMILY(sc)) {
   2781 		/* 5700 to 5704 had 16 send rings. */
   2782 		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
   2783 	} else if (BGE_IS_5717_PLUS(sc)) {
   2784 		limit = BGE_TX_RINGS_5717_MAX;
   2785 	} else if (BGE_IS_57765_FAMILY(sc)) {
   2786 		limit = BGE_TX_RINGS_57765_MAX;
   2787 	} else
   2788 		limit = 1;
   2789 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
   2790 	for (i = 0; i < limit; i++) {
   2791 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   2792 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
   2793 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
   2794 		rcb_addr += sizeof(struct bge_rcb);
   2795 	}
   2796 
   2797 	/* 57XX step 46 and 47 */
   2798 	/* Configure send ring RCB 0 (we use only the first ring) */
   2799 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
   2800 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
   2801 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
   2802 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
   2803 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
   2804 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   2805 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   2806 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
   2807 	else
   2808 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
   2809 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
   2810 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   2811 	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
   2812 
   2813 	/* 57XX step 48 */
   2814 	/*
   2815 	 * Disable all receive return rings by setting the
    2816 	 * 'ring disabled' bit in the flags field of all the receive
   2817 	 * return ring control blocks, located in NIC memory.
   2818 	 */
   2819 	if (BGE_IS_5717_PLUS(sc)) {
   2820 		/* Should be 17, use 16 until we get an SRAM map. */
   2821 		limit = 16;
   2822 	} else if (BGE_IS_5700_FAMILY(sc))
   2823 		limit = BGE_RX_RINGS_MAX;
   2824 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
   2825 	    BGE_IS_57765_FAMILY(sc))
   2826 		limit = 4;
   2827 	else
   2828 		limit = 1;
   2829 	/* Disable all receive return rings */
   2830 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
   2831 	for (i = 0; i < limit; i++) {
   2832 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
   2833 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
   2834 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   2835 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
   2836 			BGE_RCB_FLAG_RING_DISABLED));
   2837 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
   2838 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
   2839 		    (i * (sizeof(uint64_t))), 0);
   2840 		rcb_addr += sizeof(struct bge_rcb);
   2841 	}
   2842 
   2843 	/* 57XX step 49 */
   2844 	/*
   2845 	 * Set up receive return ring 0.  Note that the NIC address
   2846 	 * for RX return rings is 0x0.  The return rings live entirely
   2847 	 * within the host, so the nicaddr field in the RCB isn't used.
   2848 	 */
   2849 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
   2850 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
   2851 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
   2852 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
   2853 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
   2854 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   2855 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
   2856 
   2857 	/* 5718 step 24, 57XX step 53 */
   2858 	/* Set random backoff seed for TX */
   2859 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
   2860 	    (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
   2861 		CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
   2862 		CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) &
   2863 	    BGE_TX_BACKOFF_SEED_MASK);
   2864 
   2865 	/* 5718 step 26, 57XX step 55 */
   2866 	/* Set inter-packet gap */
   2867 	val = 0x2620;
   2868 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   2869 		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
   2870 		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
   2871 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
   2872 
   2873 	/* 5718 step 27, 57XX step 56 */
   2874 	/*
   2875 	 * Specify which ring to use for packets that don't match
   2876 	 * any RX rules.
   2877 	 */
   2878 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
   2879 
   2880 	/* 5718 step 28, 57XX step 57 */
   2881 	/*
   2882 	 * Configure number of RX lists. One interrupt distribution
   2883 	 * list, sixteen active lists, one bad frames class.
   2884 	 */
   2885 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
   2886 
   2887 	/* 5718 step 29, 57XX step 58 */
    2888 	/* Initialize RX list placement stats mask. */
   2889 	if (BGE_IS_575X_PLUS(sc)) {
   2890 		val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK);
   2891 		val &= ~BGE_RXLPSTATCONTROL_DACK_FIX;
   2892 		CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val);
   2893 	} else
   2894 		CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
   2895 
   2896 	/* 5718 step 30, 57XX step 59 */
   2897 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
   2898 
   2899 	/* 5718 step 33, 57XX step 62 */
   2900 	/* Disable host coalescing until we get it set up */
   2901 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
   2902 
   2903 	/* 5718 step 34, 57XX step 63 */
   2904 	/* Poll to make sure it's shut down. */
   2905 	for (i = 0; i < BGE_TIMEOUT * 2; i++) {
   2906 		DELAY(10);
   2907 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
   2908 			break;
   2909 	}
   2910 
   2911 	if (i == BGE_TIMEOUT * 2) {
   2912 		aprint_error_dev(sc->bge_dev,
   2913 		    "host coalescing engine failed to idle\n");
   2914 		return ENXIO;
   2915 	}
   2916 
   2917 	/* 5718 step 35, 36, 37 */
   2918 	/* Set up host coalescing defaults */
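         	/* The RX values may have been tuned earlier via bge_set_thresh(). */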
   2919 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
   2920 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
   2921 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
   2922 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
   2923 	if (!(BGE_IS_5705_PLUS(sc))) {
   2924 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
   2925 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
   2926 	}
   2927 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
   2928 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
   2929 
   2930 	/* Set up address of statistics block */
   2931 	if (BGE_IS_5700_FAMILY(sc)) {
   2932 		BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
   2933 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
   2934 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
   2935 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
   2936 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
   2937 	}
   2938 
   2939 	/* 5718 step 38 */
   2940 	/* Set up address of status block */
   2941 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
   2942 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
   2943 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
   2944 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
   2945 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
   2946 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
   2947 
   2948 	/* Set up status block size. */
   2949 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
   2950 	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
   2951 		val = BGE_STATBLKSZ_FULL;
   2952 		bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
   2953 	} else {
   2954 		val = BGE_STATBLKSZ_32BYTE;
   2955 		bzero(&sc->bge_rdata->bge_status_block, 32);
   2956 	}
   2957 
   2958 	/* 5718 step 39, 57XX step 73 */
   2959 	/* Turn on host coalescing state machine */
   2960 	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
   2961 
   2962 	/* 5718 step 40, 57XX step 74 */
   2963 	/* Turn on RX BD completion state machine and enable attentions */
   2964 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
   2965 	    BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
   2966 
   2967 	/* 5718 step 41, 57XX step 75 */
   2968 	/* Turn on RX list placement state machine */
   2969 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
   2970 
   2971 	/* 57XX step 76 */
   2972 	/* Turn on RX list selector state machine. */
   2973 	if (!(BGE_IS_5705_PLUS(sc)))
   2974 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
   2975 
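         	/*
         	 * Assemble the MAC mode register: enable TX/RX DMA and statistics,
         	 * and select the port mode (TBI/GMII/MII) matching the PHY type.
         	 */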
   2976 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
   2977 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
   2978 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
   2979 	    BGE_MACMODE_FRMHDR_DMA_ENB;
   2980 
   2981 	if (sc->bge_flags & BGEF_FIBER_TBI)
   2982 		val |= BGE_PORTMODE_TBI;
   2983 	else if (sc->bge_flags & BGEF_FIBER_MII)
   2984 		val |= BGE_PORTMODE_GMII;
   2985 	else
   2986 		val |= BGE_PORTMODE_MII;
   2987 
   2988 	/* 5718 step 42 and 43, 57XX step 77 and 78 */
   2989 	/* Allow APE to send/receive frames. */
   2990 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
   2991 		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
   2992 
   2993 	/* Turn on DMA, clear stats */
   2994 	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
   2995 	/* 5718 step 44 */
   2996 	DELAY(40);
   2997 
   2998 	/* 5718 step 45, 57XX step 79 */
   2999 	/* Set misc. local control, enable interrupts on attentions */
   3000 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
   3001 	if (BGE_IS_5717_PLUS(sc)) {
   3002 		CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */
   3003 		/* 5718 step 46 */
   3004 		DELAY(100);
   3005 	}
   3006 
   3007 	/* 57XX step 81 */
   3008 	/* Turn on DMA completion state machine */
   3009 	if (!(BGE_IS_5705_PLUS(sc)))
   3010 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
   3011 
   3012 	/* 5718 step 47, 57XX step 82 */
   3013 	val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
   3014 
   3015 	/* 5718 step 48 */
   3016 	/* Enable host coalescing bug fix. */
   3017 	if (BGE_IS_5755_PLUS(sc))
   3018 		val |= BGE_WDMAMODE_STATUS_TAG_FIX;
   3019 
   3020 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
   3021 		val |= BGE_WDMAMODE_BURST_ALL_DATA;
   3022 
   3023 	/* Turn on write DMA state machine */
   3024 	CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val);
   3025 	/* 5718 step 49 */
   3026 	DELAY(40);
   3027 
   3028 	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
   3029 
   3030 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
   3031 		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
   3032 
   3033 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
   3034 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
   3035 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
   3036 		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
   3037 		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
   3038 		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
   3039 
   3040 	if (sc->bge_flags & BGEF_PCIE)
   3041 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
   3042 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
   3043 		if (ifp->if_mtu <= ETHERMTU)
   3044 			val |= BGE_RDMAMODE_JMB_2K_MMRR;
   3045 	}
   3046 	if (sc->bge_flags & BGEF_TSO)
   3047 		val |= BGE_RDMAMODE_TSO4_ENABLE;
   3048 
   3049 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   3050 		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
   3051 		    BGE_RDMAMODE_H2BNC_VLAN_DET;
   3052 		/*
   3053 		 * Allow multiple outstanding read requests from
   3054 		 * non-LSO read DMA engine.
   3055 		 */
   3056 		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
   3057 	}
   3058 
   3059 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
   3060 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
   3061 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
   3062 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
   3063 	    BGE_IS_57765_PLUS(sc)) {
   3064 		dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
   3065 		/*
   3066 		 * Adjust tx margin to prevent TX data corruption and
   3067 		 * fix internal FIFO overflow.
   3068 		 */
   3069 		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
   3070 			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
   3071 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
   3072 			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
   3073 			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
   3074 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
   3075 			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
   3076 		}
   3077 		/*
   3078 		 * Enable fix for read DMA FIFO overruns.
   3079 		 * The fix is to limit the number of RX BDs
    3080 		 * the hardware would fetch at a time.
   3081 		 */
   3082 		CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
   3083 		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
   3084 	}
   3085 
   3086 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
   3087 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
   3088 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
   3089 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
   3090 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
   3091 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   3092 		/*
   3093 		 * Allow 4KB burst length reads for non-LSO frames.
   3094 		 * Enable 512B burst length reads for buffer descriptors.
   3095 		 */
   3096 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
   3097 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
   3098 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
   3099 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
   3100 	}
   3101 
   3102 	/* Turn on read DMA state machine */
   3103 	CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val);
   3104 	/* 5718 step 52 */
   3105 	delay(40);
   3106 
   3107 	/* 5718 step 56, 57XX step 84 */
   3108 	/* Turn on RX data completion state machine */
   3109 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
   3110 
   3111 	/* Turn on RX data and RX BD initiator state machine */
   3112 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
   3113 
   3114 	/* 57XX step 85 */
   3115 	/* Turn on Mbuf cluster free state machine */
   3116 	if (!BGE_IS_5705_PLUS(sc))
   3117 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
   3118 
   3119 	/* 5718 step 57, 57XX step 86 */
   3120 	/* Turn on send data completion state machine */
   3121 	val = BGE_SDCMODE_ENABLE;
   3122 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
   3123 		val |= BGE_SDCMODE_CDELAY;
   3124 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
   3125 
   3126 	/* 5718 step 58 */
   3127 	/* Turn on send BD completion state machine */
   3128 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
   3129 
   3130 	/* 57XX step 88 */
   3131 	/* Turn on RX BD initiator state machine */
   3132 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
   3133 
   3134 	/* 5718 step 60, 57XX step 90 */
   3135 	/* Turn on send data initiator state machine */
   3136 	if (sc->bge_flags & BGEF_TSO) {
   3137 		/* XXX: magic value from Linux driver */
   3138 		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
   3139 		    BGE_SDIMODE_HW_LSO_PRE_DMA);
   3140 	} else
   3141 		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
   3142 
   3143 	/* 5718 step 61, 57XX step 91 */
   3144 	/* Turn on send BD initiator state machine */
   3145 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
   3146 
   3147 	/* 5718 step 62, 57XX step 92 */
   3148 	/* Turn on send BD selector state machine */
   3149 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
   3150 
   3151 	/* 5718 step 31, 57XX step 60 */
   3152 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
   3153 	/* 5718 step 32, 57XX step 61 */
   3154 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
   3155 	    BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
   3156 
   3157 	/* ack/clear link change events */
   3158 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
   3159 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
   3160 	    BGE_MACSTAT_LINK_CHANGED);
   3161 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
   3162 
   3163 	/*
   3164 	 * Enable attention when the link has changed state for
   3165 	 * devices that use auto polling.
   3166 	 */
   3167 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   3168 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
   3169 	} else {
   3170 		if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
   3171 			mimode = BGE_MIMODE_500KHZ_CONST;
   3172 		else
   3173 			mimode = BGE_MIMODE_BASE;
   3174 		/* 5718 step 68. 5718 step 69 (optionally). */
   3175 		if (BGE_IS_5700_FAMILY(sc) ||
   3176 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
   3177 			mimode |= BGE_MIMODE_AUTOPOLL;
   3178 			BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
   3179 		}
   3180 		mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
   3181 		CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
   3182 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
   3183 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
   3184 			    BGE_EVTENB_MI_INTERRUPT);
   3185 	}
   3186 
   3187 	/*
   3188 	 * Clear any pending link state attention.
   3189 	 * Otherwise some link state change events may be lost until attention
   3190 	 * is cleared by bge_intr() -> bge_link_upd() sequence.
   3191 	 * It's not necessary on newer BCM chips - perhaps enabling link
   3192 	 * state change attentions implies clearing pending attention.
   3193 	 */
   3194 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
   3195 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
   3196 	    BGE_MACSTAT_LINK_CHANGED);
   3197 
   3198 	/* Enable link state change attentions. */
   3199 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
   3200 
   3201 	return 0;
   3202 }
   3203 
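         /*
          * Look up the name for a chip revision: try an exact chip ID match
          * first, then fall back to a match on the ASIC major revision alone.
          */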
   3204 static const struct bge_revision *
   3205 bge_lookup_rev(uint32_t chipid)
   3206 {
   3207 	const struct bge_revision *br;
   3208 
   3209 	for (br = bge_revisions; br->br_name != NULL; br++) {
   3210 		if (br->br_chipid == chipid)
   3211 			return br;
   3212 	}
   3213 
   3214 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
   3215 		if (br->br_chipid == BGE_ASICREV(chipid))
   3216 			return br;
   3217 	}
   3218 
   3219 	return NULL;
   3220 }
   3221 
   3222 static const struct bge_product *
   3223 bge_lookup(const struct pci_attach_args *pa)
   3224 {
   3225 	const struct bge_product *bp;
   3226 
   3227 	for (bp = bge_products; bp->bp_name != NULL; bp++) {
   3228 		if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
   3229 		    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
   3230 			return bp;
   3231 	}
   3232 
   3233 	return NULL;
   3234 }
   3235 
   3236 static uint32_t
   3237 bge_chipid(const struct pci_attach_args *pa)
   3238 {
   3239 	uint32_t id;
   3240 
   3241 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
   3242 		>> BGE_PCIMISCCTL_ASICREV_SHIFT;
   3243 
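         	/*
         	 * Newer devices report BGE_ASICREV_USE_PRODID_REG here; for
         	 * those, the real chip ID is read from a product-specific
         	 * config register.
         	 */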
   3244 	if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
   3245 		switch (PCI_PRODUCT(pa->pa_id)) {
   3246 		case PCI_PRODUCT_BROADCOM_BCM5717:
   3247 		case PCI_PRODUCT_BROADCOM_BCM5718:
   3248 		case PCI_PRODUCT_BROADCOM_BCM5719:
   3249 		case PCI_PRODUCT_BROADCOM_BCM5720:
   3250 		case PCI_PRODUCT_BROADCOM_BCM5724: /* ??? */
   3251 			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3252 			    BGE_PCI_GEN2_PRODID_ASICREV);
   3253 			break;
   3254 		case PCI_PRODUCT_BROADCOM_BCM57761:
   3255 		case PCI_PRODUCT_BROADCOM_BCM57762:
   3256 		case PCI_PRODUCT_BROADCOM_BCM57765:
   3257 		case PCI_PRODUCT_BROADCOM_BCM57766:
   3258 		case PCI_PRODUCT_BROADCOM_BCM57781:
   3259 		case PCI_PRODUCT_BROADCOM_BCM57785:
   3260 		case PCI_PRODUCT_BROADCOM_BCM57791:
   3261 		case PCI_PRODUCT_BROADCOM_BCM57795:
   3262 			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3263 			    BGE_PCI_GEN15_PRODID_ASICREV);
   3264 			break;
   3265 		default:
   3266 			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3267 			    BGE_PCI_PRODID_ASICREV);
   3268 			break;
   3269 		}
   3270 	}
   3271 
   3272 	return id;
   3273 }
   3274 
   3275 /*
   3276  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
   3277  * against our list and return its name if we find a match. Note
   3278  * that since the Broadcom controller contains VPD support, we
   3279  * can get the device name string from the controller itself instead
   3280  * of the compiled-in string. This is a little slow, but it guarantees
   3281  * we'll always announce the right product name.
   3282  */
   3283 static int
   3284 bge_probe(device_t parent, cfdata_t match, void *aux)
   3285 {
   3286 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
   3287 
   3288 	if (bge_lookup(pa) != NULL)
   3289 		return 1;
   3290 
   3291 	return 0;
   3292 }
   3293 
   3294 static void
   3295 bge_attach(device_t parent, device_t self, void *aux)
   3296 {
   3297 	struct bge_softc	*sc = device_private(self);
   3298 	struct pci_attach_args	*pa = aux;
   3299 	prop_dictionary_t dict;
   3300 	const struct bge_product *bp;
   3301 	const struct bge_revision *br;
   3302 	pci_chipset_tag_t	pc;
   3303 	pci_intr_handle_t	ih;
   3304 	const char		*intrstr = NULL;
   3305 	uint32_t 		hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5;
   3306 	uint32_t		command;
   3307 	struct ifnet		*ifp;
   3308 	uint32_t		misccfg, mimode;
   3309 	void *			kva;
   3310 	u_char			eaddr[ETHER_ADDR_LEN];
   3311 	pcireg_t		memtype, subid, reg;
   3312 	bus_addr_t		memaddr;
   3313 	uint32_t		pm_ctl;
   3314 	bool			no_seeprom;
   3315 	int			capmask;
   3316 	int			mii_flags;
   3317 	int			map_flags;
   3318 	char intrbuf[PCI_INTRSTR_LEN];
   3319 
   3320 	bp = bge_lookup(pa);
   3321 	KASSERT(bp != NULL);
   3322 
   3323 	sc->sc_pc = pa->pa_pc;
   3324 	sc->sc_pcitag = pa->pa_tag;
   3325 	sc->bge_dev = self;
   3326 
   3327 	sc->bge_pa = *pa;
   3328 	pc = sc->sc_pc;
   3329 	subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
   3330 
   3331 	aprint_naive(": Ethernet controller\n");
   3332 	aprint_normal(": %s\n", bp->bp_name);
   3333 
   3334 	/*
   3335 	 * Map control/status registers.
   3336 	 */
   3337 	DPRINTFN(5, ("Map control/status regs\n"));
   3338 	command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   3339 	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
   3340 	pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
   3341 	command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   3342 
   3343 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
   3344 		aprint_error_dev(sc->bge_dev,
   3345 		    "failed to enable memory mapping!\n");
   3346 		return;
   3347 	}
   3348 
   3349 	DPRINTFN(5, ("pci_mem_find\n"));
   3350 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
   3351 	switch (memtype) {
   3352 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3353 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3354 #if 0
   3355 		if (pci_mapreg_map(pa, BGE_PCI_BAR0,
   3356 		    memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
   3357 		    &memaddr, &sc->bge_bsize) == 0)
   3358 			break;
   3359 #else
   3360 		/*
    3361 		 * Workaround for the PCI prefetchable bit. Some BCM5717-5720
    3362 		 * based systems get an NMI on boot (PR#48451). This might not
    3363 		 * be a bug in the driver but in our common PCI code. Until we
    3364 		 * find the real cause, ignore the prefetchable bit.
   3365 		 */
   3366 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0,
   3367 		    memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) {
   3368 			map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3369 			if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize,
   3370 			    map_flags, &sc->bge_bhandle) == 0) {
   3371 				sc->bge_btag = pa->pa_memt;
   3372 				break;
   3373 			}
   3374 		}
   3375 #endif
   3376 	default:
   3377 		aprint_error_dev(sc->bge_dev, "can't find mem space\n");
   3378 		return;
   3379 	}
   3380 
   3381 	DPRINTFN(5, ("pci_intr_map\n"));
   3382 	if (pci_intr_map(pa, &ih)) {
   3383 		aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n");
   3384 		return;
   3385 	}
   3386 
   3387 	DPRINTFN(5, ("pci_intr_string\n"));
   3388 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
   3389 
   3390 	DPRINTFN(5, ("pci_intr_establish\n"));
   3391 	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
   3392 
   3393 	if (sc->bge_intrhand == NULL) {
   3394 		aprint_error_dev(sc->bge_dev,
   3395 		    "couldn't establish interrupt%s%s\n",
   3396 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   3397 		return;
   3398 	}
   3399 	aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
   3400 
   3401 	/* Save various chip information. */
   3402 	sc->bge_chipid = bge_chipid(pa);
   3403 	sc->bge_phy_addr = bge_phy_addr(sc);
   3404 
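         	/*
         	 * Determine the bus type: treat the device as PCIe if the PCIe
         	 * capability is present (or on the BCM5785); otherwise check
         	 * the PCISTATE register for PCI-X mode.
         	 */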
   3405 	if ((pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
   3406 	        &sc->bge_pciecap, NULL) != 0)
   3407 	    || (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)) {
   3408 		/* PCIe */
   3409 		sc->bge_flags |= BGEF_PCIE;
   3410 		/* Extract supported maximum payload size. */
   3411 		reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3412 		    sc->bge_pciecap + PCIE_DCAP);
   3413 		sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD);
   3414 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   3415 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   3416 			sc->bge_expmrq = 2048;
   3417 		else
   3418 			sc->bge_expmrq = 4096;
   3419 		bge_set_max_readrq(sc);
   3420 	} else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
   3421 		BGE_PCISTATE_PCI_BUSMODE) == 0) {
   3422 		/* PCI-X */
   3423 		sc->bge_flags |= BGEF_PCIX;
   3424 		if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
   3425 			&sc->bge_pcixcap, NULL) == 0)
   3426 			aprint_error_dev(sc->bge_dev,
   3427 			    "unable to find PCIX capability\n");
   3428 	}
   3429 
   3430 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) {
   3431 		/*
   3432 		 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
   3433 		 * can clobber the chip's PCI config-space power control
   3434 		 * registers, leaving the card in D3 powersave state. We do
   3435 		 * not have memory-mapped registers in this state, so force
   3436 		 * device into D0 state before starting initialization.
   3437 		 */
   3438 		pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
   3439 		pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
   3440 		pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
   3441 		pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
    3442 		DELAY(1000);	/* 27 usec is allegedly sufficient */
   3443 	}
   3444 
   3445 	/* Save chipset family. */
   3446 	switch (BGE_ASICREV(sc->bge_chipid)) {
   3447 	case BGE_ASICREV_BCM5717:
   3448 	case BGE_ASICREV_BCM5719:
   3449 	case BGE_ASICREV_BCM5720:
   3450 		sc->bge_flags |= BGEF_5717_PLUS;
   3451 		/* FALLTHROUGH */
   3452 	case BGE_ASICREV_BCM57765:
   3453 	case BGE_ASICREV_BCM57766:
   3454 		if (!BGE_IS_5717_PLUS(sc))
   3455 			sc->bge_flags |= BGEF_57765_FAMILY;
   3456 		sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS |
   3457 		    BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE;
   3458 		/* Jumbo frame on BCM5719 A0 does not work. */
   3459 		if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
   3460 		    (sc->bge_chipid == BGE_CHIPID_BCM5719_A0))
   3461 			sc->bge_flags &= ~BGEF_JUMBO_CAPABLE;
   3462 		break;
   3463 	case BGE_ASICREV_BCM5755:
   3464 	case BGE_ASICREV_BCM5761:
   3465 	case BGE_ASICREV_BCM5784:
   3466 	case BGE_ASICREV_BCM5785:
   3467 	case BGE_ASICREV_BCM5787:
   3468 	case BGE_ASICREV_BCM57780:
   3469 		sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS;
   3470 		break;
   3471 	case BGE_ASICREV_BCM5700:
   3472 	case BGE_ASICREV_BCM5701:
   3473 	case BGE_ASICREV_BCM5703:
   3474 	case BGE_ASICREV_BCM5704:
   3475 		sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE;
   3476 		break;
   3477 	case BGE_ASICREV_BCM5714_A0:
   3478 	case BGE_ASICREV_BCM5780:
   3479 	case BGE_ASICREV_BCM5714:
   3480 		sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE;
   3481 		/* FALLTHROUGH */
   3482 	case BGE_ASICREV_BCM5750:
   3483 	case BGE_ASICREV_BCM5752:
   3484 	case BGE_ASICREV_BCM5906:
   3485 		sc->bge_flags |= BGEF_575X_PLUS;
   3486 		/* FALLTHROUGH */
   3487 	case BGE_ASICREV_BCM5705:
   3488 		sc->bge_flags |= BGEF_5705_PLUS;
   3489 		break;
   3490 	}
   3491 
   3492 	/* Identify chips with APE processor. */
   3493 	switch (BGE_ASICREV(sc->bge_chipid)) {
   3494 	case BGE_ASICREV_BCM5717:
   3495 	case BGE_ASICREV_BCM5719:
   3496 	case BGE_ASICREV_BCM5720:
   3497 	case BGE_ASICREV_BCM5761:
   3498 		sc->bge_flags |= BGEF_APE;
   3499 		break;
   3500 	}
   3501 
   3502 	/*
   3503 	 * The 40bit DMA bug applies to the 5714/5715 controllers and is
   3504 	 * not actually a MAC controller bug but an issue with the embedded
   3505 	 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
   3506 	 */
   3507 	if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0))
   3508 		sc->bge_flags |= BGEF_40BIT_BUG;
   3509 
   3510 	/* Chips with APE need BAR2 access for APE registers/memory. */
   3511 	if ((sc->bge_flags & BGEF_APE) != 0) {
   3512 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
   3513 #if 0
   3514 		if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
   3515 			&sc->bge_apetag, &sc->bge_apehandle, NULL,
   3516 			&sc->bge_apesize)) {
   3517 			aprint_error_dev(sc->bge_dev,
   3518 			    "couldn't map BAR2 memory\n");
   3519 			return;
   3520 		}
   3521 #else
   3522 		/*
    3523 		 * Workaround for the PCI prefetchable bit. Some BCM5717-5720
    3524 		 * based systems get an NMI on boot (PR#48451). This might not
    3525 		 * be a bug in the driver but in our common PCI code. Until we
    3526 		 * find the real cause, ignore the prefetchable bit.
   3527 		 */
   3528 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2,
   3529 		    memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) {
   3530 			aprint_error_dev(sc->bge_dev,
   3531 			    "couldn't map BAR2 memory\n");
   3532 			return;
   3533 		}
   3534 
   3535 		map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3536 		if (bus_space_map(pa->pa_memt, memaddr,
   3537 		    sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) {
   3538 			aprint_error_dev(sc->bge_dev,
   3539 			    "couldn't map BAR2 memory\n");
   3540 			return;
   3541 		}
   3542 		sc->bge_apetag = pa->pa_memt;
   3543 #endif
   3544 
   3545 		/* Enable APE register/memory access by host driver. */
   3546 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
   3547 		reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
   3548 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
   3549 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
   3550 		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
   3551 
   3552 		bge_ape_lock_init(sc);
   3553 		bge_ape_read_fw_ver(sc);
   3554 	}
   3555 
    3556 	/* Identify the chips that use a CPMU. */
   3557 	if (BGE_IS_5717_PLUS(sc) ||
   3558 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
   3559 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
   3560 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
   3561 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
   3562 		sc->bge_flags |= BGEF_CPMU_PRESENT;
   3563 
    3564 	/* Set MI_MODE. Chips with a CPMU need the 500kHz constant clock. */
   3565 	mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
   3566 	if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
   3567 		mimode |= BGE_MIMODE_500KHZ_CONST;
   3568 	else
   3569 		mimode |= BGE_MIMODE_BASE;
   3570 	CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
   3571 
   3572 	/*
   3573 	 * When using the BCM5701 in PCI-X mode, data corruption has
   3574 	 * been observed in the first few bytes of some received packets.
   3575 	 * Aligning the packet buffer in memory eliminates the corruption.
   3576 	 * Unfortunately, this misaligns the packet payloads.  On platforms
   3577 	 * which do not support unaligned accesses, we will realign the
   3578 	 * payloads by copying the received packets.
   3579 	 */
   3580 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
   3581 	    sc->bge_flags & BGEF_PCIX)
   3582 		sc->bge_flags |= BGEF_RX_ALIGNBUG;
   3583 
   3584 	if (BGE_IS_5700_FAMILY(sc))
   3585 		sc->bge_flags |= BGEF_JUMBO_CAPABLE;
   3586 
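         	/*
         	 * The BCM5788/5788M report the 5705 ASIC revision; identify
         	 * them by the board ID field of the misc. configuration
         	 * register.
         	 */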
   3587 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
   3588 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
   3589 
   3590 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
   3591 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
   3592 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
   3593 		sc->bge_flags |= BGEF_IS_5788;
   3594 
   3595 	/*
    3596 	 * Some controllers seem to require special firmware to use
    3597 	 * TSO. That firmware is not available to FreeBSD, and Linux
    3598 	 * claims that TSO performed by the firmware is slower than
    3599 	 * hardware based TSO. Moreover, the firmware based TSO has a
    3600 	 * known bug: it cannot handle TSO if the ethernet header plus
    3601 	 * IP/TCP header is greater than 80 bytes. A workaround for the
    3602 	 * TSO bug exists, but it seems more expensive than not using
    3603 	 * TSO at all. Some hardware also has the TSO bug, so limit
    3604 	 * TSO to the controllers that are not affected by it
    3605 	 * (e.g. 5755 or higher).
   3606 	 */
   3607 	if (BGE_IS_5755_PLUS(sc)) {
   3608 		/*
    3609 		 * BCM5754 and BCM5787 share the same ASIC ID, so an
    3610 		 * explicit device ID check is required.
   3611 		 */
   3612 		if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) &&
   3613 		    (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M))
   3614 			sc->bge_flags |= BGEF_TSO;
   3615 	}
   3616 
   3617 	capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */
   3618 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
   3619 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
   3620 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
   3621 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
   3622 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
   3623 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
   3624 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
   3625 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
   3626 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
   3627 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
   3628 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
   3629 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
   3630 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
   3631 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
   3632 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   3633 		/* These chips are 10/100 only. */
   3634 		capmask &= ~BMSR_EXTSTAT;
   3635 		sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
   3636 	}
   3637 
   3638 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   3639 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
   3640 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
   3641 		 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
   3642 		sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
   3643 
   3644 	/* Set various PHY bug flags. */
   3645 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
   3646 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
   3647 		sc->bge_phy_flags |= BGEPHYF_CRC_BUG;
   3648 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
   3649 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
   3650 		sc->bge_phy_flags |= BGEPHYF_ADC_BUG;
   3651 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
   3652 		sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG;
   3653 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   3654 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
   3655 	    PCI_VENDOR(subid) == PCI_VENDOR_DELL)
   3656 		sc->bge_phy_flags |= BGEPHYF_NO_3LED;
   3657 	if (BGE_IS_5705_PLUS(sc) &&
   3658 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
   3659 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
   3660 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
   3661 	    !BGE_IS_57765_PLUS(sc)) {
   3662 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
   3663 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
   3664 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
   3665 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
   3666 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
   3667 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
   3668 				sc->bge_phy_flags |= BGEPHYF_JITTER_BUG;
   3669 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
   3670 				sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM;
   3671 		} else
   3672 			sc->bge_phy_flags |= BGEPHYF_BER_BUG;
   3673 	}
   3674 
   3675 	/*
   3676 	 * SEEPROM check.
   3677 	 * First check if firmware knows we do not have SEEPROM.
   3678 	 */
   3679 	if (prop_dictionary_get_bool(device_properties(self),
   3680 	     "without-seeprom", &no_seeprom) && no_seeprom)
   3681 	 	sc->bge_flags |= BGEF_NO_EEPROM;
   3682 
   3683 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
   3684 		sc->bge_flags |= BGEF_NO_EEPROM;
   3685 
   3686 	/* Now check the 'ROM failed' bit on the RX CPU */
   3687 	else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL)
   3688 		sc->bge_flags |= BGEF_NO_EEPROM;
   3689 
   3690 	sc->bge_asf_mode = 0;
   3691 	/* No ASF if APE present. */
   3692 	if ((sc->bge_flags & BGEF_APE) == 0) {
   3693 		if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
   3694 			BGE_SRAM_DATA_SIG_MAGIC)) {
   3695 			if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
   3696 			    BGE_HWCFG_ASF) {
   3697 				sc->bge_asf_mode |= ASF_ENABLE;
   3698 				sc->bge_asf_mode |= ASF_STACKUP;
   3699 				if (BGE_IS_575X_PLUS(sc))
   3700 					sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
   3701 			}
   3702 		}
   3703 	}
   3704 
   3705 	/*
   3706 	 * Reset NVRAM before bge_reset(). It's required to acquire NVRAM
   3707 	 * lock in bge_reset().
   3708 	 */
   3709 	CSR_WRITE_4(sc, BGE_EE_ADDR,
   3710 	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
   3711 	delay(1000);
   3712 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
   3713 
   3714 	bge_stop_fw(sc);
   3715 	bge_sig_pre_reset(sc, BGE_RESET_START);
   3716 	if (bge_reset(sc))
   3717 		aprint_error_dev(sc->bge_dev, "chip reset failed\n");
   3718 
   3719 	/*
   3720 	 * Read the hardware config word in the first 32k of NIC internal
   3721 	 * memory, or fall back to the config word in the EEPROM.
   3722 	 * Note: on some BCM5700 cards, this value appears to be unset.
   3723 	 */
   3724 	hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0;
   3725 	if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
   3726 	    BGE_SRAM_DATA_SIG_MAGIC) {
   3727 		uint32_t tmp;
   3728 
   3729 		hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
   3730 		tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >>
   3731 		    BGE_SRAM_DATA_VER_SHIFT;
   3732 		if ((0 < tmp) && (tmp < 0x100))
   3733 			hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2);
   3734 		if (sc->bge_flags & BGEF_PCIE)
   3735 			hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3);
    3736 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
   3737 			hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4);
   3738 		if (BGE_IS_5717_PLUS(sc))
   3739 			hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5);
   3740 	} else if (!(sc->bge_flags & BGEF_NO_EEPROM)) {
   3741 		bge_read_eeprom(sc, (void *)&hwcfg,
   3742 		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
   3743 		hwcfg = be32toh(hwcfg);
   3744 	}
   3745 	aprint_normal_dev(sc->bge_dev,
    3746 	    "HW config %08x, %08x, %08x, %08x, %08x\n",
   3747 	    hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5);
   3748 
   3749 	bge_sig_legacy(sc, BGE_RESET_START);
   3750 	bge_sig_post_reset(sc, BGE_RESET_START);
   3751 
   3752 	if (bge_chipinit(sc)) {
   3753 		aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
   3754 		bge_release_resources(sc);
   3755 		return;
   3756 	}
   3757 
   3758 	/*
   3759 	 * Get station address from the EEPROM.
   3760 	 */
   3761 	if (bge_get_eaddr(sc, eaddr)) {
   3762 		aprint_error_dev(sc->bge_dev,
   3763 		    "failed to read station address\n");
   3764 		bge_release_resources(sc);
   3765 		return;
   3766 	}
   3767 
   3768 	br = bge_lookup_rev(sc->bge_chipid);
   3769 
   3770 	if (br == NULL) {
   3771 		aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
   3772 		    sc->bge_chipid);
   3773 	} else {
   3774 		aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
   3775 		    br->br_name, sc->bge_chipid);
   3776 	}
   3777 	aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
   3778 
   3779 	/* Allocate the general information block and ring buffers. */
   3780 	if (pci_dma64_available(pa))
   3781 		sc->bge_dmatag = pa->pa_dmat64;
   3782 	else
   3783 		sc->bge_dmatag = pa->pa_dmat;
   3784 
    3785 	/* 40bit DMA workaround: restrict DMA addresses to 40 bits. */
   3786 	if (sizeof(bus_addr_t) > 4) {
   3787 		if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) {
   3788 			bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */
   3789 
   3790 			if (bus_dmatag_subregion(olddmatag, 0,
   3791 				(bus_addr_t)(1ULL << 40), &(sc->bge_dmatag),
   3792 				BUS_DMA_NOWAIT) != 0) {
   3793 				aprint_error_dev(self,
   3794 				    "WARNING: failed to restrict dma range,"
   3795 				    " falling back to parent bus dma range\n");
   3796 				sc->bge_dmatag = olddmatag;
   3797 			}
   3798 		}
   3799 	}
   3800 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
   3801 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
   3802 			     PAGE_SIZE, 0, &sc->bge_ring_seg, 1,
   3803 		&sc->bge_ring_rseg, BUS_DMA_NOWAIT)) {
   3804 		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
   3805 		return;
   3806 	}
   3807 	DPRINTFN(5, ("bus_dmamem_map\n"));
   3808 	if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
   3809 		sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva,
   3810 			   BUS_DMA_NOWAIT)) {
   3811 		aprint_error_dev(sc->bge_dev,
   3812 		    "can't map DMA buffers (%zu bytes)\n",
   3813 		    sizeof(struct bge_ring_data));
   3814 		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
   3815 		    sc->bge_ring_rseg);
   3816 		return;
   3817 	}
    3818 	DPRINTFN(5, ("bus_dmamap_create\n"));
   3819 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
   3820 	    sizeof(struct bge_ring_data), 0,
   3821 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
   3822 		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
   3823 		bus_dmamem_unmap(sc->bge_dmatag, kva,
   3824 				 sizeof(struct bge_ring_data));
   3825 		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
   3826 		    sc->bge_ring_rseg);
   3827 		return;
   3828 	}
    3829 	DPRINTFN(5, ("bus_dmamap_load\n"));
   3830 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
   3831 			    sizeof(struct bge_ring_data), NULL,
   3832 			    BUS_DMA_NOWAIT)) {
   3833 		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
   3834 		bus_dmamem_unmap(sc->bge_dmatag, kva,
   3835 				 sizeof(struct bge_ring_data));
   3836 		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
   3837 		    sc->bge_ring_rseg);
   3838 		return;
   3839 	}
   3840 
   3841 	DPRINTFN(5, ("bzero\n"));
   3842 	sc->bge_rdata = (struct bge_ring_data *)kva;
   3843 
   3844 	memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
   3845 
   3846 	/* Try to allocate memory for jumbo buffers. */
   3847 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
   3848 		if (bge_alloc_jumbo_mem(sc)) {
   3849 			aprint_error_dev(sc->bge_dev,
   3850 			    "jumbo buffer allocation failed\n");
   3851 		} else
   3852 			sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   3853 	}
   3854 
   3855 	/* Set default tuneable values. */
   3856 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
   3857 	sc->bge_rx_coal_ticks = 150;
   3858 	sc->bge_rx_max_coal_bds = 64;
   3859 	sc->bge_tx_coal_ticks = 300;
   3860 	sc->bge_tx_max_coal_bds = 400;
   3861 	if (BGE_IS_5705_PLUS(sc)) {
   3862 		sc->bge_tx_coal_ticks = (12 * 5);
   3863 		sc->bge_tx_max_coal_bds = (12 * 5);
    3864 		aprint_verbose_dev(sc->bge_dev,
    3865 		    "setting short Tx thresholds\n");
   3866 	}
   3867 
   3868 	if (BGE_IS_5717_PLUS(sc))
   3869 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
   3870 	else if (BGE_IS_5705_PLUS(sc))
   3871 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
   3872 	else
   3873 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
   3874 
   3875 	/* Set up ifnet structure */
   3876 	ifp = &sc->ethercom.ec_if;
   3877 	ifp->if_softc = sc;
   3878 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   3879 	ifp->if_ioctl = bge_ioctl;
   3880 	ifp->if_stop = bge_stop;
   3881 	ifp->if_start = bge_start;
   3882 	ifp->if_init = bge_init;
   3883 	ifp->if_watchdog = bge_watchdog;
   3884 	IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
   3885 	IFQ_SET_READY(&ifp->if_snd);
   3886 	DPRINTFN(5, ("strcpy if_xname\n"));
   3887 	strcpy(ifp->if_xname, device_xname(sc->bge_dev));
   3888 
   3889 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
   3890 		sc->ethercom.ec_if.if_capabilities |=
   3891 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
   3892 #if 1	/* XXX TCP/UDP checksum offload breaks with pf(4) */
   3893 		sc->ethercom.ec_if.if_capabilities |=
   3894 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   3895 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
   3896 #endif
   3897 	sc->ethercom.ec_capabilities |=
   3898 	    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
   3899 
   3900 	if (sc->bge_flags & BGEF_TSO)
   3901 		sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
   3902 
   3903 	/*
   3904 	 * Do MII setup.
   3905 	 */
   3906 	DPRINTFN(5, ("mii setup\n"));
   3907 	sc->bge_mii.mii_ifp = ifp;
   3908 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
   3909 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
   3910 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
   3911 
   3912 	/*
   3913 	 * Figure out what sort of media we have by checking the hardware
   3914 	 * config word.  Note: on some BCM5700 cards, this value appears to be
   3915 	 * unset. If that's the case, we have to rely on identifying the NIC
   3916 	 * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41.
   3917 	 * The SysKonnect SK-9D41 is a 1000baseSX card.
   3918 	 */
   3919 	if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 ||
   3920 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
   3921 		if (BGE_IS_5705_PLUS(sc)) {
   3922 			sc->bge_flags |= BGEF_FIBER_MII;
   3923 			sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
   3924 		} else
   3925 			sc->bge_flags |= BGEF_FIBER_TBI;
   3926 	}
   3927 
   3928 	/* Set bge_phy_flags before prop_dictionary_set_uint32() */
   3929 	if (BGE_IS_JUMBO_CAPABLE(sc))
   3930 		sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE;
   3931 
   3932 	/* set phyflags and chipid before mii_attach() */
   3933 	dict = device_properties(self);
   3934 	prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags);
   3935 	prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
   3936 
   3937 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   3938 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
   3939 		    bge_ifmedia_sts);
   3940 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL);
   3941 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX,
   3942 			    0, NULL);
   3943 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
   3944 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
   3945 		/* Pretend the user requested this setting */
   3946 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
   3947 	} else {
   3948 		/*
    3949 		 * Do transceiver setup and tell the firmware the
    3950 		 * driver is down so we can try to access the PHY during
    3951 		 * the probe if ASF is running.  Retry a couple of times
    3952 		 * if we get a conflict with the ASF firmware accessing
    3953 		 * the PHY.
   3954 		 */
   3955 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   3956 		bge_asf_driver_up(sc);
   3957 
   3958 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
   3959 			     bge_ifmedia_sts);
   3960 		mii_flags = MIIF_DOPAUSE;
   3961 		if (sc->bge_flags & BGEF_FIBER_MII)
   3962 			mii_flags |= MIIF_HAVEFIBER;
   3963 		mii_attach(sc->bge_dev, &sc->bge_mii, capmask, sc->bge_phy_addr,
   3964 		    MII_OFFSET_ANY, mii_flags);
   3965 
   3966 		if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
   3967 			aprint_error_dev(sc->bge_dev, "no PHY found!\n");
   3968 			ifmedia_add(&sc->bge_mii.mii_media,
   3969 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
   3970 			ifmedia_set(&sc->bge_mii.mii_media,
   3971 				    IFM_ETHER|IFM_MANUAL);
   3972 		} else
   3973 			ifmedia_set(&sc->bge_mii.mii_media,
   3974 				    IFM_ETHER|IFM_AUTO);
   3975 
   3976 		/*
   3977 		 * Now tell the firmware we are going up after probing the PHY
   3978 		 */
   3979 		if (sc->bge_asf_mode & ASF_STACKUP)
   3980 			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   3981 	}
   3982 
   3983 	/*
   3984 	 * Call MI attach routine.
   3985 	 */
   3986 	DPRINTFN(5, ("if_attach\n"));
   3987 	if_attach(ifp);
   3988 	DPRINTFN(5, ("ether_ifattach\n"));
   3989 	ether_ifattach(ifp, eaddr);
   3990 	ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
   3991 	rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
   3992 		RND_TYPE_NET, 0);
   3993 #ifdef BGE_EVENT_COUNTERS
   3994 	/*
   3995 	 * Attach event counters.
   3996 	 */
   3997 	evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
   3998 	    NULL, device_xname(sc->bge_dev), "intr");
   3999 	evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
   4000 	    NULL, device_xname(sc->bge_dev), "tx_xoff");
   4001 	evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
   4002 	    NULL, device_xname(sc->bge_dev), "tx_xon");
   4003 	evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
   4004 	    NULL, device_xname(sc->bge_dev), "rx_xoff");
   4005 	evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
   4006 	    NULL, device_xname(sc->bge_dev), "rx_xon");
   4007 	evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
   4008 	    NULL, device_xname(sc->bge_dev), "rx_macctl");
   4009 	evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
   4010 	    NULL, device_xname(sc->bge_dev), "xoffentered");
   4011 #endif /* BGE_EVENT_COUNTERS */
   4012 	DPRINTFN(5, ("callout_init\n"));
   4013 	callout_init(&sc->bge_timeout, 0);
   4014 
   4015 	if (pmf_device_register(self, NULL, NULL))
   4016 		pmf_class_network_register(self, ifp);
   4017 	else
   4018 		aprint_error_dev(self, "couldn't establish power handler\n");
   4019 
   4020 	bge_sysctl_init(sc);
   4021 
   4022 #ifdef BGE_DEBUG
   4023 	bge_debug_info(sc);
   4024 #endif
   4025 }
   4026 
   4027 /*
   4028  * Stop all chip I/O so that the kernel's probe routines don't
   4029  * get confused by errant DMAs when rebooting.
   4030  */
   4031 static int
   4032 bge_detach(device_t self, int flags __unused)
   4033 {
   4034 	struct bge_softc *sc = device_private(self);
   4035 	struct ifnet *ifp = &sc->ethercom.ec_if;
   4036 	int s;
   4037 
   4038 	s = splnet();
   4039 	/* Stop the interface. Callouts are stopped in it. */
   4040 	bge_stop(ifp, 1);
   4041 	splx(s);
   4042 
   4043 	mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   4044 
   4045 	/* Delete all remaining media. */
   4046 	ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY);
   4047 
   4048 	ether_ifdetach(ifp);
   4049 	if_detach(ifp);
   4050 
   4051 	bge_release_resources(sc);
   4052 
   4053 	return 0;
   4054 }
   4055 
   4056 static void
   4057 bge_release_resources(struct bge_softc *sc)
   4058 {
   4059 
   4060 	/* Disestablish the interrupt handler */
   4061 	if (sc->bge_intrhand != NULL) {
   4062 		pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand);
   4063 		sc->bge_intrhand = NULL;
   4064 	}
   4065 
   4066 	if (sc->bge_dmatag != NULL) {
   4067 		bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
   4068 		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
   4069 		bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata,
   4070 		    sizeof(struct bge_ring_data));
   4071 		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_rseg);
   4072 	}
   4073 
   4074 	/* Unmap the device registers */
   4075 	if (sc->bge_bsize != 0) {
   4076 		bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
   4077 		sc->bge_bsize = 0;
   4078 	}
   4079 
   4080 	/* Unmap the APE registers */
   4081 	if (sc->bge_apesize != 0) {
   4082 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
   4083 		    sc->bge_apesize);
   4084 		sc->bge_apesize = 0;
   4085 	}
   4086 }
   4087 
   4088 static int
   4089 bge_reset(struct bge_softc *sc)
   4090 {
   4091 	uint32_t cachesize, command;
   4092 	uint32_t reset, mac_mode, mac_mode_mask;
   4093 	pcireg_t devctl, reg;
   4094 	int i, val;
   4095 	void (*write_op)(struct bge_softc *, int, int);
   4096 
   4097 	/* Make mask for BGE_MAC_MODE register. */
   4098 	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
   4099 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
   4100 		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
   4101 	/* Keep mac_mode_mask's bits of BGE_MAC_MODE register into mac_mode */
   4102 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
   4103 
   4104 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
   4105 	    (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
   4106 	    	if (sc->bge_flags & BGEF_PCIE)
   4107 			write_op = bge_writemem_direct;
   4108 		else
   4109 			write_op = bge_writemem_ind;
   4110 	} else
   4111 		write_op = bge_writereg_ind;
   4112 
   4113 	/* 57XX step 4 */
   4114 	/* Acquire the NVM lock */
   4115 	if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 &&
   4116 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
   4117 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) {
   4118 		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
   4119 		for (i = 0; i < 8000; i++) {
   4120 			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
   4121 			    BGE_NVRAMSWARB_GNT1)
   4122 				break;
   4123 			DELAY(20);
   4124 		}
   4125 		if (i == 8000) {
    4126 			printf("%s: NVRAM lock timed out!\n",
   4127 			    device_xname(sc->bge_dev));
   4128 		}
   4129 	}
   4130 
   4131 	/* Take APE lock when performing reset. */
   4132 	bge_ape_lock(sc, BGE_APE_LOCK_GRC);
   4133 
   4134 	/* 57XX step 3 */
   4135 	/* Save some important PCI state. */
   4136 	cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
   4137 	/* 5718 reset step 3 */
   4138 	command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
   4139 
   4140 	/* 5718 reset step 5, 57XX step 5b-5d */
   4141 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
   4142 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
   4143 	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
   4144 
   4145 	/* XXX ???: Disable fastboot on controllers that support it. */
   4146 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
   4147 	    BGE_IS_5755_PLUS(sc))
   4148 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
   4149 
   4150 	/* 5718 reset step 2, 57XX step 6 */
   4151 	/*
   4152 	 * Write the magic number to SRAM at offset 0xB50.
   4153 	 * When firmware finishes its initialization it will
   4154 	 * write ~BGE_MAGIC_NUMBER to the same location.
   4155 	 */
   4156 	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
   4157 
   4158 	/* 5718 reset step 6, 57XX step 7 */
   4159 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
   4160 	/*
   4161 	 * XXX: from FreeBSD/Linux; no documentation
   4162 	 */
   4163 	if (sc->bge_flags & BGEF_PCIE) {
    4164 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
   4165 		    !BGE_IS_57765_PLUS(sc) &&
   4166 		    (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) ==
   4167 			(BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) {
   4168 			/* PCI Express 1.0 system */
   4169 			CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG,
   4170 			    BGE_PHY_PCIE_SCRAM_MODE);
   4171 		}
   4172 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
   4173 			/*
   4174 			 * Prevent PCI Express link training
   4175 			 * during global reset.
   4176 			 */
   4177 			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
   4178 			reset |= (1 << 29);
   4179 		}
   4180 	}
   4181 
   4182 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   4183 		i = CSR_READ_4(sc, BGE_VCPU_STATUS);
   4184 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
   4185 		    i | BGE_VCPU_STATUS_DRV_RESET);
   4186 		i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
   4187 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
   4188 		    i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
   4189 	}
   4190 
   4191 	/*
   4192 	 * Set GPHY Power Down Override to leave GPHY
   4193 	 * powered up in D0 uninitialized.
   4194 	 */
   4195 	if (BGE_IS_5705_PLUS(sc) &&
   4196 	    (sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
   4197 		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
   4198 
   4199 	/* Issue global reset */
   4200 	write_op(sc, BGE_MISC_CFG, reset);
   4201 
   4202 	/* 5718 reset step 7, 57XX step 8 */
   4203 	if (sc->bge_flags & BGEF_PCIE)
   4204 		delay(100*1000); /* too big */
   4205 	else
   4206 		delay(1000);
   4207 
   4208 	if (sc->bge_flags & BGEF_PCIE) {
   4209 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
   4210 			DELAY(500000);
   4211 			/* XXX: Magic Numbers */
   4212 			reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4213 			    BGE_PCI_UNKNOWN0);
   4214 			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4215 			    BGE_PCI_UNKNOWN0,
   4216 			    reg | (1 << 15));
   4217 		}
   4218 		devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4219 		    sc->bge_pciecap + PCIE_DCSR);
   4220 		/* Clear enable no snoop and disable relaxed ordering. */
   4221 		devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD |
   4222 		    PCIE_DCSR_ENA_NO_SNOOP);
   4223 
   4224 		/* Set PCIE max payload size to 128 for older PCIe devices */
   4225 		if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
   4226 			devctl &= ~(0x00e0);
   4227 		/* Clear device status register. Write 1b to clear */
   4228 		devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED
   4229 		    | PCIE_DCSR_NFED | PCIE_DCSR_CED;
   4230 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4231 		    sc->bge_pciecap + PCIE_DCSR, devctl);
   4232 		bge_set_max_readrq(sc);
   4233 	}
   4234 
   4235 	/* From Linux: dummy read to flush PCI posted writes */
   4236 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
   4237 
   4238 	/*
    4239 	 * Reset some of the PCI state that got zapped by reset.
   4240 	 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be
   4241 	 * set, too.
   4242 	 */
   4243 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
   4244 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
   4245 	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
   4246 	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
   4247 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
   4248 	    (sc->bge_flags & BGEF_PCIX) != 0)
   4249 		val |= BGE_PCISTATE_RETRY_SAME_DMA;
   4250 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
   4251 		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
   4252 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
   4253 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
   4254 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val);
   4255 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
   4256 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);
   4257 
   4258 	/* 57xx step 11: disable PCI-X Relaxed Ordering. */
   4259 	if (sc->bge_flags & BGEF_PCIX) {
   4260 		reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
   4261 		    + PCIX_CMD);
   4262 		/* Set max memory read byte count to 2K */
   4263 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
   4264 			reg &= ~PCIX_CMD_BYTECNT_MASK;
   4265 			reg |= PCIX_CMD_BCNT_2048;
   4266 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){
   4267 			/*
   4268 			 * For 5704, set max outstanding split transaction
   4269 			 * field to 0 (0 means it supports 1 request)
   4270 			 */
   4271 			reg &= ~(PCIX_CMD_SPLTRANS_MASK
   4272 			    | PCIX_CMD_BYTECNT_MASK);
   4273 			reg |= PCIX_CMD_BCNT_2048;
   4274 		}
   4275 		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
   4276 		    + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER);
   4277 	}
   4278 
   4279 	/* 5718 reset step 10, 57XX step 12 */
   4280 	/* Enable memory arbiter. */
   4281 	if (BGE_IS_5714_FAMILY(sc)) {
   4282 		val = CSR_READ_4(sc, BGE_MARB_MODE);
   4283 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
   4284 	} else
   4285 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
   4286 
   4287 	/* XXX 5721, 5751 and 5752 */
   4288 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
   4289 		/* Step 19: */
   4290 		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
   4291 		/* Step 20: */
   4292 		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
   4293 	}
   4294 
   4295 	/* 5718 reset step 12, 57XX step 15 and 16 */
   4296 	/* Fix up byte swapping */
   4297 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
   4298 
   4299 	/* 5718 reset step 13, 57XX step 17 */
   4300 	/* Poll until the firmware initialization is complete */
   4301 	bge_poll_fw(sc);
   4302 
   4303 	/* 57XX step 21 */
   4304 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
   4305 		pcireg_t msidata;
   4306 
   4307 		msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4308 		    BGE_PCI_MSI_DATA);
   4309 		msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
   4310 		pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
   4311 		    msidata);
   4312 	}
   4313 
   4314 	/* 57XX step 18 */
   4315 	/* Write mac mode. */
   4316 	val = CSR_READ_4(sc, BGE_MAC_MODE);
   4317 	/* Restore mac_mode_mask's bits using mac_mode */
   4318 	val = (val & ~mac_mode_mask) | mac_mode;
   4319 	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
   4320 	DELAY(40);
   4321 
   4322 	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
   4323 
   4324 	/*
   4325 	 * The 5704 in TBI mode apparently needs some special
    4326 	 * adjustment to ensure the SERDES drive level is set
   4327 	 * to 1.2V.
   4328 	 */
   4329 	if (sc->bge_flags & BGEF_FIBER_TBI &&
   4330 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
   4331 		uint32_t serdescfg;
   4332 
   4333 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
   4334 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
   4335 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
   4336 	}
   4337 
   4338 	if (sc->bge_flags & BGEF_PCIE &&
   4339 	    !BGE_IS_57765_PLUS(sc) &&
   4340 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
   4341 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
   4342 		uint32_t v;
   4343 
   4344 		/* Enable PCI Express bug fix */
   4345 		v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG);
   4346 		CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG,
   4347 		    v | BGE_TLP_DATA_FIFO_PROTECT);
   4348 	}
   4349 
   4350 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   4351 		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
   4352 		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
   4353 
   4354 	return 0;
   4355 }
   4356 
   4357 /*
   4358  * Frame reception handling. This is called if there's a frame
   4359  * on the receive return list.
   4360  *
   4361  * Note: we have to be able to handle two possibilities here:
   4362  * 1) the frame is from the jumbo receive ring
   4363  * 2) the frame is from the standard receive ring
   4364  */
   4365 
   4366 static void
   4367 bge_rxeof(struct bge_softc *sc)
   4368 {
   4369 	struct ifnet *ifp;
   4370 	uint16_t rx_prod, rx_cons;
   4371 	int stdcnt = 0, jumbocnt = 0;
   4372 	bus_dmamap_t dmamap;
   4373 	bus_addr_t offset, toff;
   4374 	bus_size_t tlen;
   4375 	int tosync;
   4376 
   4377 	rx_cons = sc->bge_rx_saved_considx;
   4378 	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
   4379 
   4380 	/* Nothing to do */
   4381 	if (rx_cons == rx_prod)
   4382 		return;
   4383 
   4384 	ifp = &sc->ethercom.ec_if;
   4385 
   4386 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4387 	    offsetof(struct bge_ring_data, bge_status_block),
   4388 	    sizeof (struct bge_status_block),
   4389 	    BUS_DMASYNC_POSTREAD);
   4390 
   4391 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
   4392 	tosync = rx_prod - rx_cons;
   4393 
   4394 	if (tosync != 0)
   4395 		rnd_add_uint32(&sc->rnd_source, tosync);
   4396 
   4397 	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
   4398 
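         	/*
         	 * A negative tosync means the producer index has wrapped past
         	 * the end of the return ring; sync the tail of the ring first.
         	 */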
   4399 	if (tosync < 0) {
   4400 		tlen = (sc->bge_return_ring_cnt - rx_cons) *
   4401 		    sizeof (struct bge_rx_bd);
   4402 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4403 		    toff, tlen, BUS_DMASYNC_POSTREAD);
   4404 		tosync = -tosync;
   4405 	}
   4406 
   4407 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4408 	    offset, tosync * sizeof (struct bge_rx_bd),
   4409 	    BUS_DMASYNC_POSTREAD);
   4410 
   4411 	while (rx_cons != rx_prod) {
   4412 		struct bge_rx_bd	*cur_rx;
   4413 		uint32_t		rxidx;
   4414 		struct mbuf		*m = NULL;
   4415 
   4416 		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
   4417 
   4418 		rxidx = cur_rx->bge_idx;
   4419 		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
   4420 
   4421 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
   4422 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
   4423 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
   4424 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
   4425 			jumbocnt++;
   4426 			bus_dmamap_sync(sc->bge_dmatag,
   4427 			    sc->bge_cdata.bge_rx_jumbo_map,
   4428 			    mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
   4429 			    BGE_JLEN, BUS_DMASYNC_POSTREAD);
   4430 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
   4431 				ifp->if_ierrors++;
   4432 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
   4433 				continue;
   4434 			}
   4435 			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
   4436 					     NULL)== ENOBUFS) {
   4437 				ifp->if_ierrors++;
   4438 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
   4439 				continue;
   4440 			}
   4441 		} else {
   4442 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
   4443 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
   4444 
   4445 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
   4446 			stdcnt++;
   4447 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
   4448 			sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
   4449 			if (dmamap == NULL) {
   4450 				ifp->if_ierrors++;
   4451 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
   4452 				continue;
   4453 			}
   4454 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
   4455 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   4456 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
   4457 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
   4458 				ifp->if_ierrors++;
   4459 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
   4460 				continue;
   4461 			}
   4462 			if (bge_newbuf_std(sc, sc->bge_std,
   4463 			    NULL, dmamap) == ENOBUFS) {
   4464 				ifp->if_ierrors++;
   4465 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
   4466 				continue;
   4467 			}
   4468 		}
   4469 
   4470 		ifp->if_ipackets++;
   4471 #ifndef __NO_STRICT_ALIGNMENT
   4472 		/*
   4473 		 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
   4474 		 * the Rx buffer has the layer-2 header unaligned.
   4475 		 * If our CPU requires alignment, re-align by copying.
   4476 		 */
   4477 		if (sc->bge_flags & BGEF_RX_ALIGNBUG) {
   4478 			memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
   4479 				cur_rx->bge_len);
   4480 			m->m_data += ETHER_ALIGN;
   4481 		}
   4482 #endif
   4483 
   4484 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
   4485 		m->m_pkthdr.rcvif = ifp;
   4486 
   4487 		/*
   4488 		 * Handle BPF listeners. Let the BPF user see the packet.
   4489 		 */
   4490 		bpf_mtap(ifp, m);
   4491 
   4492 		bge_rxcsum(sc, cur_rx, m);
   4493 
   4494 		/*
   4495 		 * If we received a packet with a vlan tag, pass it
   4496 		 * to vlan_input() instead of ether_input().
   4497 		 */
   4498 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
   4499 			VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue);
   4500 		}
   4501 
   4502 		(*ifp->if_input)(ifp, m);
   4503 	}
   4504 
   4505 	sc->bge_rx_saved_considx = rx_cons;
   4506 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
   4507 	if (stdcnt)
   4508 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
   4509 	if (jumbocnt)
   4510 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
   4511 }
   4512 
   4513 static void
   4514 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
   4515 {
   4516 
   4517 	if (BGE_IS_57765_PLUS(sc)) {
   4518 		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
   4519 			if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
   4520 				m->m_pkthdr.csum_flags = M_CSUM_IPv4;
   4521 			if ((cur_rx->bge_error_flag &
   4522 				BGE_RXERRFLAG_IP_CSUM_NOK) != 0)
   4523 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   4524 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
   4525 				m->m_pkthdr.csum_data =
   4526 				    cur_rx->bge_tcp_udp_csum;
   4527 				m->m_pkthdr.csum_flags |=
   4528 				    (M_CSUM_TCPv4|M_CSUM_UDPv4|
   4529 					M_CSUM_DATA);
   4530 			}
   4531 		}
   4532 	} else {
   4533 		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
   4534 			m->m_pkthdr.csum_flags = M_CSUM_IPv4;
   4535 		if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
   4536 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   4537 		/*
   4538 		 * Rx transport checksum-offload may also
   4539 		 * have bugs with packets which, when transmitted,
   4540 		 * were `runts' requiring padding.
   4541 		 */
   4542 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
   4543 		    (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
   4544 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
   4545 			m->m_pkthdr.csum_data =
   4546 			    cur_rx->bge_tcp_udp_csum;
   4547 			m->m_pkthdr.csum_flags |=
   4548 			    (M_CSUM_TCPv4|M_CSUM_UDPv4|
   4549 				M_CSUM_DATA);
   4550 		}
   4551 	}
   4552 }
   4553 
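        /*
         * Translate the checksum flags in a receive BD into the mbuf
         * M_CSUM_* flags expected by the network stack.
         */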
   4554 static void
   4555 bge_txeof(struct bge_softc *sc)
   4556 {
   4557 	struct bge_tx_bd *cur_tx = NULL;
   4558 	struct ifnet *ifp;
   4559 	struct txdmamap_pool_entry *dma;
   4560 	bus_addr_t offset, toff;
   4561 	bus_size_t tlen;
   4562 	int tosync;
   4563 	struct mbuf *m;
   4564 
   4565 	ifp = &sc->ethercom.ec_if;
   4566 
   4567 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4568 	    offsetof(struct bge_ring_data, bge_status_block),
   4569 	    sizeof (struct bge_status_block),
   4570 	    BUS_DMASYNC_POSTREAD);
   4571 
   4572 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
   4573 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
   4574 	    sc->bge_tx_saved_considx;
   4575 
   4576 	if (tosync != 0)
   4577 		rnd_add_uint32(&sc->rnd_source, tosync);
   4578 
   4579 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
   4580 
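        	/*
        	 * As in bge_rxeof(), a negative count means the hardware
        	 * consumer index has wrapped; sync the tail of the TX ring first.
        	 */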
   4581 	if (tosync < 0) {
   4582 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
   4583 		    sizeof (struct bge_tx_bd);
   4584 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4585 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   4586 		tosync = -tosync;
   4587 	}
   4588 
   4589 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4590 	    offset, tosync * sizeof (struct bge_tx_bd),
   4591 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   4592 
   4593 	/*
   4594 	 * Go through our tx ring and free mbufs for those
   4595 	 * frames that have been sent.
   4596 	 */
   4597 	while (sc->bge_tx_saved_considx !=
   4598 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
   4599 		uint32_t		idx = 0;
   4600 
   4601 		idx = sc->bge_tx_saved_considx;
   4602 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
   4603 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
   4604 			ifp->if_opackets++;
   4605 		m = sc->bge_cdata.bge_tx_chain[idx];
   4606 		if (m != NULL) {
   4607 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
   4608 			dma = sc->txdma[idx];
   4609 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
   4610 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   4611 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
   4612 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
   4613 			sc->txdma[idx] = NULL;
   4614 
   4615 			m_freem(m);
   4616 		}
   4617 		sc->bge_txcnt--;
   4618 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
   4619 		ifp->if_timer = 0;
   4620 	}
   4621 
   4622 	if (cur_tx != NULL)
   4623 		ifp->if_flags &= ~IFF_OACTIVE;
   4624 }
   4625 
   4626 static int
   4627 bge_intr(void *xsc)
   4628 {
   4629 	struct bge_softc *sc;
   4630 	struct ifnet *ifp;
   4631 	uint32_t statusword;
   4632 	uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE;
   4633 
   4634 	sc = xsc;
   4635 	ifp = &sc->ethercom.ec_if;
   4636 
   4637 	/* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */
   4638 	if (BGE_IS_5717_PLUS(sc))
   4639 		intrmask = 0;
   4640 
   4641 	/* It is possible for the interrupt to arrive before
   4642 	 * the status block has been updated.
   4643 	 * Reading the PCI State register will confirm whether the
   4644 	 * interrupt is ours and will flush the status block.
   4645 	 */
   4646 
   4647 	/* read status word from status block */
   4648 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4649 	    offsetof(struct bge_ring_data, bge_status_block),
   4650 	    sizeof (struct bge_status_block),
   4651 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   4652 	statusword = sc->bge_rdata->bge_status_block.bge_status;
   4653 
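        	/*
        	 * The interrupt is ours if the status block was updated, or (on
        	 * pre-5717 chips) if the PCI state register shows the interrupt
        	 * line asserted, i.e. BGE_PCISTATE_INTR_NOT_ACTIVE is clear.
        	 */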
   4654 	if ((statusword & BGE_STATFLAG_UPDATED) ||
   4655 	    (~CSR_READ_4(sc, BGE_PCI_PCISTATE) & intrmask)) {
   4656 		/* Ack interrupt and stop others from occurring. */
   4657 		bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
   4658 
   4659 		BGE_EVCNT_INCR(sc->bge_ev_intr);
   4660 
   4661 		/* clear status word */
   4662 		sc->bge_rdata->bge_status_block.bge_status = 0;
   4663 
   4664 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   4665 		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
   4666 		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
   4667 			bge_link_upd(sc);
   4668 
   4669 		if (ifp->if_flags & IFF_RUNNING) {
   4670 			/* Check RX return ring producer/consumer */
   4671 			bge_rxeof(sc);
   4672 
   4673 			/* Check TX ring producer/consumer */
   4674 			bge_txeof(sc);
   4675 		}
   4676 
   4677 		if (sc->bge_pending_rxintr_change) {
   4678 			uint32_t rx_ticks = sc->bge_rx_coal_ticks;
   4679 			uint32_t rx_bds = sc->bge_rx_max_coal_bds;
   4680 
   4681 			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
   4682 			DELAY(10);
   4683 			(void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
   4684 
   4685 			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
   4686 			DELAY(10);
   4687 			(void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
   4688 
   4689 			sc->bge_pending_rxintr_change = 0;
   4690 		}
   4691 		bge_handle_events(sc);
   4692 
   4693 		/* Re-enable interrupts. */
   4694 		bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);
   4695 
   4696 		if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
   4697 			bge_start(ifp);
   4698 
   4699 		return 1;
   4700 	} else
   4701 		return 0;
   4702 }
   4703 
   4704 static void
   4705 bge_asf_driver_up(struct bge_softc *sc)
   4706 {
   4707 	if (sc->bge_asf_mode & ASF_STACKUP) {
   4708 		/* Send ASF heartbeat approx. every 2s */
   4709 		if (sc->bge_asf_count)
   4710 			sc->bge_asf_count--;
   4711 		else {
   4712 			sc->bge_asf_count = 2;
   4713 
   4714 			bge_wait_for_event_ack(sc);
   4715 
   4716 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
   4717 			    BGE_FW_CMD_DRV_ALIVE);
   4718 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
   4719 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
   4720 			    BGE_FW_HB_TIMEOUT_SEC);
   4721 			CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
   4722 			    CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
   4723 			    BGE_RX_CPU_DRV_EVENT);
   4724 		}
   4725 	}
   4726 }
   4727 
   4728 static void
   4729 bge_tick(void *xsc)
   4730 {
   4731 	struct bge_softc *sc = xsc;
   4732 	struct mii_data *mii = &sc->bge_mii;
   4733 	int s;
   4734 
   4735 	s = splnet();
   4736 
   4737 	if (BGE_IS_5705_PLUS(sc))
   4738 		bge_stats_update_regs(sc);
   4739 	else
   4740 		bge_stats_update(sc);
   4741 
   4742 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   4743 		/*
   4744 		 * Since auto-polling can't be used in TBI mode, we have to
   4745 		 * poll link status manually. Here we register a pending link
   4746 		 * event and trigger an interrupt.
   4747 		 */
   4748 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
   4749 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
   4750 	} else {
   4751 		/*
   4752 		 * Do not touch PHY if we have link up. This could break
   4753 		 * IPMI/ASF mode or produce extra input errors.
   4754 		 * (extra input errors were reported for bcm5701 & bcm5704).
   4755 		 */
   4756 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
   4757 			mii_tick(mii);
   4758 	}
   4759 
   4760 	bge_asf_driver_up(sc);
   4761 
   4762 	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
   4763 
   4764 	splx(s);
   4765 }
   4766 
   4767 static void
   4768 bge_stats_update_regs(struct bge_softc *sc)
   4769 {
   4770 	struct ifnet *ifp = &sc->ethercom.ec_if;
   4771 
   4772 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
   4773 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
   4774 
   4775 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
   4776 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
   4777 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
   4778 }
   4779 
   4780 static void
   4781 bge_stats_update(struct bge_softc *sc)
   4782 {
   4783 	struct ifnet *ifp = &sc->ethercom.ec_if;
   4784 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
   4785 
   4786 #define READ_STAT(sc, stats, stat) \
   4787 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
   4788 
   4789 	ifp->if_collisions +=
   4790 	  (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
   4791 	   READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
   4792 	   READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
   4793 	   READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
   4794 	  ifp->if_collisions;
   4795 
   4796 	BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
   4797 		      READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
   4798 	BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
   4799 		      READ_STAT(sc, stats, outXonSent.bge_addr_lo));
   4800 	BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
   4801 		      READ_STAT(sc, stats,
   4802 		      		xoffPauseFramesReceived.bge_addr_lo));
   4803 	BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
   4804 		      READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
   4805 	BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
   4806 		      READ_STAT(sc, stats,
   4807 		      		macControlFramesReceived.bge_addr_lo));
   4808 	BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
   4809 		      READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
   4810 
   4811 #undef READ_STAT
   4812 
   4813 #ifdef notdef
   4814 	ifp->if_collisions +=
   4815 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
   4816 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
   4817 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
   4818 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
   4819 	   ifp->if_collisions;
   4820 #endif
   4821 }
   4822 
   4823 /*
   4824  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
   4825  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
   4826  * but when such padded frames employ the bge IP/TCP checksum offload,
   4827  * the hardware checksum assist gives incorrect results (possibly
   4828  * from incorporating its own padding into the UDP/TCP checksum; who knows).
   4829  * If we pad such runts with zeros, the onboard checksum comes out correct.
   4830  */
   4831 static inline int
   4832 bge_cksum_pad(struct mbuf *pkt)
   4833 {
   4834 	struct mbuf *last = NULL;
   4835 	int padlen;
   4836 
   4837 	padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
   4838 
   4839 	/* if there's only the packet-header and we can pad there, use it. */
   4840 	if (pkt->m_pkthdr.len == pkt->m_len &&
   4841 	    M_TRAILINGSPACE(pkt) >= padlen) {
   4842 		last = pkt;
   4843 	} else {
   4844 		/*
   4845 		 * Walk packet chain to find last mbuf. We will either
   4846 		 * pad there, or append a new mbuf and pad it
   4847 		 * (thus perhaps avoiding the bcm5700 dma-min bug).
   4848 		 */
   4849 		for (last = pkt; last->m_next != NULL; last = last->m_next) {
   4850 			continue; /* do nothing */
   4851 		}
   4852 
   4853 		/* `last' now points to last in chain. */
   4854 		if (M_TRAILINGSPACE(last) < padlen) {
   4855 			/* Allocate new empty mbuf, pad it. Compact later. */
   4856 			struct mbuf *n;
   4857 			MGET(n, M_DONTWAIT, MT_DATA);
   4858 			if (n == NULL)
   4859 				return ENOBUFS;
   4860 			n->m_len = 0;
   4861 			last->m_next = n;
   4862 			last = n;
   4863 		}
   4864 	}
   4865 
   4866 	KDASSERT(!M_READONLY(last));
   4867 	KDASSERT(M_TRAILINGSPACE(last) >= padlen);
   4868 
   4869 	/* Now zero the pad area, to avoid the bge cksum-assist bug */
   4870 	memset(mtod(last, char *) + last->m_len, 0, padlen);
   4871 	last->m_len += padlen;
   4872 	pkt->m_pkthdr.len += padlen;
   4873 	return 0;
   4874 }
   4875 
   4876 /*
   4877  * Compact outbound packets to avoid a bug with DMA segments of less than 8 bytes.
   4878  */
   4879 static inline int
   4880 bge_compact_dma_runt(struct mbuf *pkt)
   4881 {
   4882 	struct mbuf	*m, *prev;
   4883 	int 		totlen;
   4884 
   4885 	prev = NULL;
   4886 	totlen = 0;
   4887 
   4888 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
   4889 		int mlen = m->m_len;
   4890 		int shortfall = 8 - mlen;
   4891 
   4892 		totlen += mlen;
   4893 		if (mlen == 0)
   4894 			continue;
   4895 		if (mlen >= 8)
   4896 			continue;
   4897 
   4898 		/* If we get here, mbuf data is too small for the DMA engine.
   4899 		 * Try to fix by shuffling data to prev or next in chain.
   4900 		 * If that fails, do a compacting deep-copy of the whole chain.
   4901 		 */
   4902 
   4903 		/* Internal frag. If it fits in prev, copy it there. */
   4904 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
   4905 		  	memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
   4906 			prev->m_len += mlen;
   4907 			m->m_len = 0;
   4908 			/* XXX stitch chain */
   4909 			prev->m_next = m_free(m);
   4910 			m = prev;
   4911 			continue;
   4912 		}
   4913 		else if (m->m_next != NULL &&
   4914 			     M_TRAILINGSPACE(m) >= shortfall &&
   4915 			     m->m_next->m_len >= (8 + shortfall)) {
   4916 		    /* m has room and the next mbuf has enough data; pull up. */
   4917 
   4918 		  	memcpy(m->m_data + m->m_len, m->m_next->m_data,
   4919 			    shortfall);
   4920 			m->m_len += shortfall;
   4921 			m->m_next->m_len -= shortfall;
   4922 			m->m_next->m_data += shortfall;
   4923 		}
   4924 		else if (m->m_next == NULL || 1) {
   4925 			/* Got a runt at the very end of the packet.
   4926 			 * Borrow data from the tail of the preceding mbuf and
   4927 			 * update its length in-place. (The original data is still
   4928 			 * valid, so we can do this even if prev is not writable.)
   4929 			 */
   4930 
   4931 			/* if we'd make prev a runt, just move all of its data. */
   4932 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
   4933 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
   4934 
   4935 			if ((prev->m_len - shortfall) < 8)
   4936 				shortfall = prev->m_len;
   4937 
   4938 #ifdef notyet	/* just do the safe slow thing for now */
   4939 			if (!M_READONLY(m)) {
   4940 				if (M_LEADINGSPACE(m) < shortfall) {
   4941 					void *m_dat;
   4942 					m_dat = (m->m_flags & M_PKTHDR) ?
   4943 					    m->m_pktdat : m->m_dat;
   4944 					memmove(m_dat, mtod(m, void*), m->m_len);
   4945 					m->m_data = m_dat;
   4946 				}
   4947 			} else
   4948 #endif	/* just do the safe slow thing */
   4949 			{
   4950 				struct mbuf * n = NULL;
   4951 				int newprevlen = prev->m_len - shortfall;
   4952 
   4953 				MGET(n, M_NOWAIT, MT_DATA);
   4954 				if (n == NULL)
   4955 				   return ENOBUFS;
   4956 				KASSERT(m->m_len + shortfall < MLEN
   4957 					/*,
   4958 					  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
   4959 
   4960 				/* first copy the data we're stealing from prev */
   4961 				memcpy(n->m_data, prev->m_data + newprevlen,
   4962 				    shortfall);
   4963 
   4964 				/* update prev->m_len accordingly */
   4965 				prev->m_len -= shortfall;
   4966 
   4967 				/* copy data from runt m */
   4968 				memcpy(n->m_data + shortfall, m->m_data,
   4969 				    m->m_len);
   4970 
   4971 				/* n holds what we stole from prev, plus m */
   4972 				n->m_len = shortfall + m->m_len;
   4973 
   4974 				/* stitch n into chain and free m */
   4975 				n->m_next = m->m_next;
   4976 				prev->m_next = n;
   4977 				/* KASSERT(m->m_next == NULL); */
   4978 				m->m_next = NULL;
   4979 				m_free(m);
   4980 				m = n;	/* for continuing loop */
   4981 			}
   4982 		}
   4983 	}
   4984 	return 0;
   4985 }
   4986 
   4987 /*
   4988  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
   4989  * pointers to descriptors.
   4990  */
   4991 static int
   4992 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
   4993 {
   4994 	struct bge_tx_bd	*f = NULL;
   4995 	uint32_t		frag, cur;
   4996 	uint16_t		csum_flags = 0;
   4997 	uint16_t		txbd_tso_flags = 0;
   4998 	struct txdmamap_pool_entry *dma;
   4999 	bus_dmamap_t dmamap;
   5000 	int			i = 0;
   5001 	struct m_tag		*mtag;
   5002 	int			use_tso, maxsegsize, error;
   5003 
   5004 	cur = frag = *txidx;
   5005 
   5006 	if (m_head->m_pkthdr.csum_flags) {
   5007 		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
   5008 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
   5009 		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
   5010 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
   5011 	}
   5012 
   5013 	/*
   5014 	 * If we were asked to do an outboard checksum, and the NIC
   5015 	 * has the bug where it sometimes adds in the Ethernet padding,
   5016 	 * explicitly pad with zeros so the cksum will be correct either way.
   5017 	 * (For now, do this for all chip versions, until newer
   5018 	 * are confirmed to not require the workaround.)
   5019 	 */
   5020 	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
   5021 #ifdef notyet
   5022 	    (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
   5023 #endif
   5024 	    m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
   5025 		goto check_dma_bug;
   5026 
   5027 	if (bge_cksum_pad(m_head) != 0)
   5028 	    return ENOBUFS;
   5029 
   5030 check_dma_bug:
   5031 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
   5032 		goto doit;
   5033 
   5034 	/*
   5035 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
   5036 	 * less than eight bytes.  If we encounter a teeny mbuf
   5037 	 * at the end of a chain, we can pad.  Otherwise, copy.
   5038 	 */
   5039 	if (bge_compact_dma_runt(m_head) != 0)
   5040 		return ENOBUFS;
   5041 
   5042 doit:
   5043 	dma = SLIST_FIRST(&sc->txdma_list);
   5044 	if (dma == NULL)
   5045 		return ENOBUFS;
   5046 	dmamap = dma->dmamap;
   5047 
   5048 	/*
   5049 	 * Set up any necessary TSO state before we start packing...
   5050 	 */
   5051 	use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5052 	if (!use_tso) {
   5053 		maxsegsize = 0;
   5054 	} else {	/* TSO setup */
   5055 		unsigned  mss;
   5056 		struct ether_header *eh;
   5057 		unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
   5058 		struct mbuf * m0 = m_head;
   5059 		struct ip *ip;
   5060 		struct tcphdr *th;
   5061 		int iphl, hlen;
   5062 
   5063 		/*
   5064 		 * XXX It would be nice if the mbuf pkthdr had offset
   5065 		 * fields for the protocol headers.
   5066 		 */
   5067 
   5068 		eh = mtod(m0, struct ether_header *);
   5069 		switch (htons(eh->ether_type)) {
   5070 		case ETHERTYPE_IP:
   5071 			offset = ETHER_HDR_LEN;
   5072 			break;
   5073 
   5074 		case ETHERTYPE_VLAN:
   5075 			offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5076 			break;
   5077 
   5078 		default:
   5079 			/*
   5080 			 * Don't support this protocol or encapsulation.
   5081 			 */
   5082 			return ENOBUFS;
   5083 		}
   5084 
   5085 		/*
   5086 		 * TCP/IP headers are in the first mbuf; we can do
   5087 		 * this the easy way.
   5088 		 */
   5089 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5090 		hlen = iphl + offset;
   5091 		if (__predict_false(m0->m_len <
   5092 				    (hlen + sizeof(struct tcphdr)))) {
   5093 
   5094 			aprint_debug_dev(sc->bge_dev,
   5095 			    "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd, "
   5096 			    "not handled yet\n",
   5097 			    m0->m_len, hlen + sizeof(struct tcphdr));
   5098 #ifdef NOTYET
   5099 			/*
   5100 			 * XXX jonathan (at) NetBSD.org: untested.
   5101 			 * how to force  this branch to be taken?
   5102 			 */
   5103 			BGE_EVCNT_INCR(sc->bge_ev_txtsopain);
   5104 
   5105 			m_copydata(m0, offset, sizeof(ip), &ip);
   5106 			m_copydata(m0, hlen, sizeof(th), &th);
   5107 
   5108 			ip.ip_len = 0;
   5109 
   5110 			m_copyback(m0, hlen + offsetof(struct ip, ip_len),
   5111 			    sizeof(ip.ip_len), &ip.ip_len);
   5112 
   5113 			th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5114 			    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5115 
   5116 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5117 			    sizeof(th.th_sum), &th.th_sum);
   5118 
   5119 			hlen += th.th_off << 2;
   5120 			iptcp_opt_words	= hlen;
   5121 #else
   5122 			/*
   5123 			 * if_wm "hard" case not yet supported, can we not
   5124 			 * mandate it out of existence?
   5125 			 */
   5126 			(void) ip; (void)th; (void) ip_tcp_hlen;
   5127 
   5128 			return ENOBUFS;
   5129 #endif
   5130 		} else {
   5131 			ip = (struct ip *) (mtod(m0, char *) + offset);
   5132 			th = (struct tcphdr *) (mtod(m0, char *) + hlen);
   5133 			ip_tcp_hlen = iphl +  (th->th_off << 2);
   5134 
   5135 			/* Total IP/TCP options, in 32-bit words */
   5136 			iptcp_opt_words = (ip_tcp_hlen
   5137 					   - sizeof(struct tcphdr)
   5138 					   - sizeof(struct ip)) >> 2;
   5139 		}
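        		/*
        		 * For 575X-and-newer parts, TSO frames must have th_sum
        		 * zeroed and the checksum-assist flag cleared; older
        		 * parts instead need the TCP pseudo-header sum seeded by
        		 * the driver (see the firmware note below).
        		 */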
   5140 		if (BGE_IS_575X_PLUS(sc)) {
   5141 			th->th_sum = 0;
   5142 			csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM);
   5143 		} else {
   5144 			/*
   5145 			 * XXX jonathan (at) NetBSD.org: 5705 untested.
   5146 			 * Requires TSO firmware patch for 5701/5703/5704.
   5147 			 */
   5148 			th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5149 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5150 		}
   5151 
   5152 		mss = m_head->m_pkthdr.segsz;
   5153 		txbd_tso_flags |=
   5154 		    BGE_TXBDFLAG_CPU_PRE_DMA |
   5155 		    BGE_TXBDFLAG_CPU_POST_DMA;
   5156 
   5157 		/*
   5158 		 * Our NIC TSO-assist assumes TSO has standard, optionless
   5159 		 * IPv4 and TCP headers, which total 40 bytes. By default,
   5160 		 * the NIC copies 40 bytes of IP/TCP header from the
   5161 		 * supplied header into the IP/TCP header portion of
   5162 		 * each post-TSO-segment. If the supplied packet has IP or
   5163 		 * TCP options, we need to tell the NIC to copy those extra
   5164 		 * bytes into each  post-TSO header, in addition to the normal
   5165 		 * 40-byte IP/TCP header (and to leave space accordingly).
   5166 		 * Unfortunately, the driver encoding of option length
   5167 		 * varies across different ASIC families.
   5168 		 */
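        		/*
        		 * Concretely: on 5705-and-newer ASICs the option-word
        		 * count is carried in the mss/reserved field (shifted
        		 * left by 11 below); on older ASICs it goes into the
        		 * descriptor flags word (shifted left by 12).
        		 */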
   5169 		tcp_seg_flags = 0;
   5170 		if (iptcp_opt_words) {
   5171 			if (BGE_IS_5705_PLUS(sc)) {
   5172 				tcp_seg_flags =
   5173 					iptcp_opt_words << 11;
   5174 			} else {
   5175 				txbd_tso_flags |=
   5176 					iptcp_opt_words << 12;
   5177 			}
   5178 		}
   5179 		maxsegsize = mss | tcp_seg_flags;
   5180 		ip->ip_len = htons(mss + ip_tcp_hlen);
   5181 
   5182 	}	/* TSO setup */
   5183 
   5184 	/*
   5185 	 * Start packing the mbufs in this chain into
   5186 	 * the fragment pointers. Stop when we run out
   5187 	 * of fragments or hit the end of the mbuf chain.
   5188 	 */
   5189 	error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
   5190 	    BUS_DMA_NOWAIT);
   5191 	if (error)
   5192 		return ENOBUFS;
   5193 	/*
   5194 	 * Sanity check: avoid coming within 16 descriptors
   5195 	 * of the end of the ring.
   5196 	 */
   5197 	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
   5198 		BGE_TSO_PRINTF(("%s: "
   5199 		    " dmamap_load_mbuf too close to ring wrap\n",
   5200 		    device_xname(sc->bge_dev)));
   5201 		goto fail_unload;
   5202 	}
   5203 
   5204 	mtag = sc->ethercom.ec_nvlans ?
   5205 	    m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
   5206 
   5207 
   5208 	/* Iterate over DMA-map fragments. */
   5209 	for (i = 0; i < dmamap->dm_nsegs; i++) {
   5210 		f = &sc->bge_rdata->bge_tx_ring[frag];
   5211 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
   5212 			break;
   5213 
   5214 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
   5215 		f->bge_len = dmamap->dm_segs[i].ds_len;
   5216 
   5217 		/*
   5218 		 * For 5751 and follow-ons, for TSO we must turn
   5219 		 * off the checksum-assist flag in the tx descriptor, and
   5220 		 * supply the ASIC-revision-specific encoding
   5221 		 * of TSO flags and segsize.
   5222 		 */
   5223 		if (use_tso) {
   5224 			if (BGE_IS_575X_PLUS(sc) || i == 0) {
   5225 				f->bge_rsvd = maxsegsize;
   5226 				f->bge_flags = csum_flags | txbd_tso_flags;
   5227 			} else {
   5228 				f->bge_rsvd = 0;
   5229 				f->bge_flags =
   5230 				  (csum_flags | txbd_tso_flags) & 0x0fff;
   5231 			}
   5232 		} else {
   5233 			f->bge_rsvd = 0;
   5234 			f->bge_flags = csum_flags;
   5235 		}
   5236 
   5237 		if (mtag != NULL) {
   5238 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
   5239 			f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
   5240 		} else {
   5241 			f->bge_vlan_tag = 0;
   5242 		}
   5243 		cur = frag;
   5244 		BGE_INC(frag, BGE_TX_RING_CNT);
   5245 	}
   5246 
   5247 	if (i < dmamap->dm_nsegs) {
   5248 		BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
   5249 		    device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
   5250 		goto fail_unload;
   5251 	}
   5252 
   5253 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
   5254 	    BUS_DMASYNC_PREWRITE);
   5255 
   5256 	if (frag == sc->bge_tx_saved_considx) {
   5257 		BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
   5258 		    device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
   5259 
   5260 		goto fail_unload;
   5261 	}
   5262 
   5263 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
   5264 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
   5265 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
   5266 	sc->txdma[cur] = dma;
   5267 	sc->bge_txcnt += dmamap->dm_nsegs;
   5268 
   5269 	*txidx = frag;
   5270 
   5271 	return 0;
   5272 
   5273 fail_unload:
   5274 	bus_dmamap_unload(sc->bge_dmatag, dmamap);
   5275 
   5276 	return ENOBUFS;
   5277 }
   5278 
   5279 /*
   5280  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
   5281  * to the mbuf data regions directly in the transmit descriptors.
   5282  */
   5283 static void
   5284 bge_start(struct ifnet *ifp)
   5285 {
   5286 	struct bge_softc *sc;
   5287 	struct mbuf *m_head = NULL;
   5288 	uint32_t prodidx;
   5289 	int pkts = 0;
   5290 
   5291 	sc = ifp->if_softc;
   5292 
   5293 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   5294 		return;
   5295 
   5296 	prodidx = sc->bge_tx_prodidx;
   5297 
   5298 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
   5299 		IFQ_POLL(&ifp->if_snd, m_head);
   5300 		if (m_head == NULL)
   5301 			break;
   5302 
   5303 #if 0
   5304 		/*
   5305 		 * XXX
   5306 		 * safety overkill.  If this is a fragmented packet chain
   5307 		 * with delayed TCP/UDP checksums, then only encapsulate
   5308 		 * it if we have enough descriptors to handle the entire
   5309 		 * chain at once.
   5310 		 * (paranoia -- may not actually be needed)
   5311 		 */
   5312 		if (m_head->m_flags & M_FIRSTFRAG &&
   5313 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
   5314 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
   5315 			    M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
   5316 				ifp->if_flags |= IFF_OACTIVE;
   5317 				break;
   5318 			}
   5319 		}
   5320 #endif
   5321 
   5322 		/*
   5323 		 * Pack the data into the transmit ring. If we
   5324 		 * don't have room, set the OACTIVE flag and wait
   5325 		 * for the NIC to drain the ring.
   5326 		 */
   5327 		if (bge_encap(sc, m_head, &prodidx)) {
   5328 			ifp->if_flags |= IFF_OACTIVE;
   5329 			break;
   5330 		}
   5331 
   5332 		/* now we are committed to transmit the packet */
   5333 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
   5334 		pkts++;
   5335 
   5336 		/*
   5337 		 * If there's a BPF listener, bounce a copy of this frame
   5338 		 * to him.
   5339 		 */
   5340 		bpf_mtap(ifp, m_head);
   5341 	}
   5342 	if (pkts == 0)
   5343 		return;
   5344 
   5345 	/* Transmit */
   5346 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
   5347 	/* 5700 b2 errata */
   5348 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
   5349 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
   5350 
   5351 	sc->bge_tx_prodidx = prodidx;
   5352 
   5353 	/*
   5354 	 * Set a timeout in case the chip goes out to lunch.
   5355 	 */
   5356 	ifp->if_timer = 5;
   5357 }
   5358 
   5359 static int
   5360 bge_init(struct ifnet *ifp)
   5361 {
   5362 	struct bge_softc *sc = ifp->if_softc;
   5363 	const uint16_t *m;
   5364 	uint32_t mode, reg;
   5365 	int s, error = 0;
   5366 
   5367 	s = splnet();
   5368 
   5369 	ifp = &sc->ethercom.ec_if;
   5370 
   5371 	/* Cancel pending I/O and flush buffers. */
   5372 	bge_stop(ifp, 0);
   5373 
   5374 	bge_stop_fw(sc);
   5375 	bge_sig_pre_reset(sc, BGE_RESET_START);
   5376 	bge_reset(sc);
   5377 	bge_sig_legacy(sc, BGE_RESET_START);
   5378 	bge_sig_post_reset(sc, BGE_RESET_START);
   5379 
   5380 	bge_chipinit(sc);
   5381 
   5382 	/*
   5383 	 * Init the various state machines, ring
   5384 	 * control blocks and firmware.
   5385 	 */
   5386 	error = bge_blockinit(sc);
   5387 	if (error != 0) {
   5388 		aprint_error_dev(sc->bge_dev, "initialization error %d\n",
   5389 		    error);
   5390 		splx(s);
   5391 		return error;
   5392 	}
   5393 
   5394 	ifp = &sc->ethercom.ec_if;
   5395 
   5396 	/* 5718 step 25, 57XX step 54 */
   5397 	/* Specify MTU. */
   5398 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
   5399 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
   5400 
   5401 	/* 5718 step 23 */
   5402 	/* Load our MAC address. */
   5403 	m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
   5404 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
   5405 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
   5406 
   5407 	/* Enable or disable promiscuous mode as needed. */
   5408 	if (ifp->if_flags & IFF_PROMISC)
   5409 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   5410 	else
   5411 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   5412 
   5413 	/* Program multicast filter. */
   5414 	bge_setmulti(sc);
   5415 
   5416 	/* Init RX ring. */
   5417 	bge_init_rx_ring_std(sc);
   5418 
   5419 	/*
   5420 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
   5421 	 * memory to ensure that the chip has in fact read the first
   5422 	 * entry of the ring.
   5423 	 */
   5424 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
   5425 		uint32_t		v, i;
   5426 		for (i = 0; i < 10; i++) {
   5427 			DELAY(20);
   5428 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
   5429 			if (v == (MCLBYTES - ETHER_ALIGN))
   5430 				break;
   5431 		}
   5432 		if (i == 10)
   5433 			aprint_error_dev(sc->bge_dev,
   5434 			    "5705 A0 chip failed to load RX ring\n");
   5435 	}
   5436 
   5437 	/* Init jumbo RX ring. */
   5438 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
   5439 		bge_init_rx_ring_jumbo(sc);
   5440 
   5441 	/* Init our RX return ring index */
   5442 	sc->bge_rx_saved_considx = 0;
   5443 
   5444 	/* Init TX ring. */
   5445 	bge_init_tx_ring(sc);
   5446 
   5447 	/* 5718 step 63, 57XX step 94 */
   5448 	/* Enable TX MAC state machine lockup fix. */
   5449 	mode = CSR_READ_4(sc, BGE_TX_MODE);
   5450 	if (BGE_IS_5755_PLUS(sc) ||
   5451 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
   5452 		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
   5453 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   5454 		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
   5455 		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
   5456 		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
   5457 	}
   5458 
   5459 	/* Turn on transmitter */
   5460 	CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
   5461 	/* 5718 step 64 */
   5462 	DELAY(100);
   5463 
   5464 	/* 5718 step 65, 57XX step 95 */
   5465 	/* Turn on receiver */
   5466 	mode = CSR_READ_4(sc, BGE_RX_MODE);
   5467 	if (BGE_IS_5755_PLUS(sc))
   5468 		mode |= BGE_RXMODE_IPV6_ENABLE;
   5469 	CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
   5470 	/* 5718 step 66 */
   5471 	DELAY(10);
   5472 
   5473 	/* 5718 step 12, 57XX step 37 */
   5474 	/*
   5475 	 * XXX Documents for the 5718 series and 577xx say the recommended
   5476 	 * value is 1, but tg3 sets 1 only on the 57765 series.
   5477 	 */
   5478 	if (BGE_IS_57765_PLUS(sc))
   5479 		reg = 1;
   5480 	else
   5481 		reg = 2;
   5482 	CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg);
   5483 
   5484 	/* Tell firmware we're alive. */
   5485 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   5486 
   5487 	/* Enable host interrupts. */
   5488 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
   5489 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
   5490 	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);
   5491 
   5492 	if ((error = bge_ifmedia_upd(ifp)) != 0)
   5493 		goto out;
   5494 
   5495 	ifp->if_flags |= IFF_RUNNING;
   5496 	ifp->if_flags &= ~IFF_OACTIVE;
   5497 
   5498 	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
   5499 
   5500 out:
   5501 	sc->bge_if_flags = ifp->if_flags;
   5502 	splx(s);
   5503 
   5504 	return error;
   5505 }
   5506 
   5507 /*
   5508  * Set media options.
   5509  */
   5510 static int
   5511 bge_ifmedia_upd(struct ifnet *ifp)
   5512 {
   5513 	struct bge_softc *sc = ifp->if_softc;
   5514 	struct mii_data *mii = &sc->bge_mii;
   5515 	struct ifmedia *ifm = &sc->bge_ifmedia;
   5516 	int rc;
   5517 
   5518 	/* If this is a 1000baseX NIC, enable the TBI port. */
   5519 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   5520 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   5521 			return EINVAL;
   5522 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
   5523 		case IFM_AUTO:
   5524 			/*
   5525 			 * The BCM5704 ASIC appears to have a special
   5526 			 * mechanism for programming the autoneg
   5527 			 * advertisement registers in TBI mode.
   5528 			 */
   5529 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
   5530 				uint32_t sgdig;
   5531 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
   5532 				if (sgdig & BGE_SGDIGSTS_DONE) {
   5533 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
   5534 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
   5535 					sgdig |= BGE_SGDIGCFG_AUTO |
   5536 					    BGE_SGDIGCFG_PAUSE_CAP |
   5537 					    BGE_SGDIGCFG_ASYM_PAUSE;
   5538 					CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
   5539 					    sgdig | BGE_SGDIGCFG_SEND);
   5540 					DELAY(5);
   5541 					CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
   5542 					    sgdig);
   5543 				}
   5544 			}
   5545 			break;
   5546 		case IFM_1000_SX:
   5547 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
   5548 				BGE_CLRBIT(sc, BGE_MAC_MODE,
   5549 				    BGE_MACMODE_HALF_DUPLEX);
   5550 			} else {
   5551 				BGE_SETBIT(sc, BGE_MAC_MODE,
   5552 				    BGE_MACMODE_HALF_DUPLEX);
   5553 			}
   5554 			DELAY(40);
   5555 			break;
   5556 		default:
   5557 			return EINVAL;
   5558 		}
   5559 		/* XXX 802.3x flow control for 1000BASE-SX */
   5560 		return 0;
   5561 	}
   5562 
   5563 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
   5564 	if ((rc = mii_mediachg(mii)) == ENXIO)
   5565 		return 0;
   5566 
   5567 	/*
   5568 	 * Force an interrupt so that we will call bge_link_upd
   5569 	 * if needed and clear any pending link state attention.
   5570 	 * Without this we get no further interrupts for link state
   5571 	 * changes, so the link never comes UP and we cannot send from
   5572 	 * bge_start. The only other way to get things working was to
   5573 	 * receive a packet and get an RX intr.
   5574 	 */
   5575 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   5576 	    sc->bge_flags & BGEF_IS_5788)
   5577 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
   5578 	else
   5579 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
   5580 
   5581 	return rc;
   5582 }
   5583 
   5584 /*
   5585  * Report current media status.
   5586  */
   5587 static void
   5588 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
   5589 {
   5590 	struct bge_softc *sc = ifp->if_softc;
   5591 	struct mii_data *mii = &sc->bge_mii;
   5592 
   5593 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   5594 		ifmr->ifm_status = IFM_AVALID;
   5595 		ifmr->ifm_active = IFM_ETHER;
   5596 		if (CSR_READ_4(sc, BGE_MAC_STS) &
   5597 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
   5598 			ifmr->ifm_status |= IFM_ACTIVE;
   5599 		ifmr->ifm_active |= IFM_1000_SX;
   5600 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
   5601 			ifmr->ifm_active |= IFM_HDX;
   5602 		else
   5603 			ifmr->ifm_active |= IFM_FDX;
   5604 		return;
   5605 	}
   5606 
   5607 	mii_pollstat(mii);
   5608 	ifmr->ifm_status = mii->mii_media_status;
   5609 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
   5610 	    sc->bge_flowflags;
   5611 }
   5612 
   5613 static int
   5614 bge_ifflags_cb(struct ethercom *ec)
   5615 {
   5616 	struct ifnet *ifp = &ec->ec_if;
   5617 	struct bge_softc *sc = ifp->if_softc;
   5618 	int change = ifp->if_flags ^ sc->bge_if_flags;
   5619 
   5620 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
   5621 		return ENETRESET;
   5622 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
   5623 		return 0;
   5624 
   5625 	if ((ifp->if_flags & IFF_PROMISC) == 0)
   5626 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   5627 	else
   5628 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   5629 
   5630 	bge_setmulti(sc);
   5631 
   5632 	sc->bge_if_flags = ifp->if_flags;
   5633 	return 0;
   5634 }
   5635 
   5636 static int
   5637 bge_ioctl(struct ifnet *ifp, u_long command, void *data)
   5638 {
   5639 	struct bge_softc *sc = ifp->if_softc;
   5640 	struct ifreq *ifr = (struct ifreq *) data;
   5641 	int s, error = 0;
   5642 	struct mii_data *mii;
   5643 
   5644 	s = splnet();
   5645 
   5646 	switch (command) {
   5647 	case SIOCSIFMEDIA:
   5648 		/* XXX Flow control is not supported for 1000BASE-SX */
   5649 		if (sc->bge_flags & BGEF_FIBER_TBI) {
   5650 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   5651 			sc->bge_flowflags = 0;
   5652 		}
   5653 
   5654 		/* Flow control requires full-duplex mode. */
   5655 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   5656 		    (ifr->ifr_media & IFM_FDX) == 0) {
   5657 		    	ifr->ifr_media &= ~IFM_ETH_FMASK;
   5658 		}
   5659 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   5660 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   5661 				/* We can do both TXPAUSE and RXPAUSE. */
   5662 				ifr->ifr_media |=
   5663 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   5664 			}
   5665 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   5666 		}
   5667 		/* FALLTHROUGH */
   5668 	case SIOCGIFMEDIA:
   5669 		if (sc->bge_flags & BGEF_FIBER_TBI) {
   5670 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
   5671 			    command);
   5672 		} else {
   5673 			mii = &sc->bge_mii;
   5674 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
   5675 			    command);
   5676 		}
   5677 		break;
   5678 	default:
   5679 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5680 			break;
   5681 
   5682 		error = 0;
   5683 
   5684 		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
   5685 			;
   5686 		else if (ifp->if_flags & IFF_RUNNING)
   5687 			bge_setmulti(sc);
   5688 		break;
   5689 	}
   5690 
   5691 	splx(s);
   5692 
   5693 	return error;
   5694 }
   5695 
   5696 static void
   5697 bge_watchdog(struct ifnet *ifp)
   5698 {
   5699 	struct bge_softc *sc;
   5700 
   5701 	sc = ifp->if_softc;
   5702 
   5703 	aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
   5704 
   5705 	ifp->if_flags &= ~IFF_RUNNING;
   5706 	bge_init(ifp);
   5707 
   5708 	ifp->if_oerrors++;
   5709 }
   5710 
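        /*
         * Clear the enable bit of a state-machine block and wait (up to roughly
         * 100ms) for the hardware to report that the block has actually stopped.
         */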
   5711 static void
   5712 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
   5713 {
   5714 	int i;
   5715 
   5716 	BGE_CLRBIT_FLUSH(sc, reg, bit);
   5717 
   5718 	for (i = 0; i < 1000; i++) {
   5719 		delay(100);
   5720 		if ((CSR_READ_4(sc, reg) & bit) == 0)
   5721 			return;
   5722 	}
   5723 
   5724 	/*
   5725 	 * Don't print the error when the register is BGE_SRS_MODE; that
   5726 	 * block fails to stop in some environments (and once after boot?).
   5727 	 */
   5728 	if (reg != BGE_SRS_MODE)
   5729 		aprint_error_dev(sc->bge_dev,
   5730 		    "block failed to stop: reg 0x%lx, bit 0x%08x\n",
   5731 		    (u_long)reg, bit);
   5732 }
   5733 
   5734 /*
   5735  * Stop the adapter and free any mbufs allocated to the
   5736  * RX and TX lists.
   5737  */
   5738 static void
   5739 bge_stop(struct ifnet *ifp, int disable)
   5740 {
   5741 	struct bge_softc *sc = ifp->if_softc;
   5742 
   5743 	callout_stop(&sc->bge_timeout);
   5744 
   5745 	/* Disable host interrupts. */
   5746 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
   5747 	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
   5748 
   5749 	/*
   5750 	 * Tell firmware we're shutting down.
   5751 	 */
   5752 	bge_stop_fw(sc);
   5753 	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
   5754 
   5755 	/*
   5756 	 * Disable all of the receiver blocks.
   5757 	 */
   5758 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
   5759 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
   5760 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
   5761 	if (BGE_IS_5700_FAMILY(sc))
   5762 		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
   5763 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
   5764 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
   5765 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
   5766 
   5767 	/*
   5768 	 * Disable all of the transmit blocks.
   5769 	 */
   5770 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
   5771 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
   5772 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
   5773 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
   5774 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
   5775 	if (BGE_IS_5700_FAMILY(sc))
   5776 		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
   5777 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
   5778 
   5779 	BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB);
   5780 	delay(40);
   5781 
   5782 	bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
   5783 
   5784 	/*
   5785 	 * Shut down all of the memory managers and related
   5786 	 * state machines.
   5787 	 */
   5788 	/* 5718 step 5a,5b */
   5789 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
   5790 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
   5791 	if (BGE_IS_5700_FAMILY(sc))
   5792 		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
   5793 
   5794 	/* 5718 step 5c,5d */
   5795 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
   5796 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
   5797 
   5798 	if (BGE_IS_5700_FAMILY(sc)) {
   5799 		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
   5800 		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
   5801 	}
   5802 
   5803 	bge_reset(sc);
   5804 	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
   5805 	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
   5806 
   5807 	/*
   5808 	 * Keep the ASF firmware running if up.
   5809 	 */
   5810 	if (sc->bge_asf_mode & ASF_STACKUP)
   5811 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   5812 	else
   5813 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   5814 
   5815 	/* Free the RX lists. */
   5816 	bge_free_rx_ring_std(sc);
   5817 
   5818 	/* Free jumbo RX list. */
   5819 	if (BGE_IS_JUMBO_CAPABLE(sc))
   5820 		bge_free_rx_ring_jumbo(sc);
   5821 
   5822 	/* Free TX buffers. */
   5823 	bge_free_tx_ring(sc);
   5824 
   5825 	/*
   5826 	 * Isolate/power down the PHY.
   5827 	 */
   5828 	if (!(sc->bge_flags & BGEF_FIBER_TBI))
   5829 		mii_down(&sc->bge_mii);
   5830 
   5831 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
   5832 
   5833 	/* Clear MAC's link state (PHY may still have link UP). */
   5834 	BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   5835 
   5836 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5837 }
   5838 
   5839 static void
   5840 bge_link_upd(struct bge_softc *sc)
   5841 {
   5842 	struct ifnet *ifp = &sc->ethercom.ec_if;
   5843 	struct mii_data *mii = &sc->bge_mii;
   5844 	uint32_t status;
   5845 	int link;
   5846 
   5847 	/* Clear 'pending link event' flag */
   5848 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
   5849 
   5850 	/*
   5851 	 * Process link state changes.
   5852 	 * Grrr. The link status word in the status block does
   5853 	 * not work correctly on the BCM5700 rev AX and BX chips,
   5854 	 * according to all available information. Hence, we have
   5855 	 * to enable MII interrupts in order to properly obtain
   5856 	 * async link changes. Unfortunately, this also means that
   5857 	 * we have to read the MAC status register to detect link
   5858 	 * changes, thereby adding an additional register access to
   5859 	 * the interrupt handler.
   5860 	 */
   5861 
   5862 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
   5863 		status = CSR_READ_4(sc, BGE_MAC_STS);
   5864 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
   5865 			mii_pollstat(mii);
   5866 
   5867 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
   5868 			    mii->mii_media_status & IFM_ACTIVE &&
   5869 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
   5870 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
   5871 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
   5872 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
   5873 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
   5874 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   5875 
   5876 			/* Clear the interrupt */
   5877 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
   5878 			    BGE_EVTENB_MI_INTERRUPT);
   5879 			bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
   5880 			    BRGPHY_MII_ISR);
   5881 			bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
   5882 			    BRGPHY_MII_IMR, BRGPHY_INTRS);
   5883 		}
   5884 		return;
   5885 	}
   5886 
   5887 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   5888 		status = CSR_READ_4(sc, BGE_MAC_STS);
   5889 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
   5890 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
   5891 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
   5892 				if (BGE_ASICREV(sc->bge_chipid)
   5893 				    == BGE_ASICREV_BCM5704) {
   5894 					BGE_CLRBIT(sc, BGE_MAC_MODE,
   5895 					    BGE_MACMODE_TBI_SEND_CFGS);
   5896 					DELAY(40);
   5897 				}
   5898 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
   5899 				if_link_state_change(ifp, LINK_STATE_UP);
   5900 			}
   5901 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
   5902 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   5903 			if_link_state_change(ifp, LINK_STATE_DOWN);
   5904 		}
   5905 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
   5906 		/*
   5907 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
   5908 		 * bit in the status word always set. Work around this bug by
   5909 		 * reading the PHY link status directly.
   5910 		 */
   5911 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
   5912 		    BGE_STS_LINK : 0;
   5913 
   5914 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
   5915 			mii_pollstat(mii);
   5916 
   5917 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
   5918 			    mii->mii_media_status & IFM_ACTIVE &&
   5919 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
   5920 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
   5921 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
   5922 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
   5923 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
   5924 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   5925 		}
   5926 	} else {
   5927 		/*
   5928 		 * For controllers that call mii_tick, we have to poll
   5929 		 * link status.
   5930 		 */
   5931 		mii_pollstat(mii);
   5932 	}
   5933 
   5934 	/* Clear the attention */
   5935 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
   5936 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
   5937 	    BGE_MACSTAT_LINK_CHANGED);
   5938 }
   5939 
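        /*
         * Validate a write to one of the hw.bge sysctl nodes; currently only the
         * Rx interrupt mitigation level (rx_lvl) is handled.
         */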
   5940 static int
   5941 bge_sysctl_verify(SYSCTLFN_ARGS)
   5942 {
   5943 	int error, t;
   5944 	struct sysctlnode node;
   5945 
   5946 	node = *rnode;
   5947 	t = *(int*)rnode->sysctl_data;
   5948 	node.sysctl_data = &t;
   5949 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5950 	if (error || newp == NULL)
   5951 		return error;
   5952 
   5953 #if 0
   5954 	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
   5955 	    node.sysctl_num, rnode->sysctl_num));
   5956 #endif
   5957 
   5958 	if (node.sysctl_num == bge_rxthresh_nodenum) {
   5959 		if (t < 0 || t >= NBGE_RX_THRESH)
   5960 			return EINVAL;
   5961 		bge_update_all_threshes(t);
   5962 	} else
   5963 		return EINVAL;
   5964 
   5965 	*(int*)rnode->sysctl_data = t;
   5966 
   5967 	return 0;
   5968 }
   5969 
   5970 /*
   5971  * Set up sysctl(3) MIB, hw.bge.*.
   5972  */
   5973 static void
   5974 bge_sysctl_init(struct bge_softc *sc)
   5975 {
   5976 	int rc, bge_root_num;
   5977 	const struct sysctlnode *node;
   5978 
   5979 	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
   5980 	    0, CTLTYPE_NODE, "bge",
   5981 	    SYSCTL_DESCR("BGE interface controls"),
   5982 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
   5983 		goto out;
   5984 	}
   5985 
   5986 	bge_root_num = node->sysctl_num;
   5987 
   5988 	/* BGE Rx interrupt mitigation level */
   5989 	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
   5990 	    CTLFLAG_READWRITE,
   5991 	    CTLTYPE_INT, "rx_lvl",
   5992 	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
   5993 	    bge_sysctl_verify, 0,
   5994 	    &bge_rx_thresh_lvl,
   5995 	    0, CTL_HW, bge_root_num, CTL_CREATE,
   5996 	    CTL_EOL)) != 0) {
   5997 		goto out;
   5998 	}
   5999 
   6000 	bge_rxthresh_nodenum = node->sysctl_num;
   6001 
   6002 	return;
   6003 
   6004 out:
   6005 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
   6006 }
   6007 
   6008 #ifdef BGE_DEBUG
   6009 void
   6010 bge_debug_info(struct bge_softc *sc)
   6011 {
   6012 
   6013 	printf("Hardware Flags:\n");
   6014 	if (BGE_IS_57765_PLUS(sc))
   6015 		printf(" - 57765 Plus\n");
   6016 	if (BGE_IS_5717_PLUS(sc))
   6017 		printf(" - 5717 Plus\n");
   6018 	if (BGE_IS_5755_PLUS(sc))
   6019 		printf(" - 5755 Plus\n");
   6020 	if (BGE_IS_575X_PLUS(sc))
   6021 		printf(" - 575X Plus\n");
   6022 	if (BGE_IS_5705_PLUS(sc))
   6023 		printf(" - 5705 Plus\n");
   6024 	if (BGE_IS_5714_FAMILY(sc))
   6025 		printf(" - 5714 Family\n");
   6026 	if (BGE_IS_5700_FAMILY(sc))
   6027 		printf(" - 5700 Family\n");
   6028 	if (sc->bge_flags & BGEF_IS_5788)
   6029 		printf(" - 5788\n");
   6030 	if (sc->bge_flags & BGEF_JUMBO_CAPABLE)
   6031 		printf(" - Supports Jumbo Frames\n");
   6032 	if (sc->bge_flags & BGEF_NO_EEPROM)
   6033 		printf(" - No EEPROM\n");
   6034 	if (sc->bge_flags & BGEF_PCIX)
   6035 		printf(" - PCI-X Bus\n");
   6036 	if (sc->bge_flags & BGEF_PCIE)
   6037 		printf(" - PCI Express Bus\n");
   6038 	if (sc->bge_flags & BGEF_RX_ALIGNBUG)
   6039 		printf(" - RX Alignment Bug\n");
   6040 	if (sc->bge_flags & BGEF_APE)
   6041 		printf(" - APE\n");
   6042 	if (sc->bge_flags & BGEF_CPMU_PRESENT)
   6043 		printf(" - CPMU\n");
   6044 	if (sc->bge_flags & BGEF_TSO)
   6045 		printf(" - TSO\n");
   6046 
   6047 	if (sc->bge_phy_flags & BGEPHYF_NO_3LED)
   6048 		printf(" - No 3 LEDs\n");
   6049 	if (sc->bge_phy_flags & BGEPHYF_CRC_BUG)
   6050 		printf(" - CRC bug\n");
   6051 	if (sc->bge_phy_flags & BGEPHYF_ADC_BUG)
   6052 		printf(" - ADC bug\n");
   6053 	if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG)
   6054 		printf(" - 5704 A0 bug\n");
   6055 	if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG)
   6056 		printf(" - jitter bug\n");
   6057 	if (sc->bge_phy_flags & BGEPHYF_BER_BUG)
   6058 		printf(" - BER bug\n");
   6059 	if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM)
   6060 		printf(" - adjust trim\n");
   6061 	if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED)
   6062 		printf(" - no wirespeed\n");
   6063 }
   6064 #endif /* BGE_DEBUG */
   6065 
   6066 static int
   6067 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
   6068 {
   6069 	prop_dictionary_t dict;
   6070 	prop_data_t ea;
   6071 
   6072 	if ((sc->bge_flags & BGEF_NO_EEPROM) == 0)
   6073 		return 1;
   6074 
   6075 	dict = device_properties(sc->bge_dev);
   6076 	ea = prop_dictionary_get(dict, "mac-address");
   6077 	if (ea != NULL) {
   6078 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   6079 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   6080 		memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   6081 		return 0;
   6082 	}
   6083 
   6084 	return 1;
   6085 }
   6086 
   6087 static int
   6088 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
   6089 {
   6090 	uint32_t mac_addr;
   6091 
   6092 	mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
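        	/*
        	 * 0x484b is ASCII "HK", apparently used by the firmware as a
        	 * signature that a valid MAC address follows in SRAM.
        	 */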
   6093 	if ((mac_addr >> 16) == 0x484b) {
   6094 		ether_addr[0] = (uint8_t)(mac_addr >> 8);
   6095 		ether_addr[1] = (uint8_t)mac_addr;
   6096 		mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
   6097 		ether_addr[2] = (uint8_t)(mac_addr >> 24);
   6098 		ether_addr[3] = (uint8_t)(mac_addr >> 16);
   6099 		ether_addr[4] = (uint8_t)(mac_addr >> 8);
   6100 		ether_addr[5] = (uint8_t)mac_addr;
   6101 		return 0;
   6102 	}
   6103 	return 1;
   6104 }
   6105 
   6106 static int
   6107 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
   6108 {
   6109 	int mac_offset = BGE_EE_MAC_OFFSET;
   6110 
   6111 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
   6112 		mac_offset = BGE_EE_MAC_OFFSET_5906;
   6113 
   6114 	return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
   6115 	    ETHER_ADDR_LEN));
   6116 }
   6117 
   6118 static int
   6119 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
   6120 {
   6121 
   6122 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
   6123 		return 1;
   6124 
   6125 	return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
   6126 	   ETHER_ADDR_LEN));
   6127 }
   6128 
   6129 static int
   6130 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
   6131 {
   6132 	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
   6133 		/* NOTE: Order is critical */
   6134 		bge_get_eaddr_fw,
   6135 		bge_get_eaddr_mem,
   6136 		bge_get_eaddr_nvram,
   6137 		bge_get_eaddr_eeprom,
   6138 		NULL
   6139 	};
   6140 	const bge_eaddr_fcn_t *func;
   6141 
   6142 	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
   6143 		if ((*func)(sc, eaddr) == 0)
   6144 			break;
   6145 	}
   6146 	return (*func == NULL ? ENXIO : 0);
   6147 }
   6148