      1 /*	$NetBSD: if_bge.c,v 1.321 2019/01/16 07:32:13 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001 Wind River Systems
      5  * Copyright (c) 1997, 1998, 1999, 2001
      6  *	Bill Paul <wpaul (at) windriver.com>.  All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by Bill Paul.
     19  * 4. Neither the name of the author nor the names of any co-contributors
     20  *    may be used to endorse or promote products derived from this software
     21  *    without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
     24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
     27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     33  * THE POSSIBILITY OF SUCH DAMAGE.
     34  *
     35  * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
     36  */
     37 
     38 /*
     39  * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
     40  *
     41  * NetBSD version by:
     42  *
     43  *	Frank van der Linden <fvdl (at) wasabisystems.com>
     44  *	Jason Thorpe <thorpej (at) wasabisystems.com>
     45  *	Jonathan Stone <jonathan (at) dsg.stanford.edu>
     46  *
     47  * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com>
     48  * Senior Engineer, Wind River Systems
     49  */
     50 
     51 /*
     52  * The Broadcom BCM5700 is based on technology originally developed by
     53  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
     54  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
     55  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
     56  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
     57  * frames, highly configurable RX filtering, and 16 RX and TX queues
     58  * (which, along with RX filter rules, can be used for QOS applications).
     59  * Other features, such as TCP segmentation, may be available as part
     60  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
     61  * firmware images can be stored in hardware and need not be compiled
     62  * into the driver.
     63  *
     64  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
     65  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
     66  *
     67  * The BCM5701 is a single-chip solution incorporating both the BCM5700
     68  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
     69  * does not support external SSRAM.
     70  *
     71  * Broadcom also produces a variation of the BCM5700 under the "Altima"
     72  * brand name, which is functionally similar but lacks PCI-X support.
     73  *
      74  * Without external SSRAM, you can have at most 4 TX rings,
     75  * and the use of the mini RX ring is disabled. This seems to imply
     76  * that these features are simply not available on the BCM5701. As a
     77  * result, this driver does not implement any support for the mini RX
     78  * ring.
     79  */
     80 
     81 #include <sys/cdefs.h>
     82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.321 2019/01/16 07:32:13 msaitoh Exp $");
     83 
     84 #include <sys/param.h>
     85 #include <sys/systm.h>
     86 #include <sys/callout.h>
     87 #include <sys/sockio.h>
     88 #include <sys/mbuf.h>
     89 #include <sys/malloc.h>
     90 #include <sys/kernel.h>
     91 #include <sys/device.h>
     92 #include <sys/socket.h>
     93 #include <sys/sysctl.h>
     94 
     95 #include <net/if.h>
     96 #include <net/if_dl.h>
     97 #include <net/if_media.h>
     98 #include <net/if_ether.h>
     99 
    100 #include <sys/rndsource.h>
    101 
    102 #ifdef INET
    103 #include <netinet/in.h>
    104 #include <netinet/in_systm.h>
    105 #include <netinet/in_var.h>
    106 #include <netinet/ip.h>
    107 #endif
    108 
    109 /* Headers for TCP Segmentation Offload (TSO) */
    110 #include <netinet/in_systm.h>		/* n_time for <netinet/ip.h>... */
    111 #include <netinet/in.h>			/* ip_{src,dst}, for <netinet/ip.h> */
    112 #include <netinet/ip.h>			/* for struct ip */
    113 #include <netinet/tcp.h>		/* for struct tcphdr */
    114 
    115 
    116 #include <net/bpf.h>
    117 
    118 #include <dev/pci/pcireg.h>
    119 #include <dev/pci/pcivar.h>
    120 #include <dev/pci/pcidevs.h>
    121 
    122 #include <dev/mii/mii.h>
    123 #include <dev/mii/miivar.h>
    124 #include <dev/mii/miidevs.h>
    125 #include <dev/mii/brgphyreg.h>
    126 
    127 #include <dev/pci/if_bgereg.h>
    128 #include <dev/pci/if_bgevar.h>
    129 
    130 #include <prop/proplib.h>
    131 
    132 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
    133 
    134 
    135 /*
    136  * Tunable thresholds for rx-side bge interrupt mitigation.
    137  */
    138 
    139 /*
    140  * The pairs of values below were obtained from empirical measurement
     141  * on bcm5700 rev B2; they are designed to give roughly 1 receive
    142  * interrupt for every N packets received, where N is, approximately,
    143  * the second value (rx_max_bds) in each pair.  The values are chosen
    144  * such that moving from one pair to the succeeding pair was observed
    145  * to roughly halve interrupt rate under sustained input packet load.
    146  * The values were empirically chosen to avoid overflowing internal
     147  * limits on the bcm5700: increasing rx_ticks much beyond 600
    148  * results in internal wrapping and higher interrupt rates.
    149  * The limit of 46 frames was chosen to match NFS workloads.
    150  *
    151  * These values also work well on bcm5701, bcm5704C, and (less
    152  * tested) bcm5703.  On other chipsets, (including the Altima chip
    153  * family), the larger values may overflow internal chip limits,
    154  * leading to increasing interrupt rates rather than lower interrupt
    155  * rates.
    156  *
    157  * Applications using heavy interrupt mitigation (interrupting every
    158  * 32 or 46 frames) in both directions may need to increase the TCP
     159  * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
    160  * full link bandwidth, due to ACKs and window updates lingering
    161  * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
    162  */
    163 static const struct bge_load_rx_thresh {
    164 	int rx_ticks;
     165 	int rx_max_bds;
     166 } bge_rx_threshes[] = {
    167 	{ 16,   1 },	/* rx_max_bds = 1 disables interrupt mitigation */
    168 	{ 32,   2 },
    169 	{ 50,   4 },
    170 	{ 100,  8 },
    171 	{ 192, 16 },
    172 	{ 416, 32 },
    173 	{ 598, 46 }
    174 };
    175 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
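
         /*
          * Illustrative arithmetic (added here for clarity; not part of the
          * original commentary): at 1 Gbit/s with 1500-byte frames the wire
          * carries roughly 81,000 packets/sec.  Assuming the tick values
          * above are microseconds, the { 598, 46 } slot coalesces to about
          * 81000 / 46 ~= 1800 interrupts/sec, and 46 back-to-back frames
          * arrive in ~570 us, just inside the 598-tick timer, which is
          * consistent with N being approximately rx_max_bds as described
          * above.  The { 16, 1 } slot interrupts on every received frame.
          * For the heavier settings, the TCP window advice above typically
          * means raising the net.inet.tcp.recvspace and
          * net.inet.tcp.sendspace sysctls on NetBSD.
          */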
    176 
    177 /* XXX patchable; should be sysctl'able */
    178 static int bge_auto_thresh = 1;
    179 static int bge_rx_thresh_lvl;
    180 
    181 static int bge_rxthresh_nodenum;
    182 
    183 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
    184 
    185 static uint32_t bge_chipid(const struct pci_attach_args *);
    186 static int bge_can_use_msi(struct bge_softc *);
    187 static int bge_probe(device_t, cfdata_t, void *);
    188 static void bge_attach(device_t, device_t, void *);
    189 static int bge_detach(device_t, int);
    190 static void bge_release_resources(struct bge_softc *);
    191 
    192 static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
    193 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
    194 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
    195 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
    196 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
    197 
    198 static void bge_txeof(struct bge_softc *);
    199 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
    200 static void bge_rxeof(struct bge_softc *);
    201 
    202 static void bge_asf_driver_up (struct bge_softc *);
    203 static void bge_tick(void *);
    204 static void bge_stats_update(struct bge_softc *);
    205 static void bge_stats_update_regs(struct bge_softc *);
    206 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
    207 
    208 static int bge_intr(void *);
    209 static void bge_start(struct ifnet *);
    210 static int bge_ifflags_cb(struct ethercom *);
    211 static int bge_ioctl(struct ifnet *, u_long, void *);
    212 static int bge_init(struct ifnet *);
    213 static void bge_stop(struct ifnet *, int);
    214 static void bge_watchdog(struct ifnet *);
    215 static int bge_ifmedia_upd(struct ifnet *);
    216 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
    217 
    218 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
    219 static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);
    220 
    221 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
    222 static int bge_read_eeprom(struct bge_softc *, void *, int, int);
    223 static void bge_setmulti(struct bge_softc *);
    224 
    225 static void bge_handle_events(struct bge_softc *);
    226 static int bge_alloc_jumbo_mem(struct bge_softc *);
    227 #if 0 /* XXX */
    228 static void bge_free_jumbo_mem(struct bge_softc *);
    229 #endif
    230 static void *bge_jalloc(struct bge_softc *);
    231 static void bge_jfree(struct mbuf *, void *, size_t, void *);
    232 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
    233 			       bus_dmamap_t);
    234 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
    235 static int bge_init_rx_ring_std(struct bge_softc *);
    236 static void bge_free_rx_ring_std(struct bge_softc *m, bool);
    237 static int bge_init_rx_ring_jumbo(struct bge_softc *);
    238 static void bge_free_rx_ring_jumbo(struct bge_softc *);
    239 static void bge_free_tx_ring(struct bge_softc *m, bool);
    240 static int bge_init_tx_ring(struct bge_softc *);
    241 
    242 static int bge_chipinit(struct bge_softc *);
    243 static int bge_blockinit(struct bge_softc *);
    244 static int bge_phy_addr(struct bge_softc *);
    245 static uint32_t bge_readmem_ind(struct bge_softc *, int);
    246 static void bge_writemem_ind(struct bge_softc *, int, int);
    247 static void bge_writembx(struct bge_softc *, int, int);
    248 static void bge_writembx_flush(struct bge_softc *, int, int);
    249 static void bge_writemem_direct(struct bge_softc *, int, int);
    250 static void bge_writereg_ind(struct bge_softc *, int, int);
    251 static void bge_set_max_readrq(struct bge_softc *);
    252 
    253 static int bge_miibus_readreg(device_t, int, int);
    254 static void bge_miibus_writereg(device_t, int, int, int);
    255 static void bge_miibus_statchg(struct ifnet *);
    256 
    257 #define BGE_RESET_SHUTDOWN	0
    258 #define	BGE_RESET_START		1
    259 #define	BGE_RESET_SUSPEND	2
    260 static void bge_sig_post_reset(struct bge_softc *, int);
    261 static void bge_sig_legacy(struct bge_softc *, int);
    262 static void bge_sig_pre_reset(struct bge_softc *, int);
    263 static void bge_wait_for_event_ack(struct bge_softc *);
    264 static void bge_stop_fw(struct bge_softc *);
    265 static int bge_reset(struct bge_softc *);
    266 static void bge_link_upd(struct bge_softc *);
    267 static void bge_sysctl_init(struct bge_softc *);
    268 static int bge_sysctl_verify(SYSCTLFN_PROTO);
    269 
    270 static void bge_ape_lock_init(struct bge_softc *);
    271 static void bge_ape_read_fw_ver(struct bge_softc *);
    272 static int bge_ape_lock(struct bge_softc *, int);
    273 static void bge_ape_unlock(struct bge_softc *, int);
    274 static void bge_ape_send_event(struct bge_softc *, uint32_t);
    275 static void bge_ape_driver_state_change(struct bge_softc *, int);
    276 
    277 #ifdef BGE_DEBUG
    278 #define DPRINTF(x)	if (bgedebug) printf x
    279 #define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
    280 #define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x ;} while (0)
    281 int	bgedebug = 0;
    282 int	bge_tso_debug = 0;
    283 void		bge_debug_info(struct bge_softc *);
    284 #else
    285 #define DPRINTF(x)
    286 #define DPRINTFN(n,x)
    287 #define BGE_TSO_PRINTF(x)
    288 #endif
    289 
    290 #ifdef BGE_EVENT_COUNTERS
    291 #define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
    292 #define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
    293 #define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
    294 #else
    295 #define	BGE_EVCNT_INCR(ev)	/* nothing */
    296 #define	BGE_EVCNT_ADD(ev, val)	/* nothing */
    297 #define	BGE_EVCNT_UPD(ev, val)	/* nothing */
    298 #endif
    299 
    300 static const struct bge_product {
    301 	pci_vendor_id_t		bp_vendor;
    302 	pci_product_id_t	bp_product;
    303 	const char		*bp_name;
    304 } bge_products[] = {
    305 	/*
    306 	 * The BCM5700 documentation seems to indicate that the hardware
    307 	 * still has the Alteon vendor ID burned into it, though it
    308 	 * should always be overridden by the value in the EEPROM.  We'll
    309 	 * check for it anyway.
    310 	 */
    311 	{ PCI_VENDOR_ALTEON,
    312 	  PCI_PRODUCT_ALTEON_BCM5700,
    313 	  "Broadcom BCM5700 Gigabit Ethernet",
    314 	  },
    315 	{ PCI_VENDOR_ALTEON,
    316 	  PCI_PRODUCT_ALTEON_BCM5701,
    317 	  "Broadcom BCM5701 Gigabit Ethernet",
    318 	  },
    319 	{ PCI_VENDOR_ALTIMA,
    320 	  PCI_PRODUCT_ALTIMA_AC1000,
    321 	  "Altima AC1000 Gigabit Ethernet",
    322 	  },
    323 	{ PCI_VENDOR_ALTIMA,
    324 	  PCI_PRODUCT_ALTIMA_AC1001,
    325 	  "Altima AC1001 Gigabit Ethernet",
    326 	   },
    327 	{ PCI_VENDOR_ALTIMA,
    328 	  PCI_PRODUCT_ALTIMA_AC1003,
    329 	  "Altima AC1003 Gigabit Ethernet",
    330 	   },
    331 	{ PCI_VENDOR_ALTIMA,
    332 	  PCI_PRODUCT_ALTIMA_AC9100,
    333 	  "Altima AC9100 Gigabit Ethernet",
    334 	  },
    335 	{ PCI_VENDOR_APPLE,
    336 	  PCI_PRODUCT_APPLE_BCM5701,
    337 	  "APPLE BCM5701 Gigabit Ethernet",
    338 	  },
    339 	{ PCI_VENDOR_BROADCOM,
    340 	  PCI_PRODUCT_BROADCOM_BCM5700,
    341 	  "Broadcom BCM5700 Gigabit Ethernet",
    342 	  },
    343 	{ PCI_VENDOR_BROADCOM,
    344 	  PCI_PRODUCT_BROADCOM_BCM5701,
    345 	  "Broadcom BCM5701 Gigabit Ethernet",
    346 	  },
    347 	{ PCI_VENDOR_BROADCOM,
    348 	  PCI_PRODUCT_BROADCOM_BCM5702,
    349 	  "Broadcom BCM5702 Gigabit Ethernet",
    350 	  },
    351 	{ PCI_VENDOR_BROADCOM,
    352 	  PCI_PRODUCT_BROADCOM_BCM5702X,
    353 	  "Broadcom BCM5702X Gigabit Ethernet" },
    354 	{ PCI_VENDOR_BROADCOM,
    355 	  PCI_PRODUCT_BROADCOM_BCM5703,
    356 	  "Broadcom BCM5703 Gigabit Ethernet",
    357 	  },
    358 	{ PCI_VENDOR_BROADCOM,
    359 	  PCI_PRODUCT_BROADCOM_BCM5703X,
    360 	  "Broadcom BCM5703X Gigabit Ethernet",
    361 	  },
    362 	{ PCI_VENDOR_BROADCOM,
    363 	  PCI_PRODUCT_BROADCOM_BCM5703_ALT,
    364 	  "Broadcom BCM5703 Gigabit Ethernet",
    365 	  },
    366 	{ PCI_VENDOR_BROADCOM,
    367 	  PCI_PRODUCT_BROADCOM_BCM5704C,
    368 	  "Broadcom BCM5704C Dual Gigabit Ethernet",
    369 	  },
    370 	{ PCI_VENDOR_BROADCOM,
    371 	  PCI_PRODUCT_BROADCOM_BCM5704S,
    372 	  "Broadcom BCM5704S Dual Gigabit Ethernet",
    373 	  },
    374 	{ PCI_VENDOR_BROADCOM,
    375 	  PCI_PRODUCT_BROADCOM_BCM5705,
    376 	  "Broadcom BCM5705 Gigabit Ethernet",
    377 	  },
    378 	{ PCI_VENDOR_BROADCOM,
    379 	  PCI_PRODUCT_BROADCOM_BCM5705F,
    380 	  "Broadcom BCM5705F Gigabit Ethernet",
    381 	  },
    382 	{ PCI_VENDOR_BROADCOM,
    383 	  PCI_PRODUCT_BROADCOM_BCM5705K,
    384 	  "Broadcom BCM5705K Gigabit Ethernet",
    385 	  },
    386 	{ PCI_VENDOR_BROADCOM,
    387 	  PCI_PRODUCT_BROADCOM_BCM5705M,
    388 	  "Broadcom BCM5705M Gigabit Ethernet",
    389 	  },
    390 	{ PCI_VENDOR_BROADCOM,
    391 	  PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
    392 	  "Broadcom BCM5705M Gigabit Ethernet",
    393 	  },
    394 	{ PCI_VENDOR_BROADCOM,
    395 	  PCI_PRODUCT_BROADCOM_BCM5714,
    396 	  "Broadcom BCM5714 Gigabit Ethernet",
    397 	  },
    398 	{ PCI_VENDOR_BROADCOM,
    399 	  PCI_PRODUCT_BROADCOM_BCM5714S,
    400 	  "Broadcom BCM5714S Gigabit Ethernet",
    401 	  },
    402 	{ PCI_VENDOR_BROADCOM,
    403 	  PCI_PRODUCT_BROADCOM_BCM5715,
    404 	  "Broadcom BCM5715 Gigabit Ethernet",
    405 	  },
    406 	{ PCI_VENDOR_BROADCOM,
    407 	  PCI_PRODUCT_BROADCOM_BCM5715S,
    408 	  "Broadcom BCM5715S Gigabit Ethernet",
    409 	  },
    410 	{ PCI_VENDOR_BROADCOM,
    411 	  PCI_PRODUCT_BROADCOM_BCM5717,
    412 	  "Broadcom BCM5717 Gigabit Ethernet",
    413 	  },
    414 	{ PCI_VENDOR_BROADCOM,
    415 	  PCI_PRODUCT_BROADCOM_BCM5718,
    416 	  "Broadcom BCM5718 Gigabit Ethernet",
    417 	  },
    418 	{ PCI_VENDOR_BROADCOM,
    419 	  PCI_PRODUCT_BROADCOM_BCM5719,
    420 	  "Broadcom BCM5719 Gigabit Ethernet",
    421 	  },
    422 	{ PCI_VENDOR_BROADCOM,
    423 	  PCI_PRODUCT_BROADCOM_BCM5720,
    424 	  "Broadcom BCM5720 Gigabit Ethernet",
    425 	  },
    426 	{ PCI_VENDOR_BROADCOM,
    427 	  PCI_PRODUCT_BROADCOM_BCM5721,
    428 	  "Broadcom BCM5721 Gigabit Ethernet",
    429 	  },
    430 	{ PCI_VENDOR_BROADCOM,
    431 	  PCI_PRODUCT_BROADCOM_BCM5722,
    432 	  "Broadcom BCM5722 Gigabit Ethernet",
    433 	  },
    434 	{ PCI_VENDOR_BROADCOM,
    435 	  PCI_PRODUCT_BROADCOM_BCM5723,
    436 	  "Broadcom BCM5723 Gigabit Ethernet",
    437 	  },
    438 	{ PCI_VENDOR_BROADCOM,
    439 	  PCI_PRODUCT_BROADCOM_BCM5750,
    440 	  "Broadcom BCM5750 Gigabit Ethernet",
    441 	  },
    442 	{ PCI_VENDOR_BROADCOM,
    443 	  PCI_PRODUCT_BROADCOM_BCM5751,
    444 	  "Broadcom BCM5751 Gigabit Ethernet",
    445 	  },
    446 	{ PCI_VENDOR_BROADCOM,
    447 	  PCI_PRODUCT_BROADCOM_BCM5751F,
    448 	  "Broadcom BCM5751F Gigabit Ethernet",
    449 	  },
    450 	{ PCI_VENDOR_BROADCOM,
    451 	  PCI_PRODUCT_BROADCOM_BCM5751M,
    452 	  "Broadcom BCM5751M Gigabit Ethernet",
    453 	  },
    454 	{ PCI_VENDOR_BROADCOM,
    455 	  PCI_PRODUCT_BROADCOM_BCM5752,
    456 	  "Broadcom BCM5752 Gigabit Ethernet",
    457 	  },
    458 	{ PCI_VENDOR_BROADCOM,
    459 	  PCI_PRODUCT_BROADCOM_BCM5752M,
    460 	  "Broadcom BCM5752M Gigabit Ethernet",
    461 	  },
    462 	{ PCI_VENDOR_BROADCOM,
    463 	  PCI_PRODUCT_BROADCOM_BCM5753,
    464 	  "Broadcom BCM5753 Gigabit Ethernet",
    465 	  },
    466 	{ PCI_VENDOR_BROADCOM,
    467 	  PCI_PRODUCT_BROADCOM_BCM5753F,
    468 	  "Broadcom BCM5753F Gigabit Ethernet",
    469 	  },
    470 	{ PCI_VENDOR_BROADCOM,
    471 	  PCI_PRODUCT_BROADCOM_BCM5753M,
    472 	  "Broadcom BCM5753M Gigabit Ethernet",
    473 	  },
    474 	{ PCI_VENDOR_BROADCOM,
    475 	  PCI_PRODUCT_BROADCOM_BCM5754,
    476 	  "Broadcom BCM5754 Gigabit Ethernet",
    477 	},
    478 	{ PCI_VENDOR_BROADCOM,
    479 	  PCI_PRODUCT_BROADCOM_BCM5754M,
    480 	  "Broadcom BCM5754M Gigabit Ethernet",
    481 	},
    482 	{ PCI_VENDOR_BROADCOM,
    483 	  PCI_PRODUCT_BROADCOM_BCM5755,
    484 	  "Broadcom BCM5755 Gigabit Ethernet",
    485 	},
    486 	{ PCI_VENDOR_BROADCOM,
    487 	  PCI_PRODUCT_BROADCOM_BCM5755M,
    488 	  "Broadcom BCM5755M Gigabit Ethernet",
    489 	},
    490 	{ PCI_VENDOR_BROADCOM,
    491 	  PCI_PRODUCT_BROADCOM_BCM5756,
    492 	  "Broadcom BCM5756 Gigabit Ethernet",
    493 	},
    494 	{ PCI_VENDOR_BROADCOM,
    495 	  PCI_PRODUCT_BROADCOM_BCM5761,
    496 	  "Broadcom BCM5761 Gigabit Ethernet",
    497 	},
    498 	{ PCI_VENDOR_BROADCOM,
    499 	  PCI_PRODUCT_BROADCOM_BCM5761E,
    500 	  "Broadcom BCM5761E Gigabit Ethernet",
    501 	},
    502 	{ PCI_VENDOR_BROADCOM,
    503 	  PCI_PRODUCT_BROADCOM_BCM5761S,
    504 	  "Broadcom BCM5761S Gigabit Ethernet",
    505 	},
    506 	{ PCI_VENDOR_BROADCOM,
    507 	  PCI_PRODUCT_BROADCOM_BCM5761SE,
    508 	  "Broadcom BCM5761SE Gigabit Ethernet",
    509 	},
    510 	{ PCI_VENDOR_BROADCOM,
    511 	  PCI_PRODUCT_BROADCOM_BCM5764,
    512 	  "Broadcom BCM5764 Gigabit Ethernet",
    513 	  },
    514 	{ PCI_VENDOR_BROADCOM,
    515 	  PCI_PRODUCT_BROADCOM_BCM5780,
    516 	  "Broadcom BCM5780 Gigabit Ethernet",
    517 	  },
    518 	{ PCI_VENDOR_BROADCOM,
    519 	  PCI_PRODUCT_BROADCOM_BCM5780S,
    520 	  "Broadcom BCM5780S Gigabit Ethernet",
    521 	  },
    522 	{ PCI_VENDOR_BROADCOM,
    523 	  PCI_PRODUCT_BROADCOM_BCM5781,
    524 	  "Broadcom BCM5781 Gigabit Ethernet",
    525 	  },
    526 	{ PCI_VENDOR_BROADCOM,
    527 	  PCI_PRODUCT_BROADCOM_BCM5782,
    528 	  "Broadcom BCM5782 Gigabit Ethernet",
    529 	},
    530 	{ PCI_VENDOR_BROADCOM,
    531 	  PCI_PRODUCT_BROADCOM_BCM5784M,
    532 	  "BCM5784M NetLink 1000baseT Ethernet",
    533 	},
    534 	{ PCI_VENDOR_BROADCOM,
    535 	  PCI_PRODUCT_BROADCOM_BCM5785F,
    536 	  "BCM5785F NetLink 10/100 Ethernet",
    537 	},
    538 	{ PCI_VENDOR_BROADCOM,
    539 	  PCI_PRODUCT_BROADCOM_BCM5785G,
    540 	  "BCM5785G NetLink 1000baseT Ethernet",
    541 	},
    542 	{ PCI_VENDOR_BROADCOM,
    543 	  PCI_PRODUCT_BROADCOM_BCM5786,
    544 	  "Broadcom BCM5786 Gigabit Ethernet",
    545 	},
    546 	{ PCI_VENDOR_BROADCOM,
    547 	  PCI_PRODUCT_BROADCOM_BCM5787,
    548 	  "Broadcom BCM5787 Gigabit Ethernet",
    549 	},
    550 	{ PCI_VENDOR_BROADCOM,
    551 	  PCI_PRODUCT_BROADCOM_BCM5787F,
    552 	  "Broadcom BCM5787F 10/100 Ethernet",
    553 	},
    554 	{ PCI_VENDOR_BROADCOM,
    555 	  PCI_PRODUCT_BROADCOM_BCM5787M,
    556 	  "Broadcom BCM5787M Gigabit Ethernet",
    557 	},
    558 	{ PCI_VENDOR_BROADCOM,
    559 	  PCI_PRODUCT_BROADCOM_BCM5788,
    560 	  "Broadcom BCM5788 Gigabit Ethernet",
    561 	  },
    562 	{ PCI_VENDOR_BROADCOM,
    563 	  PCI_PRODUCT_BROADCOM_BCM5789,
    564 	  "Broadcom BCM5789 Gigabit Ethernet",
    565 	  },
    566 	{ PCI_VENDOR_BROADCOM,
    567 	  PCI_PRODUCT_BROADCOM_BCM5901,
    568 	  "Broadcom BCM5901 Fast Ethernet",
    569 	  },
    570 	{ PCI_VENDOR_BROADCOM,
    571 	  PCI_PRODUCT_BROADCOM_BCM5901A2,
    572 	  "Broadcom BCM5901A2 Fast Ethernet",
    573 	  },
    574 	{ PCI_VENDOR_BROADCOM,
    575 	  PCI_PRODUCT_BROADCOM_BCM5903M,
    576 	  "Broadcom BCM5903M Fast Ethernet",
    577 	  },
    578 	{ PCI_VENDOR_BROADCOM,
    579 	  PCI_PRODUCT_BROADCOM_BCM5906,
    580 	  "Broadcom BCM5906 Fast Ethernet",
    581 	  },
    582 	{ PCI_VENDOR_BROADCOM,
    583 	  PCI_PRODUCT_BROADCOM_BCM5906M,
    584 	  "Broadcom BCM5906M Fast Ethernet",
    585 	  },
    586 	{ PCI_VENDOR_BROADCOM,
    587 	  PCI_PRODUCT_BROADCOM_BCM57760,
    588 	  "Broadcom BCM57760 Gigabit Ethernet",
    589 	  },
    590 	{ PCI_VENDOR_BROADCOM,
    591 	  PCI_PRODUCT_BROADCOM_BCM57761,
    592 	  "Broadcom BCM57761 Gigabit Ethernet",
    593 	  },
    594 	{ PCI_VENDOR_BROADCOM,
    595 	  PCI_PRODUCT_BROADCOM_BCM57762,
    596 	  "Broadcom BCM57762 Gigabit Ethernet",
    597 	  },
    598 	{ PCI_VENDOR_BROADCOM,
    599 	  PCI_PRODUCT_BROADCOM_BCM57765,
    600 	  "Broadcom BCM57765 Gigabit Ethernet",
    601 	  },
    602 	{ PCI_VENDOR_BROADCOM,
    603 	  PCI_PRODUCT_BROADCOM_BCM57766,
    604 	  "Broadcom BCM57766 Gigabit Ethernet",
    605 	  },
    606 	{ PCI_VENDOR_BROADCOM,
    607 	  PCI_PRODUCT_BROADCOM_BCM57780,
    608 	  "Broadcom BCM57780 Gigabit Ethernet",
    609 	  },
    610 	{ PCI_VENDOR_BROADCOM,
    611 	  PCI_PRODUCT_BROADCOM_BCM57781,
    612 	  "Broadcom BCM57781 Gigabit Ethernet",
    613 	  },
    614 	{ PCI_VENDOR_BROADCOM,
    615 	  PCI_PRODUCT_BROADCOM_BCM57782,
    616 	  "Broadcom BCM57782 Gigabit Ethernet",
    617 	  },
    618 	{ PCI_VENDOR_BROADCOM,
    619 	  PCI_PRODUCT_BROADCOM_BCM57785,
    620 	  "Broadcom BCM57785 Gigabit Ethernet",
    621 	  },
    622 	{ PCI_VENDOR_BROADCOM,
    623 	  PCI_PRODUCT_BROADCOM_BCM57786,
    624 	  "Broadcom BCM57786 Gigabit Ethernet",
    625 	  },
    626 	{ PCI_VENDOR_BROADCOM,
    627 	  PCI_PRODUCT_BROADCOM_BCM57788,
    628 	  "Broadcom BCM57788 Gigabit Ethernet",
    629 	  },
    630 	{ PCI_VENDOR_BROADCOM,
    631 	  PCI_PRODUCT_BROADCOM_BCM57790,
    632 	  "Broadcom BCM57790 Gigabit Ethernet",
    633 	  },
    634 	{ PCI_VENDOR_BROADCOM,
    635 	  PCI_PRODUCT_BROADCOM_BCM57791,
    636 	  "Broadcom BCM57791 Gigabit Ethernet",
    637 	  },
    638 	{ PCI_VENDOR_BROADCOM,
    639 	  PCI_PRODUCT_BROADCOM_BCM57795,
    640 	  "Broadcom BCM57795 Gigabit Ethernet",
    641 	  },
    642 	{ PCI_VENDOR_SCHNEIDERKOCH,
    643 	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
    644 	  "SysKonnect SK-9Dx1 Gigabit Ethernet",
    645 	  },
    646 	{ PCI_VENDOR_3COM,
    647 	  PCI_PRODUCT_3COM_3C996,
    648 	  "3Com 3c996 Gigabit Ethernet",
    649 	  },
    650 	{ PCI_VENDOR_FUJITSU4,
    651 	  PCI_PRODUCT_FUJITSU4_PW008GE4,
    652 	  "Fujitsu PW008GE4 Gigabit Ethernet",
    653 	  },
    654 	{ PCI_VENDOR_FUJITSU4,
    655 	  PCI_PRODUCT_FUJITSU4_PW008GE5,
    656 	  "Fujitsu PW008GE5 Gigabit Ethernet",
    657 	  },
    658 	{ PCI_VENDOR_FUJITSU4,
    659 	  PCI_PRODUCT_FUJITSU4_PP250_450_LAN,
    660 	  "Fujitsu Primepower 250/450 Gigabit Ethernet",
    661 	  },
    662 	{ 0,
    663 	  0,
    664 	  NULL },
    665 };
    666 
    667 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGEF_JUMBO_CAPABLE)
    668 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGEF_5700_FAMILY)
    669 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGEF_5705_PLUS)
    670 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGEF_5714_FAMILY)
    671 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGEF_575X_PLUS)
    672 #define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGEF_5755_PLUS)
    673 #define BGE_IS_57765_FAMILY(sc)		((sc)->bge_flags & BGEF_57765_FAMILY)
    674 #define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGEF_57765_PLUS)
    675 #define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGEF_5717_PLUS)
    676 
    677 static const struct bge_revision {
    678 	uint32_t		br_chipid;
    679 	const char		*br_name;
    680 } bge_revisions[] = {
    681 	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
    682 	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
    683 	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
    684 	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
    685 	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
    686 	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
    687 	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
    688 	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
    689 	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
    690 	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
    691 	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
    692 	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
    693 	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
    694 	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
    695 	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
    696 	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
    697 	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
    698 	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
    699 	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
    700 	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
    701 	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
    702 	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
    703 	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
    704 	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
    705 	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
    706 	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
    707 	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
    708 	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
    709 	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
    710 	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
    711 	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
    712 	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
    713 	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
    714 	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
    715 	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
    716 	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
    717 	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
    718 	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
    719 	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
    720 	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
    721 	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
    722 	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
    723 	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
    724 	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
    725 	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
    726 	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
    727 	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
    728 	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
    729 	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
    730 	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
    731 	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
    732 	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
    733 	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
    734 	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
    735 	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
    736 	{ BGE_CHIPID_BCM5784_B0, "BCM5784 B0" },
    737 	/* 5754 and 5787 share the same ASIC ID */
    738 	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
    739 	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
    740 	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
    741 	{ BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
    742 	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
    743 	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
    744 	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
    745 	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
    746 	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
    747 	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
    748 	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
    749 
    750 	{ 0, NULL }
    751 };
    752 
    753 /*
    754  * Some defaults for major revisions, so that newer steppings
    755  * that we don't know about have a shot at working.
    756  */
    757 static const struct bge_revision bge_majorrevs[] = {
    758 	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
    759 	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
    760 	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
    761 	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
    762 	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
    763 	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
    764 	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
    765 	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
    766 	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
    767 	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
    768 	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
    769 	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
    770 	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
    771 	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
    772 	/* 5754 and 5787 share the same ASIC ID */
    773 	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
    774 	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
    775 	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
    776 	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
    777 	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
    778 	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
    779 	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
    780 	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
    781 
    782 	{ 0, NULL }
    783 };
    784 
    785 static int bge_allow_asf = 1;
    786 
    787 CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc),
    788     bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    789 
    790 static uint32_t
    791 bge_readmem_ind(struct bge_softc *sc, int off)
    792 {
    793 	pcireg_t val;
    794 
    795 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
    796 	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
    797 		return 0;
    798 
    799 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
    800 	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
    801 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
    802 	return val;
    803 }
    804 
    805 static void
    806 bge_writemem_ind(struct bge_softc *sc, int off, int val)
    807 {
    808 
    809 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
    810 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
    811 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
    812 }
    813 
    814 /*
    815  * PCI Express only
    816  */
    817 static void
    818 bge_set_max_readrq(struct bge_softc *sc)
    819 {
    820 	pcireg_t val;
    821 
    822 	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
    823 	    + PCIE_DCSR);
    824 	val &= ~PCIE_DCSR_MAX_READ_REQ;
    825 	switch (sc->bge_expmrq) {
    826 	case 2048:
    827 		val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048;
    828 		break;
    829 	case 4096:
    830 		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
    831 		break;
    832 	default:
    833 		panic("incorrect expmrq value(%d)", sc->bge_expmrq);
    834 		break;
    835 	}
    836 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
    837 	    + PCIE_DCSR, val);
    838 }
    839 
    840 #ifdef notdef
    841 static uint32_t
    842 bge_readreg_ind(struct bge_softc *sc, int off)
    843 {
    844 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
    845 	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
    846 }
    847 #endif
    848 
    849 static void
    850 bge_writereg_ind(struct bge_softc *sc, int off, int val)
    851 {
    852 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
    853 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
    854 }
    855 
    856 static void
    857 bge_writemem_direct(struct bge_softc *sc, int off, int val)
    858 {
    859 	CSR_WRITE_4(sc, off, val);
    860 }
    861 
    862 static void
    863 bge_writembx(struct bge_softc *sc, int off, int val)
    864 {
    865 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
    866 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
    867 
    868 	CSR_WRITE_4(sc, off, val);
    869 }
    870 
    871 static void
    872 bge_writembx_flush(struct bge_softc *sc, int off, int val)
    873 {
    874 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
    875 		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
    876 
    877 	CSR_WRITE_4_FLUSH(sc, off, val);
    878 }
    879 
    880 /*
    881  * Clear all stale locks and select the lock for this driver instance.
    882  */
    883 void
    884 bge_ape_lock_init(struct bge_softc *sc)
    885 {
    886 	struct pci_attach_args *pa = &(sc->bge_pa);
    887 	uint32_t bit, regbase;
    888 	int i;
    889 
    890 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
    891 		regbase = BGE_APE_LOCK_GRANT;
    892 	else
    893 		regbase = BGE_APE_PER_LOCK_GRANT;
    894 
    895 	/* Clear any stale locks. */
    896 	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
    897 		switch (i) {
    898 		case BGE_APE_LOCK_PHY0:
    899 		case BGE_APE_LOCK_PHY1:
    900 		case BGE_APE_LOCK_PHY2:
    901 		case BGE_APE_LOCK_PHY3:
    902 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
    903 			break;
    904 		default:
    905 			if (pa->pa_function == 0)
    906 				bit = BGE_APE_LOCK_GRANT_DRIVER0;
    907 			else
    908 				bit = (1 << pa->pa_function);
    909 		}
    910 		APE_WRITE_4(sc, regbase + 4 * i, bit);
    911 	}
    912 
    913 	/* Select the PHY lock based on the device's function number. */
    914 	switch (pa->pa_function) {
    915 	case 0:
    916 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
    917 		break;
    918 	case 1:
    919 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
    920 		break;
    921 	case 2:
    922 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
    923 		break;
    924 	case 3:
    925 		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
    926 		break;
    927 	default:
     928 		printf("%s: PHY lock not supported on this function\n",
    929 		    device_xname(sc->bge_dev));
    930 		break;
    931 	}
    932 }
    933 
    934 /*
    935  * Check for APE firmware, set flags, and print version info.
    936  */
    937 void
    938 bge_ape_read_fw_ver(struct bge_softc *sc)
    939 {
    940 	const char *fwtype;
    941 	uint32_t apedata, features;
    942 
    943 	/* Check for a valid APE signature in shared memory. */
    944 	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
    945 	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
     946 		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
    947 		return;
    948 	}
    949 
    950 	/* Check if APE firmware is running. */
    951 	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
    952 	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
    953 		printf("%s: APE signature found but FW status not ready! "
    954 		    "0x%08x\n", device_xname(sc->bge_dev), apedata);
    955 		return;
    956 	}
    957 
    958 	sc->bge_mfw_flags |= BGE_MFW_ON_APE;
    959 
     960 	/* Fetch the APE firmware type and version. */
    961 	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
    962 	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
    963 	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
    964 		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
    965 		fwtype = "NCSI";
    966 	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
    967 		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
    968 		fwtype = "DASH";
    969 	} else
    970 		fwtype = "UNKN";
    971 
    972 	/* Print the APE firmware version. */
    973 	aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype,
    974 	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
    975 	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
    976 	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
    977 	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
    978 }
    979 
    980 int
    981 bge_ape_lock(struct bge_softc *sc, int locknum)
    982 {
    983 	struct pci_attach_args *pa = &(sc->bge_pa);
    984 	uint32_t bit, gnt, req, status;
    985 	int i, off;
    986 
    987 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
    988 		return (0);
    989 
    990 	/* Lock request/grant registers have different bases. */
    991 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
    992 		req = BGE_APE_LOCK_REQ;
    993 		gnt = BGE_APE_LOCK_GRANT;
    994 	} else {
    995 		req = BGE_APE_PER_LOCK_REQ;
    996 		gnt = BGE_APE_PER_LOCK_GRANT;
    997 	}
    998 
    999 	off = 4 * locknum;
   1000 
   1001 	switch (locknum) {
   1002 	case BGE_APE_LOCK_GPIO:
   1003 		/* Lock required when using GPIO. */
   1004 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
   1005 			return (0);
   1006 		if (pa->pa_function == 0)
   1007 			bit = BGE_APE_LOCK_REQ_DRIVER0;
   1008 		else
   1009 			bit = (1 << pa->pa_function);
   1010 		break;
   1011 	case BGE_APE_LOCK_GRC:
   1012 		/* Lock required to reset the device. */
   1013 		if (pa->pa_function == 0)
   1014 			bit = BGE_APE_LOCK_REQ_DRIVER0;
   1015 		else
   1016 			bit = (1 << pa->pa_function);
   1017 		break;
   1018 	case BGE_APE_LOCK_MEM:
   1019 		/* Lock required when accessing certain APE memory. */
   1020 		if (pa->pa_function == 0)
   1021 			bit = BGE_APE_LOCK_REQ_DRIVER0;
   1022 		else
   1023 			bit = (1 << pa->pa_function);
   1024 		break;
   1025 	case BGE_APE_LOCK_PHY0:
   1026 	case BGE_APE_LOCK_PHY1:
   1027 	case BGE_APE_LOCK_PHY2:
   1028 	case BGE_APE_LOCK_PHY3:
   1029 		/* Lock required when accessing PHYs. */
   1030 		bit = BGE_APE_LOCK_REQ_DRIVER0;
   1031 		break;
   1032 	default:
   1033 		return (EINVAL);
   1034 	}
   1035 
   1036 	/* Request a lock. */
   1037 	APE_WRITE_4_FLUSH(sc, req + off, bit);
   1038 
   1039 	/* Wait up to 1 second to acquire lock. */
   1040 	for (i = 0; i < 20000; i++) {
   1041 		status = APE_READ_4(sc, gnt + off);
   1042 		if (status == bit)
   1043 			break;
   1044 		DELAY(50);
   1045 	}
   1046 
   1047 	/* Handle any errors. */
   1048 	if (status != bit) {
   1049 		printf("%s: APE lock %d request failed! "
   1050 		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
   1051 		    device_xname(sc->bge_dev),
   1052 		    locknum, req + off, bit & 0xFFFF, gnt + off,
   1053 		    status & 0xFFFF);
   1054 		/* Revoke the lock request. */
   1055 		APE_WRITE_4(sc, gnt + off, bit);
   1056 		return (EBUSY);
   1057 	}
   1058 
   1059 	return (0);
   1060 }
   1061 
   1062 void
   1063 bge_ape_unlock(struct bge_softc *sc, int locknum)
   1064 {
   1065 	struct pci_attach_args *pa = &(sc->bge_pa);
   1066 	uint32_t bit, gnt;
   1067 	int off;
   1068 
   1069 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
   1070 		return;
   1071 
   1072 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
   1073 		gnt = BGE_APE_LOCK_GRANT;
   1074 	else
   1075 		gnt = BGE_APE_PER_LOCK_GRANT;
   1076 
   1077 	off = 4 * locknum;
   1078 
   1079 	switch (locknum) {
   1080 	case BGE_APE_LOCK_GPIO:
   1081 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
   1082 			return;
   1083 		if (pa->pa_function == 0)
   1084 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
   1085 		else
   1086 			bit = (1 << pa->pa_function);
   1087 		break;
   1088 	case BGE_APE_LOCK_GRC:
   1089 		if (pa->pa_function == 0)
   1090 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
   1091 		else
   1092 			bit = (1 << pa->pa_function);
   1093 		break;
   1094 	case BGE_APE_LOCK_MEM:
   1095 		if (pa->pa_function == 0)
   1096 			bit = BGE_APE_LOCK_GRANT_DRIVER0;
   1097 		else
   1098 			bit = (1 << pa->pa_function);
   1099 		break;
   1100 	case BGE_APE_LOCK_PHY0:
   1101 	case BGE_APE_LOCK_PHY1:
   1102 	case BGE_APE_LOCK_PHY2:
   1103 	case BGE_APE_LOCK_PHY3:
   1104 		bit = BGE_APE_LOCK_GRANT_DRIVER0;
   1105 		break;
   1106 	default:
   1107 		return;
   1108 	}
   1109 
   1110 	/* Write and flush for consecutive bge_ape_lock() */
   1111 	APE_WRITE_4_FLUSH(sc, gnt + off, bit);
   1112 }
   1113 
   1114 /*
   1115  * Send an event to the APE firmware.
   1116  */
   1117 void
   1118 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
   1119 {
   1120 	uint32_t apedata;
   1121 	int i;
   1122 
   1123 	/* NCSI does not support APE events. */
   1124 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
   1125 		return;
   1126 
   1127 	/* Wait up to 1ms for APE to service previous event. */
   1128 	for (i = 10; i > 0; i--) {
   1129 		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
   1130 			break;
   1131 		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
   1132 		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
   1133 			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
   1134 			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
   1135 			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
   1136 			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
   1137 			break;
   1138 		}
   1139 		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
   1140 		DELAY(100);
   1141 	}
   1142 	if (i == 0) {
   1143 		printf("%s: APE event 0x%08x send timed out\n",
   1144 		    device_xname(sc->bge_dev), event);
   1145 	}
   1146 }
   1147 
   1148 void
   1149 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
   1150 {
   1151 	uint32_t apedata, event;
   1152 
   1153 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
   1154 		return;
   1155 
   1156 	switch (kind) {
   1157 	case BGE_RESET_START:
   1158 		/* If this is the first load, clear the load counter. */
   1159 		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
   1160 		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
   1161 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
   1162 		else {
   1163 			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
   1164 			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
   1165 		}
   1166 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
   1167 		    BGE_APE_HOST_SEG_SIG_MAGIC);
   1168 		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
   1169 		    BGE_APE_HOST_SEG_LEN_MAGIC);
   1170 
   1171 		/* Add some version info if bge(4) supports it. */
   1172 		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
   1173 		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
   1174 		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
   1175 		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
   1176 		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
   1177 		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
   1178 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
   1179 		    BGE_APE_HOST_DRVR_STATE_START);
   1180 		event = BGE_APE_EVENT_STATUS_STATE_START;
   1181 		break;
   1182 	case BGE_RESET_SHUTDOWN:
   1183 		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
   1184 		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
   1185 		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
   1186 		break;
   1187 	case BGE_RESET_SUSPEND:
   1188 		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
   1189 		break;
   1190 	default:
   1191 		return;
   1192 	}
   1193 
   1194 	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
   1195 	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
   1196 }
   1197 
   1198 static uint8_t
   1199 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
   1200 {
   1201 	uint32_t access, byte = 0;
   1202 	int i;
   1203 
   1204 	/* Lock. */
   1205 	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
   1206 	for (i = 0; i < 8000; i++) {
   1207 		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
   1208 			break;
   1209 		DELAY(20);
   1210 	}
   1211 	if (i == 8000)
   1212 		return 1;
   1213 
   1214 	/* Enable access. */
   1215 	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
   1216 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
   1217 
   1218 	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
   1219 	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
   1220 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
   1221 		DELAY(10);
   1222 		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
   1223 			DELAY(10);
   1224 			break;
   1225 		}
   1226 	}
   1227 
   1228 	if (i == BGE_TIMEOUT * 10) {
   1229 		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
   1230 		return 1;
   1231 	}
   1232 
   1233 	/* Get result. */
   1234 	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
   1235 
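         	/*
         	 * The controller fetched the 32-bit NVRAM word that contains
         	 * the requested address (addr was rounded down to a 4-byte
         	 * boundary above).  Byte-swap the result (the chip presents it
         	 * most-significant byte first) and shift the byte selected by
         	 * (addr % 4) into the low 8 bits; e.g. addr = 5 reads the word
         	 * at offset 4 and returns its second byte.
         	 */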
   1236 	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
   1237 
   1238 	/* Disable access. */
   1239 	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
   1240 
   1241 	/* Unlock. */
   1242 	CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
   1243 
   1244 	return 0;
   1245 }
   1246 
   1247 /*
   1248  * Read a sequence of bytes from NVRAM.
   1249  */
   1250 static int
   1251 bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
   1252 {
   1253 	int error = 0, i;
   1254 	uint8_t byte = 0;
   1255 
   1256 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
   1257 		return 1;
   1258 
   1259 	for (i = 0; i < cnt; i++) {
   1260 		error = bge_nvram_getbyte(sc, off + i, &byte);
   1261 		if (error)
   1262 			break;
   1263 		*(dest + i) = byte;
   1264 	}
   1265 
   1266 	return (error ? 1 : 0);
   1267 }
   1268 
   1269 /*
   1270  * Read a byte of data stored in the EEPROM at address 'addr.' The
   1271  * BCM570x supports both the traditional bitbang interface and an
   1272  * auto access interface for reading the EEPROM. We use the auto
   1273  * access method.
   1274  */
   1275 static uint8_t
   1276 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
   1277 {
   1278 	int i;
   1279 	uint32_t byte = 0;
   1280 
   1281 	/*
   1282 	 * Enable use of auto EEPROM access so we can avoid
   1283 	 * having to use the bitbang method.
   1284 	 */
   1285 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
   1286 
   1287 	/* Reset the EEPROM, load the clock period. */
   1288 	CSR_WRITE_4(sc, BGE_EE_ADDR,
   1289 	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
   1290 	DELAY(20);
   1291 
   1292 	/* Issue the read EEPROM command. */
   1293 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
   1294 
   1295 	/* Wait for completion */
   1296 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
   1297 		DELAY(10);
   1298 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
   1299 			break;
   1300 	}
   1301 
   1302 	if (i == BGE_TIMEOUT * 10) {
   1303 		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
   1304 		return 1;
   1305 	}
   1306 
   1307 	/* Get result. */
   1308 	byte = CSR_READ_4(sc, BGE_EE_DATA);
   1309 
   1310 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
   1311 
   1312 	return 0;
   1313 }
   1314 
   1315 /*
   1316  * Read a sequence of bytes from the EEPROM.
   1317  */
   1318 static int
   1319 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
   1320 {
   1321 	int error = 0, i;
   1322 	uint8_t byte = 0;
   1323 	char *dest = destv;
   1324 
   1325 	for (i = 0; i < cnt; i++) {
   1326 		error = bge_eeprom_getbyte(sc, off + i, &byte);
   1327 		if (error)
   1328 			break;
   1329 		*(dest + i) = byte;
   1330 	}
   1331 
   1332 	return (error ? 1 : 0);
   1333 }
   1334 
   1335 static int
   1336 bge_miibus_readreg(device_t dev, int phy, int reg)
   1337 {
   1338 	struct bge_softc *sc = device_private(dev);
   1339 	uint32_t val;
   1340 	uint32_t autopoll;
   1341 	int i;
   1342 
   1343 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
   1344 		return 0;
   1345 
   1346 	/* Reading with autopolling on may trigger PCI errors */
   1347 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
   1348 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
   1349 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
   1350 		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
   1351 		DELAY(80);
   1352 	}
   1353 
   1354 	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
   1355 	    BGE_MIPHY(phy) | BGE_MIREG(reg));
   1356 
   1357 	for (i = 0; i < BGE_TIMEOUT; i++) {
   1358 		delay(10);
   1359 		val = CSR_READ_4(sc, BGE_MI_COMM);
   1360 		if (!(val & BGE_MICOMM_BUSY)) {
   1361 			DELAY(5);
   1362 			val = CSR_READ_4(sc, BGE_MI_COMM);
   1363 			break;
   1364 		}
   1365 	}
   1366 
   1367 	if (i == BGE_TIMEOUT) {
   1368 		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
   1369 		val = 0;
   1370 		goto done;
   1371 	}
   1372 
   1373 done:
   1374 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
   1375 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
   1376 		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
   1377 		DELAY(80);
   1378 	}
   1379 
   1380 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
   1381 
   1382 	if (val & BGE_MICOMM_READFAIL)
   1383 		return 0;
   1384 
   1385 	return (val & 0xFFFF);
   1386 }
   1387 
   1388 static void
   1389 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
   1390 {
   1391 	struct bge_softc *sc = device_private(dev);
   1392 	uint32_t autopoll;
   1393 	int i;
   1394 
   1395 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
   1396 	    (reg == MII_GTCR || reg == BRGPHY_MII_AUXCTL))
   1397 		return;
   1398 
   1399 	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
   1400 		return;
   1401 
   1402 	/* Reading with autopolling on may trigger PCI errors */
   1403 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
   1404 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
   1405 		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
   1406 		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
   1407 		DELAY(80);
   1408 	}
   1409 
   1410 	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
   1411 	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
   1412 
   1413 	for (i = 0; i < BGE_TIMEOUT; i++) {
   1414 		delay(10);
   1415 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
   1416 			delay(5);
   1417 			CSR_READ_4(sc, BGE_MI_COMM);
   1418 			break;
   1419 		}
   1420 	}
   1421 
   1422 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
   1423 		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
   1424 		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
   1425 		delay(80);
   1426 	}
   1427 
   1428 	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
   1429 
   1430 	if (i == BGE_TIMEOUT)
    1431 		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
   1432 }
   1433 
   1434 static void
   1435 bge_miibus_statchg(struct ifnet *ifp)
   1436 {
   1437 	struct bge_softc *sc = ifp->if_softc;
   1438 	struct mii_data *mii = &sc->bge_mii;
   1439 	uint32_t mac_mode, rx_mode, tx_mode;
   1440 
   1441 	/*
   1442 	 * Get flow control negotiation result.
   1443 	 */
   1444 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   1445 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
   1446 		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   1447 
   1448 	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
   1449 	    mii->mii_media_status & IFM_ACTIVE &&
   1450 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
   1451 		BGE_STS_SETBIT(sc, BGE_STS_LINK);
   1452 	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
   1453 	    (!(mii->mii_media_status & IFM_ACTIVE) ||
   1454 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
   1455 		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   1456 
   1457 	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
   1458 		return;
   1459 
   1460 	/* Set the port mode (MII/GMII) to match the link speed. */
   1461 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
   1462 	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
   1463 	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
   1464 	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
   1465 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
   1466 	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
   1467 		mac_mode |= BGE_PORTMODE_GMII;
   1468 	else
   1469 		mac_mode |= BGE_PORTMODE_MII;
   1470 
   1471 	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
   1472 	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
   1473 	if ((mii->mii_media_active & IFM_FDX) != 0) {
   1474 		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
   1475 			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
   1476 		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
   1477 			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
   1478 	} else
   1479 		mac_mode |= BGE_MACMODE_HALF_DUPLEX;
   1480 
   1481 	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode);
   1482 	DELAY(40);
   1483 	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
   1484 	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
   1485 }
   1486 
   1487 /*
   1488  * Update rx threshold levels to values in a particular slot
   1489  * of the interrupt-mitigation table bge_rx_threshes.
   1490  */
   1491 static void
   1492 bge_set_thresh(struct ifnet *ifp, int lvl)
   1493 {
   1494 	struct bge_softc *sc = ifp->if_softc;
   1495 	int s;
   1496 
   1497 	/* For now, just save the new Rx-intr thresholds and record
   1498 	 * that a threshold update is pending.  Updating the hardware
   1499 	 * registers here (even at splhigh()) is observed to
    1500 	 * occasionally cause glitches where Rx-interrupts are not
   1501 	 * honoured for up to 10 seconds. jonathan (at) NetBSD.org, 2003-04-05
   1502 	 */
   1503 	s = splnet();
   1504 	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
   1505 	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
   1506 	sc->bge_pending_rxintr_change = 1;
   1507 	splx(s);
   1508 }
   1509 
   1510 
   1511 /*
   1512  * Update Rx thresholds of all bge devices
   1513  */
   1514 static void
   1515 bge_update_all_threshes(int lvl)
   1516 {
   1517 	struct ifnet *ifp;
   1518 	const char * const namebuf = "bge";
   1519 	int namelen;
   1520 	int s;
   1521 
   1522 	if (lvl < 0)
   1523 		lvl = 0;
   1524 	else if (lvl >= NBGE_RX_THRESH)
   1525 		lvl = NBGE_RX_THRESH - 1;
   1526 
   1527 	namelen = strlen(namebuf);
   1528 	/*
   1529 	 * Now search all the interfaces for this name/number
   1530 	 */
   1531 	s = pserialize_read_enter();
   1532 	IFNET_READER_FOREACH(ifp) {
   1533 		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
   1534 		      continue;
   1535 		/* We got a match: update if doing auto-threshold-tuning */
   1536 		if (bge_auto_thresh)
   1537 			bge_set_thresh(ifp, lvl);
   1538 	}
   1539 	pserialize_read_exit(s);
   1540 }
   1541 
   1542 /*
   1543  * Handle events that have triggered interrupts.
   1544  */
   1545 static void
   1546 bge_handle_events(struct bge_softc *sc)
   1547 {
   1548 
   1549 	return;
   1550 }
   1551 
   1552 /*
   1553  * Memory management for jumbo frames.
   1554  */
   1555 
   1556 static int
   1557 bge_alloc_jumbo_mem(struct bge_softc *sc)
   1558 {
   1559 	char *ptr, *kva;
   1560 	bus_dma_segment_t	seg;
   1561 	int		i, rseg, state, error;
   1562 	struct bge_jpool_entry   *entry;
   1563 
   1564 	state = error = 0;
   1565 
   1566 	/* Grab a big chunk o' storage. */
   1567 	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
   1568 	     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
   1569 		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
   1570 		return ENOBUFS;
   1571 	}
   1572 
   1573 	state = 1;
   1574 	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
   1575 	    BUS_DMA_NOWAIT)) {
   1576 		aprint_error_dev(sc->bge_dev,
   1577 		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
   1578 		error = ENOBUFS;
   1579 		goto out;
   1580 	}
   1581 
   1582 	state = 2;
   1583 	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
   1584 	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
   1585 		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
   1586 		error = ENOBUFS;
   1587 		goto out;
   1588 	}
   1589 
   1590 	state = 3;
   1591 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
   1592 	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
   1593 		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
   1594 		error = ENOBUFS;
   1595 		goto out;
   1596 	}
   1597 
   1598 	state = 4;
   1599 	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
   1600 	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));
   1601 
   1602 	SLIST_INIT(&sc->bge_jfree_listhead);
   1603 	SLIST_INIT(&sc->bge_jinuse_listhead);
   1604 
   1605 	/*
   1606 	 * Now divide it up into 9K pieces and save the addresses
   1607 	 * in an array.
   1608 	 */
   1609 	ptr = sc->bge_cdata.bge_jumbo_buf;
   1610 	for (i = 0; i < BGE_JSLOTS; i++) {
   1611 		sc->bge_cdata.bge_jslots[i] = ptr;
   1612 		ptr += BGE_JLEN;
   1613 		entry = malloc(sizeof(struct bge_jpool_entry),
   1614 		    M_DEVBUF, M_NOWAIT);
   1615 		if (entry == NULL) {
   1616 			aprint_error_dev(sc->bge_dev,
   1617 			    "no memory for jumbo buffer queue!\n");
   1618 			error = ENOBUFS;
   1619 			goto out;
   1620 		}
   1621 		entry->slot = i;
   1622 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
   1623 				 entry, jpool_entries);
   1624 	}
   1625 out:
   1626 	if (error != 0) {
   1627 		switch (state) {
   1628 		case 4:
   1629 			bus_dmamap_unload(sc->bge_dmatag,
   1630 			    sc->bge_cdata.bge_rx_jumbo_map);
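         			/* FALLTHROUGH */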
   1631 		case 3:
   1632 			bus_dmamap_destroy(sc->bge_dmatag,
   1633 			    sc->bge_cdata.bge_rx_jumbo_map);
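         			/* FALLTHROUGH */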
   1634 		case 2:
   1635 			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
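         			/* FALLTHROUGH */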
   1636 		case 1:
   1637 			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
   1638 			break;
   1639 		default:
   1640 			break;
   1641 		}
   1642 	}
   1643 
   1644 	return error;
   1645 }
   1646 
   1647 /*
   1648  * Allocate a jumbo buffer.
   1649  */
   1650 static void *
   1651 bge_jalloc(struct bge_softc *sc)
   1652 {
   1653 	struct bge_jpool_entry   *entry;
   1654 
   1655 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
   1656 
   1657 	if (entry == NULL) {
   1658 		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
   1659 		return NULL;
   1660 	}
   1661 
   1662 	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
   1663 	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
   1664 	return (sc->bge_cdata.bge_jslots[entry->slot]);
   1665 }
   1666 
   1667 /*
   1668  * Release a jumbo buffer.
   1669  */
   1670 static void
   1671 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
   1672 {
   1673 	struct bge_jpool_entry *entry;
   1674 	struct bge_softc *sc;
   1675 	int i, s;
   1676 
   1677 	/* Extract the softc struct pointer. */
   1678 	sc = (struct bge_softc *)arg;
   1679 
   1680 	if (sc == NULL)
   1681 		panic("bge_jfree: can't find softc pointer!");
   1682 
   1683 	/* calculate the slot this buffer belongs to */
   1684 
   1685 	i = ((char *)buf
   1686 	     - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
   1687 
   1688 	if ((i < 0) || (i >= BGE_JSLOTS))
   1689 		panic("bge_jfree: asked to free buffer that we don't manage!");
   1690 
   1691 	s = splvm();
   1692 	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
   1693 	if (entry == NULL)
   1694 		panic("bge_jfree: buffer not in use!");
   1695 	entry->slot = i;
   1696 	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
   1697 	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
   1698 
   1699 	if (__predict_true(m != NULL))
   1700   		pool_cache_put(mb_cache, m);
   1701 	splx(s);
   1702 }
   1703 
   1704 
   1705 /*
   1706  * Initialize a standard receive ring descriptor.
   1707  */
   1708 static int
   1709 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
   1710     bus_dmamap_t dmamap)
   1711 {
   1712 	struct mbuf		*m_new = NULL;
   1713 	struct bge_rx_bd	*r;
   1714 	int			error;
   1715 
   1716 	if (dmamap == NULL)
   1717 		dmamap = sc->bge_cdata.bge_rx_std_map[i];
   1718 
   1719 	if (dmamap == NULL) {
   1720 		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
   1721 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
   1722 		if (error != 0)
   1723 			return error;
   1724 	}
   1725 
   1726 	sc->bge_cdata.bge_rx_std_map[i] = dmamap;
   1727 
   1728 	if (m == NULL) {
   1729 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
   1730 		if (m_new == NULL)
   1731 			return ENOBUFS;
   1732 
   1733 		MCLGET(m_new, M_DONTWAIT);
   1734 		if (!(m_new->m_flags & M_EXT)) {
   1735 			m_freem(m_new);
   1736 			return ENOBUFS;
   1737 		}
   1738 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
   1739 
   1740 	} else {
   1741 		m_new = m;
   1742 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
   1743 		m_new->m_data = m_new->m_ext.ext_buf;
   1744 	}
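         	/*
         	 * ETHER_ALIGN (2 bytes) shifts the payload so the IP header
         	 * lands on a 32-bit boundary; chips flagged BGEF_RX_ALIGNBUG
         	 * do not get the offset.
         	 */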
   1745 	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
   1746 	    m_adj(m_new, ETHER_ALIGN);
   1747 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
   1748 	    BUS_DMA_READ|BUS_DMA_NOWAIT)) {
   1749 		m_freem(m_new);
   1750 		return ENOBUFS;
   1751 	}
   1752 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
   1753 	    BUS_DMASYNC_PREREAD);
   1754 
   1755 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
   1756 	r = &sc->bge_rdata->bge_rx_std_ring[i];
   1757 	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
   1758 	r->bge_flags = BGE_RXBDFLAG_END;
   1759 	r->bge_len = m_new->m_len;
   1760 	r->bge_idx = i;
   1761 
   1762 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   1763 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
   1764 		i * sizeof (struct bge_rx_bd),
   1765 	    sizeof (struct bge_rx_bd),
   1766 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
   1767 
   1768 	return 0;
   1769 }
   1770 
   1771 /*
   1772  * Initialize a jumbo receive ring descriptor. This allocates
   1773  * a jumbo buffer from the pool managed internally by the driver.
   1774  */
   1775 static int
   1776 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
   1777 {
   1778 	struct mbuf *m_new = NULL;
   1779 	struct bge_rx_bd *r;
   1780 	void *buf = NULL;
   1781 
   1782 	if (m == NULL) {
   1783 
   1784 		/* Allocate the mbuf. */
   1785 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
   1786 		if (m_new == NULL)
   1787 			return ENOBUFS;
   1788 
   1789 		/* Allocate the jumbo buffer */
   1790 		buf = bge_jalloc(sc);
   1791 		if (buf == NULL) {
   1792 			m_freem(m_new);
   1793 			aprint_error_dev(sc->bge_dev,
   1794 			    "jumbo allocation failed -- packet dropped!\n");
   1795 			return ENOBUFS;
   1796 		}
   1797 
   1798 		/* Attach the buffer to the mbuf. */
   1799 		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
   1800 		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
   1801 		    bge_jfree, sc);
   1802 		m_new->m_flags |= M_EXT_RW;
   1803 	} else {
   1804 		m_new = m;
   1805 		buf = m_new->m_data = m_new->m_ext.ext_buf;
   1806 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
   1807 	}
   1808 	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
   1809 	    m_adj(m_new, ETHER_ALIGN);
   1810 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
   1811 	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
   1812 	    BUS_DMASYNC_PREREAD);
   1813 	/* Set up the descriptor. */
   1814 	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
   1815 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
   1816 	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
   1817 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
   1818 	r->bge_len = m_new->m_len;
   1819 	r->bge_idx = i;
   1820 
   1821 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   1822 	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
   1823 		i * sizeof (struct bge_rx_bd),
   1824 	    sizeof (struct bge_rx_bd),
   1825 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
   1826 
   1827 	return 0;
   1828 }
   1829 
   1830 /*
   1831  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
    1832  * that's 1MB of memory, which is a lot. For now, we fill only the first
   1833  * 256 ring entries and hope that our CPU is fast enough to keep up with
   1834  * the NIC.
   1835  */
   1836 static int
   1837 bge_init_rx_ring_std(struct bge_softc *sc)
   1838 {
   1839 	int i;
   1840 
   1841 	if (sc->bge_flags & BGEF_RXRING_VALID)
   1842 		return 0;
   1843 
   1844 	for (i = 0; i < BGE_SSLOTS; i++) {
   1845 		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
   1846 			return ENOBUFS;
   1847 	}
   1848 
   1849 	sc->bge_std = i - 1;
   1850 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
   1851 
   1852 	sc->bge_flags |= BGEF_RXRING_VALID;
   1853 
   1854 	return 0;
   1855 }
   1856 
   1857 static void
   1858 bge_free_rx_ring_std(struct bge_softc *sc, bool disable)
   1859 {
   1860 	int i;
   1861 
   1862 	if (!(sc->bge_flags & BGEF_RXRING_VALID))
   1863 		return;
   1864 
   1865 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
   1866 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
   1867 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
   1868 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
   1869 			if (disable) {
   1870 				bus_dmamap_destroy(sc->bge_dmatag,
   1871 				    sc->bge_cdata.bge_rx_std_map[i]);
   1872 				sc->bge_cdata.bge_rx_std_map[i] = NULL;
   1873 			}
   1874 		}
   1875 		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
   1876 		    sizeof(struct bge_rx_bd));
   1877 	}
   1878 
   1879 	sc->bge_flags &= ~BGEF_RXRING_VALID;
   1880 }
   1881 
   1882 static int
   1883 bge_init_rx_ring_jumbo(struct bge_softc *sc)
   1884 {
   1885 	int i;
   1886 	volatile struct bge_rcb *rcb;
   1887 
   1888 	if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID)
   1889 		return 0;
   1890 
   1891 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
   1892 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
   1893 			return ENOBUFS;
   1894 	}
   1895 
   1896 	sc->bge_jumbo = i - 1;
   1897 	sc->bge_flags |= BGEF_JUMBO_RXRING_VALID;
   1898 
   1899 	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
   1900 	rcb->bge_maxlen_flags = 0;
   1901 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
   1902 
   1903 	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
   1904 
   1905 	return 0;
   1906 }
   1907 
   1908 static void
   1909 bge_free_rx_ring_jumbo(struct bge_softc *sc)
   1910 {
   1911 	int i;
   1912 
   1913 	if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID))
   1914 		return;
   1915 
   1916 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
   1917 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
   1918 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
   1919 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
   1920 		}
   1921 		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
   1922 		    sizeof(struct bge_rx_bd));
   1923 	}
   1924 
   1925 	sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID;
   1926 }
   1927 
   1928 static void
   1929 bge_free_tx_ring(struct bge_softc *sc, bool disable)
   1930 {
   1931 	int i;
   1932 	struct txdmamap_pool_entry *dma;
   1933 
   1934 	if (!(sc->bge_flags & BGEF_TXRING_VALID))
   1935 		return;
   1936 
   1937 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
   1938 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
   1939 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
   1940 			sc->bge_cdata.bge_tx_chain[i] = NULL;
   1941 			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
   1942 					    link);
   1943 			sc->txdma[i] = 0;
   1944 		}
   1945 		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
   1946 		    sizeof(struct bge_tx_bd));
   1947 	}
   1948 
   1949 	if (disable) {
   1950 		while ((dma = SLIST_FIRST(&sc->txdma_list))) {
   1951 			SLIST_REMOVE_HEAD(&sc->txdma_list, link);
   1952 			bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
   1953 			if (sc->bge_dma64) {
   1954 				bus_dmamap_destroy(sc->bge_dmatag32,
   1955 				    dma->dmamap32);
   1956 			}
   1957 			free(dma, M_DEVBUF);
   1958 		}
   1959 		SLIST_INIT(&sc->txdma_list);
   1960 	}
   1961 
   1962 	sc->bge_flags &= ~BGEF_TXRING_VALID;
   1963 }
   1964 
   1965 static int
   1966 bge_init_tx_ring(struct bge_softc *sc)
   1967 {
   1968 	struct ifnet *ifp = &sc->ethercom.ec_if;
   1969 	int i;
   1970 	bus_dmamap_t dmamap, dmamap32;
   1971 	bus_size_t maxsegsz;
   1972 	struct txdmamap_pool_entry *dma;
   1973 
   1974 	if (sc->bge_flags & BGEF_TXRING_VALID)
   1975 		return 0;
   1976 
   1977 	sc->bge_txcnt = 0;
   1978 	sc->bge_tx_saved_considx = 0;
   1979 
   1980 	/* Initialize transmit producer index for host-memory send ring. */
   1981 	sc->bge_tx_prodidx = 0;
   1982 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
   1983 	/* 5700 b2 errata */
   1984 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
   1985 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
   1986 
   1987 	/* NIC-memory send ring not used; initialize to zero. */
   1988 	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
   1989 	/* 5700 b2 errata */
   1990 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
   1991 		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
   1992 
   1993 	/* Limit DMA segment size for some chips */
   1994 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) &&
   1995 	    (ifp->if_mtu <= ETHERMTU))
   1996 		maxsegsz = 2048;
   1997 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
   1998 		maxsegsz = 4096;
   1999 	else
   2000 		maxsegsz = ETHER_MAX_LEN_JUMBO;
   2001 
   2002 	if (SLIST_FIRST(&sc->txdma_list) != NULL)
   2003 		goto alloc_done;
   2004 
   2005 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
   2006 		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
   2007 		    BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
   2008 		    &dmamap))
   2009 			return ENOBUFS;
   2010 		if (dmamap == NULL)
   2011 			panic("dmamap NULL in bge_init_tx_ring");
   2012 		if (sc->bge_dma64) {
   2013 			if (bus_dmamap_create(sc->bge_dmatag32, BGE_TXDMA_MAX,
   2014 			    BGE_NTXSEG, maxsegsz, 0,
   2015 			    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
   2016 			    &dmamap32)) {
   2017 				bus_dmamap_destroy(sc->bge_dmatag, dmamap);
   2018 				return ENOBUFS;
   2019 			}
   2020 			if (dmamap32 == NULL)
   2021 				panic("dmamap32 NULL in bge_init_tx_ring");
   2022 		} else
   2023 			dmamap32 = dmamap;
   2024 		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
   2025 		if (dma == NULL) {
   2026 			aprint_error_dev(sc->bge_dev,
   2027 			    "can't alloc txdmamap_pool_entry\n");
   2028 			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
   2029 			if (sc->bge_dma64)
   2030 				bus_dmamap_destroy(sc->bge_dmatag32, dmamap32);
   2031 			return ENOMEM;
   2032 		}
   2033 		dma->dmamap = dmamap;
   2034 		dma->dmamap32 = dmamap32;
   2035 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
   2036 	}
   2037 alloc_done:
   2038 	sc->bge_flags |= BGEF_TXRING_VALID;
   2039 
   2040 	return 0;
   2041 }
   2042 
   2043 static void
   2044 bge_setmulti(struct bge_softc *sc)
   2045 {
   2046 	struct ethercom		*ac = &sc->ethercom;
   2047 	struct ifnet		*ifp = &ac->ec_if;
   2048 	struct ether_multi	*enm;
   2049 	struct ether_multistep  step;
   2050 	uint32_t		hashes[4] = { 0, 0, 0, 0 };
   2051 	uint32_t		h;
   2052 	int			i;
   2053 
   2054 	if (ifp->if_flags & IFF_PROMISC)
   2055 		goto allmulti;
   2056 
   2057 	/* Now program new ones. */
   2058 	ETHER_FIRST_MULTI(step, ac, enm);
   2059 	while (enm != NULL) {
   2060 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   2061 			/*
   2062 			 * We must listen to a range of multicast addresses.
   2063 			 * For now, just accept all multicasts, rather than
   2064 			 * trying to set only those filter bits needed to match
   2065 			 * the range.  (At this time, the only use of address
   2066 			 * ranges is for IP multicast routing, for which the
   2067 			 * range is big enough to require all bits set.)
   2068 			 */
   2069 			goto allmulti;
   2070 		}
   2071 
   2072 		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
   2073 
   2074 		/* Just want the 7 least-significant bits. */
   2075 		h &= 0x7f;
   2076 
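         		/*
         		 * Bits 6:5 of the hash select one of the four 32-bit
         		 * hash registers; bits 4:0 select the bit within it.
         		 */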
   2077 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
   2078 		ETHER_NEXT_MULTI(step, enm);
   2079 	}
   2080 
   2081 	ifp->if_flags &= ~IFF_ALLMULTI;
   2082 	goto setit;
   2083 
   2084  allmulti:
   2085 	ifp->if_flags |= IFF_ALLMULTI;
   2086 	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
   2087 
   2088  setit:
   2089 	for (i = 0; i < 4; i++)
   2090 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
   2091 }
   2092 
   2093 static void
   2094 bge_sig_pre_reset(struct bge_softc *sc, int type)
   2095 {
   2096 
   2097 	/*
    2098 	 * Some chips don't like this, so only do it if ASF is enabled.
   2099 	 */
   2100 	if (sc->bge_asf_mode)
   2101 		bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
   2102 
   2103 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
   2104 		switch (type) {
   2105 		case BGE_RESET_START:
   2106 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2107 			    BGE_FW_DRV_STATE_START);
   2108 			break;
   2109 		case BGE_RESET_SHUTDOWN:
   2110 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2111 			    BGE_FW_DRV_STATE_UNLOAD);
   2112 			break;
   2113 		case BGE_RESET_SUSPEND:
   2114 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2115 			    BGE_FW_DRV_STATE_SUSPEND);
   2116 			break;
   2117 		}
   2118 	}
   2119 
   2120 	if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
   2121 		bge_ape_driver_state_change(sc, type);
   2122 }
   2123 
   2124 static void
   2125 bge_sig_post_reset(struct bge_softc *sc, int type)
   2126 {
   2127 
   2128 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
   2129 		switch (type) {
   2130 		case BGE_RESET_START:
   2131 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2132 			    BGE_FW_DRV_STATE_START_DONE);
   2133 			/* START DONE */
   2134 			break;
   2135 		case BGE_RESET_SHUTDOWN:
   2136 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2137 			    BGE_FW_DRV_STATE_UNLOAD_DONE);
   2138 			break;
   2139 		}
   2140 	}
   2141 
   2142 	if (type == BGE_RESET_SHUTDOWN)
   2143 		bge_ape_driver_state_change(sc, type);
   2144 }
   2145 
   2146 static void
   2147 bge_sig_legacy(struct bge_softc *sc, int type)
   2148 {
   2149 
   2150 	if (sc->bge_asf_mode) {
   2151 		switch (type) {
   2152 		case BGE_RESET_START:
   2153 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2154 			    BGE_FW_DRV_STATE_START);
   2155 			break;
   2156 		case BGE_RESET_SHUTDOWN:
   2157 			bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
   2158 			    BGE_FW_DRV_STATE_UNLOAD);
   2159 			break;
   2160 		}
   2161 	}
   2162 }
   2163 
   2164 static void
   2165 bge_wait_for_event_ack(struct bge_softc *sc)
   2166 {
   2167 	int i;
   2168 
   2169 	/* wait up to 2500usec */
   2170 	for (i = 0; i < 250; i++) {
   2171 		if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
   2172 			BGE_RX_CPU_DRV_EVENT))
   2173 			break;
   2174 		DELAY(10);
   2175 	}
   2176 }
   2177 
   2178 static void
   2179 bge_stop_fw(struct bge_softc *sc)
   2180 {
   2181 
   2182 	if (sc->bge_asf_mode) {
   2183 		bge_wait_for_event_ack(sc);
   2184 
   2185 		bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
   2186 		CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
   2187 		    CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
   2188 
   2189 		bge_wait_for_event_ack(sc);
   2190 	}
   2191 }
   2192 
   2193 static int
   2194 bge_poll_fw(struct bge_softc *sc)
   2195 {
   2196 	uint32_t val;
   2197 	int i;
   2198 
   2199 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   2200 		for (i = 0; i < BGE_TIMEOUT; i++) {
   2201 			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
   2202 			if (val & BGE_VCPU_STATUS_INIT_DONE)
   2203 				break;
   2204 			DELAY(100);
   2205 		}
   2206 		if (i >= BGE_TIMEOUT) {
   2207 			aprint_error_dev(sc->bge_dev, "reset timed out\n");
   2208 			return -1;
   2209 		}
   2210 	} else {
   2211 		/*
   2212 		 * Poll the value location we just wrote until
   2213 		 * we see the 1's complement of the magic number.
   2214 		 * This indicates that the firmware initialization
   2215 		 * is complete.
   2216 		 * XXX 1000ms for Flash and 10000ms for SEEPROM.
   2217 		 */
   2218 		for (i = 0; i < BGE_TIMEOUT; i++) {
   2219 			val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
   2220 			if (val == ~BGE_SRAM_FW_MB_MAGIC)
   2221 				break;
   2222 			DELAY(10);
   2223 		}
   2224 
   2225 		if ((i >= BGE_TIMEOUT)
   2226 		    && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) {
   2227 			aprint_error_dev(sc->bge_dev,
   2228 			    "firmware handshake timed out, val = %x\n", val);
   2229 			return -1;
   2230 		}
   2231 	}
   2232 
   2233 	if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
   2234 		/* tg3 says we have to wait extra time */
   2235 		delay(10 * 1000);
   2236 	}
   2237 
   2238 	return 0;
   2239 }
   2240 
   2241 int
   2242 bge_phy_addr(struct bge_softc *sc)
   2243 {
   2244 	struct pci_attach_args *pa = &(sc->bge_pa);
   2245 	int phy_addr = 1;
   2246 
   2247 	/*
   2248 	 * PHY address mapping for various devices.
   2249 	 *
   2250 	 *          | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
   2251 	 * ---------+-------+-------+-------+-------+
   2252 	 * BCM57XX  |   1   |   X   |   X   |   X   |
   2253 	 * BCM5704  |   1   |   X   |   1   |   X   |
   2254 	 * BCM5717  |   1   |   8   |   2   |   9   |
   2255 	 * BCM5719  |   1   |   8   |   2   |   9   |
   2256 	 * BCM5720  |   1   |   8   |   2   |   9   |
   2257 	 *
   2258 	 *          | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
   2259 	 * ---------+-------+-------+-------+-------+
   2260 	 * BCM57XX  |   X   |   X   |   X   |   X   |
   2261 	 * BCM5704  |   X   |   X   |   X   |   X   |
   2262 	 * BCM5717  |   X   |   X   |   X   |   X   |
   2263 	 * BCM5719  |   3   |   10  |   4   |   11  |
   2264 	 * BCM5720  |   X   |   X   |   X   |   X   |
   2265 	 *
   2266 	 * Other addresses may respond but they are not
   2267 	 * IEEE compliant PHYs and should be ignored.
   2268 	 */
   2269 	switch (BGE_ASICREV(sc->bge_chipid)) {
   2270 	case BGE_ASICREV_BCM5717:
   2271 	case BGE_ASICREV_BCM5719:
   2272 	case BGE_ASICREV_BCM5720:
   2273 		phy_addr = pa->pa_function;
   2274 		if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
   2275 			phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
   2276 			    BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
   2277 		} else {
   2278 			phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
   2279 			    BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
   2280 		}
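         		/*
         		 * Copper PHYs thus end up at function + 1 and SerDes
         		 * PHYs at function + 8, matching the table above.
         		 */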
   2281 	}
   2282 
   2283 	return phy_addr;
   2284 }
   2285 
   2286 /*
   2287  * Do endian, PCI and DMA initialization. Also check the on-board ROM
   2288  * self-test results.
   2289  */
   2290 static int
   2291 bge_chipinit(struct bge_softc *sc)
   2292 {
   2293 	uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg;
   2294 	int i;
   2295 
   2296 	/* Set endianness before we access any non-PCI registers. */
   2297 	misc_ctl = BGE_INIT;
   2298 	if (sc->bge_flags & BGEF_TAGGED_STATUS)
   2299 		misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
   2300 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
   2301 	    misc_ctl);
   2302 
   2303 	/*
   2304 	 * Clear the MAC statistics block in the NIC's
   2305 	 * internal memory.
   2306 	 */
   2307 	for (i = BGE_STATS_BLOCK;
   2308 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
   2309 		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
   2310 
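         	/* Likewise clear the status block region in NIC memory. */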
   2311 	for (i = BGE_STATUS_BLOCK;
   2312 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
   2313 		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
   2314 
   2315 	/* 5717 workaround from tg3 */
   2316 	if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
   2317 		/* Save */
   2318 		mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
   2319 
    2320 		/* Temporarily modify MODE_CTL to control TLP */
   2321 		reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
   2322 		CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1);
   2323 
   2324 		/* Control TLP */
   2325 		reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
   2326 		    BGE_TLP_PHYCTL1);
   2327 		CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1,
   2328 		    reg | BGE_TLP_PHYCTL1_EN_L1PLLPD);
   2329 
   2330 		/* Restore */
   2331 		CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
   2332 	}
   2333 
   2334 	if (BGE_IS_57765_FAMILY(sc)) {
   2335 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
   2336 			/* Save */
   2337 			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
   2338 
    2339 			/* Temporarily modify MODE_CTL to control TLP */
   2340 			reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
   2341 			CSR_WRITE_4(sc, BGE_MODE_CTL,
   2342 			    reg | BGE_MODECTL_PCIE_TLPADDR1);
   2343 
   2344 			/* Control TLP */
   2345 			reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
   2346 			    BGE_TLP_PHYCTL5);
   2347 			CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5,
   2348 			    reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ);
   2349 
   2350 			/* Restore */
   2351 			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
   2352 		}
   2353 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
   2354 			/*
    2355 			 * For the 57766 and non-Ax versions of the 57765, the
    2356 			 * bootcode needs to set up the PCIE Fast Training Sequence (FTS)
   2357 			 * value to prevent transmit hangs.
   2358 			 */
   2359 			reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
   2360 			CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
   2361 			    reg | BGE_CPMU_PADRNG_CTL_RDIV2);
   2362 
   2363 			/* Save */
   2364 			mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
   2365 
    2366 			/* Temporarily modify MODE_CTL to control TLP */
   2367 			reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
   2368 			CSR_WRITE_4(sc, BGE_MODE_CTL,
   2369 			    reg | BGE_MODECTL_PCIE_TLPADDR0);
   2370 
   2371 			/* Control TLP */
   2372 			reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
   2373 			    BGE_TLP_FTSMAX);
   2374 			reg &= ~BGE_TLP_FTSMAX_MSK;
   2375 			CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX,
   2376 			    reg | BGE_TLP_FTSMAX_VAL);
   2377 
   2378 			/* Restore */
   2379 			CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
   2380 		}
   2381 
   2382 		reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
   2383 		reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
   2384 		reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
   2385 		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
   2386 	}
   2387 
   2388 	/* Set up the PCI DMA control register. */
   2389 	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
   2390 	if (sc->bge_flags & BGEF_PCIE) {
   2391 		/* Read watermark not used, 128 bytes for write. */
   2392 		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
   2393 		    device_xname(sc->bge_dev)));
   2394 		if (sc->bge_mps >= 256)
   2395 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
   2396 		else
   2397 			dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
   2398 	} else if (sc->bge_flags & BGEF_PCIX) {
    2399 	  	DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
   2400 		    device_xname(sc->bge_dev)));
   2401 		/* PCI-X bus */
   2402 		if (BGE_IS_5714_FAMILY(sc)) {
   2403 			/* 256 bytes for read and write. */
   2404 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
   2405 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
   2406 
   2407 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
   2408 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
   2409 			else
   2410 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
   2411 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
   2412 			/*
   2413 			 * In the BCM5703, the DMA read watermark should
   2414 			 * be set to less than or equal to the maximum
   2415 			 * memory read byte count of the PCI-X command
   2416 			 * register.
   2417 			 */
   2418 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
   2419 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
   2420 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
   2421 			/* 1536 bytes for read, 384 bytes for write. */
   2422 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
   2423 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
   2424 		} else {
   2425 			/* 384 bytes for read and write. */
   2426 			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
   2427 			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
   2428 			    (0x0F);
   2429 		}
   2430 
   2431 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
   2432 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
   2433 			uint32_t tmp;
   2434 
   2435 			/* Set ONEDMA_ATONCE for hardware workaround. */
   2436 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
   2437 			if (tmp == 6 || tmp == 7)
   2438 				dma_rw_ctl |=
   2439 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
   2440 
   2441 			/* Set PCI-X DMA write workaround. */
   2442 			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
   2443 		}
   2444 	} else {
   2445 		/* Conventional PCI bus: 256 bytes for read and write. */
   2446 	  	DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
   2447 		    device_xname(sc->bge_dev)));
   2448 		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
   2449 		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
   2450 
   2451 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
   2452 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
   2453 			dma_rw_ctl |= 0x0F;
   2454 	}
   2455 
   2456 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   2457 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
   2458 		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
   2459 		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
   2460 
   2461 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
   2462 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
   2463 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
   2464 
   2465 	if (BGE_IS_57765_PLUS(sc)) {
   2466 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
   2467 		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
   2468 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
   2469 
   2470 		/*
   2471 		 * Enable HW workaround for controllers that misinterpret
   2472 		 * a status tag update and leave interrupts permanently
   2473 		 * disabled.
   2474 		 */
   2475 		if (!BGE_IS_57765_FAMILY(sc) &&
   2476 		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717)
   2477 			dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
   2478 	}
   2479 
   2480 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
   2481 	    dma_rw_ctl);
   2482 
   2483 	/*
   2484 	 * Set up general mode register.
   2485 	 */
   2486 	mode_ctl = BGE_DMA_SWAP_OPTIONS;
   2487 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   2488 		/* Retain Host-2-BMC settings written by APE firmware. */
   2489 		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
   2490 		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
   2491 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
   2492 		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
   2493 	}
   2494 	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
   2495 	    BGE_MODECTL_TX_NO_PHDR_CSUM;
   2496 
   2497 	/*
    2498 	 * BCM5701 B5 has a bug causing data corruption when using
   2499 	 * 64-bit DMA reads, which can be terminated early and then
   2500 	 * completed later as 32-bit accesses, in combination with
   2501 	 * certain bridges.
   2502 	 */
   2503 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
   2504 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
   2505 		mode_ctl |= BGE_MODECTL_FORCE_PCI32;
   2506 
   2507 	/*
   2508 	 * Tell the firmware the driver is running
   2509 	 */
   2510 	if (sc->bge_asf_mode & ASF_STACKUP)
   2511 		mode_ctl |= BGE_MODECTL_STACKUP;
   2512 
   2513 	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
   2514 
   2515 	/*
   2516 	 * Disable memory write invalidate.  Apparently it is not supported
   2517 	 * properly by these devices.
   2518 	 */
   2519 	PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
   2520 		   PCI_COMMAND_INVALIDATE_ENABLE);
   2521 
   2522 #ifdef __brokenalpha__
   2523 	/*
    2524 	 * Must ensure that we do not cross an 8K (bytes) boundary
   2525 	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
   2526 	 * restriction on some ALPHA platforms with early revision
   2527 	 * 21174 PCI chipsets, such as the AlphaPC 164lx
   2528 	 */
   2529 	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
   2530 #endif
   2531 
   2532 	/* Set the timer prescaler (always 66MHz) */
   2533 	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
   2534 
   2535 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   2536 		DELAY(40);	/* XXX */
   2537 
   2538 		/* Put PHY into ready state */
   2539 		BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
   2540 		DELAY(40);
   2541 	}
   2542 
   2543 	return 0;
   2544 }
   2545 
   2546 static int
   2547 bge_blockinit(struct bge_softc *sc)
   2548 {
   2549 	volatile struct bge_rcb	 *rcb;
   2550 	bus_size_t rcb_addr;
   2551 	struct ifnet *ifp = &sc->ethercom.ec_if;
   2552 	bge_hostaddr taddr;
   2553 	uint32_t	dmactl, mimode, val;
   2554 	int		i, limit;
   2555 
   2556 	/*
   2557 	 * Initialize the memory window pointer register so that
   2558 	 * we can access the first 32K of internal NIC RAM. This will
   2559 	 * allow us to set up the TX send ring RCBs and the RX return
   2560 	 * ring RCBs, plus other things which live in NIC memory.
   2561 	 */
   2562 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
   2563 
   2564 	if (!BGE_IS_5705_PLUS(sc)) {
   2565 		/* 57XX step 33 */
   2566 		/* Configure mbuf memory pool */
   2567 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
   2568 		    BGE_BUFFPOOL_1);
   2569 
   2570 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
   2571 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
   2572 		else
   2573 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
   2574 
   2575 		/* 57XX step 34 */
   2576 		/* Configure DMA resource pool */
   2577 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
   2578 		    BGE_DMA_DESCRIPTORS);
   2579 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
   2580 	}
   2581 
   2582 	/* 5718 step 11, 57XX step 35 */
   2583 	/*
    2584 	 * Configure mbuf pool watermarks. New Broadcom docs strongly
   2585 	 * recommend these.
   2586 	 */
   2587 	if (BGE_IS_5717_PLUS(sc)) {
   2588 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
   2589 		if (ifp->if_mtu > ETHERMTU) {
   2590 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
   2591 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
   2592 		} else {
   2593 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
   2594 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
   2595 		}
   2596 	} else if (BGE_IS_5705_PLUS(sc)) {
   2597 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
   2598 
   2599 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   2600 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
   2601 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
   2602 		} else {
   2603 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
   2604 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
   2605 		}
   2606 	} else {
   2607 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
   2608 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
   2609 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
   2610 	}
   2611 
   2612 	/* 57XX step 36 */
   2613 	/* Configure DMA resource watermarks */
   2614 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
   2615 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
   2616 
   2617 	/* 5718 step 13, 57XX step 38 */
   2618 	/* Enable buffer manager */
   2619 	val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN;
   2620 	/*
   2621 	 * Change the arbitration algorithm of TXMBUF read request to
   2622 	 * round-robin instead of priority based for BCM5719.  When
   2623 	 * TXFIFO is almost empty, RDMA will hold its request until
   2624 	 * TXFIFO is not almost empty.
   2625 	 */
   2626 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
   2627 		val |= BGE_BMANMODE_NO_TX_UNDERRUN;
   2628 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
   2629 		sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
   2630 		sc->bge_chipid == BGE_CHIPID_BCM5720_A0)
   2631 		val |= BGE_BMANMODE_LOMBUF_ATTN;
   2632 	CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
   2633 
   2634 	/* 57XX step 39 */
   2635 	/* Poll for buffer manager start indication */
   2636 	for (i = 0; i < BGE_TIMEOUT * 2; i++) {
   2637 		DELAY(10);
   2638 		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
   2639 			break;
   2640 	}
   2641 
   2642 	if (i == BGE_TIMEOUT * 2) {
   2643 		aprint_error_dev(sc->bge_dev,
   2644 		    "buffer manager failed to start\n");
   2645 		return ENXIO;
   2646 	}
   2647 
   2648 	/* 57XX step 40 */
   2649 	/* Enable flow-through queues */
   2650 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
   2651 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
   2652 
   2653 	/* Wait until queue initialization is complete */
   2654 	for (i = 0; i < BGE_TIMEOUT * 2; i++) {
   2655 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
   2656 			break;
   2657 		DELAY(10);
   2658 	}
   2659 
   2660 	if (i == BGE_TIMEOUT * 2) {
   2661 		aprint_error_dev(sc->bge_dev,
   2662 		    "flow-through queue init failed\n");
   2663 		return ENXIO;
   2664 	}
   2665 
   2666 	/*
   2667 	 * Summary of rings supported by the controller:
   2668 	 *
   2669 	 * Standard Receive Producer Ring
   2670 	 * - This ring is used to feed receive buffers for "standard"
   2671 	 *   sized frames (typically 1536 bytes) to the controller.
   2672 	 *
   2673 	 * Jumbo Receive Producer Ring
   2674 	 * - This ring is used to feed receive buffers for jumbo sized
   2675 	 *   frames (i.e. anything bigger than the "standard" frames)
   2676 	 *   to the controller.
   2677 	 *
   2678 	 * Mini Receive Producer Ring
   2679 	 * - This ring is used to feed receive buffers for "mini"
   2680 	 *   sized frames to the controller.
   2681 	 * - This feature required external memory for the controller
   2682 	 *   but was never used in a production system.  Should always
   2683 	 *   be disabled.
   2684 	 *
   2685 	 * Receive Return Ring
   2686 	 * - After the controller has placed an incoming frame into a
    2687 	 *   receive buffer, that buffer is moved into a receive return
    2688 	 *   ring.  The driver is then responsible for passing the
   2689 	 *   buffer up to the stack.  Many versions of the controller
   2690 	 *   support multiple RR rings.
   2691 	 *
   2692 	 * Send Ring
   2693 	 * - This ring is used for outgoing frames.  Many versions of
   2694 	 *   the controller support multiple send rings.
   2695 	 */
   2696 
   2697 	/* 5718 step 15, 57XX step 41 */
   2698 	/* Initialize the standard RX ring control block */
   2699 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
   2700 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
   2701 	/* 5718 step 16 */
   2702 	if (BGE_IS_57765_PLUS(sc)) {
   2703 		/*
   2704 		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
   2705 		 * Bits 15-2 : Maximum RX frame size
   2706 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
   2707 		 * Bit 0     : Reserved
   2708 		 */
   2709 		rcb->bge_maxlen_flags =
   2710 		    BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
   2711 	} else if (BGE_IS_5705_PLUS(sc)) {
   2712 		/*
   2713 		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
   2714 		 * Bits 15-2 : Reserved (should be 0)
   2715 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
   2716 		 * Bit 0     : Reserved
   2717 		 */
   2718 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
   2719 	} else {
   2720 		/*
   2721 		 * Ring size is always XXX entries
   2722 		 * Bits 31-16: Maximum RX frame size
   2723 		 * Bits 15-2 : Reserved (should be 0)
   2724 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
   2725 		 * Bit 0     : Reserved
   2726 		 */
   2727 		rcb->bge_maxlen_flags =
   2728 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
   2729 	}
   2730 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
   2731 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   2732 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   2733 		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
   2734 	else
   2735 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
   2736 	/* Write the standard receive producer ring control block. */
   2737 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
   2738 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
   2739 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
   2740 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
   2741 
   2742 	/* Reset the standard receive producer ring producer index. */
   2743 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
   2744 
   2745 	/* 57XX step 42 */
   2746 	/*
    2747 	 * Initialize the jumbo RX ring control block.
   2748 	 * We set the 'ring disabled' bit in the flags
   2749 	 * field until we're actually ready to start
   2750 	 * using this ring (i.e. once we set the MTU
   2751 	 * high enough to require it).
   2752 	 */
   2753 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
   2754 		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
   2755 		BGE_HOSTADDR(rcb->bge_hostaddr,
   2756 		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
   2757 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
   2758 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
   2759 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
   2760 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   2761 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   2762 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
   2763 		else
   2764 			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
   2765 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
   2766 		    rcb->bge_hostaddr.bge_addr_hi);
   2767 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
   2768 		    rcb->bge_hostaddr.bge_addr_lo);
   2769 		/* Program the jumbo receive producer ring RCB parameters. */
   2770 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
   2771 		    rcb->bge_maxlen_flags);
   2772 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
   2773 		/* Reset the jumbo receive producer ring producer index. */
   2774 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
   2775 	}
   2776 
   2777 	/* 57XX step 43 */
   2778 	/* Disable the mini receive producer ring RCB. */
   2779 	if (BGE_IS_5700_FAMILY(sc)) {
   2780 		/* Set up dummy disabled mini ring RCB */
   2781 		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
   2782 		rcb->bge_maxlen_flags =
   2783 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
   2784 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
   2785 		    rcb->bge_maxlen_flags);
   2786 		/* Reset the mini receive producer ring producer index. */
   2787 		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
   2788 
   2789 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   2790 		    offsetof(struct bge_ring_data, bge_info),
   2791 		    sizeof (struct bge_gib),
   2792 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   2793 	}
   2794 
   2795 	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
   2796 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   2797 		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
   2798 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
   2799 		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
   2800 			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
   2801 			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
   2802 	}
   2803 	/* 5718 step 14, 57XX step 44 */
   2804 	/*
   2805 	 * The BD ring replenish thresholds control how often the
   2806 	 * hardware fetches new BD's from the producer rings in host
   2807 	 * memory.  Setting the value too low on a busy system can
    2808 	 * starve the hardware and reduce the throughput.
   2809 	 *
   2810 	 * Set the BD ring replenish thresholds. The recommended
   2811 	 * values are 1/8th the number of descriptors allocated to
   2812 	 * each ring, but since we try to avoid filling the entire
   2813 	 * ring we set these to the minimal value of 8.  This needs to
   2814 	 * be done on several of the supported chip revisions anyway,
   2815 	 * to work around HW bugs.
   2816 	 */
   2817 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
   2818 	if (BGE_IS_JUMBO_CAPABLE(sc))
   2819 		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
   2820 
   2821 	/* 5718 step 18 */
   2822 	if (BGE_IS_5717_PLUS(sc)) {
   2823 		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
   2824 		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
   2825 	}
   2826 
   2827 	/* 57XX step 45 */
   2828 	/*
   2829 	 * Disable all send rings by setting the 'ring disabled' bit
   2830 	 * in the flags field of all the TX send ring control blocks,
   2831 	 * located in NIC memory.
   2832 	 */
   2833 	if (BGE_IS_5700_FAMILY(sc)) {
   2834 		/* 5700 to 5704 had 16 send rings. */
   2835 		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
   2836 	} else if (BGE_IS_5717_PLUS(sc)) {
   2837 		limit = BGE_TX_RINGS_5717_MAX;
   2838 	} else if (BGE_IS_57765_FAMILY(sc)) {
   2839 		limit = BGE_TX_RINGS_57765_MAX;
   2840 	} else
   2841 		limit = 1;
   2842 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
   2843 	for (i = 0; i < limit; i++) {
   2844 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   2845 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
   2846 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
   2847 		rcb_addr += sizeof(struct bge_rcb);
   2848 	}
   2849 
   2850 	/* 57XX step 46 and 47 */
   2851 	/* Configure send ring RCB 0 (we use only the first ring) */
   2852 	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
   2853 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
   2854 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
   2855 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
   2856 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
   2857 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   2858 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   2859 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
   2860 	else
   2861 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
   2862 		    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
   2863 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   2864 	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
   2865 
   2866 	/* 57XX step 48 */
   2867 	/*
   2868 	 * Disable all receive return rings by setting the
    2869 	 * 'ring disabled' bit in the flags field of all the receive
   2870 	 * return ring control blocks, located in NIC memory.
   2871 	 */
   2872 	if (BGE_IS_5717_PLUS(sc)) {
   2873 		/* Should be 17, use 16 until we get an SRAM map. */
   2874 		limit = 16;
   2875 	} else if (BGE_IS_5700_FAMILY(sc))
   2876 		limit = BGE_RX_RINGS_MAX;
   2877 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
   2878 	    BGE_IS_57765_FAMILY(sc))
   2879 		limit = 4;
   2880 	else
   2881 		limit = 1;
   2882 	/* Disable all receive return rings */
   2883 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
   2884 	for (i = 0; i < limit; i++) {
   2885 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
   2886 		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
   2887 		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   2888 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
   2889 			BGE_RCB_FLAG_RING_DISABLED));
   2890 		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
   2891 		bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
   2892 		    (i * (sizeof(uint64_t))), 0);
   2893 		rcb_addr += sizeof(struct bge_rcb);
   2894 	}
   2895 
   2896 	/* 57XX step 49 */
   2897 	/*
   2898 	 * Set up receive return ring 0.  Note that the NIC address
   2899 	 * for RX return rings is 0x0.  The return rings live entirely
   2900 	 * within the host, so the nicaddr field in the RCB isn't used.
   2901 	 */
   2902 	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
   2903 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
   2904 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
   2905 	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
   2906 	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
   2907 	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
   2908 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
   2909 
   2910 	/* 5718 step 24, 57XX step 53 */
   2911 	/* Set random backoff seed for TX */
   2912 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
   2913 	    (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
   2914 		CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
   2915 		CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) &
   2916 	    BGE_TX_BACKOFF_SEED_MASK);
   2917 
   2918 	/* 5718 step 26, 57XX step 55 */
   2919 	/* Set inter-packet gap */
   2920 	val = 0x2620;
   2921 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   2922 		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
   2923 		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
   2924 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
   2925 
   2926 	/* 5718 step 27, 57XX step 56 */
   2927 	/*
   2928 	 * Specify which ring to use for packets that don't match
   2929 	 * any RX rules.
   2930 	 */
   2931 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
   2932 
   2933 	/* 5718 step 28, 57XX step 57 */
   2934 	/*
   2935 	 * Configure number of RX lists. One interrupt distribution
   2936 	 * list, sixteen active lists, one bad frames class.
   2937 	 */
   2938 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
   2939 
   2940 	/* 5718 step 29, 57XX step 58 */
    2941 	/* Initialize RX list placement stats mask. */
   2942 	if (BGE_IS_575X_PLUS(sc)) {
   2943 		val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK);
   2944 		val &= ~BGE_RXLPSTATCONTROL_DACK_FIX;
   2945 		CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val);
   2946 	} else
   2947 		CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
   2948 
   2949 	/* 5718 step 30, 57XX step 59 */
   2950 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
   2951 
   2952 	/* 5718 step 33, 57XX step 62 */
   2953 	/* Disable host coalescing until we get it set up */
   2954 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
   2955 
   2956 	/* 5718 step 34, 57XX step 63 */
   2957 	/* Poll to make sure it's shut down. */
   2958 	for (i = 0; i < BGE_TIMEOUT * 2; i++) {
   2959 		DELAY(10);
   2960 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
   2961 			break;
   2962 	}
   2963 
   2964 	if (i == BGE_TIMEOUT * 2) {
   2965 		aprint_error_dev(sc->bge_dev,
   2966 		    "host coalescing engine failed to idle\n");
   2967 		return ENXIO;
   2968 	}
   2969 
   2970 	/* 5718 step 35, 36, 37 */
   2971 	/* Set up host coalescing defaults */
   2972 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
   2973 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
   2974 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
   2975 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
   2976 	if (!(BGE_IS_5705_PLUS(sc))) {
   2977 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
   2978 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
   2979 	}
   2980 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
   2981 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
   2982 
   2983 	/* Set up address of statistics block */
   2984 	if (BGE_IS_5700_FAMILY(sc)) {
   2985 		BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
   2986 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
   2987 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
   2988 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
   2989 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
   2990 	}
   2991 
   2992 	/* 5718 step 38 */
   2993 	/* Set up address of status block */
   2994 	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
   2995 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
   2996 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
   2997 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
   2998 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
   2999 	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
   3000 
   3001 	/* Set up status block size. */
   3002 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
   3003 	    sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
   3004 		val = BGE_STATBLKSZ_FULL;
   3005 		bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ);
   3006 	} else {
   3007 		val = BGE_STATBLKSZ_32BYTE;
   3008 		bzero(&sc->bge_rdata->bge_status_block, 32);
   3009 	}
   3010 
   3011 	/* 5718 step 39, 57XX step 73 */
   3012 	/* Turn on host coalescing state machine */
   3013 	CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
   3014 
   3015 	/* 5718 step 40, 57XX step 74 */
   3016 	/* Turn on RX BD completion state machine and enable attentions */
   3017 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
   3018 	    BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
   3019 
   3020 	/* 5718 step 41, 57XX step 75 */
   3021 	/* Turn on RX list placement state machine */
   3022 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
   3023 
   3024 	/* 57XX step 76 */
   3025 	/* Turn on RX list selector state machine. */
   3026 	if (!(BGE_IS_5705_PLUS(sc)))
   3027 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
   3028 
   3029 	val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
   3030 	    BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
   3031 	    BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
   3032 	    BGE_MACMODE_FRMHDR_DMA_ENB;
   3033 
   3034 	if (sc->bge_flags & BGEF_FIBER_TBI)
   3035 		val |= BGE_PORTMODE_TBI;
   3036 	else if (sc->bge_flags & BGEF_FIBER_MII)
   3037 		val |= BGE_PORTMODE_GMII;
   3038 	else
   3039 		val |= BGE_PORTMODE_MII;
   3040 
   3041 	/* 5718 step 42 and 43, 57XX step 77 and 78 */
   3042 	/* Allow APE to send/receive frames. */
   3043 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
   3044 		val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
   3045 
   3046 	/* Turn on DMA, clear stats */
   3047 	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
   3048 	/* 5718 step 44 */
   3049 	DELAY(40);
   3050 
   3051 	/* 5718 step 45, 57XX step 79 */
   3052 	/* Set misc. local control, enable interrupts on attentions */
   3053 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
   3054 	if (BGE_IS_5717_PLUS(sc)) {
   3055 		CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */
   3056 		/* 5718 step 46 */
   3057 		DELAY(100);
   3058 	}
   3059 
   3060 	/* 57XX step 81 */
   3061 	/* Turn on DMA completion state machine */
   3062 	if (!(BGE_IS_5705_PLUS(sc)))
   3063 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
   3064 
   3065 	/* 5718 step 47, 57XX step 82 */
   3066 	val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
   3067 
   3068 	/* 5718 step 48 */
   3069 	/* Enable host coalescing bug fix. */
   3070 	if (BGE_IS_5755_PLUS(sc))
   3071 		val |= BGE_WDMAMODE_STATUS_TAG_FIX;
   3072 
   3073 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
   3074 		val |= BGE_WDMAMODE_BURST_ALL_DATA;
   3075 
   3076 	/* Turn on write DMA state machine */
   3077 	CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val);
   3078 	/* 5718 step 49 */
   3079 	DELAY(40);
   3080 
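	/*
	 * Assemble the read DMA mode word.  Chip-specific bits are OR'd
	 * in below and the result is written to BGE_RDMA_MODE.
	 */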
   3081 	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
   3082 
   3083 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
   3084 		val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
   3085 
   3086 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
   3087 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
   3088 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
   3089 		val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
   3090 		    BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
   3091 		    BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
   3092 
   3093 	if (sc->bge_flags & BGEF_PCIE)
   3094 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
   3095 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) {
   3096 		if (ifp->if_mtu <= ETHERMTU)
   3097 			val |= BGE_RDMAMODE_JMB_2K_MMRR;
   3098 	}
   3099 	if (sc->bge_flags & BGEF_TSO) {
   3100 		val |= BGE_RDMAMODE_TSO4_ENABLE;
   3101 		if (BGE_IS_5717_PLUS(sc))
   3102 			val |= BGE_RDMAMODE_TSO6_ENABLE;
   3103 	}
   3104 
   3105 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   3106 		val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
   3107 		    BGE_RDMAMODE_H2BNC_VLAN_DET;
   3108 		/*
   3109 		 * Allow multiple outstanding read requests from
   3110 		 * non-LSO read DMA engine.
   3111 		 */
   3112 		val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
   3113 	}
   3114 
   3115 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
   3116 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
   3117 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
   3118 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
   3119 	    BGE_IS_57765_PLUS(sc)) {
   3120 		dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
   3121 		/*
   3122 		 * Adjust tx margin to prevent TX data corruption and
   3123 		 * fix internal FIFO overflow.
   3124 		 */
   3125 		if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
   3126 			dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
   3127 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
   3128 			    BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
   3129 			dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
   3130 			    BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
   3131 			    BGE_RDMA_RSRVCTRL_TXMRGN_320B;
   3132 		}
   3133 		/*
   3134 		 * Enable fix for read DMA FIFO overruns.
   3135 		 * The fix is to limit the number of RX BDs
    3136 		 * the hardware would fetch at a time.
   3137 		 */
   3138 		CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
   3139 		    BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
   3140 	}
   3141 
   3142 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
   3143 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
   3144 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
   3145 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
   3146 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
   3147 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   3148 		/*
   3149 		 * Allow 4KB burst length reads for non-LSO frames.
   3150 		 * Enable 512B burst length reads for buffer descriptors.
   3151 		 */
   3152 		CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
   3153 		    CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
   3154 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
   3155 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
   3156 	}
   3157 	/* Turn on read DMA state machine */
   3158 	CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val);
   3159 	/* 5718 step 52 */
   3160 	delay(40);
   3161 
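	/*
	 * 5719/5720 TX length workaround.  Each register in the
	 * BGE_RDMA_LENGTH block appears to carry two channel lengths in
	 * its low and high 16 bits; if any of them exceeds the maximum
	 * frame length, set the corresponding TX length workaround bit
	 * in the LSO/CRPTEN control register.
	 */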
   3162 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   3163 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   3164 		for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
   3165 			val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
   3166 			if ((val & 0xFFFF) > BGE_FRAMELEN)
   3167 				break;
   3168 			if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
   3169 				break;
   3170 		}
   3171 		if (i != BGE_NUM_RDMA_CHANNELS / 2) {
   3172 			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
   3173 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
   3174 				val |= BGE_RDMA_TX_LENGTH_WA_5719;
   3175 			else
   3176 				val |= BGE_RDMA_TX_LENGTH_WA_5720;
   3177 			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
   3178 		}
   3179 	}
   3180 
   3181 	/* 5718 step 56, 57XX step 84 */
   3182 	/* Turn on RX data completion state machine */
   3183 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
   3184 
   3185 	/* Turn on RX data and RX BD initiator state machine */
   3186 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
   3187 
   3188 	/* 57XX step 85 */
   3189 	/* Turn on Mbuf cluster free state machine */
   3190 	if (!BGE_IS_5705_PLUS(sc))
   3191 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
   3192 
   3193 	/* 5718 step 57, 57XX step 86 */
   3194 	/* Turn on send data completion state machine */
   3195 	val = BGE_SDCMODE_ENABLE;
   3196 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
   3197 		val |= BGE_SDCMODE_CDELAY;
   3198 	CSR_WRITE_4(sc, BGE_SDC_MODE, val);
   3199 
   3200 	/* 5718 step 58 */
   3201 	/* Turn on send BD completion state machine */
   3202 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
   3203 
   3204 	/* 57XX step 88 */
   3205 	/* Turn on RX BD initiator state machine */
   3206 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
   3207 
   3208 	/* 5718 step 60, 57XX step 90 */
   3209 	/* Turn on send data initiator state machine */
   3210 	if (sc->bge_flags & BGEF_TSO) {
   3211 		/* XXX: magic value from Linux driver */
   3212 		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
   3213 		    BGE_SDIMODE_HW_LSO_PRE_DMA);
   3214 	} else
   3215 		CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
   3216 
   3217 	/* 5718 step 61, 57XX step 91 */
   3218 	/* Turn on send BD initiator state machine */
   3219 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
   3220 
   3221 	/* 5718 step 62, 57XX step 92 */
   3222 	/* Turn on send BD selector state machine */
   3223 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
   3224 
   3225 	/* 5718 step 31, 57XX step 60 */
   3226 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
   3227 	/* 5718 step 32, 57XX step 61 */
   3228 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
   3229 	    BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
   3230 
   3231 	/* ack/clear link change events */
   3232 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
   3233 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
   3234 	    BGE_MACSTAT_LINK_CHANGED);
   3235 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
   3236 
   3237 	/*
   3238 	 * Enable attention when the link has changed state for
   3239 	 * devices that use auto polling.
   3240 	 */
   3241 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   3242 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
   3243 	} else {
   3244 		if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
   3245 			mimode = BGE_MIMODE_500KHZ_CONST;
   3246 		else
   3247 			mimode = BGE_MIMODE_BASE;
   3248 		/* 5718 step 68. 5718 step 69 (optionally). */
   3249 		if (BGE_IS_5700_FAMILY(sc) ||
   3250 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) {
   3251 			mimode |= BGE_MIMODE_AUTOPOLL;
   3252 			BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
   3253 		}
   3254 		mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
   3255 		CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
   3256 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700)
   3257 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
   3258 			    BGE_EVTENB_MI_INTERRUPT);
   3259 	}
   3260 
   3261 	/*
   3262 	 * Clear any pending link state attention.
    3263 	 * Otherwise some link state change events may be lost until the
    3264 	 * attention is cleared by the bge_intr() -> bge_link_upd() sequence.
    3265 	 * This is not necessary on newer BCM chips; perhaps enabling link
    3266 	 * state change attentions implies clearing any pending attention.
   3267 	 */
   3268 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
   3269 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
   3270 	    BGE_MACSTAT_LINK_CHANGED);
   3271 
   3272 	/* Enable link state change attentions. */
   3273 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
   3274 
   3275 	return 0;
   3276 }
   3277 
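/*
 * Look up a chip revision.  Try an exact chip ID match first, then fall
 * back to the ASIC major revision; return NULL if the chip is unknown.
 */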
   3278 static const struct bge_revision *
   3279 bge_lookup_rev(uint32_t chipid)
   3280 {
   3281 	const struct bge_revision *br;
   3282 
   3283 	for (br = bge_revisions; br->br_name != NULL; br++) {
   3284 		if (br->br_chipid == chipid)
   3285 			return br;
   3286 	}
   3287 
   3288 	for (br = bge_majorrevs; br->br_name != NULL; br++) {
   3289 		if (br->br_chipid == BGE_ASICREV(chipid))
   3290 			return br;
   3291 	}
   3292 
   3293 	return NULL;
   3294 }
   3295 
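/*
 * Match a device against the bge_products table by PCI vendor and
 * product ID.  Returns NULL if the device is not one of ours.
 */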
   3296 static const struct bge_product *
   3297 bge_lookup(const struct pci_attach_args *pa)
   3298 {
   3299 	const struct bge_product *bp;
   3300 
   3301 	for (bp = bge_products; bp->bp_name != NULL; bp++) {
   3302 		if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
   3303 		    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
   3304 			return bp;
   3305 	}
   3306 
   3307 	return NULL;
   3308 }
   3309 
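/*
 * Determine the chip ID (ASIC and revision).  Newer chips report
 * BGE_ASICREV_USE_PRODID_REG in BGE_PCI_MISC_CTL and make the real ID
 * available through a product-specific config register instead.
 */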
   3310 static uint32_t
   3311 bge_chipid(const struct pci_attach_args *pa)
   3312 {
   3313 	uint32_t id;
   3314 
   3315 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
   3316 		>> BGE_PCIMISCCTL_ASICREV_SHIFT;
   3317 
   3318 	if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
   3319 		switch (PCI_PRODUCT(pa->pa_id)) {
   3320 		case PCI_PRODUCT_BROADCOM_BCM5717:
   3321 		case PCI_PRODUCT_BROADCOM_BCM5718:
   3322 		case PCI_PRODUCT_BROADCOM_BCM5719:
   3323 		case PCI_PRODUCT_BROADCOM_BCM5720:
   3324 			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3325 			    BGE_PCI_GEN2_PRODID_ASICREV);
   3326 			break;
   3327 		case PCI_PRODUCT_BROADCOM_BCM57761:
   3328 		case PCI_PRODUCT_BROADCOM_BCM57762:
   3329 		case PCI_PRODUCT_BROADCOM_BCM57765:
   3330 		case PCI_PRODUCT_BROADCOM_BCM57766:
   3331 		case PCI_PRODUCT_BROADCOM_BCM57781:
   3332 		case PCI_PRODUCT_BROADCOM_BCM57782:
   3333 		case PCI_PRODUCT_BROADCOM_BCM57785:
   3334 		case PCI_PRODUCT_BROADCOM_BCM57786:
   3335 		case PCI_PRODUCT_BROADCOM_BCM57791:
   3336 		case PCI_PRODUCT_BROADCOM_BCM57795:
   3337 			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3338 			    BGE_PCI_GEN15_PRODID_ASICREV);
   3339 			break;
   3340 		default:
   3341 			id = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3342 			    BGE_PCI_PRODID_ASICREV);
   3343 			break;
   3344 		}
   3345 	}
   3346 
   3347 	return id;
   3348 }
   3349 
   3350 /*
   3351  * Return true if MSI can be used with this device.
   3352  */
   3353 static int
   3354 bge_can_use_msi(struct bge_softc *sc)
   3355 {
   3356 	int can_use_msi = 0;
   3357 
   3358 	switch (BGE_ASICREV(sc->bge_chipid)) {
   3359 	case BGE_ASICREV_BCM5714_A0:
   3360 	case BGE_ASICREV_BCM5714:
   3361 		/*
   3362 		 * Apparently, MSI doesn't work when these chips are
   3363 		 * configured in single-port mode.
   3364 		 */
   3365 		break;
   3366 	case BGE_ASICREV_BCM5750:
   3367 		if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX &&
   3368 		    BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX)
   3369 			can_use_msi = 1;
   3370 		break;
   3371 	default:
   3372 		if (BGE_IS_575X_PLUS(sc))
   3373 			can_use_msi = 1;
   3374 	}
   3375 	return (can_use_msi);
   3376 }
   3377 
   3378 /*
   3379  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
   3380  * against our list and return its name if we find a match. Note
   3381  * that since the Broadcom controller contains VPD support, we
   3382  * can get the device name string from the controller itself instead
   3383  * of the compiled-in string. This is a little slow, but it guarantees
   3384  * we'll always announce the right product name.
   3385  */
   3386 static int
   3387 bge_probe(device_t parent, cfdata_t match, void *aux)
   3388 {
   3389 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
   3390 
   3391 	if (bge_lookup(pa) != NULL)
   3392 		return 1;
   3393 
   3394 	return 0;
   3395 }
   3396 
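/*
 * Attach the interface: map the registers, identify the chip and its
 * quirks, reset it, allocate DMA memory for the rings, and set up the
 * ifnet, media and MII state.
 */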
   3397 static void
   3398 bge_attach(device_t parent, device_t self, void *aux)
   3399 {
   3400 	struct bge_softc	*sc = device_private(self);
   3401 	struct pci_attach_args	*pa = aux;
   3402 	prop_dictionary_t dict;
   3403 	const struct bge_product *bp;
   3404 	const struct bge_revision *br;
   3405 	pci_chipset_tag_t	pc;
   3406 	const char		*intrstr = NULL;
   3407 	uint32_t 		hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5;
   3408 	uint32_t		command;
   3409 	struct ifnet		*ifp;
   3410 	uint32_t		misccfg, mimode;
   3411 	void *			kva;
   3412 	u_char			eaddr[ETHER_ADDR_LEN];
   3413 	pcireg_t		memtype, subid, reg;
   3414 	bus_addr_t		memaddr;
   3415 	uint32_t		pm_ctl;
   3416 	bool			no_seeprom;
   3417 	int			capmask;
   3418 	int			mii_flags;
   3419 	int			map_flags;
   3420 	char intrbuf[PCI_INTRSTR_LEN];
   3421 
   3422 	bp = bge_lookup(pa);
   3423 	KASSERT(bp != NULL);
   3424 
   3425 	sc->sc_pc = pa->pa_pc;
   3426 	sc->sc_pcitag = pa->pa_tag;
   3427 	sc->bge_dev = self;
   3428 
   3429 	sc->bge_pa = *pa;
   3430 	pc = sc->sc_pc;
   3431 	subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
   3432 
   3433 	aprint_naive(": Ethernet controller\n");
   3434 	aprint_normal(": %s\n", bp->bp_name);
   3435 
   3436 	/*
   3437 	 * Map control/status registers.
   3438 	 */
   3439 	DPRINTFN(5, ("Map control/status regs\n"));
   3440 	command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   3441 	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
   3442 	pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
   3443 	command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   3444 
   3445 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
   3446 		aprint_error_dev(sc->bge_dev,
   3447 		    "failed to enable memory mapping!\n");
   3448 		return;
   3449 	}
   3450 
   3451 	DPRINTFN(5, ("pci_mem_find\n"));
   3452 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
   3453 	switch (memtype) {
   3454 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3455 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3456 #if 0
   3457 		if (pci_mapreg_map(pa, BGE_PCI_BAR0,
   3458 		    memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
   3459 		    &memaddr, &sc->bge_bsize) == 0)
   3460 			break;
   3461 #else
   3462 		/*
    3463 		 * Workaround for the PCI prefetchable bit. Some BCM5717-5720
    3464 		 * based systems get an NMI on boot (PR#48451). This may not be
    3465 		 * a driver bug but a bug in our PCI common code. Until we find
    3466 		 * the real cause, we ignore the prefetchable bit.
   3467 		 */
   3468 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0,
   3469 		    memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) {
   3470 			map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3471 			if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize,
   3472 			    map_flags, &sc->bge_bhandle) == 0) {
   3473 				sc->bge_btag = pa->pa_memt;
   3474 				break;
   3475 			}
   3476 		}
   3477 #endif
   3478 	default:
   3479 		aprint_error_dev(sc->bge_dev, "can't find mem space\n");
   3480 		return;
   3481 	}
   3482 
   3483 	/* Save various chip information. */
   3484 	sc->bge_chipid = bge_chipid(pa);
   3485 	sc->bge_phy_addr = bge_phy_addr(sc);
   3486 
   3487 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
   3488 	    &sc->bge_pciecap, NULL) != 0) {
   3489 		/* PCIe */
   3490 		sc->bge_flags |= BGEF_PCIE;
   3491 		/* Extract supported maximum payload size. */
   3492 		reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3493 		    sc->bge_pciecap + PCIE_DCAP);
   3494 		sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD);
   3495 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
   3496 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   3497 			sc->bge_expmrq = 2048;
   3498 		else
   3499 			sc->bge_expmrq = 4096;
   3500 		bge_set_max_readrq(sc);
   3501 	} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) {
   3502 		/* PCIe without PCIe cap */
   3503 		sc->bge_flags |= BGEF_PCIE;
   3504 	} else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
   3505 		BGE_PCISTATE_PCI_BUSMODE) == 0) {
   3506 		/* PCI-X */
   3507 		sc->bge_flags |= BGEF_PCIX;
   3508 		if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
   3509 			&sc->bge_pcixcap, NULL) == 0)
   3510 			aprint_error_dev(sc->bge_dev,
   3511 			    "unable to find PCIX capability\n");
   3512 	}
   3513 
   3514 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) {
   3515 		/*
   3516 		 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
   3517 		 * can clobber the chip's PCI config-space power control
   3518 		 * registers, leaving the card in D3 powersave state. We do
   3519 		 * not have memory-mapped registers in this state, so force
   3520 		 * device into D0 state before starting initialization.
   3521 		 */
   3522 		pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
   3523 		pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
   3524 		pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
   3525 		pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
    3526 		DELAY(1000);	/* 27 usec is allegedly sufficient */
   3527 	}
   3528 
   3529 	/* Save chipset family. */
   3530 	switch (BGE_ASICREV(sc->bge_chipid)) {
   3531 	case BGE_ASICREV_BCM5717:
   3532 	case BGE_ASICREV_BCM5719:
   3533 	case BGE_ASICREV_BCM5720:
   3534 		sc->bge_flags |= BGEF_5717_PLUS;
   3535 		/* FALLTHROUGH */
   3536 	case BGE_ASICREV_BCM57765:
   3537 	case BGE_ASICREV_BCM57766:
   3538 		if (!BGE_IS_5717_PLUS(sc))
   3539 			sc->bge_flags |= BGEF_57765_FAMILY;
   3540 		sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS |
   3541 		    BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE;
   3542 		/* Jumbo frame on BCM5719 A0 does not work. */
   3543 		if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
   3544 		    (sc->bge_chipid == BGE_CHIPID_BCM5719_A0))
   3545 			sc->bge_flags &= ~BGEF_JUMBO_CAPABLE;
   3546 		break;
   3547 	case BGE_ASICREV_BCM5755:
   3548 	case BGE_ASICREV_BCM5761:
   3549 	case BGE_ASICREV_BCM5784:
   3550 	case BGE_ASICREV_BCM5785:
   3551 	case BGE_ASICREV_BCM5787:
   3552 	case BGE_ASICREV_BCM57780:
   3553 		sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS;
   3554 		break;
   3555 	case BGE_ASICREV_BCM5700:
   3556 	case BGE_ASICREV_BCM5701:
   3557 	case BGE_ASICREV_BCM5703:
   3558 	case BGE_ASICREV_BCM5704:
   3559 		sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE;
   3560 		break;
   3561 	case BGE_ASICREV_BCM5714_A0:
   3562 	case BGE_ASICREV_BCM5780:
   3563 	case BGE_ASICREV_BCM5714:
   3564 		sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE;
   3565 		/* FALLTHROUGH */
   3566 	case BGE_ASICREV_BCM5750:
   3567 	case BGE_ASICREV_BCM5752:
   3568 	case BGE_ASICREV_BCM5906:
   3569 		sc->bge_flags |= BGEF_575X_PLUS;
   3570 		/* FALLTHROUGH */
   3571 	case BGE_ASICREV_BCM5705:
   3572 		sc->bge_flags |= BGEF_5705_PLUS;
   3573 		break;
   3574 	}
   3575 
   3576 	/* Identify chips with APE processor. */
   3577 	switch (BGE_ASICREV(sc->bge_chipid)) {
   3578 	case BGE_ASICREV_BCM5717:
   3579 	case BGE_ASICREV_BCM5719:
   3580 	case BGE_ASICREV_BCM5720:
   3581 	case BGE_ASICREV_BCM5761:
   3582 		sc->bge_flags |= BGEF_APE;
   3583 		break;
   3584 	}
   3585 
   3586 	/*
   3587 	 * The 40bit DMA bug applies to the 5714/5715 controllers and is
   3588 	 * not actually a MAC controller bug but an issue with the embedded
   3589 	 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
   3590 	 */
   3591 	if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0))
   3592 		sc->bge_flags |= BGEF_40BIT_BUG;
   3593 
   3594 	/* Chips with APE need BAR2 access for APE registers/memory. */
   3595 	if ((sc->bge_flags & BGEF_APE) != 0) {
   3596 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
   3597 #if 0
   3598 		if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
   3599 			&sc->bge_apetag, &sc->bge_apehandle, NULL,
   3600 			&sc->bge_apesize)) {
   3601 			aprint_error_dev(sc->bge_dev,
   3602 			    "couldn't map BAR2 memory\n");
   3603 			return;
   3604 		}
   3605 #else
   3606 		/*
    3607 		 * Workaround for the PCI prefetchable bit. Some BCM5717-5720
    3608 		 * based systems get an NMI on boot (PR#48451). This may not be
    3609 		 * a driver bug but a bug in our PCI common code. Until we find
    3610 		 * the real cause, we ignore the prefetchable bit.
   3611 		 */
   3612 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2,
   3613 		    memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) {
   3614 			aprint_error_dev(sc->bge_dev,
   3615 			    "couldn't map BAR2 memory\n");
   3616 			return;
   3617 		}
   3618 
   3619 		map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3620 		if (bus_space_map(pa->pa_memt, memaddr,
   3621 		    sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) {
   3622 			aprint_error_dev(sc->bge_dev,
   3623 			    "couldn't map BAR2 memory\n");
   3624 			return;
   3625 		}
   3626 		sc->bge_apetag = pa->pa_memt;
   3627 #endif
   3628 
   3629 		/* Enable APE register/memory access by host driver. */
   3630 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
   3631 		reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
   3632 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
   3633 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
   3634 		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
   3635 
   3636 		bge_ape_lock_init(sc);
   3637 		bge_ape_read_fw_ver(sc);
   3638 	}
   3639 
    3640 	/* Identify the chips that use a CPMU. */
   3641 	if (BGE_IS_5717_PLUS(sc) ||
   3642 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
   3643 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
   3644 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
   3645 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
   3646 		sc->bge_flags |= BGEF_CPMU_PRESENT;
   3647 
   3648 	/* Set MI_MODE */
   3649 	mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr);
   3650 	if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0)
   3651 		mimode |= BGE_MIMODE_500KHZ_CONST;
   3652 	else
   3653 		mimode |= BGE_MIMODE_BASE;
   3654 	CSR_WRITE_4(sc, BGE_MI_MODE, mimode);
   3655 
   3656 	/*
   3657 	 * When using the BCM5701 in PCI-X mode, data corruption has
   3658 	 * been observed in the first few bytes of some received packets.
   3659 	 * Aligning the packet buffer in memory eliminates the corruption.
   3660 	 * Unfortunately, this misaligns the packet payloads.  On platforms
   3661 	 * which do not support unaligned accesses, we will realign the
   3662 	 * payloads by copying the received packets.
   3663 	 */
   3664 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
   3665 	    sc->bge_flags & BGEF_PCIX)
   3666 		sc->bge_flags |= BGEF_RX_ALIGNBUG;
   3667 
   3668 	if (BGE_IS_5700_FAMILY(sc))
   3669 		sc->bge_flags |= BGEF_JUMBO_CAPABLE;
   3670 
   3671 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
   3672 	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
   3673 
   3674 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
   3675 	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
   3676 	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
   3677 		sc->bge_flags |= BGEF_IS_5788;
   3678 
   3679 	/*
    3680 	 * Some controllers seem to require special firmware to use
    3681 	 * TSO. That firmware is not available to FreeBSD, and Linux
    3682 	 * claims that TSO performed by the firmware is slower than
    3683 	 * hardware-based TSO. Moreover, the firmware-based TSO has a
    3684 	 * known bug which makes it unable to handle TSO when the
    3685 	 * ethernet header plus IP/TCP header exceeds 80 bytes. A
    3686 	 * workaround exists, but it appears to cost more than simply
    3687 	 * not using TSO. Some hardware also has the TSO bug, so limit
    3688 	 * TSO to controllers that are not affected by these issues
    3689 	 * (e.g. 5755 or higher).
   3690 	 */
   3691 	if (BGE_IS_5755_PLUS(sc)) {
   3692 		/*
    3693 		 * BCM5754 and BCM5787 share the same ASIC ID, so an
    3694 		 * explicit device ID check is required.
   3695 		 */
   3696 		if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) &&
   3697 		    (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M))
   3698 			sc->bge_flags |= BGEF_TSO;
   3699 		/* TSO on BCM5719 A0 does not work. */
   3700 		if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) &&
   3701 		    (sc->bge_chipid == BGE_CHIPID_BCM5719_A0))
   3702 			sc->bge_flags &= ~BGEF_TSO;
   3703 	}
   3704 
   3705 	capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */
   3706 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
   3707 	     (misccfg == 0x4000 || misccfg == 0x8000)) ||
   3708 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
   3709 	     PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
   3710 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
   3711 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
   3712 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
   3713 	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
   3714 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
   3715 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
   3716 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
   3717 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 ||
   3718 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
   3719 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 ||
   3720 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   3721 		/* These chips are 10/100 only. */
   3722 		capmask &= ~BMSR_EXTSTAT;
   3723 		sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
   3724 	}
   3725 
   3726 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   3727 	    (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
   3728 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
   3729 		 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
   3730 		sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
   3731 
   3732 	/* Set various PHY bug flags. */
   3733 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
   3734 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
   3735 		sc->bge_phy_flags |= BGEPHYF_CRC_BUG;
   3736 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
   3737 	    BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
   3738 		sc->bge_phy_flags |= BGEPHYF_ADC_BUG;
   3739 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
   3740 		sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG;
   3741 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   3742 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
   3743 	    PCI_VENDOR(subid) == PCI_VENDOR_DELL)
   3744 		sc->bge_phy_flags |= BGEPHYF_NO_3LED;
   3745 	if (BGE_IS_5705_PLUS(sc) &&
   3746 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 &&
   3747 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
   3748 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 &&
   3749 	    !BGE_IS_57765_PLUS(sc)) {
   3750 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
   3751 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
   3752 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
   3753 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
   3754 			if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
   3755 			    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
   3756 				sc->bge_phy_flags |= BGEPHYF_JITTER_BUG;
   3757 			if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
   3758 				sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM;
   3759 		} else
   3760 			sc->bge_phy_flags |= BGEPHYF_BER_BUG;
   3761 	}
   3762 
   3763 	/*
   3764 	 * SEEPROM check.
   3765 	 * First check if firmware knows we do not have SEEPROM.
   3766 	 */
   3767 	if (prop_dictionary_get_bool(device_properties(self),
   3768 	     "without-seeprom", &no_seeprom) && no_seeprom)
    3769 		sc->bge_flags |= BGEF_NO_EEPROM;
   3770 
   3771 	else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
   3772 		sc->bge_flags |= BGEF_NO_EEPROM;
   3773 
   3774 	/* Now check the 'ROM failed' bit on the RX CPU */
   3775 	else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL)
   3776 		sc->bge_flags |= BGEF_NO_EEPROM;
   3777 
   3778 	sc->bge_asf_mode = 0;
   3779 	/* No ASF if APE present. */
   3780 	if ((sc->bge_flags & BGEF_APE) == 0) {
   3781 		if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
   3782 			BGE_SRAM_DATA_SIG_MAGIC)) {
   3783 			if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
   3784 			    BGE_HWCFG_ASF) {
   3785 				sc->bge_asf_mode |= ASF_ENABLE;
   3786 				sc->bge_asf_mode |= ASF_STACKUP;
   3787 				if (BGE_IS_575X_PLUS(sc))
   3788 					sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
   3789 			}
   3790 		}
   3791 	}
   3792 
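
	/*
	 * Allocate a single interrupt, preferring MSI-X, then MSI, then
	 * INTx.  Chips with known-broken MSI are limited to INTx.
	 */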
   3793 	int counts[PCI_INTR_TYPE_SIZE] = {
   3794 		[PCI_INTR_TYPE_INTX] = 1,
   3795 		[PCI_INTR_TYPE_MSI] = 1,
   3796 		[PCI_INTR_TYPE_MSIX] = 1,
   3797 	};
   3798 	int max_type = PCI_INTR_TYPE_MSIX;
   3799 
   3800 	if (!bge_can_use_msi(sc)) {
   3801 		/* MSI broken, allow only INTx */
   3802 		max_type = PCI_INTR_TYPE_INTX;
   3803 	}
   3804 
   3805 	if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) {
   3806 		aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n");
   3807 		return;
   3808 	}
   3809 
   3810 	DPRINTFN(5, ("pci_intr_string\n"));
   3811 	intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf,
   3812 	    sizeof(intrbuf));
   3813 	DPRINTFN(5, ("pci_intr_establish\n"));
   3814 	sc->bge_intrhand = pci_intr_establish_xname(pc, sc->bge_pihp[0],
   3815 	    IPL_NET, bge_intr, sc, device_xname(sc->bge_dev));
   3816 	if (sc->bge_intrhand == NULL) {
   3817 		pci_intr_release(pc, sc->bge_pihp, 1);
   3818 		sc->bge_pihp = NULL;
   3819 
   3820 		aprint_error_dev(self, "couldn't establish interrupt");
   3821 		if (intrstr != NULL)
   3822 			aprint_error(" at %s", intrstr);
   3823 		aprint_error("\n");
   3824 		return;
   3825 	}
   3826 	aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
   3827 
   3828 	switch (pci_intr_type(pc, sc->bge_pihp[0])) {
   3829 	case PCI_INTR_TYPE_MSIX:
   3830 	case PCI_INTR_TYPE_MSI:
   3831 		KASSERT(bge_can_use_msi(sc));
   3832 		sc->bge_flags |= BGEF_MSI;
   3833 		break;
   3834 	default:
   3835 		/* nothing to do */
   3836 		break;
   3837 	}
   3838 
   3839 	/*
    3840 	 * All controllers except the BCM5700 support tagged status, but
    3841 	 * we use tagged status only in the MSI case on the BCM5717;
    3842 	 * otherwise MSI on the BCM5717 does not work.
   3843 	 */
   3844 	if (BGE_IS_57765_PLUS(sc) && sc->bge_flags & BGEF_MSI)
   3845 		sc->bge_flags |= BGEF_TAGGED_STATUS;
   3846 
   3847 	/*
    3848 	 * Reset NVRAM before bge_reset(). This is required to be able to
    3849 	 * acquire the NVRAM lock in bge_reset().
   3850 	 */
   3851 	CSR_WRITE_4(sc, BGE_EE_ADDR,
   3852 	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
   3853 	delay(1000);
   3854 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
   3855 
   3856 	bge_stop_fw(sc);
   3857 	bge_sig_pre_reset(sc, BGE_RESET_START);
   3858 	if (bge_reset(sc))
   3859 		aprint_error_dev(sc->bge_dev, "chip reset failed\n");
   3860 
   3861 	/*
   3862 	 * Read the hardware config word in the first 32k of NIC internal
   3863 	 * memory, or fall back to the config word in the EEPROM.
   3864 	 * Note: on some BCM5700 cards, this value appears to be unset.
   3865 	 */
   3866 	hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0;
   3867 	if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
   3868 	    BGE_SRAM_DATA_SIG_MAGIC) {
   3869 		uint32_t tmp;
   3870 
   3871 		hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
   3872 		tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >>
   3873 		    BGE_SRAM_DATA_VER_SHIFT;
   3874 		if ((0 < tmp) && (tmp < 0x100))
   3875 			hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2);
   3876 		if (sc->bge_flags & BGEF_PCIE)
   3877 			hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3);
   3878 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
   3879 			hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4);
   3880 		if (BGE_IS_5717_PLUS(sc))
   3881 			hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5);
   3882 	} else if (!(sc->bge_flags & BGEF_NO_EEPROM)) {
   3883 		bge_read_eeprom(sc, (void *)&hwcfg,
   3884 		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
   3885 		hwcfg = be32toh(hwcfg);
   3886 	}
   3887 	aprint_normal_dev(sc->bge_dev,
    3888 	    "HW config %08x, %08x, %08x, %08x, %08x\n",
   3889 	    hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5);
   3890 
   3891 	bge_sig_legacy(sc, BGE_RESET_START);
   3892 	bge_sig_post_reset(sc, BGE_RESET_START);
   3893 
   3894 	if (bge_chipinit(sc)) {
   3895 		aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
   3896 		bge_release_resources(sc);
   3897 		return;
   3898 	}
   3899 
   3900 	/*
   3901 	 * Get station address from the EEPROM.
   3902 	 */
   3903 	if (bge_get_eaddr(sc, eaddr)) {
   3904 		aprint_error_dev(sc->bge_dev,
   3905 		    "failed to read station address\n");
   3906 		bge_release_resources(sc);
   3907 		return;
   3908 	}
   3909 
   3910 	br = bge_lookup_rev(sc->bge_chipid);
   3911 
   3912 	if (br == NULL) {
   3913 		aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)",
   3914 		    sc->bge_chipid);
   3915 	} else {
   3916 		aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)",
   3917 		    br->br_name, sc->bge_chipid);
   3918 	}
   3919 	aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
   3920 
   3921 	/* Allocate the general information block and ring buffers. */
   3922 	if (pci_dma64_available(pa)) {
   3923 		sc->bge_dmatag = pa->pa_dmat64;
   3924 		sc->bge_dmatag32 = pa->pa_dmat;
   3925 		sc->bge_dma64 = true;
   3926 	} else {
   3927 		sc->bge_dmatag = pa->pa_dmat;
   3928 		sc->bge_dmatag32 = pa->pa_dmat;
   3929 		sc->bge_dma64 = false;
   3930 	}
   3931 
   3932 	/* 40bit DMA workaround */
   3933 	if (sizeof(bus_addr_t) > 4) {
   3934 		if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) {
   3935 			bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */
   3936 
   3937 			if (bus_dmatag_subregion(olddmatag, 0,
   3938 				(bus_addr_t)(1ULL << 40), &(sc->bge_dmatag),
   3939 				BUS_DMA_NOWAIT) != 0) {
   3940 				aprint_error_dev(self,
   3941 				    "WARNING: failed to restrict dma range,"
   3942 				    " falling back to parent bus dma range\n");
   3943 				sc->bge_dmatag = olddmatag;
   3944 			}
   3945 		}
   3946 	}
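	/*
	 * Initialize the TX DMA map list, then allocate, map and load one
	 * contiguous DMA area (struct bge_ring_data) that holds the rings
	 * and the status and statistics blocks.
	 */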
   3947 	SLIST_INIT(&sc->txdma_list);
   3948 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
   3949 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
   3950 			     PAGE_SIZE, 0, &sc->bge_ring_seg, 1,
   3951 		&sc->bge_ring_rseg, BUS_DMA_NOWAIT)) {
   3952 		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
   3953 		return;
   3954 	}
   3955 	DPRINTFN(5, ("bus_dmamem_map\n"));
   3956 	if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg,
   3957 		sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva,
   3958 			   BUS_DMA_NOWAIT)) {
   3959 		aprint_error_dev(sc->bge_dev,
   3960 		    "can't map DMA buffers (%zu bytes)\n",
   3961 		    sizeof(struct bge_ring_data));
   3962 		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
   3963 		    sc->bge_ring_rseg);
   3964 		return;
   3965 	}
   3966 	DPRINTFN(5, ("bus_dmamem_create\n"));
   3967 	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
   3968 	    sizeof(struct bge_ring_data), 0,
   3969 	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
   3970 		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
   3971 		bus_dmamem_unmap(sc->bge_dmatag, kva,
   3972 				 sizeof(struct bge_ring_data));
   3973 		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
   3974 		    sc->bge_ring_rseg);
   3975 		return;
   3976 	}
   3977 	DPRINTFN(5, ("bus_dmamem_load\n"));
   3978 	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
   3979 			    sizeof(struct bge_ring_data), NULL,
   3980 			    BUS_DMA_NOWAIT)) {
   3981 		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
   3982 		bus_dmamem_unmap(sc->bge_dmatag, kva,
   3983 				 sizeof(struct bge_ring_data));
   3984 		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
   3985 		    sc->bge_ring_rseg);
   3986 		return;
   3987 	}
   3988 
   3989 	DPRINTFN(5, ("bzero\n"));
   3990 	sc->bge_rdata = (struct bge_ring_data *)kva;
   3991 
   3992 	memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
   3993 
   3994 	/* Try to allocate memory for jumbo buffers. */
   3995 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
   3996 		if (bge_alloc_jumbo_mem(sc)) {
   3997 			aprint_error_dev(sc->bge_dev,
   3998 			    "jumbo buffer allocation failed\n");
   3999 		} else
   4000 			sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   4001 	}
   4002 
   4003 	/* Set default tuneable values. */
   4004 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
   4005 	sc->bge_rx_coal_ticks = 150;
   4006 	sc->bge_rx_max_coal_bds = 64;
   4007 	sc->bge_tx_coal_ticks = 300;
   4008 	sc->bge_tx_max_coal_bds = 400;
   4009 	if (BGE_IS_5705_PLUS(sc)) {
   4010 		sc->bge_tx_coal_ticks = (12 * 5);
   4011 		sc->bge_tx_max_coal_bds = (12 * 5);
    4012 		aprint_verbose_dev(sc->bge_dev,
    4013 		    "setting short Tx thresholds\n");
   4014 	}
   4015 
   4016 	if (BGE_IS_5717_PLUS(sc))
   4017 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
   4018 	else if (BGE_IS_5705_PLUS(sc))
   4019 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
   4020 	else
   4021 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
   4022 
   4023 	/* Set up ifnet structure */
   4024 	ifp = &sc->ethercom.ec_if;
   4025 	ifp->if_softc = sc;
   4026 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   4027 	ifp->if_ioctl = bge_ioctl;
   4028 	ifp->if_stop = bge_stop;
   4029 	ifp->if_start = bge_start;
   4030 	ifp->if_init = bge_init;
   4031 	ifp->if_watchdog = bge_watchdog;
   4032 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
   4033 	IFQ_SET_READY(&ifp->if_snd);
   4034 	DPRINTFN(5, ("strcpy if_xname\n"));
   4035 	strcpy(ifp->if_xname, device_xname(sc->bge_dev));
   4036 
   4037 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0)
   4038 		sc->ethercom.ec_if.if_capabilities |=
   4039 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
   4040 #if 1	/* XXX TCP/UDP checksum offload breaks with pf(4) */
   4041 		sc->ethercom.ec_if.if_capabilities |=
   4042 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   4043 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
   4044 #endif
   4045 	sc->ethercom.ec_capabilities |=
   4046 	    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
   4047 
   4048 	if (sc->bge_flags & BGEF_TSO)
   4049 		sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
   4050 
   4051 	/*
   4052 	 * Do MII setup.
   4053 	 */
   4054 	DPRINTFN(5, ("mii setup\n"));
   4055 	sc->bge_mii.mii_ifp = ifp;
   4056 	sc->bge_mii.mii_readreg = bge_miibus_readreg;
   4057 	sc->bge_mii.mii_writereg = bge_miibus_writereg;
   4058 	sc->bge_mii.mii_statchg = bge_miibus_statchg;
   4059 
   4060 	/*
   4061 	 * Figure out what sort of media we have by checking the hardware
   4062 	 * config word.  Note: on some BCM5700 cards, this value appears to be
   4063 	 * unset. If that's the case, we have to rely on identifying the NIC
   4064 	 * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41.
   4065 	 * The SysKonnect SK-9D41 is a 1000baseSX card.
   4066 	 */
   4067 	if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 ||
   4068 	    (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
   4069 		if (BGE_IS_5705_PLUS(sc)) {
   4070 			sc->bge_flags |= BGEF_FIBER_MII;
   4071 			sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED;
   4072 		} else
   4073 			sc->bge_flags |= BGEF_FIBER_TBI;
   4074 	}
   4075 
   4076 	/* Set bge_phy_flags before prop_dictionary_set_uint32() */
   4077 	if (BGE_IS_JUMBO_CAPABLE(sc))
   4078 		sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE;
   4079 
   4080 	/* set phyflags and chipid before mii_attach() */
   4081 	dict = device_properties(self);
   4082 	prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags);
   4083 	prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
   4084 
   4085 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   4086 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
   4087 		    bge_ifmedia_sts);
   4088 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL);
   4089 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX,
   4090 			    0, NULL);
   4091 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
   4092 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
   4093 		/* Pretend the user requested this setting */
   4094 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
   4095 	} else {
   4096 		/*
    4097 		 * Do transceiver setup and tell the firmware the
    4098 		 * driver is down so we can try to access the PHY for
    4099 		 * the probe even if ASF is running.  Retry a couple of
    4100 		 * times if we get a conflict with the ASF firmware
    4101 		 * accessing the PHY.
   4102 		 */
   4103 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   4104 		bge_asf_driver_up(sc);
   4105 
   4106 		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
   4107 			     bge_ifmedia_sts);
   4108 		mii_flags = MIIF_DOPAUSE;
   4109 		if (sc->bge_flags & BGEF_FIBER_MII)
   4110 			mii_flags |= MIIF_HAVEFIBER;
   4111 		mii_attach(sc->bge_dev, &sc->bge_mii, capmask, sc->bge_phy_addr,
   4112 		    MII_OFFSET_ANY, mii_flags);
   4113 
   4114 		if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
   4115 			aprint_error_dev(sc->bge_dev, "no PHY found!\n");
   4116 			ifmedia_add(&sc->bge_mii.mii_media,
   4117 				    IFM_ETHER|IFM_MANUAL, 0, NULL);
   4118 			ifmedia_set(&sc->bge_mii.mii_media,
   4119 				    IFM_ETHER|IFM_MANUAL);
   4120 		} else
   4121 			ifmedia_set(&sc->bge_mii.mii_media,
   4122 				    IFM_ETHER|IFM_AUTO);
   4123 
   4124 		/*
   4125 		 * Now tell the firmware we are going up after probing the PHY
   4126 		 */
   4127 		if (sc->bge_asf_mode & ASF_STACKUP)
   4128 			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   4129 	}
   4130 
   4131 	/*
   4132 	 * Call MI attach routine.
   4133 	 */
   4134 	DPRINTFN(5, ("if_attach\n"));
   4135 	if_attach(ifp);
   4136 	if_deferred_start_init(ifp, NULL);
   4137 	DPRINTFN(5, ("ether_ifattach\n"));
   4138 	ether_ifattach(ifp, eaddr);
   4139 	ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
   4140 	rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
   4141 		RND_TYPE_NET, RND_FLAG_DEFAULT);
   4142 #ifdef BGE_EVENT_COUNTERS
   4143 	/*
   4144 	 * Attach event counters.
   4145 	 */
   4146 	evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
   4147 	    NULL, device_xname(sc->bge_dev), "intr");
   4148 	evcnt_attach_dynamic(&sc->bge_ev_intr_spurious, EVCNT_TYPE_INTR,
   4149 	    NULL, device_xname(sc->bge_dev), "intr_spurious");
   4150 	evcnt_attach_dynamic(&sc->bge_ev_intr_spurious2, EVCNT_TYPE_INTR,
   4151 	    NULL, device_xname(sc->bge_dev), "intr_spurious2");
   4152 	evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
   4153 	    NULL, device_xname(sc->bge_dev), "tx_xoff");
   4154 	evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
   4155 	    NULL, device_xname(sc->bge_dev), "tx_xon");
   4156 	evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
   4157 	    NULL, device_xname(sc->bge_dev), "rx_xoff");
   4158 	evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
   4159 	    NULL, device_xname(sc->bge_dev), "rx_xon");
   4160 	evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
   4161 	    NULL, device_xname(sc->bge_dev), "rx_macctl");
   4162 	evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
   4163 	    NULL, device_xname(sc->bge_dev), "xoffentered");
   4164 #endif /* BGE_EVENT_COUNTERS */
   4165 	DPRINTFN(5, ("callout_init\n"));
   4166 	callout_init(&sc->bge_timeout, 0);
   4167 
   4168 	if (pmf_device_register(self, NULL, NULL))
   4169 		pmf_class_network_register(self, ifp);
   4170 	else
   4171 		aprint_error_dev(self, "couldn't establish power handler\n");
   4172 
   4173 	bge_sysctl_init(sc);
   4174 
   4175 #ifdef BGE_DEBUG
   4176 	bge_debug_info(sc);
   4177 #endif
   4178 }
   4179 
   4180 /*
   4181  * Stop all chip I/O so that the kernel's probe routines don't
   4182  * get confused by errant DMAs when rebooting.
   4183  */
   4184 static int
   4185 bge_detach(device_t self, int flags __unused)
   4186 {
   4187 	struct bge_softc *sc = device_private(self);
   4188 	struct ifnet *ifp = &sc->ethercom.ec_if;
   4189 	int s;
   4190 
   4191 	s = splnet();
   4192 	/* Stop the interface. Callouts are stopped in it. */
   4193 	bge_stop(ifp, 1);
   4194 	splx(s);
   4195 
   4196 	mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   4197 
   4198 	/* Delete all remaining media. */
   4199 	ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY);
   4200 
   4201 	ether_ifdetach(ifp);
   4202 	if_detach(ifp);
   4203 
   4204 	bge_release_resources(sc);
   4205 
   4206 	return 0;
   4207 }
   4208 
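/*
 * Release everything set up by bge_attach(): the sysctl tree, event
 * counters, interrupt handler, ring DMA memory, and the register and
 * APE mappings.
 */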
   4209 static void
   4210 bge_release_resources(struct bge_softc *sc)
   4211 {
   4212 
   4213 	/* Detach sysctl */
   4214 	if (sc->bge_log != NULL)
   4215 		sysctl_teardown(&sc->bge_log);
   4216 
   4217 #ifdef BGE_EVENT_COUNTERS
   4218 	/* Detach event counters. */
   4219 	evcnt_detach(&sc->bge_ev_intr);
   4220 	evcnt_detach(&sc->bge_ev_intr_spurious);
   4221 	evcnt_detach(&sc->bge_ev_intr_spurious2);
   4222 	evcnt_detach(&sc->bge_ev_tx_xoff);
   4223 	evcnt_detach(&sc->bge_ev_tx_xon);
   4224 	evcnt_detach(&sc->bge_ev_rx_xoff);
   4225 	evcnt_detach(&sc->bge_ev_rx_xon);
   4226 	evcnt_detach(&sc->bge_ev_rx_macctl);
   4227 	evcnt_detach(&sc->bge_ev_xoffentered);
   4228 #endif /* BGE_EVENT_COUNTERS */
   4229 
   4230 	/* Disestablish the interrupt handler */
   4231 	if (sc->bge_intrhand != NULL) {
   4232 		pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand);
   4233 		pci_intr_release(sc->sc_pc, sc->bge_pihp, 1);
   4234 		sc->bge_intrhand = NULL;
   4235 	}
   4236 
   4237 	if (sc->bge_dmatag != NULL) {
   4238 		bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
   4239 		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
   4240 		bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata,
   4241 		    sizeof(struct bge_ring_data));
   4242 		bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg,
   4243 		    sc->bge_ring_rseg);
   4244 	}
   4245 
   4246 	/* Unmap the device registers */
   4247 	if (sc->bge_bsize != 0) {
   4248 		bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize);
   4249 		sc->bge_bsize = 0;
   4250 	}
   4251 
   4252 	/* Unmap the APE registers */
   4253 	if (sc->bge_apesize != 0) {
   4254 		bus_space_unmap(sc->bge_apetag, sc->bge_apehandle,
   4255 		    sc->bge_apesize);
   4256 		sc->bge_apesize = 0;
   4257 	}
   4258 }
   4259 
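/*
 * Issue a global reset of the controller and restore the PCI state that
 * the reset clobbers.  The numbered "5718 reset step N" / "57XX step N"
 * comments below track the corresponding documented reset sequences.
 */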
   4260 static int
   4261 bge_reset(struct bge_softc *sc)
   4262 {
   4263 	uint32_t cachesize, command;
   4264 	uint32_t reset, mac_mode, mac_mode_mask;
   4265 	pcireg_t devctl, reg;
   4266 	int i, val;
   4267 	void (*write_op)(struct bge_softc *, int, int);
   4268 
   4269 	/* Make mask for BGE_MAC_MODE register. */
   4270 	mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
   4271 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
   4272 		mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
   4273 	/* Keep mac_mode_mask's bits of BGE_MAC_MODE register into mac_mode */
   4274 	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
   4275 
   4276 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
   4277 	    (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
   4278 	    	if (sc->bge_flags & BGEF_PCIE)
   4279 			write_op = bge_writemem_direct;
   4280 		else
   4281 			write_op = bge_writemem_ind;
   4282 	} else
   4283 		write_op = bge_writereg_ind;
   4284 
   4285 	/* 57XX step 4 */
   4286 	/* Acquire the NVM lock */
   4287 	if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 &&
   4288 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
   4289 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) {
   4290 		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
   4291 		for (i = 0; i < 8000; i++) {
   4292 			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
   4293 			    BGE_NVRAMSWARB_GNT1)
   4294 				break;
   4295 			DELAY(20);
   4296 		}
   4297 		if (i == 8000) {
    4298 			printf("%s: NVRAM lock timed out!\n",
   4299 			    device_xname(sc->bge_dev));
   4300 		}
   4301 	}
   4302 
   4303 	/* Take APE lock when performing reset. */
   4304 	bge_ape_lock(sc, BGE_APE_LOCK_GRC);
   4305 
   4306 	/* 57XX step 3 */
   4307 	/* Save some important PCI state. */
   4308 	cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
   4309 	/* 5718 reset step 3 */
   4310 	command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
   4311 
   4312 	/* 5718 reset step 5, 57XX step 5b-5d */
   4313 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
   4314 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
   4315 	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
   4316 
   4317 	/* XXX ???: Disable fastboot on controllers that support it. */
   4318 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
   4319 	    BGE_IS_5755_PLUS(sc))
   4320 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
   4321 
   4322 	/* 5718 reset step 2, 57XX step 6 */
   4323 	/*
   4324 	 * Write the magic number to SRAM at offset 0xB50.
   4325 	 * When firmware finishes its initialization it will
   4326 	 * write ~BGE_MAGIC_NUMBER to the same location.
   4327 	 */
   4328 	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
   4329 
   4330 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
   4331 		val = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
   4332 		val = (val & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
   4333 		    | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
   4334 		CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, val);
   4335 	}
   4336 
   4337 	/* 5718 reset step 6, 57XX step 7 */
   4338 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
   4339 	/*
   4340 	 * XXX: from FreeBSD/Linux; no documentation
   4341 	 */
   4342 	if (sc->bge_flags & BGEF_PCIE) {
   4343 		if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) &&
   4344 		    !BGE_IS_57765_PLUS(sc) &&
   4345 		    (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) ==
   4346 			(BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) {
   4347 			/* PCI Express 1.0 system */
   4348 			CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG,
   4349 			    BGE_PHY_PCIE_SCRAM_MODE);
   4350 		}
   4351 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
   4352 			/*
   4353 			 * Prevent PCI Express link training
   4354 			 * during global reset.
   4355 			 */
   4356 			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
   4357 			reset |= (1 << 29);
   4358 		}
   4359 	}
   4360 
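	/*
	 * On the 5906, signal a driver-initiated reset to the on-chip
	 * VCPU and make sure the CPU is not held in halt.
	 */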
   4361 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
   4362 		i = CSR_READ_4(sc, BGE_VCPU_STATUS);
   4363 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
   4364 		    i | BGE_VCPU_STATUS_DRV_RESET);
   4365 		i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
   4366 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
   4367 		    i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
   4368 	}
   4369 
   4370 	/*
   4371 	 * Set GPHY Power Down Override to leave GPHY
   4372 	 * powered up in D0 uninitialized.
   4373 	 */
   4374 	if (BGE_IS_5705_PLUS(sc) &&
   4375 	    (sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
   4376 		reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
   4377 
   4378 	/* Issue global reset */
   4379 	write_op(sc, BGE_MISC_CFG, reset);
   4380 
   4381 	/* 5718 reset step 7, 57XX step 8 */
   4382 	if (sc->bge_flags & BGEF_PCIE)
   4383 		delay(100*1000); /* too big */
   4384 	else
   4385 		delay(1000);
   4386 
   4387 	if (sc->bge_flags & BGEF_PCIE) {
   4388 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
   4389 			DELAY(500000);
   4390 			/* XXX: Magic Numbers */
   4391 			reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4392 			    BGE_PCI_UNKNOWN0);
   4393 			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4394 			    BGE_PCI_UNKNOWN0,
   4395 			    reg | (1 << 15));
   4396 		}
   4397 		devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4398 		    sc->bge_pciecap + PCIE_DCSR);
   4399 		/* Clear enable no snoop and disable relaxed ordering. */
   4400 		devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD |
   4401 		    PCIE_DCSR_ENA_NO_SNOOP);
   4402 
   4403 		/* Set PCIE max payload size to 128 for older PCIe devices */
   4404 		if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0)
   4405 			devctl &= ~(0x00e0);
   4406 		/* Clear device status register. Write 1b to clear */
   4407 		devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED
   4408 		    | PCIE_DCSR_NFED | PCIE_DCSR_CED;
   4409 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   4410 		    sc->bge_pciecap + PCIE_DCSR, devctl);
   4411 		bge_set_max_readrq(sc);
   4412 	}
   4413 
   4414 	/* From Linux: dummy read to flush PCI posted writes */
   4415 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
   4416 
   4417 	/*
    4418 	 * Reset some of the PCI state that got zapped by the reset.
   4419 	 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be
   4420 	 * set, too.
   4421 	 */
   4422 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
   4423 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
   4424 	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
   4425 	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
   4426 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
   4427 	    (sc->bge_flags & BGEF_PCIX) != 0)
   4428 		val |= BGE_PCISTATE_RETRY_SAME_DMA;
   4429 	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
   4430 		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
   4431 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
   4432 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
   4433 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val);
   4434 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
   4435 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);
   4436 
   4437 	/* 57xx step 11: disable PCI-X Relaxed Ordering. */
   4438 	if (sc->bge_flags & BGEF_PCIX) {
   4439 		reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
   4440 		    + PCIX_CMD);
   4441 		/* Set max memory read byte count to 2K */
   4442 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
   4443 			reg &= ~PCIX_CMD_BYTECNT_MASK;
   4444 			reg |= PCIX_CMD_BCNT_2048;
   4445 		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){
   4446 			/*
   4447 			 * For 5704, set max outstanding split transaction
   4448 			 * field to 0 (0 means it supports 1 request)
   4449 			 */
   4450 			reg &= ~(PCIX_CMD_SPLTRANS_MASK
   4451 			    | PCIX_CMD_BYTECNT_MASK);
   4452 			reg |= PCIX_CMD_BCNT_2048;
   4453 		}
   4454 		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap
   4455 		    + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER);
   4456 	}
   4457 
   4458 	/* 5718 reset step 10, 57XX step 12 */
   4459 	/* Enable memory arbiter. */
   4460 	if (BGE_IS_5714_FAMILY(sc)) {
   4461 		val = CSR_READ_4(sc, BGE_MARB_MODE);
   4462 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
   4463 	} else
   4464 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
   4465 
   4466 	/* XXX 5721, 5751 and 5752 */
   4467 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
   4468 		/* Step 19: */
   4469 		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
   4470 		/* Step 20: */
   4471 		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
   4472 	}
   4473 
   4474 	/* 5718 reset step 12, 57XX step 15 and 16 */
   4475 	/* Fix up byte swapping */
   4476 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
   4477 
   4478 	/* 5718 reset step 13, 57XX step 17 */
   4479 	/* Poll until the firmware initialization is complete */
   4480 	bge_poll_fw(sc);
   4481 
   4482 	/* 57XX step 21 */
   4483 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
   4484 		pcireg_t msidata;
   4485 
   4486 		msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   4487 		    BGE_PCI_MSI_DATA);
   4488 		msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
   4489 		pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
   4490 		    msidata);
   4491 	}
   4492 
   4493 	/* 57XX step 18 */
   4494 	/* Write mac mode. */
   4495 	val = CSR_READ_4(sc, BGE_MAC_MODE);
   4496 	/* Restore mac_mode_mask's bits using mac_mode */
   4497 	val = (val & ~mac_mode_mask) | mac_mode;
   4498 	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
   4499 	DELAY(40);
   4500 
   4501 	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
   4502 
   4503 	/*
   4504 	 * The 5704 in TBI mode apparently needs some special
    4505 	 * adjustment to ensure the SERDES drive level is set
   4506 	 * to 1.2V.
   4507 	 */
   4508 	if (sc->bge_flags & BGEF_FIBER_TBI &&
   4509 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
   4510 		uint32_t serdescfg;
   4511 
   4512 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
   4513 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
   4514 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
   4515 	}
   4516 
   4517 	if (sc->bge_flags & BGEF_PCIE &&
   4518 	    !BGE_IS_57765_PLUS(sc) &&
   4519 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
   4520 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
   4521 		uint32_t v;
   4522 
   4523 		/* Enable PCI Express bug fix */
   4524 		v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG);
   4525 		CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG,
   4526 		    v | BGE_TLP_DATA_FIFO_PROTECT);
   4527 	}
   4528 
   4529 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
   4530 		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
   4531 		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
   4532 
   4533 	return 0;
   4534 }
   4535 
   4536 /*
   4537  * Frame reception handling. This is called if there's a frame
   4538  * on the receive return list.
   4539  *
   4540  * Note: we have to be able to handle two possibilities here:
   4541  * 1) the frame is from the jumbo receive ring
   4542  * 2) the frame is from the standard receive ring
   4543  */
   4544 
   4545 static void
   4546 bge_rxeof(struct bge_softc *sc)
   4547 {
   4548 	struct ifnet *ifp;
   4549 	uint16_t rx_prod, rx_cons;
   4550 	int stdcnt = 0, jumbocnt = 0;
   4551 	bus_dmamap_t dmamap;
   4552 	bus_addr_t offset, toff;
   4553 	bus_size_t tlen;
   4554 	int tosync;
   4555 
   4556 	rx_cons = sc->bge_rx_saved_considx;
   4557 	rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx;
   4558 
   4559 	/* Nothing to do */
   4560 	if (rx_cons == rx_prod)
   4561 		return;
   4562 
   4563 	ifp = &sc->ethercom.ec_if;
   4564 
   4565 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4566 	    offsetof(struct bge_ring_data, bge_status_block),
   4567 	    sizeof (struct bge_status_block),
   4568 	    BUS_DMASYNC_POSTREAD);
   4569 
   4570 	offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
   4571 	tosync = rx_prod - rx_cons;
   4572 
   4573 	if (tosync != 0)
   4574 		rnd_add_uint32(&sc->rnd_source, tosync);
   4575 
   4576 	toff = offset + (rx_cons * sizeof (struct bge_rx_bd));
   4577 
   4578 	if (tosync < 0) {
   4579 		tlen = (sc->bge_return_ring_cnt - rx_cons) *
   4580 		    sizeof (struct bge_rx_bd);
   4581 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4582 		    toff, tlen, BUS_DMASYNC_POSTREAD);
   4583 		tosync = -tosync;
   4584 	}
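         	/*
         	 * A negative tosync means the producer index has wrapped past
         	 * the end of the return ring: the sync above covered the
         	 * descriptors up to the end of the ring, and the sync below
         	 * (with tosync negated) starts again from the ring base.
         	 */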
   4585 
   4586 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4587 	    offset, tosync * sizeof (struct bge_rx_bd),
   4588 	    BUS_DMASYNC_POSTREAD);
   4589 
   4590 	while (rx_cons != rx_prod) {
   4591 		struct bge_rx_bd	*cur_rx;
   4592 		uint32_t		rxidx;
   4593 		struct mbuf		*m = NULL;
   4594 
   4595 		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];
   4596 
   4597 		rxidx = cur_rx->bge_idx;
   4598 		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
   4599 
   4600 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
   4601 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
   4602 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
   4603 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
   4604 			jumbocnt++;
   4605 			bus_dmamap_sync(sc->bge_dmatag,
   4606 			    sc->bge_cdata.bge_rx_jumbo_map,
   4607 			    mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
   4608 			    BGE_JLEN, BUS_DMASYNC_POSTREAD);
   4609 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
   4610 				ifp->if_ierrors++;
   4611 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
   4612 				continue;
   4613 			}
   4614 			if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
   4615 					     NULL)== ENOBUFS) {
   4616 				ifp->if_ierrors++;
   4617 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
   4618 				continue;
   4619 			}
   4620 		} else {
   4621 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
   4622 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
   4623 
   4624 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
   4625 			stdcnt++;
   4626 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
   4627 			sc->bge_cdata.bge_rx_std_map[rxidx] = NULL;
   4628 			if (dmamap == NULL) {
   4629 				ifp->if_ierrors++;
   4630 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
   4631 				continue;
   4632 			}
   4633 			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
   4634 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   4635 			bus_dmamap_unload(sc->bge_dmatag, dmamap);
   4636 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
   4637 				ifp->if_ierrors++;
   4638 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
   4639 				continue;
   4640 			}
   4641 			if (bge_newbuf_std(sc, sc->bge_std,
   4642 			    NULL, dmamap) == ENOBUFS) {
   4643 				ifp->if_ierrors++;
   4644 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
   4645 				continue;
   4646 			}
   4647 		}
   4648 
   4649 #ifndef __NO_STRICT_ALIGNMENT
   4650 		/*
   4651 		 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
   4652 		 * the Rx buffer has the layer-2 header unaligned.
   4653 		 * If our CPU requires alignment, re-align by copying.
   4654 		 */
   4655 		if (sc->bge_flags & BGEF_RX_ALIGNBUG) {
   4656 			memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
   4657 				cur_rx->bge_len);
   4658 			m->m_data += ETHER_ALIGN;
   4659 		}
   4660 #endif
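         		/*
         		 * ETHER_ALIGN is 2; offsetting the 14-byte Ethernet header
         		 * by two bytes puts the following IP header on a 32-bit
         		 * boundary for the strict-alignment CPUs this targets.
         		 */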
   4661 
   4662 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
   4663 		m_set_rcvif(m, ifp);
   4664 
   4665 		bge_rxcsum(sc, cur_rx, m);
   4666 
   4667 		/*
   4668 		 * If we received a packet with a vlan tag, pass it
   4669 		 * to vlan_input() instead of ether_input().
   4670 		 */
   4671 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
   4672 			vlan_set_tag(m, cur_rx->bge_vlan_tag);
   4673 		}
   4674 
   4675 		if_percpuq_enqueue(ifp->if_percpuq, m);
   4676 	}
   4677 
   4678 	sc->bge_rx_saved_considx = rx_cons;
   4679 	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
   4680 	if (stdcnt)
   4681 		bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
   4682 	if (jumbocnt)
   4683 		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
   4684 }
   4685 
   4686 static void
   4687 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
   4688 {
   4689 
   4690 	if (BGE_IS_57765_PLUS(sc)) {
   4691 		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
   4692 			if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
   4693 				m->m_pkthdr.csum_flags = M_CSUM_IPv4;
   4694 			if ((cur_rx->bge_error_flag &
   4695 				BGE_RXERRFLAG_IP_CSUM_NOK) != 0)
   4696 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   4697 			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
   4698 				m->m_pkthdr.csum_data =
   4699 				    cur_rx->bge_tcp_udp_csum;
   4700 				m->m_pkthdr.csum_flags |=
   4701 				    (M_CSUM_TCPv4|M_CSUM_UDPv4|
   4702 					M_CSUM_DATA);
   4703 			}
   4704 		}
   4705 	} else {
   4706 		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0)
   4707 			m->m_pkthdr.csum_flags = M_CSUM_IPv4;
   4708 		if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
   4709 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   4710 		/*
   4711 		 * Rx transport checksum-offload may also
   4712 		 * have bugs with packets which, when transmitted,
   4713 		 * were `runts' requiring padding.
   4714 		 */
   4715 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
   4716 		    (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
   4717 			    m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
   4718 			m->m_pkthdr.csum_data =
   4719 			    cur_rx->bge_tcp_udp_csum;
   4720 			m->m_pkthdr.csum_flags |=
   4721 			    (M_CSUM_TCPv4|M_CSUM_UDPv4|
   4722 				M_CSUM_DATA);
   4723 		}
   4724 	}
   4725 }
   4726 
   4727 static void
   4728 bge_txeof(struct bge_softc *sc)
   4729 {
   4730 	struct bge_tx_bd *cur_tx = NULL;
   4731 	struct ifnet *ifp;
   4732 	struct txdmamap_pool_entry *dma;
   4733 	bus_addr_t offset, toff;
   4734 	bus_size_t tlen;
   4735 	int tosync;
   4736 	struct mbuf *m;
   4737 
   4738 	ifp = &sc->ethercom.ec_if;
   4739 
   4740 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4741 	    offsetof(struct bge_ring_data, bge_status_block),
   4742 	    sizeof (struct bge_status_block),
   4743 	    BUS_DMASYNC_POSTREAD);
   4744 
   4745 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
   4746 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
   4747 	    sc->bge_tx_saved_considx;
   4748 
   4749 	if (tosync != 0)
   4750 		rnd_add_uint32(&sc->rnd_source, tosync);
   4751 
   4752 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
   4753 
   4754 	if (tosync < 0) {
   4755 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
   4756 		    sizeof (struct bge_tx_bd);
   4757 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4758 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   4759 		tosync = -tosync;
   4760 	}
   4761 
   4762 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4763 	    offset, tosync * sizeof (struct bge_tx_bd),
   4764 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   4765 
   4766 	/*
   4767 	 * Go through our tx ring and free mbufs for those
   4768 	 * frames that have been sent.
   4769 	 */
   4770 	while (sc->bge_tx_saved_considx !=
   4771 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
   4772 		uint32_t		idx = 0;
   4773 
   4774 		idx = sc->bge_tx_saved_considx;
   4775 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
   4776 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
   4777 			ifp->if_opackets++;
   4778 		m = sc->bge_cdata.bge_tx_chain[idx];
   4779 		if (m != NULL) {
   4780 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
   4781 			dma = sc->txdma[idx];
   4782 			if (dma->is_dma32) {
   4783 				bus_dmamap_sync(sc->bge_dmatag32, dma->dmamap32,
   4784 				    0, dma->dmamap32->dm_mapsize,
   4785 				    BUS_DMASYNC_POSTWRITE);
   4786 				bus_dmamap_unload(
   4787 				    sc->bge_dmatag32, dma->dmamap32);
   4788 			} else {
   4789 				bus_dmamap_sync(sc->bge_dmatag, dma->dmamap,
   4790 				    0, dma->dmamap->dm_mapsize,
   4791 				    BUS_DMASYNC_POSTWRITE);
   4792 				bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
   4793 			}
   4794 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
   4795 			sc->txdma[idx] = NULL;
   4796 
   4797 			m_freem(m);
   4798 		}
   4799 		sc->bge_txcnt--;
   4800 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
   4801 		ifp->if_timer = 0;
   4802 	}
   4803 
   4804 	if (cur_tx != NULL)
   4805 		ifp->if_flags &= ~IFF_OACTIVE;
   4806 }
   4807 
   4808 static int
   4809 bge_intr(void *xsc)
   4810 {
   4811 	struct bge_softc *sc;
   4812 	struct ifnet *ifp;
   4813 	uint32_t pcistate, statusword, statustag;
   4814 	uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE;
   4815 
   4816 	sc = xsc;
   4817 	ifp = &sc->ethercom.ec_if;
   4818 
   4819 	/* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */
   4820 	if (BGE_IS_5717_PLUS(sc))
   4821 		intrmask = 0;
   4822 
    4823 	/*
    4824 	 * It is possible for the interrupt to arrive before the status
    4825 	 * block has been updated. Reading the PCI State register will
    4826 	 * confirm whether the interrupt is ours and flush the status block.
    4827 	 */
   4828 	pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE);
   4829 
   4830 	/* read status word from status block */
   4831 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4832 	    offsetof(struct bge_ring_data, bge_status_block),
   4833 	    sizeof (struct bge_status_block),
   4834 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   4835 	statusword = sc->bge_rdata->bge_status_block.bge_status;
   4836 	statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;
   4837 
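         	/*
         	 * In tagged-status mode the chip advances bge_status_tag on each
         	 * status block update; the tag we write back to the IRQ mailbox
         	 * at the end of this handler tells it which update we have seen.
         	 * An interrupt whose tag matches the last one processed is
         	 * treated as not ours.
         	 */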
   4838 	if (sc->bge_flags & BGEF_TAGGED_STATUS) {
   4839 		if (sc->bge_lasttag == statustag &&
   4840 		    (~pcistate & intrmask)) {
   4841 			BGE_EVCNT_INCR(sc->bge_ev_intr_spurious);
   4842 			return (0);
   4843 		}
   4844 		sc->bge_lasttag = statustag;
   4845 	} else {
   4846 		if (!(statusword & BGE_STATFLAG_UPDATED) &&
   4847 		    !(~pcistate & intrmask)) {
   4848 			BGE_EVCNT_INCR(sc->bge_ev_intr_spurious2);
   4849 			return (0);
   4850 		}
   4851 		statustag = 0;
   4852 	}
   4853 	/* Ack interrupt and stop others from occurring. */
   4854 	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
   4855 	BGE_EVCNT_INCR(sc->bge_ev_intr);
   4856 
   4857 	/* clear status word */
   4858 	sc->bge_rdata->bge_status_block.bge_status = 0;
   4859 
   4860 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
   4861 	    offsetof(struct bge_ring_data, bge_status_block),
   4862 	    sizeof (struct bge_status_block),
   4863 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   4864 
   4865 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   4866 	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
   4867 	    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
   4868 		bge_link_upd(sc);
   4869 
   4870 	if (ifp->if_flags & IFF_RUNNING) {
   4871 		/* Check RX return ring producer/consumer */
   4872 		bge_rxeof(sc);
   4873 
   4874 		/* Check TX ring producer/consumer */
   4875 		bge_txeof(sc);
   4876 	}
   4877 
   4878 	if (sc->bge_pending_rxintr_change) {
   4879 		uint32_t rx_ticks = sc->bge_rx_coal_ticks;
   4880 		uint32_t rx_bds = sc->bge_rx_max_coal_bds;
   4881 
   4882 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
   4883 		DELAY(10);
   4884 		(void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
   4885 
   4886 		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
   4887 		DELAY(10);
   4888 		(void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
   4889 
   4890 		sc->bge_pending_rxintr_change = 0;
   4891 	}
   4892 	bge_handle_events(sc);
   4893 
   4894 	/* Re-enable interrupts. */
   4895 	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag);
   4896 
   4897 	if (ifp->if_flags & IFF_RUNNING)
   4898 		if_schedule_deferred_start(ifp);
   4899 
   4900 	return 1;
   4901 }
   4902 
   4903 static void
   4904 bge_asf_driver_up(struct bge_softc *sc)
   4905 {
   4906 	if (sc->bge_asf_mode & ASF_STACKUP) {
    4907 		/* Send ASF heartbeat approx. every 2s */
    4908 		if (sc->bge_asf_count)
    4909 			sc->bge_asf_count--;
   4910 		else {
   4911 			sc->bge_asf_count = 2;
   4912 
   4913 			bge_wait_for_event_ack(sc);
   4914 
   4915 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
   4916 			    BGE_FW_CMD_DRV_ALIVE3);
   4917 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
   4918 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
   4919 			    BGE_FW_HB_TIMEOUT_SEC);
   4920 			CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
   4921 			    CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
   4922 			    BGE_RX_CPU_DRV_EVENT);
   4923 		}
   4924 	}
   4925 }
   4926 
   4927 static void
   4928 bge_tick(void *xsc)
   4929 {
   4930 	struct bge_softc *sc = xsc;
   4931 	struct mii_data *mii = &sc->bge_mii;
   4932 	int s;
   4933 
   4934 	s = splnet();
   4935 
   4936 	if (BGE_IS_5705_PLUS(sc))
   4937 		bge_stats_update_regs(sc);
   4938 	else
   4939 		bge_stats_update(sc);
   4940 
   4941 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   4942 		/*
    4943 		 * Since auto-polling can't be used in TBI mode, we poll the
    4944 		 * link status manually. Here we register a pending link event
    4945 		 * and trigger an interrupt.
   4946 		 */
   4947 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
   4948 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
   4949 	} else {
   4950 		/*
    4951 		 * Do not touch the PHY if we have link up. This could break
    4952 		 * IPMI/ASF mode or produce extra input errors
    4953 		 * (extra input errors were reported for bcm5701 & bcm5704).
   4954 		 */
   4955 		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
   4956 			mii_tick(mii);
   4957 	}
   4958 
   4959 	bge_asf_driver_up(sc);
   4960 
   4961 	if (!sc->bge_detaching)
   4962 		callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
   4963 
   4964 	splx(s);
   4965 }
   4966 
   4967 static void
   4968 bge_stats_update_regs(struct bge_softc *sc)
   4969 {
   4970 	struct ifnet *ifp = &sc->ethercom.ec_if;
   4971 
   4972 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
   4973 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
   4974 
   4975 	/*
   4976 	 * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0,
   4977 	 * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames
    4978 	 * (silicon bug). There's no reliable workaround, so just
    4979 	 * ignore the counter.
   4980 	 */
   4981 	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
    4982 	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
    4983 	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0) {
   4984 		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
   4985 	}
   4986 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
   4987 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
   4988 }
   4989 
   4990 static void
   4991 bge_stats_update(struct bge_softc *sc)
   4992 {
   4993 	struct ifnet *ifp = &sc->ethercom.ec_if;
   4994 	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
   4995 
   4996 #define READ_STAT(sc, stats, stat) \
   4997 	  CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
   4998 
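         	/*
         	 * The statistics block lives in NIC-local memory and is read
         	 * through the memory window; each counter is a 64-bit slot, of
         	 * which only the low word (bge_addr_lo) is read below.
         	 */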
   4999 	ifp->if_collisions +=
   5000 	  (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
   5001 	   READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
   5002 	   READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
   5003 	   READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
   5004 	  ifp->if_collisions;
   5005 
   5006 	BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
   5007 		      READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
   5008 	BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
   5009 		      READ_STAT(sc, stats, outXonSent.bge_addr_lo));
   5010 	BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
   5011 		      READ_STAT(sc, stats,
   5012 		      		xoffPauseFramesReceived.bge_addr_lo));
   5013 	BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
   5014 		      READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
   5015 	BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
   5016 		      READ_STAT(sc, stats,
   5017 		      		macControlFramesReceived.bge_addr_lo));
   5018 	BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
   5019 		      READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
   5020 
   5021 #undef READ_STAT
   5022 
   5023 #ifdef notdef
   5024 	ifp->if_collisions +=
   5025 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
   5026 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
   5027 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
   5028 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
   5029 	   ifp->if_collisions;
   5030 #endif
   5031 }
   5032 
   5033 /*
   5034  * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
   5035  * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
    5036  * but when such padded frames employ the bge IP/TCP checksum offload,
   5037  * the hardware checksum assist gives incorrect results (possibly
   5038  * from incorporating its own padding into the UDP/TCP checksum; who knows).
   5039  * If we pad such runts with zeros, the onboard checksum comes out correct.
   5040  */
   5041 static inline int
   5042 bge_cksum_pad(struct mbuf *pkt)
   5043 {
   5044 	struct mbuf *last = NULL;
   5045 	int padlen;
   5046 
   5047 	padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
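         	/*
         	 * E.g. a bare TCP ACK is 14 + 20 + 20 = 54 bytes on the wire
         	 * (before the CRC), so it needs ETHER_MIN_NOPAD (60) - 54 = 6
         	 * bytes of zero padding.
         	 */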
   5048 
    5049 	/* If there's only the packet-header mbuf and we can pad there, use it. */
   5050 	if (pkt->m_pkthdr.len == pkt->m_len &&
   5051 	    M_TRAILINGSPACE(pkt) >= padlen) {
   5052 		last = pkt;
   5053 	} else {
   5054 		/*
   5055 		 * Walk packet chain to find last mbuf. We will either
   5056 		 * pad there, or append a new mbuf and pad it
   5057 		 * (thus perhaps avoiding the bcm5700 dma-min bug).
   5058 		 */
   5059 		for (last = pkt; last->m_next != NULL; last = last->m_next) {
    5060 			continue; /* do nothing */
   5061 		}
   5062 
   5063 		/* `last' now points to last in chain. */
   5064 		if (M_TRAILINGSPACE(last) < padlen) {
   5065 			/* Allocate new empty mbuf, pad it. Compact later. */
   5066 			struct mbuf *n;
   5067 			MGET(n, M_DONTWAIT, MT_DATA);
   5068 			if (n == NULL)
   5069 				return ENOBUFS;
   5070 			n->m_len = 0;
   5071 			last->m_next = n;
   5072 			last = n;
   5073 		}
   5074 	}
   5075 
   5076 	KDASSERT(!M_READONLY(last));
   5077 	KDASSERT(M_TRAILINGSPACE(last) >= padlen);
   5078 
   5079 	/* Now zero the pad area, to avoid the bge cksum-assist bug */
   5080 	memset(mtod(last, char *) + last->m_len, 0, padlen);
   5081 	last->m_len += padlen;
   5082 	pkt->m_pkthdr.len += padlen;
   5083 	return 0;
   5084 }
   5085 
   5086 /*
   5087  * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
   5088  */
   5089 static inline int
   5090 bge_compact_dma_runt(struct mbuf *pkt)
   5091 {
   5092 	struct mbuf	*m, *prev;
   5093 	int 		totlen;
   5094 
   5095 	prev = NULL;
   5096 	totlen = 0;
   5097 
    5098 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
    5099 		int mlen = m->m_len;
    5100 		int shortfall = 8 - mlen;
   5101 
   5102 		totlen += mlen;
   5103 		if (mlen == 0)
   5104 			continue;
   5105 		if (mlen >= 8)
   5106 			continue;
   5107 
   5108 		/* If we get here, mbuf data is too small for DMA engine.
   5109 		 * Try to fix by shuffling data to prev or next in chain.
   5110 		 * If that fails, do a compacting deep-copy of the whole chain.
   5111 		 */
   5112 
    5113 		/* Internal frag. If it fits in prev, copy it there. */
   5114 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
   5115 		  	memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
   5116 			prev->m_len += mlen;
   5117 			m->m_len = 0;
   5118 			/* XXX stitch chain */
   5119 			prev->m_next = m_free(m);
   5120 			m = prev;
   5121 			continue;
   5122 		}
   5123 		else if (m->m_next != NULL &&
   5124 			     M_TRAILINGSPACE(m) >= shortfall &&
   5125 			     m->m_next->m_len >= (8 + shortfall)) {
    5126 		    /* m is writable and next has enough data, so pull up. */
   5127 
   5128 		  	memcpy(m->m_data + m->m_len, m->m_next->m_data,
   5129 			    shortfall);
   5130 			m->m_len += shortfall;
   5131 			m->m_next->m_len -= shortfall;
   5132 			m->m_next->m_data += shortfall;
   5133 		}
   5134 		else if (m->m_next == NULL || 1) {
    5135 		  	/* Catch-all: "|| 1" forces this branch when the cases above
    5136 			 * don't apply. Borrow data from the tail of the preceding mbuf
    5137 			 * and update its length in-place. (The original data is still
    5138 			 * valid, so we can do this even if prev is not writable.)
    5139 			 */
   5140 
   5141 			/* if we'd make prev a runt, just move all of its data. */
   5142 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
   5143 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
   5144 
   5145 			if ((prev->m_len - shortfall) < 8)
   5146 				shortfall = prev->m_len;
   5147 
   5148 #ifdef notyet	/* just do the safe slow thing for now */
   5149 			if (!M_READONLY(m)) {
    5150 				if (M_LEADINGSPACE(m) < shortfall) {
    5151 					void *m_dat;
    5152 					m_dat = (m->m_flags & M_PKTHDR) ?
    5153 					  m->m_pktdat : m->m_dat;
   5154 					memmove(m_dat, mtod(m, void*), m->m_len);
   5155 					m->m_data = m_dat;
   5156 				    }
   5157 			} else
   5158 #endif	/* just do the safe slow thing */
   5159 			{
   5160 				struct mbuf * n = NULL;
   5161 				int newprevlen = prev->m_len - shortfall;
   5162 
   5163 				MGET(n, M_NOWAIT, MT_DATA);
   5164 				if (n == NULL)
   5165 				   return ENOBUFS;
   5166 				KASSERT(m->m_len + shortfall < MLEN
   5167 					/*,
   5168 					  ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
   5169 
   5170 				/* first copy the data we're stealing from prev */
   5171 				memcpy(n->m_data, prev->m_data + newprevlen,
   5172 				    shortfall);
   5173 
   5174 				/* update prev->m_len accordingly */
   5175 				prev->m_len -= shortfall;
   5176 
   5177 				/* copy data from runt m */
   5178 				memcpy(n->m_data + shortfall, m->m_data,
   5179 				    m->m_len);
   5180 
   5181 				/* n holds what we stole from prev, plus m */
   5182 				n->m_len = shortfall + m->m_len;
   5183 
   5184 				/* stitch n into chain and free m */
   5185 				n->m_next = m->m_next;
   5186 				prev->m_next = n;
   5187 				/* KASSERT(m->m_next == NULL); */
   5188 				m->m_next = NULL;
   5189 				m_free(m);
   5190 				m = n;	/* for continuing loop */
   5191 			}
   5192 		}
   5193 	}
   5194 	return 0;
   5195 }
   5196 
   5197 /*
   5198  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
   5199  * pointers to descriptors.
   5200  */
   5201 static int
   5202 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
   5203 {
   5204 	struct ifnet *ifp = &sc->ethercom.ec_if;
   5205 	struct bge_tx_bd	*f, *prev_f;
   5206 	uint32_t		frag, cur;
   5207 	uint16_t		csum_flags = 0;
   5208 	uint16_t		txbd_tso_flags = 0;
   5209 	struct txdmamap_pool_entry *dma;
   5210 	bus_dmamap_t dmamap;
   5211 	bus_dma_tag_t dmatag;
   5212 	int			i = 0;
   5213 	int			use_tso, maxsegsize, error;
   5214 	bool			have_vtag;
   5215 	uint16_t		vtag;
   5216 	bool 			remap;
   5217 
   5218 	if (m_head->m_pkthdr.csum_flags) {
   5219 		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
   5220 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
   5221 		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
   5222 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
   5223 	}
   5224 
   5225 	/*
   5226 	 * If we were asked to do an outboard checksum, and the NIC
   5227 	 * has the bug where it sometimes adds in the Ethernet padding,
   5228 	 * explicitly pad with zeros so the cksum will be correct either way.
   5229 	 * (For now, do this for all chip versions, until newer
   5230 	 * are confirmed to not require the workaround.)
   5231 	 */
   5232 	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
   5233 #ifdef notyet
   5234 	    (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
   5235 #endif
   5236 	    m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
   5237 		goto check_dma_bug;
   5238 
   5239 	if (bge_cksum_pad(m_head) != 0)
   5240 		return ENOBUFS;
   5241 
   5242 check_dma_bug:
   5243 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
   5244 		goto doit;
   5245 
   5246 	/*
   5247 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
   5248 	 * less than eight bytes.  If we encounter a teeny mbuf
   5249 	 * at the end of a chain, we can pad.  Otherwise, copy.
   5250 	 */
   5251 	if (bge_compact_dma_runt(m_head) != 0)
   5252 		return ENOBUFS;
   5253 
   5254 doit:
   5255 	dma = SLIST_FIRST(&sc->txdma_list);
   5256 	if (dma == NULL) {
   5257 		ifp->if_flags |= IFF_OACTIVE;
   5258 		return ENOBUFS;
   5259 	}
   5260 	dmamap = dma->dmamap;
   5261 	dmatag = sc->bge_dmatag;
   5262 	dma->is_dma32 = false;
   5263 
   5264 	/*
   5265 	 * Set up any necessary TSO state before we start packing...
   5266 	 */
   5267 	use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5268 	if (!use_tso) {
   5269 		maxsegsize = 0;
   5270 	} else {	/* TSO setup */
   5271 		unsigned  mss;
   5272 		struct ether_header *eh;
   5273 		unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
   5274 		unsigned bge_hlen;
   5275 		struct mbuf * m0 = m_head;
   5276 		struct ip *ip;
   5277 		struct tcphdr *th;
   5278 		int iphl, hlen;
   5279 
   5280 		/*
   5281 		 * XXX It would be nice if the mbuf pkthdr had offset
   5282 		 * fields for the protocol headers.
   5283 		 */
   5284 
   5285 		eh = mtod(m0, struct ether_header *);
   5286 		switch (htons(eh->ether_type)) {
   5287 		case ETHERTYPE_IP:
   5288 			offset = ETHER_HDR_LEN;
   5289 			break;
   5290 
   5291 		case ETHERTYPE_VLAN:
   5292 			offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5293 			break;
   5294 
   5295 		default:
   5296 			/*
   5297 			 * Don't support this protocol or encapsulation.
   5298 			 */
   5299 			return ENOBUFS;
   5300 		}
   5301 
   5302 		/*
   5303 		 * TCP/IP headers are in the first mbuf; we can do
   5304 		 * this the easy way.
   5305 		 */
   5306 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5307 		hlen = iphl + offset;
   5308 		if (__predict_false(m0->m_len <
   5309 				    (hlen + sizeof(struct tcphdr)))) {
   5310 
   5311 			aprint_error_dev(sc->bge_dev,
    5312 			    "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd, "
    5313 			    "not handled yet\n",
    5314 			     m0->m_len, hlen + sizeof(struct tcphdr));
   5315 #ifdef NOTYET
   5316 			/*
   5317 			 * XXX jonathan (at) NetBSD.org: untested.
   5318 			 * how to force  this branch to be taken?
   5319 			 */
   5320 			BGE_EVCNT_INCR(sc->bge_ev_txtsopain);
   5321 
   5322 			m_copydata(m0, offset, sizeof(ip), &ip);
   5323 			m_copydata(m0, hlen, sizeof(th), &th);
   5324 
   5325 			ip.ip_len = 0;
   5326 
   5327 			m_copyback(m0, hlen + offsetof(struct ip, ip_len),
   5328 			    sizeof(ip.ip_len), &ip.ip_len);
   5329 
   5330 			th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5331 			    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5332 
   5333 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5334 			    sizeof(th.th_sum), &th.th_sum);
   5335 
   5336 			hlen += th.th_off << 2;
   5337 			iptcp_opt_words	= hlen;
   5338 #else
   5339 			/*
   5340 			 * if_wm "hard" case not yet supported, can we not
   5341 			 * mandate it out of existence?
   5342 			 */
   5343 			(void) ip; (void)th; (void) ip_tcp_hlen;
   5344 
   5345 			return ENOBUFS;
   5346 #endif
   5347 		} else {
   5348 			ip = (struct ip *) (mtod(m0, char *) + offset);
   5349 			th = (struct tcphdr *) (mtod(m0, char *) + hlen);
   5350 			ip_tcp_hlen = iphl +  (th->th_off << 2);
   5351 
   5352 			/* Total IP/TCP options, in 32-bit words */
   5353 			iptcp_opt_words = (ip_tcp_hlen
   5354 					   - sizeof(struct tcphdr)
   5355 					   - sizeof(struct ip)) >> 2;
   5356 		}
   5357 		if (BGE_IS_575X_PLUS(sc)) {
   5358 			th->th_sum = 0;
   5359 			csum_flags = 0;
   5360 		} else {
   5361 			/*
   5362 			 * XXX jonathan (at) NetBSD.org: 5705 untested.
   5363 			 * Requires TSO firmware patch for 5701/5703/5704.
   5364 			 */
   5365 			th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5366 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5367 		}
   5368 
   5369 		mss = m_head->m_pkthdr.segsz;
   5370 		txbd_tso_flags |=
   5371 		    BGE_TXBDFLAG_CPU_PRE_DMA |
   5372 		    BGE_TXBDFLAG_CPU_POST_DMA;
   5373 
   5374 		/*
   5375 		 * Our NIC TSO-assist assumes TSO has standard, optionless
   5376 		 * IPv4 and TCP headers, which total 40 bytes. By default,
   5377 		 * the NIC copies 40 bytes of IP/TCP header from the
   5378 		 * supplied header into the IP/TCP header portion of
   5379 		 * each post-TSO-segment. If the supplied packet has IP or
   5380 		 * TCP options, we need to tell the NIC to copy those extra
    5381 		 * bytes into each post-TSO header, in addition to the normal
   5382 		 * 40-byte IP/TCP header (and to leave space accordingly).
   5383 		 * Unfortunately, the driver encoding of option length
   5384 		 * varies across different ASIC families.
   5385 		 */
   5386 		tcp_seg_flags = 0;
   5387 		bge_hlen = ip_tcp_hlen >> 2;
   5388 		if (BGE_IS_5717_PLUS(sc)) {
   5389 			tcp_seg_flags = (bge_hlen & 0x3) << 14;
   5390 			txbd_tso_flags |=
   5391 			    ((bge_hlen & 0xF8) << 7) | ((bge_hlen & 0x4) << 2);
   5392 		} else if (BGE_IS_5705_PLUS(sc)) {
   5393 			tcp_seg_flags =
   5394 				bge_hlen << 11;
   5395 		} else {
   5396 			/* XXX iptcp_opt_words or bge_hlen ? */
   5397 			txbd_tso_flags |=
   5398 				iptcp_opt_words << 12;
   5399 		}
   5400 		maxsegsize = mss | tcp_seg_flags;
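         		/*
         		 * E.g. with optionless 20-byte IP and TCP headers,
         		 * ip_tcp_hlen is 40, so bge_hlen is 10 and iptcp_opt_words
         		 * is 0: there are no option bytes to copy into each
         		 * post-TSO segment beyond the standard 40-byte header.
         		 */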
   5401 		ip->ip_len = htons(mss + ip_tcp_hlen);
   5402 		ip->ip_sum = 0;
   5403 
   5404 	}	/* TSO setup */
   5405 
   5406 	have_vtag = vlan_has_tag(m_head);
   5407 	if (have_vtag)
   5408 		vtag = vlan_get_tag(m_head);
   5409 
   5410 	/*
   5411 	 * Start packing the mbufs in this chain into
   5412 	 * the fragment pointers. Stop when we run out
   5413 	 * of fragments or hit the end of the mbuf chain.
   5414 	 */
   5415 	remap = true;
   5416 load_again:
   5417 	error = bus_dmamap_load_mbuf(dmatag, dmamap,
   5418 	    m_head, BUS_DMA_NOWAIT);
   5419 	if (__predict_false(error)) {
   5420 		if (error == EFBIG && remap)  {
   5421 			struct mbuf *m;
   5422 			remap = false;
   5423 			m = m_defrag(m_head, M_NOWAIT);
   5424 			if (m != NULL) {
   5425 				KASSERT(m == m_head);
   5426 				goto load_again;
   5427 			}
   5428 		}
   5429 		return error;
   5430 	}
   5431 	/*
   5432 	 * Sanity check: avoid coming within 16 descriptors
   5433 	 * of the end of the ring.
   5434 	 */
   5435 	if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
   5436 		BGE_TSO_PRINTF(("%s: "
   5437 		    " dmamap_load_mbuf too close to ring wrap\n",
   5438 		    device_xname(sc->bge_dev)));
   5439 		goto fail_unload;
   5440 	}
   5441 
    5442 	/* Iterate over DMA-map fragments. */
   5443 	f = prev_f = NULL;
   5444 	cur = frag = *txidx;
   5445 
   5446 	for (i = 0; i < dmamap->dm_nsegs; i++) {
   5447 		f = &sc->bge_rdata->bge_tx_ring[frag];
   5448 		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
   5449 			break;
   5450 
   5451 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
   5452 		f->bge_len = dmamap->dm_segs[i].ds_len;
   5453 		if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && (
   5454 		    (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) !=
   5455 		    ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) ||
   5456 		    (prev_f != NULL &&
   5457 		     prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi))
   5458 		   ) {
   5459 			/*
    5460 			 * A watchdog timeout issue was observed with TSO;
    5461 			 * limiting the DMA address space to 32 bits seems to
    5462 			 * address the issue.
   5463 			 */
   5464 			bus_dmamap_unload(dmatag, dmamap);
   5465 			dmatag = sc->bge_dmatag32;
   5466 			dmamap = dma->dmamap32;
   5467 			dma->is_dma32 = true;
   5468 			remap = true;
   5469 			goto load_again;
   5470 		}
   5471 
   5472 		/*
    5473 		 * For 5751 and follow-ons, for TSO we must turn off
    5474 		 * the checksum-assist flag in the tx descriptor, and
    5475 		 * supply the ASIC-revision-specific encoding
    5476 		 * of TSO flags and segsize.
   5477 		 */
   5478 		if (use_tso) {
   5479 			if (BGE_IS_575X_PLUS(sc) || i == 0) {
   5480 				f->bge_rsvd = maxsegsize;
   5481 				f->bge_flags = csum_flags | txbd_tso_flags;
   5482 			} else {
   5483 				f->bge_rsvd = 0;
   5484 				f->bge_flags =
   5485 				  (csum_flags | txbd_tso_flags) & 0x0fff;
   5486 			}
   5487 		} else {
   5488 			f->bge_rsvd = 0;
   5489 			f->bge_flags = csum_flags;
   5490 		}
   5491 
   5492 		if (have_vtag) {
   5493 			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
   5494 			f->bge_vlan_tag = vtag;
   5495 		} else {
   5496 			f->bge_vlan_tag = 0;
   5497 		}
   5498 		prev_f = f;
   5499 		cur = frag;
   5500 		BGE_INC(frag, BGE_TX_RING_CNT);
   5501 	}
   5502 
   5503 	if (i < dmamap->dm_nsegs) {
   5504 		BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
   5505 		    device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
   5506 		goto fail_unload;
   5507 	}
   5508 
   5509 	bus_dmamap_sync(dmatag, dmamap, 0, dmamap->dm_mapsize,
   5510 	    BUS_DMASYNC_PREWRITE);
   5511 
   5512 	if (frag == sc->bge_tx_saved_considx) {
   5513 		BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
   5514 		    device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
   5515 
   5516 		goto fail_unload;
   5517 	}
   5518 
   5519 	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
   5520 	sc->bge_cdata.bge_tx_chain[cur] = m_head;
   5521 	SLIST_REMOVE_HEAD(&sc->txdma_list, link);
   5522 	sc->txdma[cur] = dma;
   5523 	sc->bge_txcnt += dmamap->dm_nsegs;
   5524 
   5525 	*txidx = frag;
   5526 
   5527 	return 0;
   5528 
   5529 fail_unload:
   5530 	bus_dmamap_unload(dmatag, dmamap);
   5531 	ifp->if_flags |= IFF_OACTIVE;
   5532 
   5533 	return ENOBUFS;
   5534 }
   5535 
   5536 /*
   5537  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
   5538  * to the mbuf data regions directly in the transmit descriptors.
   5539  */
   5540 static void
   5541 bge_start(struct ifnet *ifp)
   5542 {
   5543 	struct bge_softc *sc;
   5544 	struct mbuf *m_head = NULL;
   5545 	struct mbuf *m;
   5546 	uint32_t prodidx;
   5547 	int pkts = 0;
   5548 	int error;
   5549 
   5550 	sc = ifp->if_softc;
   5551 
   5552 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   5553 		return;
   5554 
   5555 	prodidx = sc->bge_tx_prodidx;
   5556 
   5557 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
   5558 		IFQ_POLL(&ifp->if_snd, m_head);
   5559 		if (m_head == NULL)
   5560 			break;
   5561 
   5562 #if 0
   5563 		/*
   5564 		 * XXX
   5565 		 * safety overkill.  If this is a fragmented packet chain
   5566 		 * with delayed TCP/UDP checksums, then only encapsulate
   5567 		 * it if we have enough descriptors to handle the entire
   5568 		 * chain at once.
   5569 		 * (paranoia -- may not actually be needed)
   5570 		 */
   5571 		if (m_head->m_flags & M_FIRSTFRAG &&
   5572 		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
   5573 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
   5574 			    M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
   5575 				ifp->if_flags |= IFF_OACTIVE;
   5576 				break;
   5577 			}
   5578 		}
   5579 #endif
   5580 
   5581 		/*
   5582 		 * Pack the data into the transmit ring. If we
   5583 		 * don't have room, set the OACTIVE flag and wait
   5584 		 * for the NIC to drain the ring.
   5585 		 */
   5586 		error = bge_encap(sc, m_head, &prodidx);
   5587 		if (__predict_false(error)) {
   5588 			if (ifp->if_flags & IFF_OACTIVE) {
   5589 				/* just wait for the transmit ring to drain */
   5590 				break;
   5591 			}
   5592 			IFQ_DEQUEUE(&ifp->if_snd, m);
   5593 			KASSERT(m == m_head);
   5594 			m_freem(m_head);
   5595 			continue;
   5596 		}
   5597 
    5598 		/* Now we are committed to transmitting the packet. */
   5599 		IFQ_DEQUEUE(&ifp->if_snd, m);
   5600 		KASSERT(m == m_head);
   5601 		pkts++;
   5602 
   5603 		/*
   5604 		 * If there's a BPF listener, bounce a copy of this frame
   5605 		 * to him.
   5606 		 */
   5607 		bpf_mtap(ifp, m_head, BPF_D_OUT);
   5608 	}
   5609 	if (pkts == 0)
   5610 		return;
   5611 
   5612 	/* Transmit */
   5613 	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
   5614 	/* 5700 b2 errata */
   5615 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
   5616 		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
   5617 
   5618 	sc->bge_tx_prodidx = prodidx;
   5619 
   5620 	/*
   5621 	 * Set a timeout in case the chip goes out to lunch.
   5622 	 */
   5623 	ifp->if_timer = 5;
   5624 }
   5625 
   5626 static int
   5627 bge_init(struct ifnet *ifp)
   5628 {
   5629 	struct bge_softc *sc = ifp->if_softc;
   5630 	const uint16_t *m;
   5631 	uint32_t mode, reg;
   5632 	int s, error = 0;
   5633 
   5634 	s = splnet();
   5635 
   5636 	ifp = &sc->ethercom.ec_if;
   5637 
   5638 	/* Cancel pending I/O and flush buffers. */
   5639 	bge_stop(ifp, 0);
   5640 
   5641 	bge_stop_fw(sc);
   5642 	bge_sig_pre_reset(sc, BGE_RESET_START);
   5643 	bge_reset(sc);
   5644 	bge_sig_legacy(sc, BGE_RESET_START);
   5645 
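         	/*
         	 * For 5784 Ax, take the CPMU out of link-aware/link-idle mode
         	 * and (as the register field names suggest) select the 6.25MHz
         	 * MAC clock for the 10Mb link, link-aware and host-access
         	 * clocks.
         	 */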
   5646 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
   5647 		reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
   5648 		reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE |
   5649 		    BGE_CPMU_CTRL_LINK_IDLE_MODE);
   5650 		CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
   5651 
   5652 		reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
   5653 		reg &= ~BGE_CPMU_LSPD_10MB_CLK;
   5654 		reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
   5655 		CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
   5656 
   5657 		reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD);
   5658 		reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK;
   5659 		reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25;
   5660 		CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg);
   5661 
   5662 		reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC);
   5663 		reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK;
   5664 		reg |= BGE_CPMU_HST_ACC_MACCLK_6_25;
   5665 		CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg);
   5666 	}
   5667 
   5668 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) {
   5669 		pcireg_t aercap;
   5670 
   5671 		reg = CSR_READ_4(sc, BGE_PCIE_PWRMNG_THRESH);
   5672 		reg = (reg & ~BGE_PCIE_PWRMNG_L1THRESH_MASK)
   5673 		    | BGE_PCIE_PWRMNG_L1THRESH_4MS
   5674 		    | BGE_PCIE_PWRMNG_EXTASPMTMR_EN;
   5675 		CSR_WRITE_4(sc, BGE_PCIE_PWRMNG_THRESH, reg);
   5676 
   5677 		reg = CSR_READ_4(sc, BGE_PCIE_EIDLE_DELAY);
   5678 		reg = (reg & ~BGE_PCIE_EIDLE_DELAY_MASK)
   5679 		    | BGE_PCIE_EIDLE_DELAY_13CLK;
   5680 		CSR_WRITE_4(sc, BGE_PCIE_EIDLE_DELAY, reg);
   5681 
   5682 		/* Clear correctable error */
   5683 		if (pci_get_ext_capability(sc->sc_pc, sc->sc_pcitag,
   5684 		    PCI_EXTCAP_AER, &aercap, NULL) != 0)
   5685 			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   5686 			    aercap + PCI_AER_COR_STATUS, 0xffffffff);
   5687 
   5688 		reg = CSR_READ_4(sc, BGE_PCIE_LINKCTL);
   5689 		reg = (reg & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN)
   5690 		    | BGE_PCIE_LINKCTL_L1_PLL_PDDIS;
   5691 		CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, reg);
   5692 	}
   5693 
   5694 	bge_sig_post_reset(sc, BGE_RESET_START);
   5695 
   5696 	bge_chipinit(sc);
   5697 
   5698 	/*
   5699 	 * Init the various state machines, ring
   5700 	 * control blocks and firmware.
   5701 	 */
   5702 	error = bge_blockinit(sc);
   5703 	if (error != 0) {
   5704 		aprint_error_dev(sc->bge_dev, "initialization error %d\n",
   5705 		    error);
   5706 		splx(s);
   5707 		return error;
   5708 	}
   5709 
   5710 	ifp = &sc->ethercom.ec_if;
   5711 
   5712 	/* 5718 step 25, 57XX step 54 */
   5713 	/* Specify MTU. */
   5714 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
   5715 	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
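         	/*
         	 * With the default 1500-byte MTU this programs
         	 * 1500 + 14 (header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
         	 */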
   5716 
   5717 	/* 5718 step 23 */
   5718 	/* Load our MAC address. */
   5719 	m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]);
   5720 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
   5721 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
   5722 
   5723 	/* Enable or disable promiscuous mode as needed. */
   5724 	if (ifp->if_flags & IFF_PROMISC)
   5725 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   5726 	else
   5727 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   5728 
   5729 	/* Program multicast filter. */
   5730 	bge_setmulti(sc);
   5731 
   5732 	/* Init RX ring. */
   5733 	bge_init_rx_ring_std(sc);
   5734 
   5735 	/*
   5736 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
    5737 	 * memory to ensure that the chip has in fact read the first
   5738 	 * entry of the ring.
   5739 	 */
   5740 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
   5741 		uint32_t		v, i;
   5742 		for (i = 0; i < 10; i++) {
   5743 			DELAY(20);
   5744 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
   5745 			if (v == (MCLBYTES - ETHER_ALIGN))
   5746 				break;
   5747 		}
   5748 		if (i == 10)
   5749 			aprint_error_dev(sc->bge_dev,
   5750 			    "5705 A0 chip failed to load RX ring\n");
   5751 	}
   5752 
   5753 	/* Init jumbo RX ring. */
   5754 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
   5755 		bge_init_rx_ring_jumbo(sc);
   5756 
   5757 	/* Init our RX return ring index */
   5758 	sc->bge_rx_saved_considx = 0;
   5759 
   5760 	/* Init TX ring. */
   5761 	bge_init_tx_ring(sc);
   5762 
   5763 	/* 5718 step 63, 57XX step 94 */
   5764 	/* Enable TX MAC state machine lockup fix. */
   5765 	mode = CSR_READ_4(sc, BGE_TX_MODE);
   5766 	if (BGE_IS_5755_PLUS(sc) ||
   5767 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
   5768 		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
   5769 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
   5770 		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
   5771 		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
   5772 		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
   5773 	}
   5774 
   5775 	/* Turn on transmitter */
   5776 	CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
   5777 	/* 5718 step 64 */
   5778 	DELAY(100);
   5779 
   5780 	/* 5718 step 65, 57XX step 95 */
   5781 	/* Turn on receiver */
   5782 	mode = CSR_READ_4(sc, BGE_RX_MODE);
   5783 	if (BGE_IS_5755_PLUS(sc))
   5784 		mode |= BGE_RXMODE_IPV6_ENABLE;
   5785 	CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
   5786 	/* 5718 step 66 */
   5787 	DELAY(10);
   5788 
   5789 	/* 5718 step 12, 57XX step 37 */
   5790 	/*
    5791 	 * XXX Documents for the 5718 series and 577xx say the recommended
    5792 	 * value is 1, but tg3 sets it to 1 only on the 57765 series.
   5793 	 */
   5794 	if (BGE_IS_57765_PLUS(sc))
   5795 		reg = 1;
   5796 	else
   5797 		reg = 2;
   5798 	CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg);
   5799 
   5800 	/* Tell firmware we're alive. */
   5801 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   5802 
   5803 	/* Enable host interrupts. */
   5804 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
   5805 	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
   5806 	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);
   5807 
   5808 	if ((error = bge_ifmedia_upd(ifp)) != 0)
   5809 		goto out;
   5810 
   5811 	ifp->if_flags |= IFF_RUNNING;
   5812 	ifp->if_flags &= ~IFF_OACTIVE;
   5813 
   5814 	callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
   5815 
   5816 out:
   5817 	sc->bge_if_flags = ifp->if_flags;
   5818 	splx(s);
   5819 
   5820 	return error;
   5821 }
   5822 
   5823 /*
   5824  * Set media options.
   5825  */
   5826 static int
   5827 bge_ifmedia_upd(struct ifnet *ifp)
   5828 {
   5829 	struct bge_softc *sc = ifp->if_softc;
   5830 	struct mii_data *mii = &sc->bge_mii;
   5831 	struct ifmedia *ifm = &sc->bge_ifmedia;
   5832 	int rc;
   5833 
   5834 	/* If this is a 1000baseX NIC, enable the TBI port. */
   5835 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   5836 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   5837 			return EINVAL;
   5838 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
   5839 		case IFM_AUTO:
   5840 			/*
   5841 			 * The BCM5704 ASIC appears to have a special
   5842 			 * mechanism for programming the autoneg
   5843 			 * advertisement registers in TBI mode.
   5844 			 */
   5845 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
   5846 				uint32_t sgdig;
   5847 				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
   5848 				if (sgdig & BGE_SGDIGSTS_DONE) {
   5849 					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
   5850 					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
   5851 					sgdig |= BGE_SGDIGCFG_AUTO |
   5852 					    BGE_SGDIGCFG_PAUSE_CAP |
   5853 					    BGE_SGDIGCFG_ASYM_PAUSE;
   5854 					CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
   5855 					    sgdig | BGE_SGDIGCFG_SEND);
   5856 					DELAY(5);
   5857 					CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
   5858 					    sgdig);
   5859 				}
   5860 			}
   5861 			break;
   5862 		case IFM_1000_SX:
   5863 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
   5864 				BGE_CLRBIT(sc, BGE_MAC_MODE,
   5865 				    BGE_MACMODE_HALF_DUPLEX);
   5866 			} else {
   5867 				BGE_SETBIT(sc, BGE_MAC_MODE,
   5868 				    BGE_MACMODE_HALF_DUPLEX);
   5869 			}
   5870 			DELAY(40);
   5871 			break;
   5872 		default:
   5873 			return EINVAL;
   5874 		}
   5875 		/* XXX 802.3x flow control for 1000BASE-SX */
   5876 		return 0;
   5877 	}
   5878 
   5879 	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) &&
   5880 	    (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) {
   5881 		uint32_t reg;
   5882 
   5883 		reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
   5884 		if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) {
   5885 			reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY;
   5886 			CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
   5887 		}
   5888 	}
   5889 
   5890 	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
   5891 	if ((rc = mii_mediachg(mii)) == ENXIO)
   5892 		return 0;
   5893 
   5894 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
   5895 		uint32_t reg;
   5896 
   5897 		reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK);
   5898 		if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK)
   5899 		    == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) {
   5900 			reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK;
   5901 			delay(40);
   5902 			CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg);
   5903 		}
   5904 	}
   5905 
   5906 	/*
   5907 	 * Force an interrupt so that we will call bge_link_upd
   5908 	 * if needed and clear any pending link state attention.
   5909 	 * Without this we are not getting any further interrupts
   5910 	 * for link state changes and thus will not UP the link and
   5911 	 * not be able to send in bge_start. The only way to get
   5912 	 * things working was to receive a packet and get a RX intr.
   5913 	 */
   5914 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
   5915 	    sc->bge_flags & BGEF_IS_5788)
   5916 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
   5917 	else
   5918 		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
   5919 
   5920 	return rc;
   5921 }
   5922 
   5923 /*
   5924  * Report current media status.
   5925  */
   5926 static void
   5927 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
   5928 {
   5929 	struct bge_softc *sc = ifp->if_softc;
   5930 	struct mii_data *mii = &sc->bge_mii;
   5931 
   5932 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   5933 		ifmr->ifm_status = IFM_AVALID;
   5934 		ifmr->ifm_active = IFM_ETHER;
   5935 		if (CSR_READ_4(sc, BGE_MAC_STS) &
   5936 		    BGE_MACSTAT_TBI_PCS_SYNCHED)
   5937 			ifmr->ifm_status |= IFM_ACTIVE;
   5938 		ifmr->ifm_active |= IFM_1000_SX;
   5939 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
   5940 			ifmr->ifm_active |= IFM_HDX;
   5941 		else
   5942 			ifmr->ifm_active |= IFM_FDX;
   5943 		return;
   5944 	}
   5945 
   5946 	mii_pollstat(mii);
   5947 	ifmr->ifm_status = mii->mii_media_status;
   5948 	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
   5949 	    sc->bge_flowflags;
   5950 }
   5951 
   5952 static int
   5953 bge_ifflags_cb(struct ethercom *ec)
   5954 {
   5955 	struct ifnet *ifp = &ec->ec_if;
   5956 	struct bge_softc *sc = ifp->if_softc;
   5957 	int change = ifp->if_flags ^ sc->bge_if_flags;
   5958 
   5959 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
   5960 		return ENETRESET;
   5961 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
   5962 		return 0;
   5963 
   5964 	if ((ifp->if_flags & IFF_PROMISC) == 0)
   5965 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   5966 	else
   5967 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
   5968 
   5969 	bge_setmulti(sc);
   5970 
   5971 	sc->bge_if_flags = ifp->if_flags;
   5972 	return 0;
   5973 }
   5974 
   5975 static int
   5976 bge_ioctl(struct ifnet *ifp, u_long command, void *data)
   5977 {
   5978 	struct bge_softc *sc = ifp->if_softc;
   5979 	struct ifreq *ifr = (struct ifreq *) data;
   5980 	int s, error = 0;
   5981 	struct mii_data *mii;
   5982 
   5983 	s = splnet();
   5984 
   5985 	switch (command) {
   5986 	case SIOCSIFMEDIA:
   5987 		/* XXX Flow control is not supported for 1000BASE-SX */
   5988 		if (sc->bge_flags & BGEF_FIBER_TBI) {
   5989 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   5990 			sc->bge_flowflags = 0;
   5991 		}
   5992 
   5993 		/* Flow control requires full-duplex mode. */
   5994 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   5995 		    (ifr->ifr_media & IFM_FDX) == 0) {
   5996 		    	ifr->ifr_media &= ~IFM_ETH_FMASK;
   5997 		}
   5998 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   5999 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   6000 				/* We can do both TXPAUSE and RXPAUSE. */
   6001 				ifr->ifr_media |=
   6002 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   6003 			}
   6004 			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   6005 		}
   6006 		/* FALLTHROUGH */
   6007 	case SIOCGIFMEDIA:
   6008 		if (sc->bge_flags & BGEF_FIBER_TBI) {
   6009 			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
   6010 			    command);
   6011 		} else {
   6012 			mii = &sc->bge_mii;
   6013 			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
   6014 			    command);
   6015 		}
   6016 		break;
   6017 	default:
   6018 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   6019 			break;
   6020 
   6021 		error = 0;
   6022 
   6023 		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
   6024 			;
   6025 		else if (ifp->if_flags & IFF_RUNNING)
   6026 			bge_setmulti(sc);
   6027 		break;
   6028 	}
   6029 
   6030 	splx(s);
   6031 
   6032 	return error;
   6033 }
   6034 
   6035 static void
   6036 bge_watchdog(struct ifnet *ifp)
   6037 {
   6038 	struct bge_softc *sc;
   6039 	uint32_t status;
   6040 
   6041 	sc = ifp->if_softc;
   6042 
    6043 	/* If pause frames are active then don't reset the hardware. */
   6044 	if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
   6045 		status = CSR_READ_4(sc, BGE_RX_STS);
   6046 		if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
   6047 			/*
   6048 			 * If link partner has us in XOFF state then wait for
   6049 			 * the condition to clear.
   6050 			 */
   6051 			CSR_WRITE_4(sc, BGE_RX_STS, status);
   6052 			ifp->if_timer = 5;
   6053 			return;
   6054 		} else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
   6055 		    (status & BGE_RXSTAT_RCVD_XON) != 0) {
   6056 			/*
   6057 			 * If link partner has us in XOFF state then wait for
   6058 			 * the condition to clear.
   6059 			 */
   6060 			CSR_WRITE_4(sc, BGE_RX_STS, status);
   6061 			ifp->if_timer = 5;
   6062 			return;
   6063 		}
   6064 		/*
   6065 		 * Any other condition is unexpected and the controller
   6066 		 * should be reset.
   6067 		 */
   6068 	}
   6069 
   6070 	aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
   6071 
   6072 	ifp->if_flags &= ~IFF_RUNNING;
   6073 	bge_init(ifp);
   6074 
   6075 	ifp->if_oerrors++;
   6076 }
   6077 
   6078 static void
   6079 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
   6080 {
   6081 	int i;
   6082 
   6083 	BGE_CLRBIT_FLUSH(sc, reg, bit);
   6084 
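         	/* Poll for up to 1000 * 100us = 100ms for the enable bit to clear. */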
   6085 	for (i = 0; i < 1000; i++) {
   6086 		delay(100);
   6087 		if ((CSR_READ_4(sc, reg) & bit) == 0)
   6088 			return;
   6089 	}
   6090 
   6091 	/*
    6092 	 * Don't print the error for BGE_SRS_MODE; that block is known to
    6093 	 * fail to stop in some environments (and once after boot?).
   6094 	 */
   6095 	if (reg != BGE_SRS_MODE)
   6096 		aprint_error_dev(sc->bge_dev,
   6097 		    "block failed to stop: reg 0x%lx, bit 0x%08x\n",
   6098 		    (u_long)reg, bit);
   6099 }
   6100 
   6101 /*
   6102  * Stop the adapter and free any mbufs allocated to the
   6103  * RX and TX lists.
   6104  */
   6105 static void
   6106 bge_stop(struct ifnet *ifp, int disable)
   6107 {
   6108 	struct bge_softc *sc = ifp->if_softc;
   6109 
   6110 	if (disable) {
   6111 		sc->bge_detaching = 1;
   6112 		callout_halt(&sc->bge_timeout, NULL);
   6113 	} else
   6114 		callout_stop(&sc->bge_timeout);
   6115 
   6116 	/* Disable host interrupts. */
   6117 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
   6118 	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
   6119 
   6120 	/*
   6121 	 * Tell firmware we're shutting down.
   6122 	 */
   6123 	bge_stop_fw(sc);
   6124 	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
   6125 
   6126 	/*
   6127 	 * Disable all of the receiver blocks.
   6128 	 */
   6129 	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
   6130 	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
   6131 	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
   6132 	if (BGE_IS_5700_FAMILY(sc))
   6133 		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
   6134 	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
   6135 	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
   6136 	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
   6137 
   6138 	/*
   6139 	 * Disable all of the transmit blocks.
   6140 	 */
   6141 	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
   6142 	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
   6143 	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
   6144 	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
   6145 	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
   6146 	if (BGE_IS_5700_FAMILY(sc))
   6147 		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
   6148 	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
   6149 
   6150 	BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB);
   6151 	delay(40);
   6152 
   6153 	bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
   6154 
   6155 	/*
   6156 	 * Shut down all of the memory managers and related
   6157 	 * state machines.
   6158 	 */
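	/*
	 * The "5718 step" annotations below appear to follow the
	 * documented BCM5718-family shutdown sequence.
	 */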
   6159 	/* 5718 step 5a,5b */
   6160 	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
   6161 	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
   6162 	if (BGE_IS_5700_FAMILY(sc))
   6163 		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
   6164 
   6165 	/* 5718 step 5c,5d */
   6166 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
   6167 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
   6168 
   6169 	if (BGE_IS_5700_FAMILY(sc)) {
   6170 		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
   6171 		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
   6172 	}
   6173 
   6174 	bge_reset(sc);
   6175 	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
   6176 	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
   6177 
   6178 	/*
   6179 	 * Keep the ASF firmware running if up.
   6180 	 */
   6181 	if (sc->bge_asf_mode & ASF_STACKUP)
   6182 		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   6183 	else
   6184 		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
   6185 
   6186 	/* Free the RX lists. */
   6187 	bge_free_rx_ring_std(sc, disable);
   6188 
   6189 	/* Free jumbo RX list. */
   6190 	if (BGE_IS_JUMBO_CAPABLE(sc))
   6191 		bge_free_rx_ring_jumbo(sc);
   6192 
   6193 	/* Free TX buffers. */
   6194 	bge_free_tx_ring(sc, disable);
   6195 
   6196 	/*
   6197 	 * Isolate/power down the PHY.
   6198 	 */
   6199 	if (!(sc->bge_flags & BGEF_FIBER_TBI))
   6200 		mii_down(&sc->bge_mii);
   6201 
   6202 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
   6203 
   6204 	/* Clear MAC's link state (PHY may still have link UP). */
   6205 	BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   6206 
   6207 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6208 }
   6209 
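/*
 * Handle a link state change: clear the pending link-event flag and
 * bring the driver's notion of link state back in sync with the
 * MAC and PHY.
 */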
   6210 static void
   6211 bge_link_upd(struct bge_softc *sc)
   6212 {
   6213 	struct ifnet *ifp = &sc->ethercom.ec_if;
   6214 	struct mii_data *mii = &sc->bge_mii;
   6215 	uint32_t status;
   6216 	int link;
   6217 
   6218 	/* Clear 'pending link event' flag */
   6219 	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);
   6220 
   6221 	/*
   6222 	 * Process link state changes.
   6223 	 * Grrr. The link status word in the status block does
   6224 	 * not work correctly on the BCM5700 rev AX and BX chips,
   6225 	 * according to all available information. Hence, we have
   6226 	 * to enable MII interrupts in order to properly obtain
   6227 	 * async link changes. Unfortunately, this also means that
   6228 	 * we have to read the MAC status register to detect link
   6229 	 * changes, thereby adding an additional register access to
   6230 	 * the interrupt handler.
   6231 	 */
   6232 
   6233 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
   6234 		status = CSR_READ_4(sc, BGE_MAC_STS);
   6235 		if (status & BGE_MACSTAT_MI_INTERRUPT) {
   6236 			mii_pollstat(mii);
   6237 
   6238 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
   6239 			    mii->mii_media_status & IFM_ACTIVE &&
   6240 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
   6241 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
   6242 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
   6243 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
   6244 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
   6245 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   6246 
   6247 			/* Clear the interrupt */
   6248 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
   6249 			    BGE_EVTENB_MI_INTERRUPT);
   6250 			bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
   6251 			    BRGPHY_MII_ISR);
   6252 			bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
   6253 			    BRGPHY_MII_IMR, BRGPHY_INTRS);
   6254 		}
   6255 		return;
   6256 	}
   6257 
   6258 	if (sc->bge_flags & BGEF_FIBER_TBI) {
   6259 		status = CSR_READ_4(sc, BGE_MAC_STS);
   6260 		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
   6261 			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
   6262 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
   6263 				if (BGE_ASICREV(sc->bge_chipid)
   6264 				    == BGE_ASICREV_BCM5704) {
   6265 					BGE_CLRBIT(sc, BGE_MAC_MODE,
   6266 					    BGE_MACMODE_TBI_SEND_CFGS);
   6267 					DELAY(40);
   6268 				}
   6269 				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
   6270 				if_link_state_change(ifp, LINK_STATE_UP);
   6271 			}
   6272 		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
   6273 			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   6274 			if_link_state_change(ifp, LINK_STATE_DOWN);
   6275 		}
   6276 	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
   6277 		/*
    6278 		 * On some broken BCM chips the BGE_STATFLAG_LINKSTATE_CHANGED
    6279 		 * bit in the status word is always set.  Work around this by
    6280 		 * reading the PHY link status directly.
   6281 		 */
   6282 		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)?
   6283 		    BGE_STS_LINK : 0;
   6284 
   6285 		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
   6286 			mii_pollstat(mii);
   6287 
   6288 			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
   6289 			    mii->mii_media_status & IFM_ACTIVE &&
   6290 			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
   6291 				BGE_STS_SETBIT(sc, BGE_STS_LINK);
   6292 			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
   6293 			    (!(mii->mii_media_status & IFM_ACTIVE) ||
   6294 			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
   6295 				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
   6296 		}
   6297 	} else {
   6298 		/*
   6299 		 * For controllers that call mii_tick, we have to poll
   6300 		 * link status.
   6301 		 */
   6302 		mii_pollstat(mii);
   6303 	}
   6304 
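	/*
	 * On 5784 AX the MAC clock apparently changes with the link
	 * state (CPMU clock switching), so re-derive the core timer
	 * prescaler from the clock the CPMU currently reports.
	 */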
   6305 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
   6306 		uint32_t reg, scale;
   6307 
   6308 		reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) &
   6309 		    BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK;
   6310 		if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5)
   6311 			scale = 65;
   6312 		else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25)
   6313 			scale = 6;
   6314 		else
   6315 			scale = 12;
   6316 
   6317 		reg = CSR_READ_4(sc, BGE_MISC_CFG) &
   6318 		    ~BGE_MISCCFG_TIMER_PRESCALER;
   6319 		reg |= scale << 1;
   6320 		CSR_WRITE_4(sc, BGE_MISC_CFG, reg);
   6321 	}
   6322 	/* Clear the attention */
   6323 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
   6324 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
   6325 	    BGE_MACSTAT_LINK_CHANGED);
   6326 }
   6327 
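/*
 * Sysctl helper: validate a write to one of the hw.bge.* nodes before
 * committing it.  Only the Rx interrupt mitigation level is handled
 * at present.
 */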
   6328 static int
   6329 bge_sysctl_verify(SYSCTLFN_ARGS)
   6330 {
   6331 	int error, t;
   6332 	struct sysctlnode node;
   6333 
   6334 	node = *rnode;
   6335 	t = *(int*)rnode->sysctl_data;
   6336 	node.sysctl_data = &t;
   6337 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   6338 	if (error || newp == NULL)
   6339 		return error;
   6340 
   6341 #if 0
   6342 	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
   6343 	    node.sysctl_num, rnode->sysctl_num));
   6344 #endif
   6345 
   6346 	if (node.sysctl_num == bge_rxthresh_nodenum) {
   6347 		if (t < 0 || t >= NBGE_RX_THRESH)
   6348 			return EINVAL;
   6349 		bge_update_all_threshes(t);
   6350 	} else
   6351 		return EINVAL;
   6352 
   6353 	*(int*)rnode->sysctl_data = t;
   6354 
   6355 	return 0;
   6356 }
   6357 
   6358 /*
   6359  * Set up sysctl(3) MIB, hw.bge.*.
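 *
 * The only knob at present is hw.bge.rx_lvl, the receive interrupt
 * mitigation level (0 .. NBGE_RX_THRESH - 1) shared by all bge(4)
 * instances; writes are checked by bge_sysctl_verify() and applied
 * through bge_update_all_threshes().  From userland it could be set
 * with something like this (illustrative sketch only, not part of
 * the driver):
 *
 *	int lvl = 2;
 *	if (sysctlbyname("hw.bge.rx_lvl", NULL, NULL,
 *	    &lvl, sizeof(lvl)) == -1)
 *		err(EXIT_FAILURE, "sysctlbyname");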
   6360  */
   6361 static void
   6362 bge_sysctl_init(struct bge_softc *sc)
   6363 {
   6364 	int rc, bge_root_num;
   6365 	const struct sysctlnode *node;
   6366 
   6367 	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
   6368 	    0, CTLTYPE_NODE, "bge",
   6369 	    SYSCTL_DESCR("BGE interface controls"),
   6370 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
   6371 		goto out;
   6372 	}
   6373 
   6374 	bge_root_num = node->sysctl_num;
   6375 
   6376 	/* BGE Rx interrupt mitigation level */
   6377 	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
   6378 	    CTLFLAG_READWRITE,
   6379 	    CTLTYPE_INT, "rx_lvl",
   6380 	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
   6381 	    bge_sysctl_verify, 0,
   6382 	    &bge_rx_thresh_lvl,
   6383 	    0, CTL_HW, bge_root_num, CTL_CREATE,
   6384 	    CTL_EOL)) != 0) {
   6385 		goto out;
   6386 	}
   6387 
   6388 	bge_rxthresh_nodenum = node->sysctl_num;
   6389 
   6390 	return;
   6391 
   6392 out:
   6393 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
   6394 }
   6395 
   6396 #ifdef BGE_DEBUG
   6397 void
   6398 bge_debug_info(struct bge_softc *sc)
   6399 {
   6400 
   6401 	printf("Hardware Flags:\n");
   6402 	if (BGE_IS_57765_PLUS(sc))
   6403 		printf(" - 57765 Plus\n");
   6404 	if (BGE_IS_5717_PLUS(sc))
   6405 		printf(" - 5717 Plus\n");
   6406 	if (BGE_IS_5755_PLUS(sc))
   6407 		printf(" - 5755 Plus\n");
   6408 	if (BGE_IS_575X_PLUS(sc))
   6409 		printf(" - 575X Plus\n");
   6410 	if (BGE_IS_5705_PLUS(sc))
   6411 		printf(" - 5705 Plus\n");
   6412 	if (BGE_IS_5714_FAMILY(sc))
   6413 		printf(" - 5714 Family\n");
   6414 	if (BGE_IS_5700_FAMILY(sc))
   6415 		printf(" - 5700 Family\n");
   6416 	if (sc->bge_flags & BGEF_IS_5788)
   6417 		printf(" - 5788\n");
   6418 	if (sc->bge_flags & BGEF_JUMBO_CAPABLE)
   6419 		printf(" - Supports Jumbo Frames\n");
   6420 	if (sc->bge_flags & BGEF_NO_EEPROM)
   6421 		printf(" - No EEPROM\n");
   6422 	if (sc->bge_flags & BGEF_PCIX)
   6423 		printf(" - PCI-X Bus\n");
   6424 	if (sc->bge_flags & BGEF_PCIE)
   6425 		printf(" - PCI Express Bus\n");
   6426 	if (sc->bge_flags & BGEF_RX_ALIGNBUG)
   6427 		printf(" - RX Alignment Bug\n");
   6428 	if (sc->bge_flags & BGEF_APE)
   6429 		printf(" - APE\n");
   6430 	if (sc->bge_flags & BGEF_CPMU_PRESENT)
   6431 		printf(" - CPMU\n");
   6432 	if (sc->bge_flags & BGEF_TSO)
   6433 		printf(" - TSO\n");
   6434 	if (sc->bge_flags & BGEF_TAGGED_STATUS)
   6435 		printf(" - TAGGED_STATUS\n");
   6436 
   6437 	/* PHY related */
   6438 	if (sc->bge_phy_flags & BGEPHYF_NO_3LED)
   6439 		printf(" - No 3 LEDs\n");
   6440 	if (sc->bge_phy_flags & BGEPHYF_CRC_BUG)
   6441 		printf(" - CRC bug\n");
   6442 	if (sc->bge_phy_flags & BGEPHYF_ADC_BUG)
   6443 		printf(" - ADC bug\n");
   6444 	if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG)
   6445 		printf(" - 5704 A0 bug\n");
   6446 	if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG)
   6447 		printf(" - jitter bug\n");
   6448 	if (sc->bge_phy_flags & BGEPHYF_BER_BUG)
   6449 		printf(" - BER bug\n");
   6450 	if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM)
   6451 		printf(" - adjust trim\n");
   6452 	if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED)
   6453 		printf(" - no wirespeed\n");
   6454 
   6455 	/* ASF related */
   6456 	if (sc->bge_asf_mode & ASF_ENABLE)
   6457 		printf(" - ASF enable\n");
   6458 	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE)
   6459 		printf(" - ASF new handshake\n");
   6460 	if (sc->bge_asf_mode & ASF_STACKUP)
   6461 		printf(" - ASF stackup\n");
   6462 }
   6463 #endif /* BGE_DEBUG */
   6464 
   6465 static int
   6466 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
   6467 {
   6468 	prop_dictionary_t dict;
   6469 	prop_data_t ea;
   6470 
   6471 	if ((sc->bge_flags & BGEF_NO_EEPROM) == 0)
   6472 		return 1;
   6473 
   6474 	dict = device_properties(sc->bge_dev);
   6475 	ea = prop_dictionary_get(dict, "mac-address");
   6476 	if (ea != NULL) {
   6477 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   6478 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   6479 		memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   6480 		return 0;
   6481 	}
   6482 
   6483 	return 1;
   6484 }
   6485 
   6486 static int
   6487 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
   6488 {
   6489 	uint32_t mac_addr;
   6490 
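	/*
	 * Firmware may have left the station address in device SRAM.
	 * The upper 16 bits of the high word apparently hold the ASCII
	 * signature 'H','K' (0x484b) when a valid address is present.
	 */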
   6491 	mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
   6492 	if ((mac_addr >> 16) == 0x484b) {
   6493 		ether_addr[0] = (uint8_t)(mac_addr >> 8);
   6494 		ether_addr[1] = (uint8_t)mac_addr;
   6495 		mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
   6496 		ether_addr[2] = (uint8_t)(mac_addr >> 24);
   6497 		ether_addr[3] = (uint8_t)(mac_addr >> 16);
   6498 		ether_addr[4] = (uint8_t)(mac_addr >> 8);
   6499 		ether_addr[5] = (uint8_t)mac_addr;
   6500 		return 0;
   6501 	}
   6502 	return 1;
   6503 }
   6504 
   6505 static int
   6506 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
   6507 {
   6508 	int mac_offset = BGE_EE_MAC_OFFSET;
   6509 
   6510 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
   6511 		mac_offset = BGE_EE_MAC_OFFSET_5906;
   6512 
   6513 	return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
   6514 	    ETHER_ADDR_LEN));
   6515 }
   6516 
   6517 static int
   6518 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
   6519 {
   6520 
   6521 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
   6522 		return 1;
   6523 
   6524 	return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
   6525 	   ETHER_ADDR_LEN));
   6526 }
   6527 
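/*
 * Obtain the station address, trying each source in turn: the
 * "mac-address" device property (typically supplied by firmware or
 * machine-dependent code), on-chip SRAM, NVRAM and finally the
 * EEPROM.  The first method that succeeds wins; otherwise ENXIO is
 * returned.
 *
 * A machine-dependent attachment could hand the driver an address
 * via the property, e.g. (illustrative sketch only; "enaddr" and
 * "dev" are hypothetical locals):
 *
 *	prop_data_t pd = prop_data_create_data(enaddr, ETHER_ADDR_LEN);
 *	prop_dictionary_set(device_properties(dev), "mac-address", pd);
 *	prop_object_release(pd);
 */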
   6528 static int
   6529 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
   6530 {
   6531 	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
   6532 		/* NOTE: Order is critical */
   6533 		bge_get_eaddr_fw,
   6534 		bge_get_eaddr_mem,
   6535 		bge_get_eaddr_nvram,
   6536 		bge_get_eaddr_eeprom,
   6537 		NULL
   6538 	};
   6539 	const bge_eaddr_fcn_t *func;
   6540 
   6541 	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
   6542 		if ((*func)(sc, eaddr) == 0)
   6543 			break;
   6544 	}
   6545 	return (*func == NULL ? ENXIO : 0);
   6546 }
   6547