1 1.398 bouyer /* $NetBSD: if_bge.c,v 1.398 2025/05/26 08:27:04 bouyer Exp $ */ 2 1.8 thorpej 3 1.1 fvdl /* 4 1.1 fvdl * Copyright (c) 2001 Wind River Systems 5 1.1 fvdl * Copyright (c) 1997, 1998, 1999, 2001 6 1.1 fvdl * Bill Paul <wpaul (at) windriver.com>. All rights reserved. 7 1.1 fvdl * 8 1.1 fvdl * Redistribution and use in source and binary forms, with or without 9 1.1 fvdl * modification, are permitted provided that the following conditions 10 1.1 fvdl * are met: 11 1.1 fvdl * 1. Redistributions of source code must retain the above copyright 12 1.1 fvdl * notice, this list of conditions and the following disclaimer. 13 1.1 fvdl * 2. Redistributions in binary form must reproduce the above copyright 14 1.1 fvdl * notice, this list of conditions and the following disclaimer in the 15 1.1 fvdl * documentation and/or other materials provided with the distribution. 16 1.1 fvdl * 3. All advertising materials mentioning features or use of this software 17 1.1 fvdl * must display the following acknowledgement: 18 1.1 fvdl * This product includes software developed by Bill Paul. 19 1.1 fvdl * 4. Neither the name of the author nor the names of any co-contributors 20 1.1 fvdl * may be used to endorse or promote products derived from this software 21 1.1 fvdl * without specific prior written permission. 22 1.1 fvdl * 23 1.1 fvdl * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 1.1 fvdl * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 1.1 fvdl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 1.1 fvdl * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 1.1 fvdl * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 1.1 fvdl * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 1.1 fvdl * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 1.1 fvdl * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 1.1 fvdl * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 1.1 fvdl * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 1.1 fvdl * THE POSSIBILITY OF SUCH DAMAGE. 34 1.1 fvdl * 35 1.1 fvdl * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 1.1 fvdl */ 37 1.1 fvdl 38 1.1 fvdl /* 39 1.12 thorpej * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 1.1 fvdl * 41 1.12 thorpej * NetBSD version by: 42 1.12 thorpej * 43 1.12 thorpej * Frank van der Linden <fvdl (at) wasabisystems.com> 44 1.12 thorpej * Jason Thorpe <thorpej (at) wasabisystems.com> 45 1.32 tron * Jonathan Stone <jonathan (at) dsg.stanford.edu> 46 1.12 thorpej * 47 1.12 thorpej * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com> 48 1.1 fvdl * Senior Engineer, Wind River Systems 49 1.1 fvdl */ 50 1.1 fvdl 51 1.1 fvdl /* 52 1.1 fvdl * The Broadcom BCM5700 is based on technology originally developed by 53 1.1 fvdl * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 1.203 msaitoh * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has 55 1.1 fvdl * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 1.1 fvdl * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 1.1 fvdl * frames, highly configurable RX filtering, and 16 RX and TX queues 58 1.1 fvdl * (which, along with RX filter rules, can be used for QOS applications). 
59 1.1 fvdl * Other features, such as TCP segmentation, may be available as part 60 1.1 fvdl * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 1.1 fvdl * firmware images can be stored in hardware and need not be compiled 62 1.1 fvdl * into the driver. 63 1.1 fvdl * 64 1.1 fvdl * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 1.33 tsutsui * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 1.1 fvdl * 67 1.1 fvdl * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 1.25 jonathan * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 1.1 fvdl * does not support external SSRAM. 70 1.1 fvdl * 71 1.1 fvdl * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 1.1 fvdl * brand name, which is functionally similar but lacks PCI-X support. 73 1.1 fvdl * 74 1.1 fvdl * Without external SSRAM, you can only have at most 4 TX rings, 75 1.1 fvdl * and the use of the mini RX ring is disabled. This seems to imply 76 1.1 fvdl * that these features are simply not available on the BCM5701. As a 77 1.1 fvdl * result, this driver does not implement any support for the mini RX 78 1.1 fvdl * ring. 79 1.1 fvdl */ 80 1.43 lukem 81 1.43 lukem #include <sys/cdefs.h> 82 1.398 bouyer __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.398 2025/05/26 08:27:04 bouyer Exp $"); 83 1.1 fvdl 84 1.1 fvdl #include <sys/param.h> 85 1.370 skrll #include <sys/types.h> 86 1.355 skrll 87 1.1 fvdl #include <sys/callout.h> 88 1.355 skrll #include <sys/device.h> 89 1.364 skrll #include <sys/kernel.h> 90 1.366 skrll #include <sys/kmem.h> 91 1.1 fvdl #include <sys/mbuf.h> 92 1.355 skrll #include <sys/rndsource.h> 93 1.1 fvdl #include <sys/socket.h> 94 1.355 skrll #include <sys/sockio.h> 95 1.64 jonathan #include <sys/sysctl.h> 96 1.355 skrll #include <sys/systm.h> 97 1.1 fvdl 98 1.1 fvdl #include <net/if.h> 99 1.1 fvdl #include <net/if_dl.h> 100 1.1 fvdl #include <net/if_media.h> 101 1.1 fvdl #include <net/if_ether.h> 102 1.330 msaitoh #include <net/bpf.h> 103 1.148 mlelstv 104 1.247 msaitoh /* Headers for TCP Segmentation Offload (TSO) */ 105 1.95 jonathan #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */ 106 1.95 jonathan #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */ 107 1.95 jonathan #include <netinet/ip.h> /* for struct ip */ 108 1.95 jonathan #include <netinet/tcp.h> /* for struct tcphdr */ 109 1.95 jonathan 110 1.1 fvdl #include <dev/pci/pcireg.h> 111 1.1 fvdl #include <dev/pci/pcivar.h> 112 1.1 fvdl #include <dev/pci/pcidevs.h> 113 1.1 fvdl 114 1.1 fvdl #include <dev/mii/mii.h> 115 1.1 fvdl #include <dev/mii/miivar.h> 116 1.1 fvdl #include <dev/mii/miidevs.h> 117 1.1 fvdl #include <dev/mii/brgphyreg.h> 118 1.1 fvdl 119 1.1 fvdl #include <dev/pci/if_bgereg.h> 120 1.164 msaitoh #include <dev/pci/if_bgevar.h> 121 1.1 fvdl 122 1.164 msaitoh #include <prop/proplib.h> 123 1.1 fvdl 124 1.46 jonathan #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 125 1.46 jonathan 126 1.63 jonathan 127 1.63 jonathan /* 128 1.63 jonathan * Tunable thresholds for rx-side bge interrupt mitigation. 129 1.63 jonathan */ 130 1.63 jonathan 131 1.63 jonathan /* 132 1.63 jonathan * The pairs of values below were obtained from empirical measurement 133 1.63 jonathan * on bcm5700 rev B2; they are designed to give roughly 1 receive 134 1.63 jonathan * interrupt for every N packets received, where N is, approximately, 135 1.63 jonathan * the second value (rx_max_bds) in each pair.
The values are chosen 136 1.63 jonathan * such that moving from one pair to the succeeding pair was observed 137 1.63 jonathan * to roughly halve interrupt rate under sustained input packet load. 138 1.63 jonathan * The values were empirically chosen to avoid overflowing internal 139 1.184 njoly * limits on the bcm5700: increasing rx_ticks much beyond 600 140 1.63 jonathan * results in internal wrapping and higher interrupt rates. 141 1.63 jonathan * The limit of 46 frames was chosen to match NFS workloads. 142 1.87 perry * 143 1.63 jonathan * These values also work well on bcm5701, bcm5704C, and (less 144 1.63 jonathan * tested) bcm5703. On other chipsets, (including the Altima chip 145 1.63 jonathan * family), the larger values may overflow internal chip limits, 146 1.63 jonathan * leading to increasing interrupt rates rather than lower interrupt 147 1.63 jonathan * rates. 148 1.63 jonathan * 149 1.63 jonathan * Applications using heavy interrupt mitigation (interrupting every 150 1.63 jonathan * 32 or 46 frames) in both directions may need to increase the TCP 151 1.63 jonathan * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain 152 1.87 perry * full link bandwidth, due to ACKs and window updates lingering 153 1.63 jonathan * in the RX queue during the 30-to-40-frame interrupt-mitigation window. 154 1.63 jonathan */ 155 1.104 thorpej static const struct bge_load_rx_thresh { 156 1.63 jonathan int rx_ticks; 157 1.63 jonathan int rx_max_bds; } 158 1.63 jonathan bge_rx_threshes[] = { 159 1.330 msaitoh { 16, 1 }, /* rx_max_bds = 1 disables interrupt mitigation */ 160 1.330 msaitoh { 32, 2 }, 161 1.330 msaitoh { 50, 4 }, 162 1.330 msaitoh { 100, 8 }, 163 1.63 jonathan { 192, 16 }, 164 1.63 jonathan { 416, 32 }, 165 1.63 jonathan { 598, 46 } 166 1.63 jonathan }; 167 1.63 jonathan #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 168 1.63 jonathan 169 1.63 jonathan /* XXX patchable; should be sysctl'able */ 170 1.177 msaitoh static int bge_auto_thresh = 1; 171 1.177 msaitoh static int bge_rx_thresh_lvl; 172 1.64 jonathan 173 1.177 msaitoh static int bge_rxthresh_nodenum; 174 1.1 fvdl 175 1.170 msaitoh typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 176 1.151 cegger 177 1.237 msaitoh static uint32_t bge_chipid(const struct pci_attach_args *); 178 1.288 msaitoh static int bge_can_use_msi(struct bge_softc *); 179 1.177 msaitoh static int bge_probe(device_t, cfdata_t, void *); 180 1.177 msaitoh static void bge_attach(device_t, device_t, void *); 181 1.227 msaitoh static int bge_detach(device_t, int); 182 1.177 msaitoh static void bge_release_resources(struct bge_softc *); 183 1.177 msaitoh 184 1.177 msaitoh static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]); 185 1.177 msaitoh static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 186 1.177 msaitoh static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 187 1.177 msaitoh static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 188 1.177 msaitoh static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 189 1.177 msaitoh 190 1.177 msaitoh static void bge_txeof(struct bge_softc *); 191 1.219 msaitoh static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); 192 1.177 msaitoh static void bge_rxeof(struct bge_softc *); 193 1.177 msaitoh 194 1.177 msaitoh static void bge_asf_driver_up (struct bge_softc *); 195 1.177 msaitoh static void bge_tick(void *); 196 1.177 msaitoh static void bge_stats_update(struct bge_softc *); 197 1.177 msaitoh static void 
bge_stats_update_regs(struct bge_softc *); 198 1.177 msaitoh static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *); 199 1.177 msaitoh 200 1.177 msaitoh static int bge_intr(void *); 201 1.177 msaitoh static void bge_start(struct ifnet *); 202 1.375 skrll static void bge_start_locked(struct ifnet *); 203 1.186 msaitoh static int bge_ifflags_cb(struct ethercom *); 204 1.177 msaitoh static int bge_ioctl(struct ifnet *, u_long, void *); 205 1.177 msaitoh static int bge_init(struct ifnet *); 206 1.177 msaitoh static void bge_stop(struct ifnet *, int); 207 1.386 skrll static bool bge_watchdog_tick(struct ifnet *); 208 1.177 msaitoh static int bge_ifmedia_upd(struct ifnet *); 209 1.177 msaitoh static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 210 1.375 skrll static void bge_handle_reset_work(struct work *, void *); 211 1.177 msaitoh 212 1.177 msaitoh static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 213 1.177 msaitoh static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int); 214 1.177 msaitoh 215 1.177 msaitoh static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 216 1.177 msaitoh static int bge_read_eeprom(struct bge_softc *, void *, int, int); 217 1.177 msaitoh static void bge_setmulti(struct bge_softc *); 218 1.104 thorpej 219 1.177 msaitoh static void bge_handle_events(struct bge_softc *); 220 1.177 msaitoh static int bge_alloc_jumbo_mem(struct bge_softc *); 221 1.177 msaitoh static void bge_free_jumbo_mem(struct bge_softc *); 222 1.177 msaitoh static void *bge_jalloc(struct bge_softc *); 223 1.177 msaitoh static void bge_jfree(struct mbuf *, void *, size_t, void *); 224 1.177 msaitoh static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 225 1.177 msaitoh static int bge_init_rx_ring_jumbo(struct bge_softc *); 226 1.177 msaitoh static void bge_free_rx_ring_jumbo(struct bge_softc *); 227 1.376 skrll 228 1.376 skrll static int bge_newbuf_std(struct bge_softc *, int); 229 1.376 skrll static int bge_init_rx_ring_std(struct bge_softc *); 230 1.376 skrll static void bge_fill_rx_ring_std(struct bge_softc *); 231 1.376 skrll static void bge_free_rx_ring_std(struct bge_softc *m); 232 1.376 skrll 233 1.320 bouyer static void bge_free_tx_ring(struct bge_softc *m, bool); 234 1.177 msaitoh static int bge_init_tx_ring(struct bge_softc *); 235 1.177 msaitoh 236 1.177 msaitoh static int bge_chipinit(struct bge_softc *); 237 1.177 msaitoh static int bge_blockinit(struct bge_softc *); 238 1.216 msaitoh static int bge_phy_addr(struct bge_softc *); 239 1.177 msaitoh static uint32_t bge_readmem_ind(struct bge_softc *, int); 240 1.177 msaitoh static void bge_writemem_ind(struct bge_softc *, int, int); 241 1.177 msaitoh static void bge_writembx(struct bge_softc *, int, int); 242 1.211 msaitoh static void bge_writembx_flush(struct bge_softc *, int, int); 243 1.177 msaitoh static void bge_writemem_direct(struct bge_softc *, int, int); 244 1.177 msaitoh static void bge_writereg_ind(struct bge_softc *, int, int); 245 1.177 msaitoh static void bge_set_max_readrq(struct bge_softc *); 246 1.177 msaitoh 247 1.322 msaitoh static int bge_miibus_readreg(device_t, int, int, uint16_t *); 248 1.322 msaitoh static int bge_miibus_writereg(device_t, int, int, uint16_t); 249 1.201 matt static void bge_miibus_statchg(struct ifnet *); 250 1.177 msaitoh 251 1.216 msaitoh #define BGE_RESET_SHUTDOWN 0 252 1.216 msaitoh #define BGE_RESET_START 1 253 1.216 msaitoh #define BGE_RESET_SUSPEND 2 254 1.177 msaitoh static void bge_sig_post_reset(struct 
bge_softc *, int); 255 1.177 msaitoh static void bge_sig_legacy(struct bge_softc *, int); 256 1.177 msaitoh static void bge_sig_pre_reset(struct bge_softc *, int); 257 1.216 msaitoh static void bge_wait_for_event_ack(struct bge_softc *); 258 1.177 msaitoh static void bge_stop_fw(struct bge_softc *); 259 1.177 msaitoh static int bge_reset(struct bge_softc *); 260 1.177 msaitoh static void bge_link_upd(struct bge_softc *); 261 1.207 msaitoh static void bge_sysctl_init(struct bge_softc *); 262 1.207 msaitoh static int bge_sysctl_verify(SYSCTLFN_PROTO); 263 1.95 jonathan 264 1.216 msaitoh static void bge_ape_lock_init(struct bge_softc *); 265 1.216 msaitoh static void bge_ape_read_fw_ver(struct bge_softc *); 266 1.216 msaitoh static int bge_ape_lock(struct bge_softc *, int); 267 1.216 msaitoh static void bge_ape_unlock(struct bge_softc *, int); 268 1.216 msaitoh static void bge_ape_send_event(struct bge_softc *, uint32_t); 269 1.216 msaitoh static void bge_ape_driver_state_change(struct bge_softc *, int); 270 1.216 msaitoh 271 1.1 fvdl #ifdef BGE_DEBUG 272 1.1 fvdl #define DPRINTF(x) if (bgedebug) printf x 273 1.331 msaitoh #define DPRINTFN(n, x) if (bgedebug >= (n)) printf x 274 1.95 jonathan #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0) 275 1.1 fvdl int bgedebug = 0; 276 1.95 jonathan int bge_tso_debug = 0; 277 1.369 skrll void bge_debug_info(struct bge_softc *); 278 1.1 fvdl #else 279 1.1 fvdl #define DPRINTF(x) 280 1.331 msaitoh #define DPRINTFN(n, x) 281 1.95 jonathan #define BGE_TSO_PRINTF(x) 282 1.1 fvdl #endif 283 1.1 fvdl 284 1.72 thorpej #ifdef BGE_EVENT_COUNTERS 285 1.72 thorpej #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 286 1.72 thorpej #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 287 1.72 thorpej #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 288 1.72 thorpej #else 289 1.72 thorpej #define BGE_EVCNT_INCR(ev) /* nothing */ 290 1.72 thorpej #define BGE_EVCNT_ADD(ev, val) /* nothing */ 291 1.72 thorpej #define BGE_EVCNT_UPD(ev, val) /* nothing */ 292 1.72 thorpej #endif 293 1.72 thorpej 294 1.325 msaitoh #define VIDDID(a, b) PCI_VENDOR_ ## a, PCI_PRODUCT_ ## a ## _ ## b 295 1.325 msaitoh /* 296 1.325 msaitoh * The BCM5700 documentation seems to indicate that the hardware still has the 297 1.325 msaitoh * Alteon vendor ID burned into it, though it should always be overridden by 298 1.325 msaitoh * the value in the EEPROM. We'll check for it anyway. 
299 1.325 msaitoh */ 300 1.158 msaitoh static const struct bge_product { 301 1.158 msaitoh pci_vendor_id_t bp_vendor; 302 1.158 msaitoh pci_product_id_t bp_product; 303 1.158 msaitoh const char *bp_name; 304 1.158 msaitoh } bge_products[] = { 305 1.325 msaitoh { VIDDID(ALTEON, BCM5700), "Broadcom BCM5700 Gigabit" }, 306 1.325 msaitoh { VIDDID(ALTEON, BCM5701), "Broadcom BCM5701 Gigabit" }, 307 1.325 msaitoh { VIDDID(ALTIMA, AC1000), "Altima AC1000 Gigabit" }, 308 1.325 msaitoh { VIDDID(ALTIMA, AC1001), "Altima AC1001 Gigabit" }, 309 1.325 msaitoh { VIDDID(ALTIMA, AC1003), "Altima AC1003 Gigabit" }, 310 1.325 msaitoh { VIDDID(ALTIMA, AC9100), "Altima AC9100 Gigabit" }, 311 1.325 msaitoh { VIDDID(APPLE, BCM5701), "APPLE BCM5701 Gigabit" }, 312 1.325 msaitoh { VIDDID(BROADCOM, BCM5700), "Broadcom BCM5700 Gigabit" }, 313 1.325 msaitoh { VIDDID(BROADCOM, BCM5701), "Broadcom BCM5701 Gigabit" }, 314 1.325 msaitoh { VIDDID(BROADCOM, BCM5702), "Broadcom BCM5702 Gigabit" }, 315 1.326 msaitoh { VIDDID(BROADCOM, BCM5702FE), "Broadcom BCM5702FE Fast" }, 316 1.325 msaitoh { VIDDID(BROADCOM, BCM5702X), "Broadcom BCM5702X Gigabit" }, 317 1.325 msaitoh { VIDDID(BROADCOM, BCM5703), "Broadcom BCM5703 Gigabit" }, 318 1.325 msaitoh { VIDDID(BROADCOM, BCM5703X), "Broadcom BCM5703X Gigabit" }, 319 1.325 msaitoh { VIDDID(BROADCOM, BCM5703_ALT),"Broadcom BCM5703 Gigabit" }, 320 1.325 msaitoh { VIDDID(BROADCOM, BCM5704C), "Broadcom BCM5704C Dual Gigabit" }, 321 1.325 msaitoh { VIDDID(BROADCOM, BCM5704S), "Broadcom BCM5704S Dual Gigabit" }, 322 1.326 msaitoh { VIDDID(BROADCOM, BCM5704S_ALT),"Broadcom BCM5704S Dual Gigabit" }, 323 1.325 msaitoh { VIDDID(BROADCOM, BCM5705), "Broadcom BCM5705 Gigabit" }, 324 1.325 msaitoh { VIDDID(BROADCOM, BCM5705F), "Broadcom BCM5705F Gigabit" }, 325 1.325 msaitoh { VIDDID(BROADCOM, BCM5705K), "Broadcom BCM5705K Gigabit" }, 326 1.325 msaitoh { VIDDID(BROADCOM, BCM5705M), "Broadcom BCM5705M Gigabit" }, 327 1.325 msaitoh { VIDDID(BROADCOM, BCM5705M_ALT),"Broadcom BCM5705M Gigabit" }, 328 1.325 msaitoh { VIDDID(BROADCOM, BCM5714), "Broadcom BCM5714 Gigabit" }, 329 1.325 msaitoh { VIDDID(BROADCOM, BCM5714S), "Broadcom BCM5714S Gigabit" }, 330 1.325 msaitoh { VIDDID(BROADCOM, BCM5715), "Broadcom BCM5715 Gigabit" }, 331 1.325 msaitoh { VIDDID(BROADCOM, BCM5715S), "Broadcom BCM5715S Gigabit" }, 332 1.325 msaitoh { VIDDID(BROADCOM, BCM5717), "Broadcom BCM5717 Gigabit" }, 333 1.325 msaitoh { VIDDID(BROADCOM, BCM5717C), "Broadcom BCM5717 Gigabit" }, 334 1.325 msaitoh { VIDDID(BROADCOM, BCM5718), "Broadcom BCM5718 Gigabit" }, 335 1.325 msaitoh { VIDDID(BROADCOM, BCM5719), "Broadcom BCM5719 Gigabit" }, 336 1.325 msaitoh { VIDDID(BROADCOM, BCM5720), "Broadcom BCM5720 Gigabit" }, 337 1.325 msaitoh { VIDDID(BROADCOM, BCM5721), "Broadcom BCM5721 Gigabit" }, 338 1.325 msaitoh { VIDDID(BROADCOM, BCM5722), "Broadcom BCM5722 Gigabit" }, 339 1.325 msaitoh { VIDDID(BROADCOM, BCM5723), "Broadcom BCM5723 Gigabit" }, 340 1.327 msaitoh { VIDDID(BROADCOM, BCM5725), "Broadcom BCM5725 Gigabit" }, 341 1.327 msaitoh { VIDDID(BROADCOM, BCM5727), "Broadcom BCM5727 Gigabit" }, 342 1.325 msaitoh { VIDDID(BROADCOM, BCM5750), "Broadcom BCM5750 Gigabit" }, 343 1.325 msaitoh { VIDDID(BROADCOM, BCM5751), "Broadcom BCM5751 Gigabit" }, 344 1.325 msaitoh { VIDDID(BROADCOM, BCM5751F), "Broadcom BCM5751F Gigabit" }, 345 1.325 msaitoh { VIDDID(BROADCOM, BCM5751M), "Broadcom BCM5751M Gigabit" }, 346 1.325 msaitoh { VIDDID(BROADCOM, BCM5752), "Broadcom BCM5752 Gigabit" }, 347 1.325 msaitoh { VIDDID(BROADCOM, BCM5752M), "Broadcom 
BCM5752M Gigabit" }, 348 1.325 msaitoh { VIDDID(BROADCOM, BCM5753), "Broadcom BCM5753 Gigabit" }, 349 1.325 msaitoh { VIDDID(BROADCOM, BCM5753F), "Broadcom BCM5753F Gigabit" }, 350 1.325 msaitoh { VIDDID(BROADCOM, BCM5753M), "Broadcom BCM5753M Gigabit" }, 351 1.325 msaitoh { VIDDID(BROADCOM, BCM5754), "Broadcom BCM5754 Gigabit" }, 352 1.325 msaitoh { VIDDID(BROADCOM, BCM5754M), "Broadcom BCM5754M Gigabit" }, 353 1.325 msaitoh { VIDDID(BROADCOM, BCM5755), "Broadcom BCM5755 Gigabit" }, 354 1.325 msaitoh { VIDDID(BROADCOM, BCM5755M), "Broadcom BCM5755M Gigabit" }, 355 1.325 msaitoh { VIDDID(BROADCOM, BCM5756), "Broadcom BCM5756 Gigabit" }, 356 1.325 msaitoh { VIDDID(BROADCOM, BCM5761), "Broadcom BCM5761 Gigabit" }, 357 1.325 msaitoh { VIDDID(BROADCOM, BCM5761E), "Broadcom BCM5761E Gigabit" }, 358 1.325 msaitoh { VIDDID(BROADCOM, BCM5761S), "Broadcom BCM5761S Gigabit" }, 359 1.325 msaitoh { VIDDID(BROADCOM, BCM5761SE), "Broadcom BCM5761SE Gigabit" }, 360 1.327 msaitoh { VIDDID(BROADCOM, BCM5762), "Broadcom BCM5762 Gigabit" }, 361 1.325 msaitoh { VIDDID(BROADCOM, BCM5764), "Broadcom BCM5764 Gigabit" }, 362 1.325 msaitoh { VIDDID(BROADCOM, BCM5780), "Broadcom BCM5780 Gigabit" }, 363 1.325 msaitoh { VIDDID(BROADCOM, BCM5780S), "Broadcom BCM5780S Gigabit" }, 364 1.325 msaitoh { VIDDID(BROADCOM, BCM5781), "Broadcom BCM5781 Gigabit" }, 365 1.325 msaitoh { VIDDID(BROADCOM, BCM5782), "Broadcom BCM5782 Gigabit" }, 366 1.325 msaitoh { VIDDID(BROADCOM, BCM5784M), "BCM5784M NetLink 1000baseT" }, 367 1.325 msaitoh { VIDDID(BROADCOM, BCM5785F), "BCM5785F NetLink 10/100" }, 368 1.325 msaitoh { VIDDID(BROADCOM, BCM5785G), "BCM5785G NetLink 1000baseT" }, 369 1.325 msaitoh { VIDDID(BROADCOM, BCM5786), "Broadcom BCM5786 Gigabit" }, 370 1.325 msaitoh { VIDDID(BROADCOM, BCM5787), "Broadcom BCM5787 Gigabit" }, 371 1.325 msaitoh { VIDDID(BROADCOM, BCM5787F), "Broadcom BCM5787F 10/100" }, 372 1.325 msaitoh { VIDDID(BROADCOM, BCM5787M), "Broadcom BCM5787M Gigabit" }, 373 1.325 msaitoh { VIDDID(BROADCOM, BCM5788), "Broadcom BCM5788 Gigabit" }, 374 1.325 msaitoh { VIDDID(BROADCOM, BCM5789), "Broadcom BCM5789 Gigabit" }, 375 1.325 msaitoh { VIDDID(BROADCOM, BCM5901), "Broadcom BCM5901 Fast" }, 376 1.325 msaitoh { VIDDID(BROADCOM, BCM5901A2), "Broadcom BCM5901A2 Fast" }, 377 1.325 msaitoh { VIDDID(BROADCOM, BCM5903M), "Broadcom BCM5903M Fast" }, 378 1.325 msaitoh { VIDDID(BROADCOM, BCM5906), "Broadcom BCM5906 Fast" }, 379 1.325 msaitoh { VIDDID(BROADCOM, BCM5906M), "Broadcom BCM5906M Fast" }, 380 1.325 msaitoh { VIDDID(BROADCOM, BCM57760), "Broadcom BCM57760 Gigabit" }, 381 1.325 msaitoh { VIDDID(BROADCOM, BCM57761), "Broadcom BCM57761 Gigabit" }, 382 1.325 msaitoh { VIDDID(BROADCOM, BCM57762), "Broadcom BCM57762 Gigabit" }, 383 1.327 msaitoh { VIDDID(BROADCOM, BCM57764), "Broadcom BCM57764 Gigabit" }, 384 1.325 msaitoh { VIDDID(BROADCOM, BCM57765), "Broadcom BCM57765 Gigabit" }, 385 1.325 msaitoh { VIDDID(BROADCOM, BCM57766), "Broadcom BCM57766 Gigabit" }, 386 1.327 msaitoh { VIDDID(BROADCOM, BCM57767), "Broadcom BCM57767 Gigabit" }, 387 1.325 msaitoh { VIDDID(BROADCOM, BCM57780), "Broadcom BCM57780 Gigabit" }, 388 1.325 msaitoh { VIDDID(BROADCOM, BCM57781), "Broadcom BCM57781 Gigabit" }, 389 1.325 msaitoh { VIDDID(BROADCOM, BCM57782), "Broadcom BCM57782 Gigabit" }, 390 1.325 msaitoh { VIDDID(BROADCOM, BCM57785), "Broadcom BCM57785 Gigabit" }, 391 1.325 msaitoh { VIDDID(BROADCOM, BCM57786), "Broadcom BCM57786 Gigabit" }, 392 1.327 msaitoh { VIDDID(BROADCOM, BCM57787), "Broadcom BCM57787 Gigabit" }, 393 1.325 msaitoh { 
VIDDID(BROADCOM, BCM57788), "Broadcom BCM57788 Gigabit" }, 394 1.325 msaitoh { VIDDID(BROADCOM, BCM57790), "Broadcom BCM57790 Gigabit" }, 395 1.325 msaitoh { VIDDID(BROADCOM, BCM57791), "Broadcom BCM57791 Gigabit" }, 396 1.325 msaitoh { VIDDID(BROADCOM, BCM57795), "Broadcom BCM57795 Gigabit" }, 397 1.325 msaitoh { VIDDID(SCHNEIDERKOCH, SK_9DX1),"SysKonnect SK-9Dx1 Gigabit" }, 398 1.326 msaitoh { VIDDID(SCHNEIDERKOCH, SK_9MXX),"SysKonnect SK-9Mxx Gigabit" }, 399 1.325 msaitoh { VIDDID(3COM, 3C996), "3Com 3c996 Gigabit" }, 400 1.325 msaitoh { VIDDID(FUJITSU4, PW008GE4), "Fujitsu PW008GE4 Gigabit" }, 401 1.325 msaitoh { VIDDID(FUJITSU4, PW008GE5), "Fujitsu PW008GE5 Gigabit" }, 402 1.325 msaitoh { VIDDID(FUJITSU4, PP250_450_LAN),"Fujitsu Primepower 250/450 Gigabit" }, 403 1.325 msaitoh { 0, 0, NULL }, 404 1.158 msaitoh }; 405 1.158 msaitoh 406 1.261 msaitoh #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGEF_JUMBO_CAPABLE) 407 1.261 msaitoh #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGEF_5700_FAMILY) 408 1.261 msaitoh #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGEF_5705_PLUS) 409 1.261 msaitoh #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGEF_5714_FAMILY) 410 1.261 msaitoh #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGEF_575X_PLUS) 411 1.261 msaitoh #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGEF_5755_PLUS) 412 1.261 msaitoh #define BGE_IS_57765_FAMILY(sc) ((sc)->bge_flags & BGEF_57765_FAMILY) 413 1.261 msaitoh #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGEF_57765_PLUS) 414 1.261 msaitoh #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGEF_5717_PLUS) 415 1.166 msaitoh 416 1.158 msaitoh static const struct bge_revision { 417 1.158 msaitoh uint32_t br_chipid; 418 1.158 msaitoh const char *br_name; 419 1.158 msaitoh } bge_revisions[] = { 420 1.158 msaitoh { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 421 1.158 msaitoh { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 422 1.158 msaitoh { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 423 1.158 msaitoh { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 424 1.158 msaitoh { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 425 1.158 msaitoh { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 426 1.158 msaitoh { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 427 1.158 msaitoh { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 428 1.158 msaitoh { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 429 1.158 msaitoh { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 430 1.158 msaitoh { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 431 1.158 msaitoh { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 432 1.172 msaitoh { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" }, 433 1.172 msaitoh { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" }, 434 1.172 msaitoh { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" }, 435 1.172 msaitoh { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" }, 436 1.172 msaitoh { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" }, 437 1.158 msaitoh { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 438 1.158 msaitoh { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 439 1.158 msaitoh { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 440 1.158 msaitoh { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 441 1.159 msaitoh { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 442 1.158 msaitoh { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 443 1.158 msaitoh { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 444 1.158 msaitoh { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 445 1.158 msaitoh { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 446 1.158 msaitoh { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 447 1.158 msaitoh { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 448 1.161 msaitoh { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 449 1.161 
msaitoh { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 450 1.161 msaitoh { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 451 1.161 msaitoh { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 452 1.161 msaitoh { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 453 1.161 msaitoh { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 454 1.158 msaitoh { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 455 1.158 msaitoh { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 456 1.158 msaitoh { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 457 1.159 msaitoh { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 458 1.159 msaitoh { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 459 1.159 msaitoh { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 460 1.159 msaitoh { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 461 1.159 msaitoh { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 462 1.159 msaitoh { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 463 1.216 msaitoh { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, 464 1.216 msaitoh { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, 465 1.216 msaitoh { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, 466 1.216 msaitoh { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, 467 1.158 msaitoh { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 468 1.158 msaitoh { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 469 1.158 msaitoh { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 470 1.158 msaitoh { BGE_CHIPID_BCM5755_C0, "BCM5755 C0" }, 471 1.172 msaitoh { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 472 1.172 msaitoh { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 473 1.327 msaitoh { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, 474 1.327 msaitoh { BGE_CHIPID_BCM5762_B0, "BCM5762 B0" }, 475 1.172 msaitoh { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 476 1.172 msaitoh { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 477 1.284 msaitoh { BGE_CHIPID_BCM5784_B0, "BCM5784 B0" }, 478 1.172 msaitoh /* 5754 and 5787 share the same ASIC ID */ 479 1.158 msaitoh { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 480 1.158 msaitoh { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 481 1.158 msaitoh { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 482 1.206 msaitoh { BGE_CHIPID_BCM5906_A0, "BCM5906 A0" }, 483 1.161 msaitoh { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 484 1.161 msaitoh { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 485 1.214 msaitoh { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, 486 1.214 msaitoh { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, 487 1.305 msaitoh { BGE_CHIPID_BCM57766_A0, "BCM57766 A0" }, 488 1.172 msaitoh { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 489 1.172 msaitoh { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 490 1.172 msaitoh 491 1.158 msaitoh { 0, NULL } 492 1.158 msaitoh }; 493 1.158 msaitoh 494 1.158 msaitoh /* 495 1.158 msaitoh * Some defaults for major revisions, so that newer steppings 496 1.158 msaitoh * that we don't know about have a shot at working. 
497 1.158 msaitoh */ 498 1.158 msaitoh static const struct bge_revision bge_majorrevs[] = { 499 1.158 msaitoh { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 500 1.158 msaitoh { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 501 1.158 msaitoh { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 502 1.158 msaitoh { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 503 1.158 msaitoh { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 504 1.162 msaitoh { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 505 1.216 msaitoh { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 506 1.158 msaitoh { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 507 1.172 msaitoh { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 508 1.172 msaitoh { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 509 1.158 msaitoh { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 510 1.172 msaitoh { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 511 1.172 msaitoh { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 512 1.172 msaitoh { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 513 1.162 msaitoh /* 5754 and 5787 share the same ASIC ID */ 514 1.166 msaitoh { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 515 1.172 msaitoh { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 516 1.216 msaitoh { BGE_ASICREV_BCM57765, "unknown BCM57765" }, 517 1.216 msaitoh { BGE_ASICREV_BCM57766, "unknown BCM57766" }, 518 1.172 msaitoh { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 519 1.172 msaitoh { BGE_ASICREV_BCM5717, "unknown BCM5717" }, 520 1.216 msaitoh { BGE_ASICREV_BCM5719, "unknown BCM5719" }, 521 1.216 msaitoh { BGE_ASICREV_BCM5720, "unknown BCM5720" }, 522 1.327 msaitoh { BGE_ASICREV_BCM5762, "unknown BCM5762" }, 523 1.172 msaitoh 524 1.158 msaitoh { 0, NULL } 525 1.158 msaitoh }; 526 1.17 thorpej 527 1.177 msaitoh static int bge_allow_asf = 1; 528 1.177 msaitoh 529 1.375 skrll #ifndef BGE_WATCHDOG_TIMEOUT 530 1.375 skrll #define BGE_WATCHDOG_TIMEOUT 5 531 1.375 skrll #endif 532 1.375 skrll static int bge_watchdog_timeout = BGE_WATCHDOG_TIMEOUT; 533 1.375 skrll 534 1.375 skrll 535 1.227 msaitoh CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc), 536 1.227 msaitoh bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 537 1.1 fvdl 538 1.170 msaitoh static uint32_t 539 1.104 thorpej bge_readmem_ind(struct bge_softc *sc, int off) 540 1.1 fvdl { 541 1.1 fvdl pcireg_t val; 542 1.1 fvdl 543 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 544 1.216 msaitoh off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 545 1.216 msaitoh return 0; 546 1.216 msaitoh 547 1.141 jmcneill pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 548 1.141 jmcneill val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA); 549 1.216 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 550 1.1 fvdl return val; 551 1.1 fvdl } 552 1.1 fvdl 553 1.104 thorpej static void 554 1.104 thorpej bge_writemem_ind(struct bge_softc *sc, int off, int val) 555 1.1 fvdl { 556 1.216 msaitoh 557 1.141 jmcneill pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 558 1.141 jmcneill pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val); 559 1.216 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 560 1.1 fvdl } 561 1.1 fvdl 562 1.177 msaitoh /* 563 1.177 msaitoh * PCI Express only 564 1.177 msaitoh */ 565 1.177 msaitoh static void 566 1.177 msaitoh bge_set_max_readrq(struct bge_softc *sc) 567 1.177 msaitoh { 568 1.177 msaitoh pcireg_t val; 569 1.177 msaitoh 570 1.180 msaitoh val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap 
571 1.238 msaitoh + PCIE_DCSR); 572 1.238 msaitoh val &= ~PCIE_DCSR_MAX_READ_REQ; 573 1.216 msaitoh switch (sc->bge_expmrq) { 574 1.216 msaitoh case 2048: 575 1.216 msaitoh val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048; 576 1.216 msaitoh break; 577 1.216 msaitoh case 4096: 578 1.177 msaitoh val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096; 579 1.216 msaitoh break; 580 1.216 msaitoh default: 581 1.216 msaitoh panic("incorrect expmrq value(%d)", sc->bge_expmrq); 582 1.216 msaitoh break; 583 1.177 msaitoh } 584 1.216 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap 585 1.238 msaitoh + PCIE_DCSR, val); 586 1.177 msaitoh } 587 1.177 msaitoh 588 1.1 fvdl #ifdef notdef 589 1.170 msaitoh static uint32_t 590 1.104 thorpej bge_readreg_ind(struct bge_softc *sc, int off) 591 1.1 fvdl { 592 1.141 jmcneill pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 593 1.362 skrll return pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA); 594 1.1 fvdl } 595 1.1 fvdl #endif 596 1.1 fvdl 597 1.104 thorpej static void 598 1.104 thorpej bge_writereg_ind(struct bge_softc *sc, int off, int val) 599 1.1 fvdl { 600 1.141 jmcneill pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 601 1.141 jmcneill pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val); 602 1.1 fvdl } 603 1.1 fvdl 604 1.151 cegger static void 605 1.151 cegger bge_writemem_direct(struct bge_softc *sc, int off, int val) 606 1.151 cegger { 607 1.151 cegger CSR_WRITE_4(sc, off, val); 608 1.151 cegger } 609 1.151 cegger 610 1.151 cegger static void 611 1.151 cegger bge_writembx(struct bge_softc *sc, int off, int val) 612 1.151 cegger { 613 1.151 cegger if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 614 1.151 cegger off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 615 1.151 cegger 616 1.151 cegger CSR_WRITE_4(sc, off, val); 617 1.151 cegger } 618 1.151 cegger 619 1.211 msaitoh static void 620 1.211 msaitoh bge_writembx_flush(struct bge_softc *sc, int off, int val) 621 1.211 msaitoh { 622 1.211 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 623 1.211 msaitoh off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 624 1.211 msaitoh 625 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, off, val); 626 1.211 msaitoh } 627 1.211 msaitoh 628 1.216 msaitoh /* 629 1.216 msaitoh * Clear all stale locks and select the lock for this driver instance. 630 1.216 msaitoh */ 631 1.216 msaitoh void 632 1.216 msaitoh bge_ape_lock_init(struct bge_softc *sc) 633 1.216 msaitoh { 634 1.216 msaitoh struct pci_attach_args *pa = &(sc->bge_pa); 635 1.216 msaitoh uint32_t bit, regbase; 636 1.216 msaitoh int i; 637 1.216 msaitoh 638 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 639 1.216 msaitoh regbase = BGE_APE_LOCK_GRANT; 640 1.216 msaitoh else 641 1.216 msaitoh regbase = BGE_APE_PER_LOCK_GRANT; 642 1.216 msaitoh 643 1.216 msaitoh /* Clear any stale locks. 
*/ 644 1.216 msaitoh for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { 645 1.216 msaitoh switch (i) { 646 1.216 msaitoh case BGE_APE_LOCK_PHY0: 647 1.216 msaitoh case BGE_APE_LOCK_PHY1: 648 1.216 msaitoh case BGE_APE_LOCK_PHY2: 649 1.216 msaitoh case BGE_APE_LOCK_PHY3: 650 1.216 msaitoh bit = BGE_APE_LOCK_GRANT_DRIVER0; 651 1.216 msaitoh break; 652 1.216 msaitoh default: 653 1.231 msaitoh if (pa->pa_function == 0) 654 1.216 msaitoh bit = BGE_APE_LOCK_GRANT_DRIVER0; 655 1.216 msaitoh else 656 1.216 msaitoh bit = (1 << pa->pa_function); 657 1.216 msaitoh } 658 1.216 msaitoh APE_WRITE_4(sc, regbase + 4 * i, bit); 659 1.216 msaitoh } 660 1.216 msaitoh 661 1.216 msaitoh /* Select the PHY lock based on the device's function number. */ 662 1.216 msaitoh switch (pa->pa_function) { 663 1.216 msaitoh case 0: 664 1.216 msaitoh sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; 665 1.216 msaitoh break; 666 1.216 msaitoh case 1: 667 1.216 msaitoh sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; 668 1.216 msaitoh break; 669 1.216 msaitoh case 2: 670 1.216 msaitoh sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; 671 1.216 msaitoh break; 672 1.216 msaitoh case 3: 673 1.216 msaitoh sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; 674 1.216 msaitoh break; 675 1.216 msaitoh default: 676 1.216 msaitoh printf("%s: PHY lock not supported on function\n", 677 1.216 msaitoh device_xname(sc->bge_dev)); 678 1.216 msaitoh break; 679 1.216 msaitoh } 680 1.216 msaitoh } 681 1.216 msaitoh 682 1.216 msaitoh /* 683 1.216 msaitoh * Check for APE firmware, set flags, and print version info. 684 1.216 msaitoh */ 685 1.216 msaitoh void 686 1.216 msaitoh bge_ape_read_fw_ver(struct bge_softc *sc) 687 1.216 msaitoh { 688 1.216 msaitoh const char *fwtype; 689 1.216 msaitoh uint32_t apedata, features; 690 1.216 msaitoh 691 1.216 msaitoh /* Check for a valid APE signature in shared memory. */ 692 1.216 msaitoh apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); 693 1.216 msaitoh if (apedata != BGE_APE_SEG_SIG_MAGIC) { 694 1.216 msaitoh sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE; 695 1.216 msaitoh return; 696 1.216 msaitoh } 697 1.216 msaitoh 698 1.216 msaitoh /* Check if APE firmware is running. */ 699 1.216 msaitoh apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); 700 1.216 msaitoh if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { 701 1.216 msaitoh printf("%s: APE signature found but FW status not ready! " 702 1.216 msaitoh "0x%08x\n", device_xname(sc->bge_dev), apedata); 703 1.216 msaitoh return; 704 1.216 msaitoh } 705 1.216 msaitoh 706 1.216 msaitoh sc->bge_mfw_flags |= BGE_MFW_ON_APE; 707 1.216 msaitoh 708 1.390 andvar /* Fetch the APE firmware type and version. */ 709 1.216 msaitoh apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); 710 1.216 msaitoh features = APE_READ_4(sc, BGE_APE_FW_FEATURES); 711 1.216 msaitoh if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) { 712 1.216 msaitoh sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; 713 1.216 msaitoh fwtype = "NCSI"; 714 1.216 msaitoh } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) { 715 1.216 msaitoh sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; 716 1.216 msaitoh fwtype = "DASH"; 717 1.216 msaitoh } else 718 1.216 msaitoh fwtype = "UNKN"; 719 1.216 msaitoh 720 1.216 msaitoh /* Print the APE firmware version. 
*/ 721 1.271 msaitoh aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype, 722 1.216 msaitoh (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, 723 1.216 msaitoh (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, 724 1.216 msaitoh (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, 725 1.216 msaitoh (apedata & BGE_APE_FW_VERSION_BLDMSK)); 726 1.216 msaitoh } 727 1.216 msaitoh 728 1.216 msaitoh int 729 1.216 msaitoh bge_ape_lock(struct bge_softc *sc, int locknum) 730 1.216 msaitoh { 731 1.216 msaitoh struct pci_attach_args *pa = &(sc->bge_pa); 732 1.216 msaitoh uint32_t bit, gnt, req, status; 733 1.216 msaitoh int i, off; 734 1.216 msaitoh 735 1.216 msaitoh if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 736 1.362 skrll return 0; 737 1.216 msaitoh 738 1.216 msaitoh /* Lock request/grant registers have different bases. */ 739 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) { 740 1.216 msaitoh req = BGE_APE_LOCK_REQ; 741 1.216 msaitoh gnt = BGE_APE_LOCK_GRANT; 742 1.216 msaitoh } else { 743 1.216 msaitoh req = BGE_APE_PER_LOCK_REQ; 744 1.216 msaitoh gnt = BGE_APE_PER_LOCK_GRANT; 745 1.216 msaitoh } 746 1.216 msaitoh 747 1.216 msaitoh off = 4 * locknum; 748 1.216 msaitoh 749 1.216 msaitoh switch (locknum) { 750 1.216 msaitoh case BGE_APE_LOCK_GPIO: 751 1.216 msaitoh /* Lock required when using GPIO. */ 752 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 753 1.362 skrll return 0; 754 1.216 msaitoh if (pa->pa_function == 0) 755 1.216 msaitoh bit = BGE_APE_LOCK_REQ_DRIVER0; 756 1.216 msaitoh else 757 1.216 msaitoh bit = (1 << pa->pa_function); 758 1.216 msaitoh break; 759 1.216 msaitoh case BGE_APE_LOCK_GRC: 760 1.216 msaitoh /* Lock required to reset the device. */ 761 1.216 msaitoh if (pa->pa_function == 0) 762 1.216 msaitoh bit = BGE_APE_LOCK_REQ_DRIVER0; 763 1.216 msaitoh else 764 1.216 msaitoh bit = (1 << pa->pa_function); 765 1.216 msaitoh break; 766 1.216 msaitoh case BGE_APE_LOCK_MEM: 767 1.216 msaitoh /* Lock required when accessing certain APE memory. */ 768 1.216 msaitoh if (pa->pa_function == 0) 769 1.216 msaitoh bit = BGE_APE_LOCK_REQ_DRIVER0; 770 1.216 msaitoh else 771 1.216 msaitoh bit = (1 << pa->pa_function); 772 1.216 msaitoh break; 773 1.216 msaitoh case BGE_APE_LOCK_PHY0: 774 1.216 msaitoh case BGE_APE_LOCK_PHY1: 775 1.216 msaitoh case BGE_APE_LOCK_PHY2: 776 1.216 msaitoh case BGE_APE_LOCK_PHY3: 777 1.216 msaitoh /* Lock required when accessing PHYs. */ 778 1.216 msaitoh bit = BGE_APE_LOCK_REQ_DRIVER0; 779 1.216 msaitoh break; 780 1.216 msaitoh default: 781 1.362 skrll return EINVAL; 782 1.216 msaitoh } 783 1.216 msaitoh 784 1.216 msaitoh /* Request a lock. */ 785 1.216 msaitoh APE_WRITE_4_FLUSH(sc, req + off, bit); 786 1.216 msaitoh 787 1.216 msaitoh /* Wait up to 1 second to acquire lock. */ 788 1.216 msaitoh for (i = 0; i < 20000; i++) { 789 1.216 msaitoh status = APE_READ_4(sc, gnt + off); 790 1.216 msaitoh if (status == bit) 791 1.216 msaitoh break; 792 1.216 msaitoh DELAY(50); 793 1.216 msaitoh } 794 1.216 msaitoh 795 1.216 msaitoh /* Handle any errors. */ 796 1.216 msaitoh if (status != bit) { 797 1.216 msaitoh printf("%s: APE lock %d request failed! " 798 1.216 msaitoh "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", 799 1.216 msaitoh device_xname(sc->bge_dev), 800 1.216 msaitoh locknum, req + off, bit & 0xFFFF, gnt + off, 801 1.216 msaitoh status & 0xFFFF); 802 1.216 msaitoh /* Revoke the lock request. 
*/ 803 1.216 msaitoh APE_WRITE_4(sc, gnt + off, bit); 804 1.362 skrll return EBUSY; 805 1.216 msaitoh } 806 1.216 msaitoh 807 1.362 skrll return 0; 808 1.216 msaitoh } 809 1.216 msaitoh 810 1.216 msaitoh void 811 1.216 msaitoh bge_ape_unlock(struct bge_softc *sc, int locknum) 812 1.216 msaitoh { 813 1.216 msaitoh struct pci_attach_args *pa = &(sc->bge_pa); 814 1.216 msaitoh uint32_t bit, gnt; 815 1.216 msaitoh int off; 816 1.216 msaitoh 817 1.216 msaitoh if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 818 1.216 msaitoh return; 819 1.216 msaitoh 820 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 821 1.216 msaitoh gnt = BGE_APE_LOCK_GRANT; 822 1.216 msaitoh else 823 1.216 msaitoh gnt = BGE_APE_PER_LOCK_GRANT; 824 1.216 msaitoh 825 1.216 msaitoh off = 4 * locknum; 826 1.216 msaitoh 827 1.216 msaitoh switch (locknum) { 828 1.216 msaitoh case BGE_APE_LOCK_GPIO: 829 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 830 1.216 msaitoh return; 831 1.216 msaitoh if (pa->pa_function == 0) 832 1.216 msaitoh bit = BGE_APE_LOCK_GRANT_DRIVER0; 833 1.216 msaitoh else 834 1.216 msaitoh bit = (1 << pa->pa_function); 835 1.216 msaitoh break; 836 1.216 msaitoh case BGE_APE_LOCK_GRC: 837 1.216 msaitoh if (pa->pa_function == 0) 838 1.216 msaitoh bit = BGE_APE_LOCK_GRANT_DRIVER0; 839 1.216 msaitoh else 840 1.216 msaitoh bit = (1 << pa->pa_function); 841 1.216 msaitoh break; 842 1.216 msaitoh case BGE_APE_LOCK_MEM: 843 1.216 msaitoh if (pa->pa_function == 0) 844 1.216 msaitoh bit = BGE_APE_LOCK_GRANT_DRIVER0; 845 1.216 msaitoh else 846 1.216 msaitoh bit = (1 << pa->pa_function); 847 1.216 msaitoh break; 848 1.216 msaitoh case BGE_APE_LOCK_PHY0: 849 1.216 msaitoh case BGE_APE_LOCK_PHY1: 850 1.216 msaitoh case BGE_APE_LOCK_PHY2: 851 1.216 msaitoh case BGE_APE_LOCK_PHY3: 852 1.216 msaitoh bit = BGE_APE_LOCK_GRANT_DRIVER0; 853 1.216 msaitoh break; 854 1.216 msaitoh default: 855 1.216 msaitoh return; 856 1.216 msaitoh } 857 1.216 msaitoh 858 1.216 msaitoh /* Write and flush for consecutive bge_ape_lock() */ 859 1.216 msaitoh APE_WRITE_4_FLUSH(sc, gnt + off, bit); 860 1.216 msaitoh } 861 1.216 msaitoh 862 1.216 msaitoh /* 863 1.216 msaitoh * Send an event to the APE firmware. 864 1.216 msaitoh */ 865 1.216 msaitoh void 866 1.216 msaitoh bge_ape_send_event(struct bge_softc *sc, uint32_t event) 867 1.216 msaitoh { 868 1.216 msaitoh uint32_t apedata; 869 1.216 msaitoh int i; 870 1.216 msaitoh 871 1.216 msaitoh /* NCSI does not support APE events. */ 872 1.216 msaitoh if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 873 1.216 msaitoh return; 874 1.216 msaitoh 875 1.216 msaitoh /* Wait up to 1ms for APE to service previous event. 
*/ 876 1.216 msaitoh for (i = 10; i > 0; i--) { 877 1.216 msaitoh if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) 878 1.216 msaitoh break; 879 1.216 msaitoh apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); 880 1.216 msaitoh if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { 881 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | 882 1.216 msaitoh BGE_APE_EVENT_STATUS_EVENT_PENDING); 883 1.216 msaitoh bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 884 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); 885 1.216 msaitoh break; 886 1.216 msaitoh } 887 1.216 msaitoh bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 888 1.216 msaitoh DELAY(100); 889 1.216 msaitoh } 890 1.216 msaitoh if (i == 0) { 891 1.216 msaitoh printf("%s: APE event 0x%08x send timed out\n", 892 1.216 msaitoh device_xname(sc->bge_dev), event); 893 1.216 msaitoh } 894 1.216 msaitoh } 895 1.216 msaitoh 896 1.216 msaitoh void 897 1.216 msaitoh bge_ape_driver_state_change(struct bge_softc *sc, int kind) 898 1.216 msaitoh { 899 1.216 msaitoh uint32_t apedata, event; 900 1.216 msaitoh 901 1.216 msaitoh if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 902 1.216 msaitoh return; 903 1.216 msaitoh 904 1.216 msaitoh switch (kind) { 905 1.216 msaitoh case BGE_RESET_START: 906 1.216 msaitoh /* If this is the first load, clear the load counter. */ 907 1.216 msaitoh apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); 908 1.216 msaitoh if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) 909 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); 910 1.216 msaitoh else { 911 1.216 msaitoh apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); 912 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); 913 1.216 msaitoh } 914 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, 915 1.216 msaitoh BGE_APE_HOST_SEG_SIG_MAGIC); 916 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, 917 1.216 msaitoh BGE_APE_HOST_SEG_LEN_MAGIC); 918 1.216 msaitoh 919 1.216 msaitoh /* Add some version info if bge(4) supports it. */ 920 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, 921 1.216 msaitoh BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); 922 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, 923 1.216 msaitoh BGE_APE_HOST_BEHAV_NO_PHYLOCK); 924 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, 925 1.216 msaitoh BGE_APE_HOST_HEARTBEAT_INT_DISABLE); 926 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 927 1.216 msaitoh BGE_APE_HOST_DRVR_STATE_START); 928 1.216 msaitoh event = BGE_APE_EVENT_STATUS_STATE_START; 929 1.216 msaitoh break; 930 1.216 msaitoh case BGE_RESET_SHUTDOWN: 931 1.216 msaitoh APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 932 1.216 msaitoh BGE_APE_HOST_DRVR_STATE_UNLOAD); 933 1.216 msaitoh event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; 934 1.216 msaitoh break; 935 1.216 msaitoh case BGE_RESET_SUSPEND: 936 1.216 msaitoh event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; 937 1.216 msaitoh break; 938 1.216 msaitoh default: 939 1.216 msaitoh return; 940 1.216 msaitoh } 941 1.216 msaitoh 942 1.216 msaitoh bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | 943 1.216 msaitoh BGE_APE_EVENT_STATUS_STATE_CHNGE); 944 1.216 msaitoh } 945 1.216 msaitoh 946 1.170 msaitoh static uint8_t 947 1.170 msaitoh bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 948 1.151 cegger { 949 1.170 msaitoh uint32_t access, byte = 0; 950 1.151 cegger int i; 951 1.151 cegger 952 1.151 cegger /* Lock. 
*/ 953 1.151 cegger CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 954 1.151 cegger for (i = 0; i < 8000; i++) { 955 1.151 cegger if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 956 1.151 cegger break; 957 1.151 cegger DELAY(20); 958 1.151 cegger } 959 1.151 cegger if (i == 8000) 960 1.170 msaitoh return 1; 961 1.151 cegger 962 1.151 cegger /* Enable access. */ 963 1.151 cegger access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 964 1.151 cegger CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 965 1.151 cegger 966 1.151 cegger CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 967 1.151 cegger CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 968 1.151 cegger for (i = 0; i < BGE_TIMEOUT * 10; i++) { 969 1.151 cegger DELAY(10); 970 1.151 cegger if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 971 1.151 cegger DELAY(10); 972 1.151 cegger break; 973 1.151 cegger } 974 1.151 cegger } 975 1.151 cegger 976 1.151 cegger if (i == BGE_TIMEOUT * 10) { 977 1.151 cegger aprint_error_dev(sc->bge_dev, "nvram read timed out\n"); 978 1.170 msaitoh return 1; 979 1.151 cegger } 980 1.151 cegger 981 1.151 cegger /* Get result. */ 982 1.151 cegger byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 983 1.151 cegger 984 1.151 cegger *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 985 1.151 cegger 986 1.151 cegger /* Disable access. */ 987 1.151 cegger CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 988 1.151 cegger 989 1.151 cegger /* Unlock. */ 990 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 991 1.151 cegger 992 1.170 msaitoh return 0; 993 1.151 cegger } 994 1.151 cegger 995 1.151 cegger /* 996 1.151 cegger * Read a sequence of bytes from NVRAM. 997 1.151 cegger */ 998 1.151 cegger static int 999 1.170 msaitoh bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt) 1000 1.151 cegger { 1001 1.203 msaitoh int error = 0, i; 1002 1.170 msaitoh uint8_t byte = 0; 1003 1.151 cegger 1004 1.151 cegger if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 1005 1.170 msaitoh return 1; 1006 1.151 cegger 1007 1.151 cegger for (i = 0; i < cnt; i++) { 1008 1.203 msaitoh error = bge_nvram_getbyte(sc, off + i, &byte); 1009 1.203 msaitoh if (error) 1010 1.151 cegger break; 1011 1.151 cegger *(dest + i) = byte; 1012 1.151 cegger } 1013 1.151 cegger 1014 1.362 skrll return error ? 1 : 0; 1015 1.151 cegger } 1016 1.151 cegger 1017 1.1 fvdl /* 1018 1.1 fvdl * Read a byte of data stored in the EEPROM at address 'addr.' The 1019 1.1 fvdl * BCM570x supports both the traditional bitbang interface and an 1020 1.1 fvdl * auto access interface for reading the EEPROM. We use the auto 1021 1.1 fvdl * access method. 1022 1.1 fvdl */ 1023 1.170 msaitoh static uint8_t 1024 1.170 msaitoh bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 1025 1.1 fvdl { 1026 1.1 fvdl int i; 1027 1.170 msaitoh uint32_t byte = 0; 1028 1.1 fvdl 1029 1.1 fvdl /* 1030 1.1 fvdl * Enable use of auto EEPROM access so we can avoid 1031 1.1 fvdl * having to use the bitbang method. 1032 1.1 fvdl */ 1033 1.341 msaitoh BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 1034 1.1 fvdl 1035 1.1 fvdl /* Reset the EEPROM, load the clock period. */ 1036 1.341 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR, 1037 1.161 msaitoh BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 1038 1.1 fvdl DELAY(20); 1039 1.1 fvdl 1040 1.1 fvdl /* Issue the read EEPROM command. 
*/ 1041 1.1 fvdl CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 1042 1.1 fvdl 1043 1.1 fvdl /* Wait for completion */ 1044 1.170 msaitoh for (i = 0; i < BGE_TIMEOUT * 10; i++) { 1045 1.1 fvdl DELAY(10); 1046 1.1 fvdl if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 1047 1.1 fvdl break; 1048 1.1 fvdl } 1049 1.1 fvdl 1050 1.172 msaitoh if (i == BGE_TIMEOUT * 10) { 1051 1.138 joerg aprint_error_dev(sc->bge_dev, "eeprom read timed out\n"); 1052 1.177 msaitoh return 1; 1053 1.1 fvdl } 1054 1.1 fvdl 1055 1.1 fvdl /* Get result. */ 1056 1.1 fvdl byte = CSR_READ_4(sc, BGE_EE_DATA); 1057 1.1 fvdl 1058 1.1 fvdl *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 1059 1.1 fvdl 1060 1.170 msaitoh return 0; 1061 1.1 fvdl } 1062 1.1 fvdl 1063 1.1 fvdl /* 1064 1.1 fvdl * Read a sequence of bytes from the EEPROM. 1065 1.1 fvdl */ 1066 1.104 thorpej static int 1067 1.126 christos bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt) 1068 1.1 fvdl { 1069 1.203 msaitoh int error = 0, i; 1070 1.170 msaitoh uint8_t byte = 0; 1071 1.126 christos char *dest = destv; 1072 1.1 fvdl 1073 1.1 fvdl for (i = 0; i < cnt; i++) { 1074 1.203 msaitoh error = bge_eeprom_getbyte(sc, off + i, &byte); 1075 1.203 msaitoh if (error) 1076 1.1 fvdl break; 1077 1.1 fvdl *(dest + i) = byte; 1078 1.1 fvdl } 1079 1.1 fvdl 1080 1.362 skrll return error ? 1 : 0; 1081 1.1 fvdl } 1082 1.1 fvdl 1083 1.104 thorpej static int 1084 1.322 msaitoh bge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 1085 1.1 fvdl { 1086 1.354 skrll struct bge_softc * const sc = device_private(dev); 1087 1.322 msaitoh uint32_t data; 1088 1.172 msaitoh uint32_t autopoll; 1089 1.322 msaitoh int rv = 0; 1090 1.1 fvdl int i; 1091 1.1 fvdl 1092 1.394 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 1093 1.394 skrll 1094 1.216 msaitoh if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) 1095 1.322 msaitoh return -1; 1096 1.1 fvdl 1097 1.25 jonathan /* Reading with autopolling on may trigger PCI errors */ 1098 1.172 msaitoh autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1099 1.172 msaitoh if (autopoll & BGE_MIMODE_AUTOPOLL) { 1100 1.161 msaitoh BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1101 1.211 msaitoh BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1102 1.216 msaitoh DELAY(80); 1103 1.25 jonathan } 1104 1.25 jonathan 1105 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 1106 1.172 msaitoh BGE_MIPHY(phy) | BGE_MIREG(reg)); 1107 1.1 fvdl 1108 1.1 fvdl for (i = 0; i < BGE_TIMEOUT; i++) { 1109 1.216 msaitoh delay(10); 1110 1.322 msaitoh data = CSR_READ_4(sc, BGE_MI_COMM); 1111 1.322 msaitoh if (!(data & BGE_MICOMM_BUSY)) { 1112 1.216 msaitoh DELAY(5); 1113 1.322 msaitoh data = CSR_READ_4(sc, BGE_MI_COMM); 1114 1.1 fvdl break; 1115 1.216 msaitoh } 1116 1.1 fvdl } 1117 1.1 fvdl 1118 1.1 fvdl if (i == BGE_TIMEOUT) { 1119 1.138 joerg aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 1120 1.322 msaitoh rv = ETIMEDOUT; 1121 1.342 msaitoh } else if ((data & BGE_MICOMM_READFAIL) != 0) { 1122 1.342 msaitoh /* XXX This error occurs on some devices while attaching. 
*/ 1123 1.342 msaitoh aprint_debug_dev(sc->bge_dev, "PHY read I/O error\n"); 1124 1.342 msaitoh rv = EIO; 1125 1.342 msaitoh } else 1126 1.322 msaitoh *val = data & BGE_MICOMM_DATA; 1127 1.1 fvdl 1128 1.172 msaitoh if (autopoll & BGE_MIMODE_AUTOPOLL) { 1129 1.161 msaitoh BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1130 1.211 msaitoh BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1131 1.216 msaitoh DELAY(80); 1132 1.25 jonathan } 1133 1.29 itojun 1134 1.216 msaitoh bge_ape_unlock(sc, sc->bge_phy_ape_lock); 1135 1.216 msaitoh 1136 1.322 msaitoh return rv; 1137 1.1 fvdl } 1138 1.1 fvdl 1139 1.322 msaitoh static int 1140 1.322 msaitoh bge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 1141 1.1 fvdl { 1142 1.354 skrll struct bge_softc * const sc = device_private(dev); 1143 1.342 msaitoh uint32_t data, autopoll; 1144 1.342 msaitoh int rv = 0; 1145 1.29 itojun int i; 1146 1.1 fvdl 1147 1.394 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 1148 1.394 skrll 1149 1.278 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 1150 1.321 msaitoh (reg == MII_GTCR || reg == BRGPHY_MII_AUXCTL)) 1151 1.322 msaitoh return 0; 1152 1.151 cegger 1153 1.278 msaitoh if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) 1154 1.322 msaitoh return -1; 1155 1.151 cegger 1156 1.161 msaitoh /* Reading with autopolling on may trigger PCI errors */ 1157 1.172 msaitoh autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1158 1.172 msaitoh if (autopoll & BGE_MIMODE_AUTOPOLL) { 1159 1.161 msaitoh BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1160 1.211 msaitoh BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1161 1.216 msaitoh DELAY(80); 1162 1.25 jonathan } 1163 1.29 itojun 1164 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 1165 1.177 msaitoh BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 1166 1.1 fvdl 1167 1.1 fvdl for (i = 0; i < BGE_TIMEOUT; i++) { 1168 1.151 cegger delay(10); 1169 1.342 msaitoh data = CSR_READ_4(sc, BGE_MI_COMM); 1170 1.342 msaitoh if (!(data & BGE_MICOMM_BUSY)) { 1171 1.151 cegger delay(5); 1172 1.342 msaitoh data = CSR_READ_4(sc, BGE_MI_COMM); 1173 1.1 fvdl break; 1174 1.151 cegger } 1175 1.1 fvdl } 1176 1.1 fvdl 1177 1.342 msaitoh if (i == BGE_TIMEOUT) { 1178 1.342 msaitoh aprint_error_dev(sc->bge_dev, "PHY write timed out\n"); 1179 1.342 msaitoh rv = ETIMEDOUT; 1180 1.342 msaitoh } else if ((data & BGE_MICOMM_READFAIL) != 0) { 1181 1.342 msaitoh aprint_error_dev(sc->bge_dev, "PHY write I/O error\n"); 1182 1.342 msaitoh rv = EIO; 1183 1.342 msaitoh } 1184 1.342 msaitoh 1185 1.172 msaitoh if (autopoll & BGE_MIMODE_AUTOPOLL) { 1186 1.161 msaitoh BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1187 1.211 msaitoh BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1188 1.216 msaitoh delay(80); 1189 1.25 jonathan } 1190 1.29 itojun 1191 1.216 msaitoh bge_ape_unlock(sc, sc->bge_phy_ape_lock); 1192 1.216 msaitoh 1193 1.342 msaitoh return rv; 1194 1.1 fvdl } 1195 1.1 fvdl 1196 1.104 thorpej static void 1197 1.201 matt bge_miibus_statchg(struct ifnet *ifp) 1198 1.1 fvdl { 1199 1.354 skrll struct bge_softc * const sc = ifp->if_softc; 1200 1.1 fvdl struct mii_data *mii = &sc->bge_mii; 1201 1.216 msaitoh uint32_t mac_mode, rx_mode, tx_mode; 1202 1.1 fvdl 1203 1.394 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 1204 1.394 skrll 1205 1.69 thorpej /* 1206 1.69 thorpej * Get flow control negotiation result. 
1207 1.69 thorpej */ 1208 1.69 thorpej if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1209 1.256 msaitoh (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) 1210 1.69 thorpej sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1211 1.256 msaitoh 1212 1.256 msaitoh if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 1213 1.256 msaitoh mii->mii_media_status & IFM_ACTIVE && 1214 1.256 msaitoh IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1215 1.256 msaitoh BGE_STS_SETBIT(sc, BGE_STS_LINK); 1216 1.256 msaitoh else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 1217 1.256 msaitoh (!(mii->mii_media_status & IFM_ACTIVE) || 1218 1.256 msaitoh IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 1219 1.256 msaitoh BGE_STS_CLRBIT(sc, BGE_STS_LINK); 1220 1.256 msaitoh 1221 1.256 msaitoh if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 1222 1.256 msaitoh return; 1223 1.69 thorpej 1224 1.216 msaitoh /* Set the port mode (MII/GMII) to match the link speed. */ 1225 1.216 msaitoh mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & 1226 1.216 msaitoh ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); 1227 1.216 msaitoh tx_mode = CSR_READ_4(sc, BGE_TX_MODE); 1228 1.216 msaitoh rx_mode = CSR_READ_4(sc, BGE_RX_MODE); 1229 1.161 msaitoh if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 1230 1.161 msaitoh IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 1231 1.216 msaitoh mac_mode |= BGE_PORTMODE_GMII; 1232 1.161 msaitoh else 1233 1.216 msaitoh mac_mode |= BGE_PORTMODE_MII; 1234 1.216 msaitoh 1235 1.216 msaitoh tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; 1236 1.216 msaitoh rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; 1237 1.256 msaitoh if ((mii->mii_media_active & IFM_FDX) != 0) { 1238 1.216 msaitoh if (sc->bge_flowflags & IFM_ETH_TXPAUSE) 1239 1.216 msaitoh tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; 1240 1.216 msaitoh if (sc->bge_flowflags & IFM_ETH_RXPAUSE) 1241 1.216 msaitoh rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; 1242 1.216 msaitoh } else 1243 1.216 msaitoh mac_mode |= BGE_MACMODE_HALF_DUPLEX; 1244 1.1 fvdl 1245 1.216 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode); 1246 1.211 msaitoh DELAY(40); 1247 1.216 msaitoh CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); 1248 1.216 msaitoh CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); 1249 1.1 fvdl } 1250 1.1 fvdl 1251 1.1 fvdl /* 1252 1.63 jonathan * Update rx threshold levels to values in a particular slot 1253 1.63 jonathan * of the interrupt-mitigation table bge_rx_threshes. 1254 1.63 jonathan */ 1255 1.104 thorpej static void 1256 1.63 jonathan bge_set_thresh(struct ifnet *ifp, int lvl) 1257 1.63 jonathan { 1258 1.354 skrll struct bge_softc * const sc = ifp->if_softc; 1259 1.63 jonathan 1260 1.357 skrll /* 1261 1.357 skrll * For now, just save the new Rx-intr thresholds and record 1262 1.63 jonathan * that a threshold update is pending. Updating the hardware 1263 1.63 jonathan * registers here (even at splhigh()) is observed to 1264 1.352 andvar * occasionally cause glitches where Rx-interrupts are not 1265 1.68 keihan * honoured for up to 10 seconds. 
jonathan (at) NetBSD.org, 2003-04-05 1266 1.63 jonathan */ 1267 1.386 skrll mutex_enter(sc->sc_intr_lock); 1268 1.63 jonathan sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 1269 1.63 jonathan sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 1270 1.384 skrll sc->bge_pending_rxintr_change = true; 1271 1.386 skrll mutex_exit(sc->sc_intr_lock); 1272 1.63 jonathan } 1273 1.63 jonathan 1274 1.63 jonathan 1275 1.63 jonathan /* 1276 1.63 jonathan * Update Rx thresholds of all bge devices 1277 1.63 jonathan */ 1278 1.104 thorpej static void 1279 1.63 jonathan bge_update_all_threshes(int lvl) 1280 1.63 jonathan { 1281 1.360 skrll const char * const namebuf = "bge"; 1282 1.360 skrll const size_t namelen = strlen(namebuf); 1283 1.63 jonathan struct ifnet *ifp; 1284 1.63 jonathan 1285 1.63 jonathan if (lvl < 0) 1286 1.63 jonathan lvl = 0; 1287 1.170 msaitoh else if (lvl >= NBGE_RX_THRESH) 1288 1.63 jonathan lvl = NBGE_RX_THRESH - 1; 1289 1.87 perry 1290 1.63 jonathan /* 1291 1.63 jonathan * Now search all the interfaces for this name/number 1292 1.63 jonathan */ 1293 1.360 skrll int s = pserialize_read_enter(); 1294 1.296 ozaki IFNET_READER_FOREACH(ifp) { 1295 1.67 jonathan if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 1296 1.367 skrll continue; 1297 1.63 jonathan /* We got a match: update if doing auto-threshold-tuning */ 1298 1.63 jonathan if (bge_auto_thresh) 1299 1.67 jonathan bge_set_thresh(ifp, lvl); 1300 1.63 jonathan } 1301 1.296 ozaki pserialize_read_exit(s); 1302 1.63 jonathan } 1303 1.63 jonathan 1304 1.63 jonathan /* 1305 1.1 fvdl * Handle events that have triggered interrupts. 1306 1.1 fvdl */ 1307 1.104 thorpej static void 1308 1.116 christos bge_handle_events(struct bge_softc *sc) 1309 1.1 fvdl { 1310 1.1 fvdl 1311 1.1 fvdl return; 1312 1.1 fvdl } 1313 1.1 fvdl 1314 1.1 fvdl /* 1315 1.1 fvdl * Memory management for jumbo frames. 1316 1.1 fvdl */ 1317 1.1 fvdl 1318 1.104 thorpej static int 1319 1.104 thorpej bge_alloc_jumbo_mem(struct bge_softc *sc) 1320 1.1 fvdl { 1321 1.126 christos char *ptr, *kva; 1322 1.375 skrll int i, rseg, state, error; 1323 1.375 skrll struct bge_jpool_entry *entry; 1324 1.1 fvdl 1325 1.1 fvdl state = error = 0; 1326 1.1 fvdl 1327 1.1 fvdl /* Grab a big chunk o' storage. 
*/ 1328 1.1 fvdl if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 1329 1.377 skrll &sc->bge_cdata.bge_rx_jumbo_seg, 1, &rseg, BUS_DMA_WAITOK)) { 1330 1.138 joerg aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 1331 1.1 fvdl return ENOBUFS; 1332 1.1 fvdl } 1333 1.1 fvdl 1334 1.1 fvdl state = 1; 1335 1.373 skrll if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_cdata.bge_rx_jumbo_seg, 1336 1.377 skrll rseg, BGE_JMEM, (void **)&kva, BUS_DMA_WAITOK)) { 1337 1.138 joerg aprint_error_dev(sc->bge_dev, 1338 1.138 joerg "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM); 1339 1.1 fvdl error = ENOBUFS; 1340 1.1 fvdl goto out; 1341 1.1 fvdl } 1342 1.1 fvdl 1343 1.1 fvdl state = 2; 1344 1.1 fvdl if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 1345 1.377 skrll BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_jumbo_map)) { 1346 1.138 joerg aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 1347 1.1 fvdl error = ENOBUFS; 1348 1.1 fvdl goto out; 1349 1.1 fvdl } 1350 1.1 fvdl 1351 1.1 fvdl state = 3; 1352 1.1 fvdl if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 1353 1.377 skrll kva, BGE_JMEM, NULL, BUS_DMA_WAITOK)) { 1354 1.138 joerg aprint_error_dev(sc->bge_dev, "can't load DMA map\n"); 1355 1.1 fvdl error = ENOBUFS; 1356 1.1 fvdl goto out; 1357 1.1 fvdl } 1358 1.1 fvdl 1359 1.1 fvdl state = 4; 1360 1.126 christos sc->bge_cdata.bge_jumbo_buf = (void *)kva; 1361 1.89 christos DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 1362 1.1 fvdl 1363 1.1 fvdl SLIST_INIT(&sc->bge_jfree_listhead); 1364 1.1 fvdl SLIST_INIT(&sc->bge_jinuse_listhead); 1365 1.1 fvdl 1366 1.1 fvdl /* 1367 1.1 fvdl * Now divide it up into 9K pieces and save the addresses 1368 1.1 fvdl * in an array. 1369 1.1 fvdl */ 1370 1.1 fvdl ptr = sc->bge_cdata.bge_jumbo_buf; 1371 1.1 fvdl for (i = 0; i < BGE_JSLOTS; i++) { 1372 1.1 fvdl sc->bge_cdata.bge_jslots[i] = ptr; 1373 1.1 fvdl ptr += BGE_JLEN; 1374 1.366 skrll entry = kmem_alloc(sizeof(*entry), KM_SLEEP); 1375 1.1 fvdl entry->slot = i; 1376 1.1 fvdl SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 1377 1.1 fvdl entry, jpool_entries); 1378 1.1 fvdl } 1379 1.1 fvdl out: 1380 1.1 fvdl if (error != 0) { 1381 1.1 fvdl switch (state) { 1382 1.1 fvdl case 4: 1383 1.1 fvdl bus_dmamap_unload(sc->bge_dmatag, 1384 1.1 fvdl sc->bge_cdata.bge_rx_jumbo_map); 1385 1.323 mrg /* FALLTHROUGH */ 1386 1.1 fvdl case 3: 1387 1.1 fvdl bus_dmamap_destroy(sc->bge_dmatag, 1388 1.1 fvdl sc->bge_cdata.bge_rx_jumbo_map); 1389 1.323 mrg /* FALLTHROUGH */ 1390 1.1 fvdl case 2: 1391 1.1 fvdl bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 1392 1.323 mrg /* FALLTHROUGH */ 1393 1.1 fvdl case 1: 1394 1.373 skrll bus_dmamem_free(sc->bge_dmatag, 1395 1.373 skrll &sc->bge_cdata.bge_rx_jumbo_seg, rseg); 1396 1.1 fvdl break; 1397 1.1 fvdl default: 1398 1.1 fvdl break; 1399 1.1 fvdl } 1400 1.1 fvdl } 1401 1.1 fvdl 1402 1.1 fvdl return error; 1403 1.1 fvdl } 1404 1.1 fvdl 1405 1.373 skrll static void 1406 1.373 skrll bge_free_jumbo_mem(struct bge_softc *sc) 1407 1.373 skrll { 1408 1.373 skrll struct bge_jpool_entry *entry, *tmp; 1409 1.373 skrll 1410 1.373 skrll KASSERT(SLIST_EMPTY(&sc->bge_jinuse_listhead)); 1411 1.373 skrll 1412 1.373 skrll SLIST_FOREACH_SAFE(entry, &sc->bge_jfree_listhead, jpool_entries, tmp) { 1413 1.373 skrll kmem_free(entry, sizeof(*entry)); 1414 1.373 skrll } 1415 1.373 skrll 1416 1.373 skrll bus_dmamap_unload(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map); 1417 1.373 skrll 1418 1.373 skrll bus_dmamap_destroy(sc->bge_dmatag, 
sc->bge_cdata.bge_rx_jumbo_map); 1419 1.373 skrll 1420 1.373 skrll bus_dmamem_unmap(sc->bge_dmatag, sc->bge_cdata.bge_jumbo_buf, BGE_JMEM); 1421 1.373 skrll 1422 1.373 skrll bus_dmamem_free(sc->bge_dmatag, &sc->bge_cdata.bge_rx_jumbo_seg, 1); 1423 1.373 skrll } 1424 1.373 skrll 1425 1.1 fvdl /* 1426 1.1 fvdl * Allocate a jumbo buffer. 1427 1.1 fvdl */ 1428 1.104 thorpej static void * 1429 1.104 thorpej bge_jalloc(struct bge_softc *sc) 1430 1.1 fvdl { 1431 1.330 msaitoh struct bge_jpool_entry *entry; 1432 1.1 fvdl 1433 1.1 fvdl entry = SLIST_FIRST(&sc->bge_jfree_listhead); 1434 1.1 fvdl 1435 1.1 fvdl if (entry == NULL) { 1436 1.138 joerg aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n"); 1437 1.170 msaitoh return NULL; 1438 1.1 fvdl } 1439 1.1 fvdl 1440 1.1 fvdl SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 1441 1.1 fvdl SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 1442 1.362 skrll return sc->bge_cdata.bge_jslots[entry->slot]; 1443 1.1 fvdl } 1444 1.1 fvdl 1445 1.1 fvdl /* 1446 1.1 fvdl * Release a jumbo buffer. 1447 1.1 fvdl */ 1448 1.104 thorpej static void 1449 1.126 christos bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg) 1450 1.1 fvdl { 1451 1.1 fvdl struct bge_jpool_entry *entry; 1452 1.354 skrll struct bge_softc * const sc = arg; 1453 1.1 fvdl 1454 1.1 fvdl if (sc == NULL) 1455 1.1 fvdl panic("bge_jfree: can't find softc pointer!"); 1456 1.1 fvdl 1457 1.1 fvdl /* calculate the slot this buffer belongs to */ 1458 1.371 skrll int i = ((char *)buf - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 1459 1.1 fvdl 1460 1.371 skrll if (i < 0 || i >= BGE_JSLOTS) 1461 1.1 fvdl panic("bge_jfree: asked to free buffer that we don't manage!"); 1462 1.1 fvdl 1463 1.386 skrll mutex_enter(sc->sc_intr_lock); 1464 1.1 fvdl entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 1465 1.1 fvdl if (entry == NULL) 1466 1.1 fvdl panic("bge_jfree: buffer not in use!"); 1467 1.1 fvdl entry->slot = i; 1468 1.1 fvdl SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 1469 1.1 fvdl SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 1470 1.386 skrll mutex_exit(sc->sc_intr_lock); 1471 1.1 fvdl 1472 1.1 fvdl if (__predict_true(m != NULL)) 1473 1.330 msaitoh pool_cache_put(mb_cache, m); 1474 1.1 fvdl } 1475 1.1 fvdl 1476 1.1 fvdl 1477 1.1 fvdl /* 1478 1.184 njoly * Initialize a standard receive ring descriptor. 
1479 1.1 fvdl  */
1480 1.104 thorpej static int
1481 1.376 skrll bge_newbuf_std(struct bge_softc *sc, int i)
1482 1.1 fvdl {
1483 1.376 skrll 	const bus_dmamap_t dmamap = sc->bge_cdata.bge_rx_std_map[i];
1484 1.376 skrll 	struct mbuf *m;
1485 1.1 fvdl 
1486 1.376 skrll 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1487 1.376 skrll 	if (m == NULL)
1488 1.376 skrll 		return ENOBUFS;
1489 1.397 mlelstv 	MCLAIM(m, &sc->ethercom.ec_rx_mowner);
1490 1.320 bouyer 
1491 1.376 skrll 	MCLGET(m, M_DONTWAIT);
1492 1.376 skrll 	if (!(m->m_flags & M_EXT)) {
1493 1.376 skrll 		m_freem(m);
1494 1.376 skrll 		return ENOBUFS;
1495 1.1 fvdl 	}
1496 1.376 skrll 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1497 1.1 fvdl 
1498 1.261 msaitoh 	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
1499 1.376 skrll 		m_adj(m, ETHER_ALIGN);
1500 1.376 skrll 	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
1501 1.331 msaitoh 	    BUS_DMA_READ | BUS_DMA_NOWAIT)) {
1502 1.376 skrll 		m_freem(m);
1503 1.170 msaitoh 		return ENOBUFS;
1504 1.283 christos 	}
1505 1.178 msaitoh 	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
1506 1.124 bouyer 	    BUS_DMASYNC_PREREAD);
1507 1.376 skrll 	sc->bge_cdata.bge_rx_std_chain[i] = m;
1508 1.1 fvdl 
1509 1.376 skrll 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1510 1.376 skrll 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
1511 1.376 skrll 	    i * sizeof(struct bge_rx_bd),
1512 1.376 skrll 	    sizeof(struct bge_rx_bd),
1513 1.376 skrll 	    BUS_DMASYNC_POSTWRITE);
1514 1.376 skrll 
1515 1.376 skrll 	struct bge_rx_bd * const r = &sc->bge_rdata->bge_rx_std_ring[i];
1516 1.172 msaitoh 	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
1517 1.1 fvdl 	r->bge_flags = BGE_RXBDFLAG_END;
1518 1.376 skrll 	r->bge_len = m->m_len;
1519 1.1 fvdl 	r->bge_idx = i;
1520 1.1 fvdl 
1521 1.1 fvdl 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1522 1.1 fvdl 	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
1523 1.364 skrll 	    i * sizeof(struct bge_rx_bd),
1524 1.364 skrll 	    sizeof(struct bge_rx_bd),
1525 1.376 skrll 	    BUS_DMASYNC_PREWRITE);
1526 1.376 skrll 
1527 1.376 skrll 	sc->bge_std_cnt++;
1528 1.1 fvdl 
1529 1.170 msaitoh 	return 0;
1530 1.1 fvdl }
1531 1.1 fvdl 
1532 1.1 fvdl /*
1533 1.1 fvdl  * Initialize a jumbo receive ring descriptor. This allocates
1534 1.1 fvdl  * a jumbo buffer from the pool managed internally by the driver.
1535 1.1 fvdl  */
1536 1.104 thorpej static int
1537 1.104 thorpej bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
1538 1.1 fvdl {
1539 1.1 fvdl 	struct mbuf *m_new = NULL;
1540 1.1 fvdl 	struct bge_rx_bd *r;
1541 1.126 christos 	void *buf = NULL;
1542 1.1 fvdl 
1543 1.1 fvdl 	if (m == NULL) {
1544 1.1 fvdl 
1545 1.1 fvdl 		/* Allocate the mbuf. */
1546 1.1 fvdl 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1547 1.158 msaitoh 		if (m_new == NULL)
1548 1.170 msaitoh 			return ENOBUFS;
1549 1.397 mlelstv 		MCLAIM(m_new, &sc->ethercom.ec_rx_mowner);
1550 1.1 fvdl 
1551 1.1 fvdl 		/* Allocate the jumbo buffer */
1552 1.1 fvdl 		buf = bge_jalloc(sc);
1553 1.1 fvdl 		if (buf == NULL) {
1554 1.1 fvdl 			m_freem(m_new);
1555 1.138 joerg 			aprint_error_dev(sc->bge_dev,
1556 1.138 joerg 			    "jumbo allocation failed -- packet dropped!\n");
1557 1.170 msaitoh 			return ENOBUFS;
1558 1.1 fvdl 		}
1559 1.1 fvdl 
1560 1.1 fvdl 		/* Attach the buffer to the mbuf.
*/ 1561 1.1 fvdl m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 1562 1.1 fvdl MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 1563 1.1 fvdl bge_jfree, sc); 1564 1.74 yamt m_new->m_flags |= M_EXT_RW; 1565 1.1 fvdl } else { 1566 1.1 fvdl m_new = m; 1567 1.124 bouyer buf = m_new->m_data = m_new->m_ext.ext_buf; 1568 1.1 fvdl m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 1569 1.1 fvdl } 1570 1.261 msaitoh if (!(sc->bge_flags & BGEF_RX_ALIGNBUG)) 1571 1.125 bouyer m_adj(m_new, ETHER_ALIGN); 1572 1.124 bouyer bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 1573 1.332 msaitoh mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 1574 1.332 msaitoh BGE_JLEN, BUS_DMASYNC_PREREAD); 1575 1.375 skrll 1576 1.1 fvdl /* Set up the descriptor. */ 1577 1.1 fvdl r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 1578 1.1 fvdl sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 1579 1.172 msaitoh BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 1580 1.331 msaitoh r->bge_flags = BGE_RXBDFLAG_END | BGE_RXBDFLAG_JUMBO_RING; 1581 1.1 fvdl r->bge_len = m_new->m_len; 1582 1.1 fvdl r->bge_idx = i; 1583 1.1 fvdl 1584 1.1 fvdl bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1585 1.1 fvdl offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 1586 1.364 skrll i * sizeof(struct bge_rx_bd), 1587 1.364 skrll sizeof(struct bge_rx_bd), 1588 1.331 msaitoh BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1589 1.1 fvdl 1590 1.170 msaitoh return 0; 1591 1.1 fvdl } 1592 1.1 fvdl 1593 1.104 thorpej static int 1594 1.104 thorpej bge_init_rx_ring_std(struct bge_softc *sc) 1595 1.1 fvdl { 1596 1.376 skrll bus_dmamap_t dmamap; 1597 1.376 skrll int error = 0; 1598 1.376 skrll u_int i; 1599 1.1 fvdl 1600 1.261 msaitoh if (sc->bge_flags & BGEF_RXRING_VALID) 1601 1.1 fvdl return 0; 1602 1.1 fvdl 1603 1.376 skrll for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1604 1.376 skrll error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 1605 1.387 skrll MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmamap); 1606 1.376 skrll if (error) 1607 1.376 skrll goto uncreate; 1608 1.376 skrll 1609 1.376 skrll sc->bge_cdata.bge_rx_std_map[i] = dmamap; 1610 1.376 skrll memset(&sc->bge_rdata->bge_rx_std_ring[i], 0, 1611 1.376 skrll sizeof(struct bge_rx_bd)); 1612 1.1 fvdl } 1613 1.1 fvdl 1614 1.1 fvdl sc->bge_std = i - 1; 1615 1.376 skrll sc->bge_std_cnt = 0; 1616 1.376 skrll bge_fill_rx_ring_std(sc); 1617 1.1 fvdl 1618 1.261 msaitoh sc->bge_flags |= BGEF_RXRING_VALID; 1619 1.1 fvdl 1620 1.170 msaitoh return 0; 1621 1.376 skrll 1622 1.376 skrll uncreate: 1623 1.376 skrll while (--i) { 1624 1.376 skrll bus_dmamap_destroy(sc->bge_dmatag, 1625 1.376 skrll sc->bge_cdata.bge_rx_std_map[i]); 1626 1.376 skrll } 1627 1.376 skrll return error; 1628 1.1 fvdl } 1629 1.1 fvdl 1630 1.104 thorpej static void 1631 1.376 skrll bge_fill_rx_ring_std(struct bge_softc *sc) 1632 1.376 skrll { 1633 1.376 skrll int i = sc->bge_std; 1634 1.376 skrll bool post = false; 1635 1.376 skrll 1636 1.376 skrll while (sc->bge_std_cnt < BGE_STD_RX_RING_CNT) { 1637 1.376 skrll BGE_INC(i, BGE_STD_RX_RING_CNT); 1638 1.376 skrll 1639 1.376 skrll if (bge_newbuf_std(sc, i) != 0) 1640 1.376 skrll break; 1641 1.376 skrll 1642 1.376 skrll sc->bge_std = i; 1643 1.376 skrll post = true; 1644 1.376 skrll } 1645 1.376 skrll 1646 1.376 skrll if (post) 1647 1.376 skrll bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1648 1.376 skrll } 1649 1.376 skrll 1650 1.376 skrll 1651 1.376 skrll static void 1652 1.376 skrll bge_free_rx_ring_std(struct bge_softc *sc) 1653 1.1 fvdl { 1654 1.1 
fvdl 1655 1.261 msaitoh if (!(sc->bge_flags & BGEF_RXRING_VALID)) 1656 1.1 fvdl return; 1657 1.1 fvdl 1658 1.376 skrll for (u_int i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1659 1.376 skrll const bus_dmamap_t dmap = sc->bge_cdata.bge_rx_std_map[i]; 1660 1.376 skrll struct mbuf * const m = sc->bge_cdata.bge_rx_std_chain[i]; 1661 1.376 skrll if (m != NULL) { 1662 1.376 skrll bus_dmamap_sync(sc->bge_dmatag, dmap, 0, 1663 1.376 skrll dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1664 1.376 skrll bus_dmamap_unload(sc->bge_dmatag, dmap); 1665 1.376 skrll m_freem(m); 1666 1.1 fvdl sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1667 1.1 fvdl } 1668 1.376 skrll bus_dmamap_destroy(sc->bge_dmatag, 1669 1.376 skrll sc->bge_cdata.bge_rx_std_map[i]); 1670 1.376 skrll sc->bge_cdata.bge_rx_std_map[i] = NULL; 1671 1.1 fvdl memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 1672 1.1 fvdl sizeof(struct bge_rx_bd)); 1673 1.1 fvdl } 1674 1.1 fvdl 1675 1.261 msaitoh sc->bge_flags &= ~BGEF_RXRING_VALID; 1676 1.1 fvdl } 1677 1.1 fvdl 1678 1.104 thorpej static int 1679 1.104 thorpej bge_init_rx_ring_jumbo(struct bge_softc *sc) 1680 1.1 fvdl { 1681 1.1 fvdl int i; 1682 1.34 jonathan volatile struct bge_rcb *rcb; 1683 1.1 fvdl 1684 1.261 msaitoh if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID) 1685 1.59 martin return 0; 1686 1.59 martin 1687 1.1 fvdl for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1688 1.1 fvdl if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1689 1.170 msaitoh return ENOBUFS; 1690 1.205 msaitoh } 1691 1.1 fvdl 1692 1.1 fvdl sc->bge_jumbo = i - 1; 1693 1.261 msaitoh sc->bge_flags |= BGEF_JUMBO_RXRING_VALID; 1694 1.1 fvdl 1695 1.1 fvdl rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1696 1.34 jonathan rcb->bge_maxlen_flags = 0; 1697 1.34 jonathan CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1698 1.1 fvdl 1699 1.151 cegger bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1700 1.1 fvdl 1701 1.170 msaitoh return 0; 1702 1.1 fvdl } 1703 1.1 fvdl 1704 1.104 thorpej static void 1705 1.104 thorpej bge_free_rx_ring_jumbo(struct bge_softc *sc) 1706 1.1 fvdl { 1707 1.1 fvdl int i; 1708 1.1 fvdl 1709 1.261 msaitoh if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID)) 1710 1.1 fvdl return; 1711 1.1 fvdl 1712 1.1 fvdl for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1713 1.393 rin m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1714 1.393 rin sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1715 1.1 fvdl memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1716 1.1 fvdl sizeof(struct bge_rx_bd)); 1717 1.1 fvdl } 1718 1.1 fvdl 1719 1.261 msaitoh sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID; 1720 1.1 fvdl } 1721 1.1 fvdl 1722 1.104 thorpej static void 1723 1.320 bouyer bge_free_tx_ring(struct bge_softc *sc, bool disable) 1724 1.1 fvdl { 1725 1.204 msaitoh int i; 1726 1.1 fvdl struct txdmamap_pool_entry *dma; 1727 1.1 fvdl 1728 1.261 msaitoh if (!(sc->bge_flags & BGEF_TXRING_VALID)) 1729 1.1 fvdl return; 1730 1.1 fvdl 1731 1.1 fvdl for (i = 0; i < BGE_TX_RING_CNT; i++) { 1732 1.1 fvdl if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1733 1.1 fvdl m_freem(sc->bge_cdata.bge_tx_chain[i]); 1734 1.1 fvdl sc->bge_cdata.bge_tx_chain[i] = NULL; 1735 1.1 fvdl SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1736 1.1 fvdl link); 1737 1.1 fvdl sc->txdma[i] = 0; 1738 1.1 fvdl } 1739 1.1 fvdl memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1740 1.1 fvdl sizeof(struct bge_tx_bd)); 1741 1.1 fvdl } 1742 1.1 fvdl 1743 1.320 bouyer if (disable) { 1744 1.320 bouyer while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1745 1.320 bouyer 
SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1746 1.320 bouyer bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1747 1.320 bouyer if (sc->bge_dma64) { 1748 1.320 bouyer bus_dmamap_destroy(sc->bge_dmatag32, 1749 1.320 bouyer dma->dmamap32); 1750 1.320 bouyer } 1751 1.366 skrll kmem_free(dma, sizeof(*dma)); 1752 1.320 bouyer } 1753 1.320 bouyer SLIST_INIT(&sc->txdma_list); 1754 1.1 fvdl } 1755 1.1 fvdl 1756 1.261 msaitoh sc->bge_flags &= ~BGEF_TXRING_VALID; 1757 1.1 fvdl } 1758 1.1 fvdl 1759 1.104 thorpej static int 1760 1.104 thorpej bge_init_tx_ring(struct bge_softc *sc) 1761 1.1 fvdl { 1762 1.354 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 1763 1.1 fvdl int i; 1764 1.317 bouyer bus_dmamap_t dmamap, dmamap32; 1765 1.258 msaitoh bus_size_t maxsegsz; 1766 1.1 fvdl struct txdmamap_pool_entry *dma; 1767 1.1 fvdl 1768 1.261 msaitoh if (sc->bge_flags & BGEF_TXRING_VALID) 1769 1.1 fvdl return 0; 1770 1.1 fvdl 1771 1.1 fvdl sc->bge_txcnt = 0; 1772 1.1 fvdl sc->bge_tx_saved_considx = 0; 1773 1.94 jonathan 1774 1.94 jonathan /* Initialize transmit producer index for host-memory send ring. */ 1775 1.94 jonathan sc->bge_tx_prodidx = 0; 1776 1.151 cegger bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1777 1.158 msaitoh /* 5700 b2 errata */ 1778 1.158 msaitoh if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1779 1.151 cegger bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1780 1.25 jonathan 1781 1.158 msaitoh /* NIC-memory send ring not used; initialize to zero. */ 1782 1.151 cegger bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1783 1.158 msaitoh /* 5700 b2 errata */ 1784 1.158 msaitoh if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1785 1.151 cegger bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1786 1.1 fvdl 1787 1.258 msaitoh /* Limit DMA segment size for some chips */ 1788 1.258 msaitoh if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) && 1789 1.258 msaitoh (ifp->if_mtu <= ETHERMTU)) 1790 1.258 msaitoh maxsegsz = 2048; 1791 1.258 msaitoh else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 1792 1.258 msaitoh maxsegsz = 4096; 1793 1.258 msaitoh else 1794 1.258 msaitoh maxsegsz = ETHER_MAX_LEN_JUMBO; 1795 1.317 bouyer 1796 1.320 bouyer if (SLIST_FIRST(&sc->txdma_list) != NULL) 1797 1.320 bouyer goto alloc_done; 1798 1.320 bouyer 1799 1.246 msaitoh for (i = 0; i < BGE_TX_RING_CNT; i++) { 1800 1.95 jonathan if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1801 1.387 skrll BGE_NTXSEG, maxsegsz, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 1802 1.1 fvdl &dmamap)) 1803 1.170 msaitoh return ENOBUFS; 1804 1.1 fvdl if (dmamap == NULL) 1805 1.1 fvdl panic("dmamap NULL in bge_init_tx_ring"); 1806 1.317 bouyer if (sc->bge_dma64) { 1807 1.317 bouyer if (bus_dmamap_create(sc->bge_dmatag32, BGE_TXDMA_MAX, 1808 1.317 bouyer BGE_NTXSEG, maxsegsz, 0, 1809 1.387 skrll BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 1810 1.317 bouyer &dmamap32)) { 1811 1.317 bouyer bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1812 1.317 bouyer return ENOBUFS; 1813 1.317 bouyer } 1814 1.317 bouyer if (dmamap32 == NULL) 1815 1.317 bouyer panic("dmamap32 NULL in bge_init_tx_ring"); 1816 1.317 bouyer } else 1817 1.317 bouyer dmamap32 = dmamap; 1818 1.366 skrll dma = kmem_alloc(sizeof(*dma), KM_NOSLEEP); 1819 1.1 fvdl if (dma == NULL) { 1820 1.138 joerg aprint_error_dev(sc->bge_dev, 1821 1.138 joerg "can't alloc txdmamap_pool_entry\n"); 1822 1.1 fvdl bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1823 1.317 bouyer if (sc->bge_dma64) 1824 1.317 bouyer bus_dmamap_destroy(sc->bge_dmatag32, 
dmamap32); 1825 1.170 msaitoh return ENOMEM; 1826 1.1 fvdl } 1827 1.1 fvdl dma->dmamap = dmamap; 1828 1.317 bouyer dma->dmamap32 = dmamap32; 1829 1.1 fvdl SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1830 1.1 fvdl } 1831 1.320 bouyer alloc_done: 1832 1.261 msaitoh sc->bge_flags |= BGEF_TXRING_VALID; 1833 1.1 fvdl 1834 1.170 msaitoh return 0; 1835 1.1 fvdl } 1836 1.1 fvdl 1837 1.104 thorpej static void 1838 1.104 thorpej bge_setmulti(struct bge_softc *sc) 1839 1.1 fvdl { 1840 1.354 skrll struct ethercom * const ec = &sc->ethercom; 1841 1.1 fvdl struct ether_multi *enm; 1842 1.330 msaitoh struct ether_multistep step; 1843 1.170 msaitoh uint32_t hashes[4] = { 0, 0, 0, 0 }; 1844 1.170 msaitoh uint32_t h; 1845 1.1 fvdl int i; 1846 1.1 fvdl 1847 1.394 skrll KASSERT(mutex_owned(sc->sc_mcast_lock)); 1848 1.375 skrll if (sc->bge_if_flags & IFF_PROMISC) 1849 1.13 thorpej goto allmulti; 1850 1.1 fvdl 1851 1.1 fvdl /* Now program new ones. */ 1852 1.333 msaitoh ETHER_LOCK(ec); 1853 1.332 msaitoh ETHER_FIRST_MULTI(step, ec, enm); 1854 1.1 fvdl while (enm != NULL) { 1855 1.13 thorpej if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1856 1.13 thorpej /* 1857 1.13 thorpej * We must listen to a range of multicast addresses. 1858 1.13 thorpej * For now, just accept all multicasts, rather than 1859 1.13 thorpej * trying to set only those filter bits needed to match 1860 1.13 thorpej * the range. (At this time, the only use of address 1861 1.13 thorpej * ranges is for IP multicast routing, for which the 1862 1.13 thorpej * range is big enough to require all bits set.) 1863 1.13 thorpej */ 1864 1.333 msaitoh ETHER_UNLOCK(ec); 1865 1.13 thorpej goto allmulti; 1866 1.13 thorpej } 1867 1.13 thorpej 1868 1.158 msaitoh h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1869 1.1 fvdl 1870 1.158 msaitoh /* Just want the 7 least-significant bits. 
*/ 1871 1.158 msaitoh h &= 0x7f; 1872 1.1 fvdl 1873 1.336 msaitoh hashes[(h & 0x60) >> 5] |= 1U << (h & 0x1F); 1874 1.158 msaitoh ETHER_NEXT_MULTI(step, enm); 1875 1.25 jonathan } 1876 1.375 skrll ec->ec_flags &= ~ETHER_F_ALLMULTI; 1877 1.333 msaitoh ETHER_UNLOCK(ec); 1878 1.25 jonathan 1879 1.158 msaitoh goto setit; 1880 1.1 fvdl 1881 1.158 msaitoh allmulti: 1882 1.375 skrll ETHER_LOCK(ec); 1883 1.375 skrll ec->ec_flags |= ETHER_F_ALLMULTI; 1884 1.375 skrll ETHER_UNLOCK(ec); 1885 1.158 msaitoh hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1886 1.133 markd 1887 1.158 msaitoh setit: 1888 1.158 msaitoh for (i = 0; i < 4; i++) 1889 1.158 msaitoh CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1890 1.158 msaitoh } 1891 1.133 markd 1892 1.177 msaitoh static void 1893 1.178 msaitoh bge_sig_pre_reset(struct bge_softc *sc, int type) 1894 1.177 msaitoh { 1895 1.208 msaitoh 1896 1.177 msaitoh /* 1897 1.177 msaitoh * Some chips don't like this so only do this if ASF is enabled 1898 1.177 msaitoh */ 1899 1.177 msaitoh if (sc->bge_asf_mode) 1900 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 1901 1.1 fvdl 1902 1.177 msaitoh if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1903 1.177 msaitoh switch (type) { 1904 1.177 msaitoh case BGE_RESET_START: 1905 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1906 1.216 msaitoh BGE_FW_DRV_STATE_START); 1907 1.216 msaitoh break; 1908 1.216 msaitoh case BGE_RESET_SHUTDOWN: 1909 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1910 1.216 msaitoh BGE_FW_DRV_STATE_UNLOAD); 1911 1.177 msaitoh break; 1912 1.216 msaitoh case BGE_RESET_SUSPEND: 1913 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1914 1.216 msaitoh BGE_FW_DRV_STATE_SUSPEND); 1915 1.177 msaitoh break; 1916 1.177 msaitoh } 1917 1.177 msaitoh } 1918 1.216 msaitoh 1919 1.216 msaitoh if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) 1920 1.216 msaitoh bge_ape_driver_state_change(sc, type); 1921 1.177 msaitoh } 1922 1.177 msaitoh 1923 1.177 msaitoh static void 1924 1.178 msaitoh bge_sig_post_reset(struct bge_softc *sc, int type) 1925 1.177 msaitoh { 1926 1.178 msaitoh 1927 1.177 msaitoh if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1928 1.177 msaitoh switch (type) { 1929 1.177 msaitoh case BGE_RESET_START: 1930 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1931 1.216 msaitoh BGE_FW_DRV_STATE_START_DONE); 1932 1.177 msaitoh /* START DONE */ 1933 1.177 msaitoh break; 1934 1.216 msaitoh case BGE_RESET_SHUTDOWN: 1935 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1936 1.216 msaitoh BGE_FW_DRV_STATE_UNLOAD_DONE); 1937 1.177 msaitoh break; 1938 1.177 msaitoh } 1939 1.177 msaitoh } 1940 1.216 msaitoh 1941 1.216 msaitoh if (type == BGE_RESET_SHUTDOWN) 1942 1.216 msaitoh bge_ape_driver_state_change(sc, type); 1943 1.177 msaitoh } 1944 1.177 msaitoh 1945 1.177 msaitoh static void 1946 1.178 msaitoh bge_sig_legacy(struct bge_softc *sc, int type) 1947 1.177 msaitoh { 1948 1.178 msaitoh 1949 1.177 msaitoh if (sc->bge_asf_mode) { 1950 1.177 msaitoh switch (type) { 1951 1.177 msaitoh case BGE_RESET_START: 1952 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1953 1.216 msaitoh BGE_FW_DRV_STATE_START); 1954 1.177 msaitoh break; 1955 1.216 msaitoh case BGE_RESET_SHUTDOWN: 1956 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1957 1.216 msaitoh BGE_FW_DRV_STATE_UNLOAD); 1958 1.177 msaitoh break; 1959 1.177 msaitoh } 1960 1.177 msaitoh } 1961 1.177 msaitoh } 1962 1.177 msaitoh 1963 1.177 msaitoh 
static void 1964 1.216 msaitoh bge_wait_for_event_ack(struct bge_softc *sc) 1965 1.216 msaitoh { 1966 1.216 msaitoh int i; 1967 1.216 msaitoh 1968 1.216 msaitoh /* wait up to 2500usec */ 1969 1.216 msaitoh for (i = 0; i < 250; i++) { 1970 1.216 msaitoh if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & 1971 1.216 msaitoh BGE_RX_CPU_DRV_EVENT)) 1972 1.216 msaitoh break; 1973 1.216 msaitoh DELAY(10); 1974 1.216 msaitoh } 1975 1.216 msaitoh } 1976 1.216 msaitoh 1977 1.216 msaitoh static void 1978 1.178 msaitoh bge_stop_fw(struct bge_softc *sc) 1979 1.177 msaitoh { 1980 1.1 fvdl 1981 1.177 msaitoh if (sc->bge_asf_mode) { 1982 1.216 msaitoh bge_wait_for_event_ack(sc); 1983 1.216 msaitoh 1984 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); 1985 1.216 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, 1986 1.216 msaitoh CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); 1987 1.177 msaitoh 1988 1.216 msaitoh bge_wait_for_event_ack(sc); 1989 1.177 msaitoh } 1990 1.177 msaitoh } 1991 1.1 fvdl 1992 1.180 msaitoh static int 1993 1.180 msaitoh bge_poll_fw(struct bge_softc *sc) 1994 1.180 msaitoh { 1995 1.180 msaitoh uint32_t val; 1996 1.180 msaitoh int i; 1997 1.180 msaitoh 1998 1.180 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1999 1.180 msaitoh for (i = 0; i < BGE_TIMEOUT; i++) { 2000 1.180 msaitoh val = CSR_READ_4(sc, BGE_VCPU_STATUS); 2001 1.180 msaitoh if (val & BGE_VCPU_STATUS_INIT_DONE) 2002 1.180 msaitoh break; 2003 1.180 msaitoh DELAY(100); 2004 1.180 msaitoh } 2005 1.180 msaitoh if (i >= BGE_TIMEOUT) { 2006 1.180 msaitoh aprint_error_dev(sc->bge_dev, "reset timed out\n"); 2007 1.180 msaitoh return -1; 2008 1.180 msaitoh } 2009 1.274 msaitoh } else { 2010 1.180 msaitoh /* 2011 1.180 msaitoh * Poll the value location we just wrote until 2012 1.180 msaitoh * we see the 1's complement of the magic number. 2013 1.180 msaitoh * This indicates that the firmware initialization 2014 1.180 msaitoh * is complete. 2015 1.180 msaitoh * XXX 1000ms for Flash and 10000ms for SEEPROM. 2016 1.180 msaitoh */ 2017 1.180 msaitoh for (i = 0; i < BGE_TIMEOUT; i++) { 2018 1.216 msaitoh val = bge_readmem_ind(sc, BGE_SRAM_FW_MB); 2019 1.216 msaitoh if (val == ~BGE_SRAM_FW_MB_MAGIC) 2020 1.180 msaitoh break; 2021 1.180 msaitoh DELAY(10); 2022 1.180 msaitoh } 2023 1.180 msaitoh 2024 1.274 msaitoh if ((i >= BGE_TIMEOUT) 2025 1.274 msaitoh && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) { 2026 1.180 msaitoh aprint_error_dev(sc->bge_dev, 2027 1.180 msaitoh "firmware handshake timed out, val = %x\n", val); 2028 1.180 msaitoh return -1; 2029 1.180 msaitoh } 2030 1.180 msaitoh } 2031 1.180 msaitoh 2032 1.214 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) { 2033 1.214 msaitoh /* tg3 says we have to wait extra time */ 2034 1.214 msaitoh delay(10 * 1000); 2035 1.214 msaitoh } 2036 1.214 msaitoh 2037 1.180 msaitoh return 0; 2038 1.180 msaitoh } 2039 1.180 msaitoh 2040 1.216 msaitoh int 2041 1.216 msaitoh bge_phy_addr(struct bge_softc *sc) 2042 1.216 msaitoh { 2043 1.216 msaitoh struct pci_attach_args *pa = &(sc->bge_pa); 2044 1.216 msaitoh int phy_addr = 1; 2045 1.216 msaitoh 2046 1.216 msaitoh /* 2047 1.216 msaitoh * PHY address mapping for various devices. 
2048 1.216 msaitoh * 2049 1.330 msaitoh * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | 2050 1.216 msaitoh * ---------+-------+-------+-------+-------+ 2051 1.330 msaitoh * BCM57XX | 1 | X | X | X | 2052 1.330 msaitoh * BCM5704 | 1 | X | 1 | X | 2053 1.330 msaitoh * BCM5717 | 1 | 8 | 2 | 9 | 2054 1.330 msaitoh * BCM5719 | 1 | 8 | 2 | 9 | 2055 1.330 msaitoh * BCM5720 | 1 | 8 | 2 | 9 | 2056 1.216 msaitoh * 2057 1.330 msaitoh * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | 2058 1.216 msaitoh * ---------+-------+-------+-------+-------+ 2059 1.330 msaitoh * BCM57XX | X | X | X | X | 2060 1.330 msaitoh * BCM5704 | X | X | X | X | 2061 1.330 msaitoh * BCM5717 | X | X | X | X | 2062 1.330 msaitoh * BCM5719 | 3 | 10 | 4 | 11 | 2063 1.330 msaitoh * BCM5720 | X | X | X | X | 2064 1.216 msaitoh * 2065 1.216 msaitoh * Other addresses may respond but they are not 2066 1.216 msaitoh * IEEE compliant PHYs and should be ignored. 2067 1.216 msaitoh */ 2068 1.216 msaitoh switch (BGE_ASICREV(sc->bge_chipid)) { 2069 1.216 msaitoh case BGE_ASICREV_BCM5717: 2070 1.216 msaitoh case BGE_ASICREV_BCM5719: 2071 1.216 msaitoh case BGE_ASICREV_BCM5720: 2072 1.216 msaitoh phy_addr = pa->pa_function; 2073 1.234 msaitoh if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { 2074 1.216 msaitoh phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) & 2075 1.216 msaitoh BGE_SGDIGSTS_IS_SERDES) ? 8 : 1; 2076 1.216 msaitoh } else { 2077 1.216 msaitoh phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 2078 1.216 msaitoh BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1; 2079 1.216 msaitoh } 2080 1.216 msaitoh } 2081 1.216 msaitoh 2082 1.216 msaitoh return phy_addr; 2083 1.216 msaitoh } 2084 1.216 msaitoh 2085 1.158 msaitoh /* 2086 1.158 msaitoh * Do endian, PCI and DMA initialization. Also check the on-board ROM 2087 1.158 msaitoh * self-test results. 2088 1.158 msaitoh */ 2089 1.158 msaitoh static int 2090 1.158 msaitoh bge_chipinit(struct bge_softc *sc) 2091 1.158 msaitoh { 2092 1.288 msaitoh uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg; 2093 1.178 msaitoh int i; 2094 1.1 fvdl 2095 1.158 msaitoh /* Set endianness before we access any non-PCI registers. */ 2096 1.288 msaitoh misc_ctl = BGE_INIT; 2097 1.288 msaitoh if (sc->bge_flags & BGEF_TAGGED_STATUS) 2098 1.288 msaitoh misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; 2099 1.158 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 2100 1.288 msaitoh misc_ctl); 2101 1.1 fvdl 2102 1.158 msaitoh /* 2103 1.158 msaitoh * Clear the MAC statistics block in the NIC's 2104 1.158 msaitoh * internal memory. 
2105 1.158 msaitoh */ 2106 1.158 msaitoh for (i = BGE_STATS_BLOCK; 2107 1.170 msaitoh i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 2108 1.158 msaitoh BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 2109 1.1 fvdl 2110 1.158 msaitoh for (i = BGE_STATUS_BLOCK; 2111 1.170 msaitoh i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 2112 1.158 msaitoh BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 2113 1.1 fvdl 2114 1.214 msaitoh /* 5717 workaround from tg3 */ 2115 1.214 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) { 2116 1.214 msaitoh /* Save */ 2117 1.214 msaitoh mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2118 1.214 msaitoh 2119 1.214 msaitoh /* Temporary modify MODE_CTL to control TLP */ 2120 1.214 msaitoh reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2121 1.214 msaitoh CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1); 2122 1.214 msaitoh 2123 1.214 msaitoh /* Control TLP */ 2124 1.214 msaitoh reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2125 1.214 msaitoh BGE_TLP_PHYCTL1); 2126 1.214 msaitoh CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1, 2127 1.214 msaitoh reg | BGE_TLP_PHYCTL1_EN_L1PLLPD); 2128 1.214 msaitoh 2129 1.214 msaitoh /* Restore */ 2130 1.214 msaitoh CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2131 1.214 msaitoh } 2132 1.330 msaitoh 2133 1.257 msaitoh if (BGE_IS_57765_FAMILY(sc)) { 2134 1.214 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) { 2135 1.214 msaitoh /* Save */ 2136 1.214 msaitoh mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2137 1.214 msaitoh 2138 1.214 msaitoh /* Temporary modify MODE_CTL to control TLP */ 2139 1.214 msaitoh reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2140 1.214 msaitoh CSR_WRITE_4(sc, BGE_MODE_CTL, 2141 1.214 msaitoh reg | BGE_MODECTL_PCIE_TLPADDR1); 2142 1.330 msaitoh 2143 1.214 msaitoh /* Control TLP */ 2144 1.214 msaitoh reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2145 1.214 msaitoh BGE_TLP_PHYCTL5); 2146 1.214 msaitoh CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5, 2147 1.214 msaitoh reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ); 2148 1.214 msaitoh 2149 1.214 msaitoh /* Restore */ 2150 1.214 msaitoh CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2151 1.214 msaitoh } 2152 1.214 msaitoh if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) { 2153 1.305 msaitoh /* 2154 1.305 msaitoh * For the 57766 and non Ax versions of 57765, bootcode 2155 1.305 msaitoh * needs to setup the PCIE Fast Training Sequence (FTS) 2156 1.305 msaitoh * value to prevent transmit hangs. 
2157 1.305 msaitoh */ 2158 1.214 msaitoh reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL); 2159 1.214 msaitoh CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, 2160 1.214 msaitoh reg | BGE_CPMU_PADRNG_CTL_RDIV2); 2161 1.214 msaitoh 2162 1.214 msaitoh /* Save */ 2163 1.214 msaitoh mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2164 1.214 msaitoh 2165 1.214 msaitoh /* Temporary modify MODE_CTL to control TLP */ 2166 1.214 msaitoh reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2167 1.214 msaitoh CSR_WRITE_4(sc, BGE_MODE_CTL, 2168 1.214 msaitoh reg | BGE_MODECTL_PCIE_TLPADDR0); 2169 1.214 msaitoh 2170 1.214 msaitoh /* Control TLP */ 2171 1.214 msaitoh reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2172 1.214 msaitoh BGE_TLP_FTSMAX); 2173 1.214 msaitoh reg &= ~BGE_TLP_FTSMAX_MSK; 2174 1.214 msaitoh CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX, 2175 1.214 msaitoh reg | BGE_TLP_FTSMAX_VAL); 2176 1.214 msaitoh 2177 1.214 msaitoh /* Restore */ 2178 1.214 msaitoh CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2179 1.214 msaitoh } 2180 1.214 msaitoh 2181 1.214 msaitoh reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 2182 1.214 msaitoh reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; 2183 1.214 msaitoh reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 2184 1.214 msaitoh CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg); 2185 1.214 msaitoh } 2186 1.214 msaitoh 2187 1.158 msaitoh /* Set up the PCI DMA control register. */ 2188 1.166 msaitoh dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD; 2189 1.261 msaitoh if (sc->bge_flags & BGEF_PCIE) { 2190 1.166 msaitoh /* Read watermark not used, 128 bytes for write. */ 2191 1.158 msaitoh DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 2192 1.158 msaitoh device_xname(sc->bge_dev))); 2193 1.253 msaitoh if (sc->bge_mps >= 256) 2194 1.253 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 2195 1.253 msaitoh else 2196 1.253 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2197 1.261 msaitoh } else if (sc->bge_flags & BGEF_PCIX) { 2198 1.330 msaitoh DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", 2199 1.158 msaitoh device_xname(sc->bge_dev))); 2200 1.158 msaitoh /* PCI-X bus */ 2201 1.172 msaitoh if (BGE_IS_5714_FAMILY(sc)) { 2202 1.172 msaitoh /* 256 bytes for read and write. */ 2203 1.204 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | 2204 1.204 msaitoh BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); 2205 1.172 msaitoh 2206 1.172 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 2207 1.172 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 2208 1.172 msaitoh else 2209 1.172 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 2210 1.276 msaitoh } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 2211 1.276 msaitoh /* 2212 1.276 msaitoh * In the BCM5703, the DMA read watermark should 2213 1.276 msaitoh * be set to less than or equal to the maximum 2214 1.276 msaitoh * memory read byte count of the PCI-X command 2215 1.276 msaitoh * register. 2216 1.276 msaitoh */ 2217 1.276 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) | 2218 1.276 msaitoh BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2219 1.172 msaitoh } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 2220 1.172 msaitoh /* 1536 bytes for read, 384 bytes for write. */ 2221 1.204 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 2222 1.204 msaitoh BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2223 1.172 msaitoh } else { 2224 1.172 msaitoh /* 384 bytes for read and write. 
*/ 2225 1.204 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | 2226 1.204 msaitoh BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 2227 1.172 msaitoh (0x0F); 2228 1.172 msaitoh } 2229 1.172 msaitoh 2230 1.172 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 2231 1.172 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 2232 1.172 msaitoh uint32_t tmp; 2233 1.172 msaitoh 2234 1.172 msaitoh /* Set ONEDMA_ATONCE for hardware workaround. */ 2235 1.226 msaitoh tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 2236 1.172 msaitoh if (tmp == 6 || tmp == 7) 2237 1.172 msaitoh dma_rw_ctl |= 2238 1.172 msaitoh BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 2239 1.172 msaitoh 2240 1.172 msaitoh /* Set PCI-X DMA write workaround. */ 2241 1.172 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; 2242 1.158 msaitoh } 2243 1.158 msaitoh } else { 2244 1.172 msaitoh /* Conventional PCI bus: 256 bytes for read and write. */ 2245 1.330 msaitoh DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", 2246 1.158 msaitoh device_xname(sc->bge_dev))); 2247 1.204 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 2248 1.204 msaitoh BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 2249 1.204 msaitoh 2250 1.160 msaitoh if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 && 2251 1.160 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750) 2252 1.158 msaitoh dma_rw_ctl |= 0x0F; 2253 1.158 msaitoh } 2254 1.157 msaitoh 2255 1.161 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2256 1.161 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) 2257 1.161 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | 2258 1.161 msaitoh BGE_PCIDMARWCTL_ASRT_ALL_BE; 2259 1.178 msaitoh 2260 1.161 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 2261 1.161 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2262 1.161 msaitoh dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 2263 1.161 msaitoh 2264 1.257 msaitoh if (BGE_IS_57765_PLUS(sc)) { 2265 1.214 msaitoh dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; 2266 1.214 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) 2267 1.214 msaitoh dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; 2268 1.214 msaitoh 2269 1.214 msaitoh /* 2270 1.214 msaitoh * Enable HW workaround for controllers that misinterpret 2271 1.214 msaitoh * a status tag update and leave interrupts permanently 2272 1.214 msaitoh * disabled. 2273 1.214 msaitoh */ 2274 1.257 msaitoh if (!BGE_IS_57765_FAMILY(sc) && 2275 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 2276 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762) 2277 1.214 msaitoh dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; 2278 1.214 msaitoh } 2279 1.214 msaitoh 2280 1.177 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 2281 1.177 msaitoh dma_rw_ctl); 2282 1.120 tsutsui 2283 1.158 msaitoh /* 2284 1.158 msaitoh * Set up general mode register. 2285 1.158 msaitoh */ 2286 1.216 msaitoh mode_ctl = BGE_DMA_SWAP_OPTIONS; 2287 1.327 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2288 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2289 1.216 msaitoh /* Retain Host-2-BMC settings written by APE firmware. 
*/ 2290 1.216 msaitoh mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & 2291 1.216 msaitoh (BGE_MODECTL_BYTESWAP_B2HRX_DATA | 2292 1.216 msaitoh BGE_MODECTL_WORDSWAP_B2HRX_DATA | 2293 1.216 msaitoh BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); 2294 1.216 msaitoh } 2295 1.216 msaitoh mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | 2296 1.216 msaitoh BGE_MODECTL_TX_NO_PHDR_CSUM; 2297 1.16 thorpej 2298 1.158 msaitoh /* 2299 1.172 msaitoh * BCM5701 B5 have a bug causing data corruption when using 2300 1.172 msaitoh * 64-bit DMA reads, which can be terminated early and then 2301 1.172 msaitoh * completed later as 32-bit accesses, in combination with 2302 1.172 msaitoh * certain bridges. 2303 1.172 msaitoh */ 2304 1.172 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 2305 1.172 msaitoh sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 2306 1.216 msaitoh mode_ctl |= BGE_MODECTL_FORCE_PCI32; 2307 1.172 msaitoh 2308 1.172 msaitoh /* 2309 1.177 msaitoh * Tell the firmware the driver is running 2310 1.177 msaitoh */ 2311 1.177 msaitoh if (sc->bge_asf_mode & ASF_STACKUP) 2312 1.216 msaitoh mode_ctl |= BGE_MODECTL_STACKUP; 2313 1.216 msaitoh 2314 1.216 msaitoh CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2315 1.177 msaitoh 2316 1.177 msaitoh /* 2317 1.158 msaitoh * Disable memory write invalidate. Apparently it is not supported 2318 1.158 msaitoh * properly by these devices. 2319 1.158 msaitoh */ 2320 1.172 msaitoh PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, 2321 1.172 msaitoh PCI_COMMAND_INVALIDATE_ENABLE); 2322 1.16 thorpej 2323 1.158 msaitoh #ifdef __brokenalpha__ 2324 1.158 msaitoh /* 2325 1.158 msaitoh * Must insure that we do not cross an 8K (bytes) boundary 2326 1.158 msaitoh * for DMA reads. Our highest limit is 1K bytes. This is a 2327 1.158 msaitoh * restriction on some ALPHA platforms with early revision 2328 1.158 msaitoh * 21174 PCI chipsets, such as the AlphaPC 164lx 2329 1.158 msaitoh */ 2330 1.158 msaitoh PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 2331 1.158 msaitoh #endif 2332 1.16 thorpej 2333 1.158 msaitoh /* Set the timer prescaler (always 66MHz) */ 2334 1.341 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 2335 1.16 thorpej 2336 1.159 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2337 1.159 msaitoh DELAY(40); /* XXX */ 2338 1.159 msaitoh 2339 1.159 msaitoh /* Put PHY into ready state */ 2340 1.211 msaitoh BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 2341 1.159 msaitoh DELAY(40); 2342 1.159 msaitoh } 2343 1.159 msaitoh 2344 1.170 msaitoh return 0; 2345 1.158 msaitoh } 2346 1.16 thorpej 2347 1.158 msaitoh static int 2348 1.158 msaitoh bge_blockinit(struct bge_softc *sc) 2349 1.158 msaitoh { 2350 1.177 msaitoh volatile struct bge_rcb *rcb; 2351 1.177 msaitoh bus_size_t rcb_addr; 2352 1.354 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 2353 1.177 msaitoh bge_hostaddr taddr; 2354 1.327 msaitoh uint32_t dmactl, rdmareg, mimode, val; 2355 1.222 msaitoh int i, limit; 2356 1.16 thorpej 2357 1.158 msaitoh /* 2358 1.158 msaitoh * Initialize the memory window pointer register so that 2359 1.158 msaitoh * we can access the first 32K of internal NIC RAM. This will 2360 1.158 msaitoh * allow us to set up the TX send ring RCBs and the RX return 2361 1.158 msaitoh * ring RCBs, plus other things which live in NIC memory. 
2362 1.158 msaitoh */ 2363 1.158 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 2364 1.120 tsutsui 2365 1.216 msaitoh if (!BGE_IS_5705_PLUS(sc)) { 2366 1.236 msaitoh /* 57XX step 33 */ 2367 1.236 msaitoh /* Configure mbuf memory pool */ 2368 1.332 msaitoh CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 2369 1.172 msaitoh 2370 1.172 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2371 1.172 msaitoh CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 2372 1.172 msaitoh else 2373 1.172 msaitoh CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 2374 1.40 fvdl 2375 1.236 msaitoh /* 57XX step 34 */ 2376 1.158 msaitoh /* Configure DMA resource pool */ 2377 1.158 msaitoh CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 2378 1.158 msaitoh BGE_DMA_DESCRIPTORS); 2379 1.158 msaitoh CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 2380 1.158 msaitoh } 2381 1.40 fvdl 2382 1.236 msaitoh /* 5718 step 11, 57XX step 35 */ 2383 1.236 msaitoh /* 2384 1.236 msaitoh * Configure mbuf pool watermarks. New broadcom docs strongly 2385 1.236 msaitoh * recommend these. 2386 1.236 msaitoh */ 2387 1.216 msaitoh if (BGE_IS_5717_PLUS(sc)) { 2388 1.202 tsutsui CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2389 1.316 bouyer if (ifp->if_mtu > ETHERMTU) { 2390 1.316 bouyer CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); 2391 1.316 bouyer CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); 2392 1.316 bouyer } else { 2393 1.316 bouyer CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 2394 1.316 bouyer CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 2395 1.316 bouyer } 2396 1.202 tsutsui } else if (BGE_IS_5705_PLUS(sc)) { 2397 1.202 tsutsui CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2398 1.202 tsutsui 2399 1.202 tsutsui if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2400 1.202 tsutsui CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 2401 1.202 tsutsui CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 2402 1.202 tsutsui } else { 2403 1.202 tsutsui CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 2404 1.202 tsutsui CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2405 1.202 tsutsui } 2406 1.158 msaitoh } else { 2407 1.218 msaitoh CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 2408 1.218 msaitoh CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 2409 1.158 msaitoh CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2410 1.158 msaitoh } 2411 1.25 jonathan 2412 1.236 msaitoh /* 57XX step 36 */ 2413 1.236 msaitoh /* Configure DMA resource watermarks */ 2414 1.158 msaitoh CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 2415 1.158 msaitoh CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 2416 1.51 fvdl 2417 1.236 msaitoh /* 5718 step 13, 57XX step 38 */ 2418 1.236 msaitoh /* Enable buffer manager */ 2419 1.216 msaitoh val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN; 2420 1.216 msaitoh /* 2421 1.216 msaitoh * Change the arbitration algorithm of TXMBUF read request to 2422 1.216 msaitoh * round-robin instead of priority based for BCM5719. When 2423 1.216 msaitoh * TXFIFO is almost empty, RDMA will hold its request until 2424 1.216 msaitoh * TXFIFO is not almost empty. 
2425 1.216 msaitoh */ 2426 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2427 1.216 msaitoh val |= BGE_BMANMODE_NO_TX_UNDERRUN; 2428 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2429 1.216 msaitoh sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2430 1.216 msaitoh sc->bge_chipid == BGE_CHIPID_BCM5720_A0) 2431 1.216 msaitoh val |= BGE_BMANMODE_LOMBUF_ATTN; 2432 1.216 msaitoh CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 2433 1.44 hannken 2434 1.236 msaitoh /* 57XX step 39 */ 2435 1.236 msaitoh /* Poll for buffer manager start indication */ 2436 1.172 msaitoh for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2437 1.216 msaitoh DELAY(10); 2438 1.172 msaitoh if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 2439 1.172 msaitoh break; 2440 1.172 msaitoh } 2441 1.51 fvdl 2442 1.172 msaitoh if (i == BGE_TIMEOUT * 2) { 2443 1.172 msaitoh aprint_error_dev(sc->bge_dev, 2444 1.172 msaitoh "buffer manager failed to start\n"); 2445 1.172 msaitoh return ENXIO; 2446 1.158 msaitoh } 2447 1.51 fvdl 2448 1.236 msaitoh /* 57XX step 40 */ 2449 1.236 msaitoh /* Enable flow-through queues */ 2450 1.158 msaitoh CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2451 1.158 msaitoh CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2452 1.76 cube 2453 1.158 msaitoh /* Wait until queue initialization is complete */ 2454 1.172 msaitoh for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2455 1.158 msaitoh if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2456 1.158 msaitoh break; 2457 1.158 msaitoh DELAY(10); 2458 1.158 msaitoh } 2459 1.76 cube 2460 1.172 msaitoh if (i == BGE_TIMEOUT * 2) { 2461 1.158 msaitoh aprint_error_dev(sc->bge_dev, 2462 1.158 msaitoh "flow-through queue init failed\n"); 2463 1.170 msaitoh return ENXIO; 2464 1.158 msaitoh } 2465 1.92 gavan 2466 1.222 msaitoh /* 2467 1.222 msaitoh * Summary of rings supported by the controller: 2468 1.222 msaitoh * 2469 1.222 msaitoh * Standard Receive Producer Ring 2470 1.222 msaitoh * - This ring is used to feed receive buffers for "standard" 2471 1.222 msaitoh * sized frames (typically 1536 bytes) to the controller. 2472 1.222 msaitoh * 2473 1.222 msaitoh * Jumbo Receive Producer Ring 2474 1.222 msaitoh * - This ring is used to feed receive buffers for jumbo sized 2475 1.222 msaitoh * frames (i.e. anything bigger than the "standard" frames) 2476 1.222 msaitoh * to the controller. 2477 1.222 msaitoh * 2478 1.222 msaitoh * Mini Receive Producer Ring 2479 1.222 msaitoh * - This ring is used to feed receive buffers for "mini" 2480 1.222 msaitoh * sized frames to the controller. 2481 1.222 msaitoh * - This feature required external memory for the controller 2482 1.222 msaitoh * but was never used in a production system. Should always 2483 1.222 msaitoh * be disabled. 2484 1.222 msaitoh * 2485 1.222 msaitoh * Receive Return Ring 2486 1.222 msaitoh * - After the controller has placed an incoming frame into a 2487 1.222 msaitoh * receive buffer that buffer is moved into a receive return 2488 1.222 msaitoh * ring. The driver is then responsible to passing the 2489 1.222 msaitoh * buffer up to the stack. Many versions of the controller 2490 1.222 msaitoh * support multiple RR rings. 2491 1.222 msaitoh * 2492 1.222 msaitoh * Send Ring 2493 1.222 msaitoh * - This ring is used for outgoing frames. Many versions of 2494 1.222 msaitoh * the controller support multiple send rings. 
2495 1.222 msaitoh */ 2496 1.222 msaitoh 2497 1.236 msaitoh /* 5718 step 15, 57XX step 41 */ 2498 1.236 msaitoh /* Initialize the standard RX ring control block */ 2499 1.158 msaitoh rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2500 1.172 msaitoh BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2501 1.236 msaitoh /* 5718 step 16 */ 2502 1.257 msaitoh if (BGE_IS_57765_PLUS(sc)) { 2503 1.222 msaitoh /* 2504 1.222 msaitoh * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 2505 1.222 msaitoh * Bits 15-2 : Maximum RX frame size 2506 1.309 snj * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2507 1.222 msaitoh * Bit 0 : Reserved 2508 1.222 msaitoh */ 2509 1.202 tsutsui rcb->bge_maxlen_flags = 2510 1.202 tsutsui BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); 2511 1.222 msaitoh } else if (BGE_IS_5705_PLUS(sc)) { 2512 1.222 msaitoh /* 2513 1.222 msaitoh * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 2514 1.222 msaitoh * Bits 15-2 : Reserved (should be 0) 2515 1.222 msaitoh * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2516 1.222 msaitoh * Bit 0 : Reserved 2517 1.222 msaitoh */ 2518 1.158 msaitoh rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2519 1.222 msaitoh } else { 2520 1.222 msaitoh /* 2521 1.222 msaitoh * Ring size is always XXX entries 2522 1.222 msaitoh * Bits 31-16: Maximum RX frame size 2523 1.222 msaitoh * Bits 15-2 : Reserved (should be 0) 2524 1.222 msaitoh * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2525 1.222 msaitoh * Bit 0 : Reserved 2526 1.222 msaitoh */ 2527 1.158 msaitoh rcb->bge_maxlen_flags = 2528 1.158 msaitoh BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2529 1.222 msaitoh } 2530 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2531 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2532 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2533 1.216 msaitoh rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 2534 1.216 msaitoh else 2535 1.216 msaitoh rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2536 1.222 msaitoh /* Write the standard receive producer ring control block. */ 2537 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2538 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2539 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2540 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2541 1.119 tsutsui 2542 1.222 msaitoh /* Reset the standard receive producer ring producer index. */ 2543 1.222 msaitoh bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2544 1.222 msaitoh 2545 1.236 msaitoh /* 57XX step 42 */ 2546 1.158 msaitoh /* 2547 1.236 msaitoh * Initialize the jumbo RX ring control block 2548 1.158 msaitoh * We set the 'ring disabled' bit in the flags 2549 1.158 msaitoh * field until we're actually ready to start 2550 1.158 msaitoh * using this ring (i.e. once we set the MTU 2551 1.158 msaitoh * high enough to require it). 
2552 1.158 msaitoh */ 2553 1.166 msaitoh if (BGE_IS_JUMBO_CAPABLE(sc)) { 2554 1.158 msaitoh rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2555 1.172 msaitoh BGE_HOSTADDR(rcb->bge_hostaddr, 2556 1.158 msaitoh BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2557 1.222 msaitoh rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2558 1.222 msaitoh BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 2559 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2560 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2561 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2562 1.216 msaitoh rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 2563 1.216 msaitoh else 2564 1.216 msaitoh rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2565 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2566 1.158 msaitoh rcb->bge_hostaddr.bge_addr_hi); 2567 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2568 1.158 msaitoh rcb->bge_hostaddr.bge_addr_lo); 2569 1.222 msaitoh /* Program the jumbo receive producer ring RCB parameters. */ 2570 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2571 1.158 msaitoh rcb->bge_maxlen_flags); 2572 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2573 1.216 msaitoh /* Reset the jumbo receive producer ring producer index. */ 2574 1.216 msaitoh bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2575 1.216 msaitoh } 2576 1.149 sborrill 2577 1.236 msaitoh /* 57XX step 43 */ 2578 1.216 msaitoh /* Disable the mini receive producer ring RCB. */ 2579 1.216 msaitoh if (BGE_IS_5700_FAMILY(sc)) { 2580 1.158 msaitoh /* Set up dummy disabled mini ring RCB */ 2581 1.158 msaitoh rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 2582 1.222 msaitoh rcb->bge_maxlen_flags = 2583 1.222 msaitoh BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 2584 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2585 1.158 msaitoh rcb->bge_maxlen_flags); 2586 1.216 msaitoh /* Reset the mini receive producer ring producer index. */ 2587 1.216 msaitoh bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2588 1.133 markd 2589 1.158 msaitoh bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2590 1.158 msaitoh offsetof(struct bge_ring_data, bge_info), 2591 1.364 skrll sizeof(struct bge_gib), 2592 1.331 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2593 1.158 msaitoh } 2594 1.133 markd 2595 1.206 msaitoh /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ 2596 1.206 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2597 1.206 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || 2598 1.206 msaitoh sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || 2599 1.206 msaitoh sc->bge_chipid == BGE_CHIPID_BCM5906_A2) 2600 1.206 msaitoh CSR_WRITE_4(sc, BGE_ISO_PKT_TX, 2601 1.206 msaitoh (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); 2602 1.206 msaitoh } 2603 1.236 msaitoh /* 5718 step 14, 57XX step 44 */ 2604 1.158 msaitoh /* 2605 1.222 msaitoh * The BD ring replenish thresholds control how often the 2606 1.222 msaitoh * hardware fetches new BD's from the producer rings in host 2607 1.222 msaitoh * memory. Setting the value too low on a busy system can 2608 1.390 andvar * starve the hardware and reduce the throughput. 2609 1.222 msaitoh * 2610 1.158 msaitoh * Set the BD ring replenish thresholds. The recommended 2611 1.158 msaitoh * values are 1/8th the number of descriptors allocated to 2612 1.222 msaitoh * each ring, but since we try to avoid filling the entire 2613 1.222 msaitoh * ring we set these to the minimal value of 8. 
This needs to 2614 1.222 msaitoh * be done on several of the supported chip revisions anyway, 2615 1.222 msaitoh * to work around HW bugs. 2616 1.158 msaitoh */ 2617 1.222 msaitoh CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8); 2618 1.222 msaitoh if (BGE_IS_JUMBO_CAPABLE(sc)) 2619 1.222 msaitoh CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8); 2620 1.157 msaitoh 2621 1.236 msaitoh /* 5718 step 18 */ 2622 1.216 msaitoh if (BGE_IS_5717_PLUS(sc)) { 2623 1.172 msaitoh CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); 2624 1.172 msaitoh CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); 2625 1.172 msaitoh } 2626 1.172 msaitoh 2627 1.236 msaitoh /* 57XX step 45 */ 2628 1.158 msaitoh /* 2629 1.222 msaitoh * Disable all send rings by setting the 'ring disabled' bit 2630 1.222 msaitoh * in the flags field of all the TX send ring control blocks, 2631 1.222 msaitoh * located in NIC memory. 2632 1.158 msaitoh */ 2633 1.222 msaitoh if (BGE_IS_5700_FAMILY(sc)) { 2634 1.222 msaitoh /* 5700 to 5704 had 16 send rings. */ 2635 1.222 msaitoh limit = BGE_TX_RINGS_EXTSSRAM_MAX; 2636 1.258 msaitoh } else if (BGE_IS_5717_PLUS(sc)) { 2637 1.258 msaitoh limit = BGE_TX_RINGS_5717_MAX; 2638 1.327 msaitoh } else if (BGE_IS_57765_FAMILY(sc) || 2639 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2640 1.258 msaitoh limit = BGE_TX_RINGS_57765_MAX; 2641 1.222 msaitoh } else 2642 1.222 msaitoh limit = 1; 2643 1.158 msaitoh rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2644 1.222 msaitoh for (i = 0; i < limit; i++) { 2645 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2646 1.158 msaitoh BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2647 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2648 1.158 msaitoh rcb_addr += sizeof(struct bge_rcb); 2649 1.158 msaitoh } 2650 1.157 msaitoh 2651 1.236 msaitoh /* 57XX step 46 and 47 */ 2652 1.222 msaitoh /* Configure send ring RCB 0 (we use only the first ring) */ 2653 1.158 msaitoh rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2654 1.172 msaitoh BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2655 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2656 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2657 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2658 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2659 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2660 1.216 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717); 2661 1.216 msaitoh else 2662 1.216 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2663 1.158 msaitoh BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2664 1.222 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2665 1.222 msaitoh BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2666 1.157 msaitoh 2667 1.236 msaitoh /* 57XX step 48 */ 2668 1.222 msaitoh /* 2669 1.222 msaitoh * Disable all receive return rings by setting the 2670 1.389 andvar * 'ring disabled' bit in the flags field of all the receive 2671 1.222 msaitoh * return ring control blocks, located in NIC memory. 2672 1.222 msaitoh */ 2673 1.257 msaitoh if (BGE_IS_5717_PLUS(sc)) { 2674 1.222 msaitoh /* Should be 17, use 16 until we get an SRAM map. 
*/ 2675 1.222 msaitoh limit = 16; 2676 1.222 msaitoh } else if (BGE_IS_5700_FAMILY(sc)) 2677 1.222 msaitoh limit = BGE_RX_RINGS_MAX; 2678 1.222 msaitoh else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2679 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 || 2680 1.257 msaitoh BGE_IS_57765_FAMILY(sc)) 2681 1.222 msaitoh limit = 4; 2682 1.222 msaitoh else 2683 1.222 msaitoh limit = 1; 2684 1.222 msaitoh /* Disable all receive return rings */ 2685 1.158 msaitoh rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2686 1.222 msaitoh for (i = 0; i < limit; i++) { 2687 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2688 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2689 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2690 1.172 msaitoh BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2691 1.172 msaitoh BGE_RCB_FLAG_RING_DISABLED)); 2692 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2693 1.158 msaitoh bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2694 1.170 msaitoh (i * (sizeof(uint64_t))), 0); 2695 1.158 msaitoh rcb_addr += sizeof(struct bge_rcb); 2696 1.158 msaitoh } 2697 1.157 msaitoh 2698 1.236 msaitoh /* 57XX step 49 */ 2699 1.158 msaitoh /* 2700 1.222 msaitoh * Set up receive return ring 0. Note that the NIC address 2701 1.222 msaitoh * for RX return rings is 0x0. The return rings live entirely 2702 1.222 msaitoh * within the host, so the nicaddr field in the RCB isn't used. 2703 1.158 msaitoh */ 2704 1.158 msaitoh rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2705 1.172 msaitoh BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 2706 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2707 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2708 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 2709 1.158 msaitoh RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2710 1.158 msaitoh BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2711 1.157 msaitoh 2712 1.236 msaitoh /* 5718 step 24, 57XX step 53 */ 2713 1.158 msaitoh /* Set random backoff seed for TX */ 2714 1.158 msaitoh CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2715 1.235 msaitoh (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 2716 1.235 msaitoh CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 2717 1.235 msaitoh CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) & 2718 1.158 msaitoh BGE_TX_BACKOFF_SEED_MASK); 2719 1.157 msaitoh 2720 1.236 msaitoh /* 5718 step 26, 57XX step 55 */ 2721 1.158 msaitoh /* Set inter-packet gap */ 2722 1.216 msaitoh val = 0x2620; 2723 1.327 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2724 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2725 1.216 msaitoh val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 2726 1.216 msaitoh (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 2727 1.216 msaitoh CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 2728 1.51 fvdl 2729 1.236 msaitoh /* 5718 step 27, 57XX step 56 */ 2730 1.158 msaitoh /* 2731 1.158 msaitoh * Specify which ring to use for packets that don't match 2732 1.158 msaitoh * any RX rules. 2733 1.158 msaitoh */ 2734 1.158 msaitoh CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2735 1.157 msaitoh 2736 1.236 msaitoh /* 5718 step 28, 57XX step 57 */ 2737 1.158 msaitoh /* 2738 1.158 msaitoh * Configure number of RX lists. One interrupt distribution 2739 1.158 msaitoh * list, sixteen active lists, one bad frames class. 
2740 1.158 msaitoh */ 2741 1.158 msaitoh CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2742 1.157 msaitoh 2743 1.236 msaitoh /* 5718 step 29, 57XX step 58 */ 2744 1.390 andvar /* Initialize RX list placement stats mask. */ 2745 1.244 msaitoh if (BGE_IS_575X_PLUS(sc)) { 2746 1.244 msaitoh val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK); 2747 1.244 msaitoh val &= ~BGE_RXLPSTATCONTROL_DACK_FIX; 2748 1.244 msaitoh CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val); 2749 1.244 msaitoh } else 2750 1.244 msaitoh CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 2751 1.244 msaitoh 2752 1.236 msaitoh /* 5718 step 30, 57XX step 59 */ 2753 1.158 msaitoh CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2754 1.157 msaitoh 2755 1.236 msaitoh /* 5718 step 33, 57XX step 62 */ 2756 1.158 msaitoh /* Disable host coalescing until we get it set up */ 2757 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2758 1.51 fvdl 2759 1.236 msaitoh /* 5718 step 34, 57XX step 63 */ 2760 1.158 msaitoh /* Poll to make sure it's shut down. */ 2761 1.172 msaitoh for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2762 1.216 msaitoh DELAY(10); 2763 1.158 msaitoh if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2764 1.158 msaitoh break; 2765 1.158 msaitoh } 2766 1.151 cegger 2767 1.172 msaitoh if (i == BGE_TIMEOUT * 2) { 2768 1.158 msaitoh aprint_error_dev(sc->bge_dev, 2769 1.158 msaitoh "host coalescing engine failed to idle\n"); 2770 1.170 msaitoh return ENXIO; 2771 1.158 msaitoh } 2772 1.51 fvdl 2773 1.236 msaitoh /* 5718 step 35, 36, 37 */ 2774 1.158 msaitoh /* Set up host coalescing defaults */ 2775 1.394 skrll mutex_enter(sc->sc_intr_lock); 2776 1.394 skrll const uint32_t rx_coal_ticks = sc->bge_rx_coal_ticks; 2777 1.394 skrll const uint32_t tx_coal_ticks = sc->bge_tx_coal_ticks; 2778 1.394 skrll const uint32_t rx_max_coal_bds = sc->bge_rx_max_coal_bds; 2779 1.394 skrll const uint32_t tx_max_coal_bds = sc->bge_tx_max_coal_bds; 2780 1.394 skrll mutex_exit(sc->sc_intr_lock); 2781 1.394 skrll CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_coal_ticks); 2782 1.394 skrll CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, tx_coal_ticks); 2783 1.394 skrll CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_max_coal_bds); 2784 1.394 skrll CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_max_coal_bds); 2785 1.216 msaitoh if (!(BGE_IS_5705_PLUS(sc))) { 2786 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2787 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2788 1.51 fvdl } 2789 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2790 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2791 1.51 fvdl 2792 1.158 msaitoh /* Set up address of statistics block */ 2793 1.172 msaitoh if (BGE_IS_5700_FAMILY(sc)) { 2794 1.172 msaitoh BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2795 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2796 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2797 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2798 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2799 1.16 thorpej } 2800 1.16 thorpej 2801 1.236 msaitoh /* 5718 step 38 */ 2802 1.158 msaitoh /* Set up address of status block */ 2803 1.172 msaitoh BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2804 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2805 1.158 msaitoh CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2806 1.158 msaitoh CSR_WRITE_4(sc, 
BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2807 1.158 msaitoh sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2808 1.158 msaitoh sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2809 1.16 thorpej 2810 1.216 msaitoh /* Set up status block size. */ 2811 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 && 2812 1.216 msaitoh sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 2813 1.216 msaitoh val = BGE_STATBLKSZ_FULL; 2814 1.216 msaitoh bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ); 2815 1.216 msaitoh } else { 2816 1.216 msaitoh val = BGE_STATBLKSZ_32BYTE; 2817 1.216 msaitoh bzero(&sc->bge_rdata->bge_status_block, 32); 2818 1.216 msaitoh } 2819 1.216 msaitoh 2820 1.236 msaitoh /* 5718 step 39, 57XX step 73 */ 2821 1.158 msaitoh /* Turn on host coalescing state machine */ 2822 1.216 msaitoh CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 2823 1.7 thorpej 2824 1.236 msaitoh /* 5718 step 40, 57XX step 74 */ 2825 1.158 msaitoh /* Turn on RX BD completion state machine and enable attentions */ 2826 1.158 msaitoh CSR_WRITE_4(sc, BGE_RBDC_MODE, 2827 1.161 msaitoh BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2828 1.7 thorpej 2829 1.236 msaitoh /* 5718 step 41, 57XX step 75 */ 2830 1.158 msaitoh /* Turn on RX list placement state machine */ 2831 1.158 msaitoh CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2832 1.51 fvdl 2833 1.236 msaitoh /* 57XX step 76 */ 2834 1.158 msaitoh /* Turn on RX list selector state machine. */ 2835 1.216 msaitoh if (!(BGE_IS_5705_PLUS(sc))) 2836 1.158 msaitoh CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2837 1.51 fvdl 2838 1.161 msaitoh val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2839 1.161 msaitoh BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2840 1.161 msaitoh BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2841 1.161 msaitoh BGE_MACMODE_FRMHDR_DMA_ENB; 2842 1.161 msaitoh 2843 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) 2844 1.177 msaitoh val |= BGE_PORTMODE_TBI; 2845 1.261 msaitoh else if (sc->bge_flags & BGEF_FIBER_MII) 2846 1.177 msaitoh val |= BGE_PORTMODE_GMII; 2847 1.161 msaitoh else 2848 1.177 msaitoh val |= BGE_PORTMODE_MII; 2849 1.161 msaitoh 2850 1.236 msaitoh /* 5718 step 42 and 43, 57XX step 77 and 78 */ 2851 1.216 msaitoh /* Allow APE to send/receive frames. */ 2852 1.216 msaitoh if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 2853 1.216 msaitoh val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2854 1.216 msaitoh 2855 1.158 msaitoh /* Turn on DMA, clear stats */ 2856 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); 2857 1.236 msaitoh /* 5718 step 44 */ 2858 1.211 msaitoh DELAY(40); 2859 1.161 msaitoh 2860 1.236 msaitoh /* 5718 step 45, 57XX step 79 */ 2861 1.158 msaitoh /* Set misc. 
local control, enable interrupts on attentions */ 2862 1.251 msaitoh BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 2863 1.224 msaitoh if (BGE_IS_5717_PLUS(sc)) { 2864 1.224 msaitoh CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */ 2865 1.236 msaitoh /* 5718 step 46 */ 2866 1.224 msaitoh DELAY(100); 2867 1.224 msaitoh } 2868 1.80 fredb 2869 1.236 msaitoh /* 57XX step 81 */ 2870 1.158 msaitoh /* Turn on DMA completion state machine */ 2871 1.216 msaitoh if (!(BGE_IS_5705_PLUS(sc))) 2872 1.158 msaitoh CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2873 1.149 sborrill 2874 1.236 msaitoh /* 5718 step 47, 57XX step 82 */ 2875 1.203 msaitoh val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 2876 1.203 msaitoh 2877 1.236 msaitoh /* 5718 step 48 */ 2878 1.216 msaitoh /* Enable host coalescing bug fix. */ 2879 1.203 msaitoh if (BGE_IS_5755_PLUS(sc)) 2880 1.203 msaitoh val |= BGE_WDMAMODE_STATUS_TAG_FIX; 2881 1.203 msaitoh 2882 1.206 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 2883 1.206 msaitoh val |= BGE_WDMAMODE_BURST_ALL_DATA; 2884 1.206 msaitoh 2885 1.158 msaitoh /* Turn on write DMA state machine */ 2886 1.213 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val); 2887 1.236 msaitoh /* 5718 step 49 */ 2888 1.213 msaitoh DELAY(40); 2889 1.203 msaitoh 2890 1.203 msaitoh val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 2891 1.216 msaitoh 2892 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717) 2893 1.216 msaitoh val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 2894 1.216 msaitoh 2895 1.203 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2896 1.203 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2897 1.203 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2898 1.203 msaitoh val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2899 1.203 msaitoh BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2900 1.203 msaitoh BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2901 1.76 cube 2902 1.261 msaitoh if (sc->bge_flags & BGEF_PCIE) 2903 1.204 msaitoh val |= BGE_RDMAMODE_FIFO_LONG_BURST; 2904 1.258 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) { 2905 1.258 msaitoh if (ifp->if_mtu <= ETHERMTU) 2906 1.258 msaitoh val |= BGE_RDMAMODE_JMB_2K_MMRR; 2907 1.258 msaitoh } 2908 1.316 bouyer if (sc->bge_flags & BGEF_TSO) { 2909 1.203 msaitoh val |= BGE_RDMAMODE_TSO4_ENABLE; 2910 1.316 bouyer if (BGE_IS_5717_PLUS(sc)) 2911 1.316 bouyer val |= BGE_RDMAMODE_TSO6_ENABLE; 2912 1.316 bouyer } 2913 1.76 cube 2914 1.327 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2915 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2916 1.216 msaitoh val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 2917 1.216 msaitoh BGE_RDMAMODE_H2BNC_VLAN_DET; 2918 1.216 msaitoh /* 2919 1.216 msaitoh * Allow multiple outstanding read requests from 2920 1.216 msaitoh * non-LSO read DMA engine. 
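 * This is done by clearing BGE_RDMAMODE_MULT_DMA_RD_DIS in the
 * mode value written below.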
2921 1.216 msaitoh */ 2922 1.216 msaitoh val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 2923 1.216 msaitoh } 2924 1.216 msaitoh 2925 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2926 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2927 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2928 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 || 2929 1.257 msaitoh BGE_IS_57765_PLUS(sc)) { 2930 1.327 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2931 1.327 msaitoh rdmareg = BGE_RDMA_RSRVCTRL_REG2; 2932 1.327 msaitoh else 2933 1.327 msaitoh rdmareg = BGE_RDMA_RSRVCTRL; 2934 1.327 msaitoh dmactl = CSR_READ_4(sc, rdmareg); 2935 1.216 msaitoh /* 2936 1.216 msaitoh * Adjust tx margin to prevent TX data corruption and 2937 1.216 msaitoh * fix internal FIFO overflow. 2938 1.216 msaitoh */ 2939 1.327 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2940 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2941 1.216 msaitoh dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 2942 1.216 msaitoh BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 2943 1.216 msaitoh BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 2944 1.216 msaitoh dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 2945 1.216 msaitoh BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 2946 1.216 msaitoh BGE_RDMA_RSRVCTRL_TXMRGN_320B; 2947 1.216 msaitoh } 2948 1.216 msaitoh /* 2949 1.216 msaitoh * Enable fix for read DMA FIFO overruns. 2950 1.216 msaitoh * The fix is to limit the number of RX BDs 2951 1.349 andvar * the hardware would fetch at a time. 2952 1.216 msaitoh */ 2953 1.327 msaitoh CSR_WRITE_4(sc, rdmareg, dmactl | 2954 1.216 msaitoh BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 2955 1.216 msaitoh } 2956 1.216 msaitoh 2957 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) { 2958 1.216 msaitoh CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2959 1.216 msaitoh CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2960 1.216 msaitoh BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2961 1.216 msaitoh BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2962 1.216 msaitoh } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2963 1.216 msaitoh /* 2964 1.216 msaitoh * Allow 4KB burst length reads for non-LSO frames. 2965 1.216 msaitoh * Enable 512B burst length reads for buffer descriptors. 
2966 1.216 msaitoh */ 2967 1.216 msaitoh CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2968 1.216 msaitoh CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2969 1.216 msaitoh BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 2970 1.216 msaitoh BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2971 1.327 msaitoh } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2972 1.327 msaitoh CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, 2973 1.327 msaitoh CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | 2974 1.327 msaitoh BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2975 1.327 msaitoh BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2976 1.216 msaitoh } 2977 1.158 msaitoh /* Turn on read DMA state machine */ 2978 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val); 2979 1.236 msaitoh /* 5718 step 52 */ 2980 1.203 msaitoh delay(40); 2981 1.128 tron 2982 1.327 msaitoh if (sc->bge_flags & BGEF_RDMA_BUG) { 2983 1.320 bouyer for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { 2984 1.320 bouyer val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); 2985 1.320 bouyer if ((val & 0xFFFF) > BGE_FRAMELEN) 2986 1.320 bouyer break; 2987 1.320 bouyer if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) 2988 1.320 bouyer break; 2989 1.320 bouyer } 2990 1.320 bouyer if (i != BGE_NUM_RDMA_CHANNELS / 2) { 2991 1.320 bouyer val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 2992 1.320 bouyer if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2993 1.320 bouyer val |= BGE_RDMA_TX_LENGTH_WA_5719; 2994 1.320 bouyer else 2995 1.320 bouyer val |= BGE_RDMA_TX_LENGTH_WA_5720; 2996 1.320 bouyer CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 2997 1.320 bouyer } 2998 1.320 bouyer } 2999 1.320 bouyer 3000 1.236 msaitoh /* 5718 step 56, 57XX step 84 */ 3001 1.158 msaitoh /* Turn on RX data completion state machine */ 3002 1.158 msaitoh CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3003 1.128 tron 3004 1.158 msaitoh /* Turn on RX data and RX BD initiator state machine */ 3005 1.158 msaitoh CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 3006 1.133 markd 3007 1.236 msaitoh /* 57XX step 85 */ 3008 1.158 msaitoh /* Turn on Mbuf cluster free state machine */ 3009 1.216 msaitoh if (!BGE_IS_5705_PLUS(sc)) 3010 1.158 msaitoh CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3011 1.133 markd 3012 1.236 msaitoh /* 5718 step 57, 57XX step 86 */ 3013 1.158 msaitoh /* Turn on send data completion state machine */ 3014 1.172 msaitoh val = BGE_SDCMODE_ENABLE; 3015 1.172 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 3016 1.172 msaitoh val |= BGE_SDCMODE_CDELAY; 3017 1.172 msaitoh CSR_WRITE_4(sc, BGE_SDC_MODE, val); 3018 1.106 jonathan 3019 1.236 msaitoh /* 5718 step 58 */ 3020 1.225 msaitoh /* Turn on send BD completion state machine */ 3021 1.225 msaitoh CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3022 1.225 msaitoh 3023 1.236 msaitoh /* 57XX step 88 */ 3024 1.225 msaitoh /* Turn on RX BD initiator state machine */ 3025 1.225 msaitoh CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3026 1.225 msaitoh 3027 1.236 msaitoh /* 5718 step 60, 57XX step 90 */ 3028 1.158 msaitoh /* Turn on send data initiator state machine */ 3029 1.261 msaitoh if (sc->bge_flags & BGEF_TSO) { 3030 1.158 msaitoh /* XXX: magic value from Linux driver */ 3031 1.222 msaitoh CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 3032 1.222 msaitoh BGE_SDIMODE_HW_LSO_PRE_DMA); 3033 1.177 msaitoh } else 3034 1.158 msaitoh CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3035 1.106 jonathan 3036 1.236 msaitoh /* 5718 step 61, 57XX step 91 */ 3037 1.158 msaitoh /* Turn on send BD 
initiator state machine */ 3038 1.158 msaitoh CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3039 1.133 markd 3040 1.236 msaitoh /* 5718 step 62, 57XX step 92 */ 3041 1.158 msaitoh /* Turn on send BD selector state machine */ 3042 1.158 msaitoh CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3043 1.135 taca 3044 1.236 msaitoh /* 5718 step 31, 57XX step 60 */ 3045 1.158 msaitoh CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 3046 1.236 msaitoh /* 5718 step 32, 57XX step 61 */ 3047 1.158 msaitoh CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 3048 1.161 msaitoh BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 3049 1.133 markd 3050 1.158 msaitoh /* ack/clear link change events */ 3051 1.161 msaitoh CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3052 1.161 msaitoh BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3053 1.172 msaitoh BGE_MACSTAT_LINK_CHANGED); 3054 1.158 msaitoh CSR_WRITE_4(sc, BGE_MI_STS, 0); 3055 1.106 jonathan 3056 1.216 msaitoh /* 3057 1.216 msaitoh * Enable attention when the link has changed state for 3058 1.216 msaitoh * devices that use auto polling. 3059 1.216 msaitoh */ 3060 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) { 3061 1.158 msaitoh CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 3062 1.178 msaitoh } else { 3063 1.272 msaitoh if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) 3064 1.272 msaitoh mimode = BGE_MIMODE_500KHZ_CONST; 3065 1.272 msaitoh else 3066 1.272 msaitoh mimode = BGE_MIMODE_BASE; 3067 1.272 msaitoh /* 5718 step 68. 5718 step 69 (optionally). */ 3068 1.272 msaitoh if (BGE_IS_5700_FAMILY(sc) || 3069 1.272 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) { 3070 1.272 msaitoh mimode |= BGE_MIMODE_AUTOPOLL; 3071 1.272 msaitoh BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 3072 1.272 msaitoh } 3073 1.272 msaitoh mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 3074 1.272 msaitoh CSR_WRITE_4(sc, BGE_MI_MODE, mimode); 3075 1.158 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 3076 1.158 msaitoh CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3077 1.158 msaitoh BGE_EVTENB_MI_INTERRUPT); 3078 1.158 msaitoh } 3079 1.70 tron 3080 1.161 msaitoh /* 3081 1.161 msaitoh * Clear any pending link state attention. 3082 1.161 msaitoh * Otherwise some link state change events may be lost until attention 3083 1.161 msaitoh * is cleared by bge_intr() -> bge_link_upd() sequence. 3084 1.161 msaitoh * It's not necessary on newer BCM chips - perhaps enabling link 3085 1.161 msaitoh * state change attentions implies clearing pending attention. 3086 1.161 msaitoh */ 3087 1.161 msaitoh CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3088 1.161 msaitoh BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3089 1.161 msaitoh BGE_MACSTAT_LINK_CHANGED); 3090 1.161 msaitoh 3091 1.158 msaitoh /* Enable link state change attentions. 
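 * From this point on, link changes raise an attention that is
 * serviced by the bge_intr() -> bge_link_upd() path mentioned
 * above.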
*/ 3092 1.158 msaitoh BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 3093 1.51 fvdl 3094 1.170 msaitoh return 0; 3095 1.158 msaitoh } 3096 1.7 thorpej 3097 1.158 msaitoh static const struct bge_revision * 3098 1.158 msaitoh bge_lookup_rev(uint32_t chipid) 3099 1.158 msaitoh { 3100 1.158 msaitoh const struct bge_revision *br; 3101 1.7 thorpej 3102 1.158 msaitoh for (br = bge_revisions; br->br_name != NULL; br++) { 3103 1.158 msaitoh if (br->br_chipid == chipid) 3104 1.170 msaitoh return br; 3105 1.158 msaitoh } 3106 1.151 cegger 3107 1.158 msaitoh for (br = bge_majorrevs; br->br_name != NULL; br++) { 3108 1.158 msaitoh if (br->br_chipid == BGE_ASICREV(chipid)) 3109 1.170 msaitoh return br; 3110 1.158 msaitoh } 3111 1.151 cegger 3112 1.170 msaitoh return NULL; 3113 1.158 msaitoh } 3114 1.7 thorpej 3115 1.7 thorpej static const struct bge_product * 3116 1.7 thorpej bge_lookup(const struct pci_attach_args *pa) 3117 1.7 thorpej { 3118 1.7 thorpej const struct bge_product *bp; 3119 1.7 thorpej 3120 1.7 thorpej for (bp = bge_products; bp->bp_name != NULL; bp++) { 3121 1.7 thorpej if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 3122 1.7 thorpej PCI_PRODUCT(pa->pa_id) == bp->bp_product) 3123 1.170 msaitoh return bp; 3124 1.7 thorpej } 3125 1.7 thorpej 3126 1.170 msaitoh return NULL; 3127 1.7 thorpej } 3128 1.7 thorpej 3129 1.215 msaitoh static uint32_t 3130 1.215 msaitoh bge_chipid(const struct pci_attach_args *pa) 3131 1.215 msaitoh { 3132 1.215 msaitoh uint32_t id; 3133 1.215 msaitoh 3134 1.215 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) 3135 1.215 msaitoh >> BGE_PCIMISCCTL_ASICREV_SHIFT; 3136 1.215 msaitoh 3137 1.215 msaitoh if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { 3138 1.215 msaitoh switch (PCI_PRODUCT(pa->pa_id)) { 3139 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM5717: 3140 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM5718: 3141 1.216 msaitoh case PCI_PRODUCT_BROADCOM_BCM5719: 3142 1.216 msaitoh case PCI_PRODUCT_BROADCOM_BCM5720: 3143 1.327 msaitoh case PCI_PRODUCT_BROADCOM_BCM5725: 3144 1.327 msaitoh case PCI_PRODUCT_BROADCOM_BCM5727: 3145 1.327 msaitoh case PCI_PRODUCT_BROADCOM_BCM5762: 3146 1.327 msaitoh case PCI_PRODUCT_BROADCOM_BCM57764: 3147 1.327 msaitoh case PCI_PRODUCT_BROADCOM_BCM57767: 3148 1.327 msaitoh case PCI_PRODUCT_BROADCOM_BCM57787: 3149 1.215 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3150 1.215 msaitoh BGE_PCI_GEN2_PRODID_ASICREV); 3151 1.215 msaitoh break; 3152 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM57761: 3153 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM57762: 3154 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM57765: 3155 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM57766: 3156 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM57781: 3157 1.305 msaitoh case PCI_PRODUCT_BROADCOM_BCM57782: 3158 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM57785: 3159 1.305 msaitoh case PCI_PRODUCT_BROADCOM_BCM57786: 3160 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM57791: 3161 1.215 msaitoh case PCI_PRODUCT_BROADCOM_BCM57795: 3162 1.215 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3163 1.215 msaitoh BGE_PCI_GEN15_PRODID_ASICREV); 3164 1.215 msaitoh break; 3165 1.215 msaitoh default: 3166 1.215 msaitoh id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3167 1.215 msaitoh BGE_PCI_PRODID_ASICREV); 3168 1.215 msaitoh break; 3169 1.215 msaitoh } 3170 1.215 msaitoh } 3171 1.215 msaitoh 3172 1.215 msaitoh return id; 3173 1.215 msaitoh } 3174 1.25 jonathan 3175 1.1 fvdl /* 3176 1.288 msaitoh * Return true if MSI can be used with this device. 
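 *
 * MSI is avoided on the 5714/5714 A0 (it does not work when the
 * chip is configured in single-port mode) and on the 5750 AX/BX
 * revisions; other 575X and newer chips are allowed to use it.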
3177 1.288 msaitoh */ 3178 1.288 msaitoh static int 3179 1.288 msaitoh bge_can_use_msi(struct bge_softc *sc) 3180 1.288 msaitoh { 3181 1.288 msaitoh int can_use_msi = 0; 3182 1.288 msaitoh 3183 1.288 msaitoh switch (BGE_ASICREV(sc->bge_chipid)) { 3184 1.288 msaitoh case BGE_ASICREV_BCM5714_A0: 3185 1.288 msaitoh case BGE_ASICREV_BCM5714: 3186 1.288 msaitoh /* 3187 1.288 msaitoh * Apparently, MSI doesn't work when these chips are 3188 1.288 msaitoh * configured in single-port mode. 3189 1.288 msaitoh */ 3190 1.288 msaitoh break; 3191 1.288 msaitoh case BGE_ASICREV_BCM5750: 3192 1.288 msaitoh if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX && 3193 1.288 msaitoh BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX) 3194 1.288 msaitoh can_use_msi = 1; 3195 1.288 msaitoh break; 3196 1.288 msaitoh default: 3197 1.288 msaitoh if (BGE_IS_575X_PLUS(sc)) 3198 1.288 msaitoh can_use_msi = 1; 3199 1.288 msaitoh } 3200 1.362 skrll return can_use_msi; 3201 1.288 msaitoh } 3202 1.288 msaitoh 3203 1.288 msaitoh /* 3204 1.1 fvdl * Probe for a Broadcom chip. Check the PCI vendor and device IDs 3205 1.1 fvdl * against our list and return its name if we find a match. Note 3206 1.1 fvdl * that since the Broadcom controller contains VPD support, we 3207 1.1 fvdl * can get the device name string from the controller itself instead 3208 1.1 fvdl * of the compiled-in string. This is a little slow, but it guarantees 3209 1.1 fvdl * we'll always announce the right product name. 3210 1.1 fvdl */ 3211 1.104 thorpej static int 3212 1.116 christos bge_probe(device_t parent, cfdata_t match, void *aux) 3213 1.1 fvdl { 3214 1.1 fvdl struct pci_attach_args *pa = (struct pci_attach_args *)aux; 3215 1.1 fvdl 3216 1.7 thorpej if (bge_lookup(pa) != NULL) 3217 1.170 msaitoh return 1; 3218 1.1 fvdl 3219 1.170 msaitoh return 0; 3220 1.1 fvdl } 3221 1.1 fvdl 3222 1.104 thorpej static void 3223 1.116 christos bge_attach(device_t parent, device_t self, void *aux) 3224 1.1 fvdl { 3225 1.354 skrll struct bge_softc * const sc = device_private(self); 3226 1.354 skrll struct pci_attach_args * const pa = aux; 3227 1.164 msaitoh prop_dictionary_t dict; 3228 1.7 thorpej const struct bge_product *bp; 3229 1.16 thorpej const struct bge_revision *br; 3230 1.143 tron pci_chipset_tag_t pc; 3231 1.1 fvdl const char *intrstr = NULL; 3232 1.330 msaitoh uint32_t hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5; 3233 1.170 msaitoh uint32_t command; 3234 1.1 fvdl struct ifnet *ifp; 3235 1.331 msaitoh struct mii_data * const mii = &sc->bge_mii; 3236 1.342 msaitoh uint32_t misccfg, mimode, macmode; 3237 1.126 christos void * kva; 3238 1.1 fvdl u_char eaddr[ETHER_ADDR_LEN]; 3239 1.216 msaitoh pcireg_t memtype, subid, reg; 3240 1.1 fvdl bus_addr_t memaddr; 3241 1.170 msaitoh uint32_t pm_ctl; 3242 1.174 martin bool no_seeprom; 3243 1.342 msaitoh int capmask, trys; 3244 1.269 msaitoh int mii_flags; 3245 1.273 msaitoh int map_flags; 3246 1.266 christos char intrbuf[PCI_INTRSTR_LEN]; 3247 1.87 perry 3248 1.7 thorpej bp = bge_lookup(pa); 3249 1.7 thorpej KASSERT(bp != NULL); 3250 1.7 thorpej 3251 1.141 jmcneill sc->sc_pc = pa->pa_pc; 3252 1.141 jmcneill sc->sc_pcitag = pa->pa_tag; 3253 1.138 joerg sc->bge_dev = self; 3254 1.1 fvdl 3255 1.216 msaitoh sc->bge_pa = *pa; 3256 1.172 msaitoh pc = sc->sc_pc; 3257 1.172 msaitoh subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); 3258 1.172 msaitoh 3259 1.30 thorpej aprint_naive(": Ethernet controller\n"); 3260 1.325 msaitoh aprint_normal(": %s Ethernet\n", bp->bp_name); 3261 1.1 fvdl 3262 1.1 fvdl /* 3263 1.1 fvdl * 
Map control/status registers. 3264 1.1 fvdl */ 3265 1.1 fvdl DPRINTFN(5, ("Map control/status regs\n")); 3266 1.141 jmcneill command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3267 1.1 fvdl command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 3268 1.141 jmcneill pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 3269 1.141 jmcneill command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3270 1.1 fvdl 3271 1.1 fvdl if (!(command & PCI_COMMAND_MEM_ENABLE)) { 3272 1.138 joerg aprint_error_dev(sc->bge_dev, 3273 1.138 joerg "failed to enable memory mapping!\n"); 3274 1.1 fvdl return; 3275 1.1 fvdl } 3276 1.1 fvdl 3277 1.1 fvdl DPRINTFN(5, ("pci_mem_find\n")); 3278 1.141 jmcneill memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 3279 1.178 msaitoh switch (memtype) { 3280 1.29 itojun case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3281 1.29 itojun case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3282 1.275 msaitoh #if 0 3283 1.1 fvdl if (pci_mapreg_map(pa, BGE_PCI_BAR0, 3284 1.29 itojun memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 3285 1.227 msaitoh &memaddr, &sc->bge_bsize) == 0) 3286 1.1 fvdl break; 3287 1.275 msaitoh #else 3288 1.275 msaitoh /* 3289 1.275 msaitoh * Workaround for PCI prefetchable bit. Some BCM5717-5720 based 3290 1.275 msaitoh * system get NMI on boot (PR#48451). This problem might not be 3291 1.275 msaitoh * the driver's bug but our PCI common part's bug. Until we 3292 1.275 msaitoh * find a real reason, we ignore the prefetchable bit. 3293 1.275 msaitoh */ 3294 1.275 msaitoh if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0, 3295 1.275 msaitoh memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) { 3296 1.275 msaitoh map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3297 1.275 msaitoh if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize, 3298 1.275 msaitoh map_flags, &sc->bge_bhandle) == 0) { 3299 1.275 msaitoh sc->bge_btag = pa->pa_memt; 3300 1.275 msaitoh break; 3301 1.275 msaitoh } 3302 1.275 msaitoh } 3303 1.275 msaitoh #endif 3304 1.323 mrg /* FALLTHROUGH */ 3305 1.1 fvdl default: 3306 1.138 joerg aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 3307 1.1 fvdl return; 3308 1.1 fvdl } 3309 1.1 fvdl 3310 1.386 skrll sc->bge_txrx_stopping = false; 3311 1.386 skrll 3312 1.215 msaitoh /* Save various chip information. */ 3313 1.215 msaitoh sc->bge_chipid = bge_chipid(pa); 3314 1.216 msaitoh sc->bge_phy_addr = bge_phy_addr(sc); 3315 1.76 cube 3316 1.303 msaitoh if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS, 3317 1.303 msaitoh &sc->bge_pciecap, NULL) != 0) { 3318 1.171 msaitoh /* PCIe */ 3319 1.261 msaitoh sc->bge_flags |= BGEF_PCIE; 3320 1.253 msaitoh /* Extract supported maximum payload size. 
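 * PCIE_DCAP_MAX_PAYLOAD encodes the size as 128 << n bytes
 * (0 -> 128, ..., 5 -> 4096), which is what the shift below
 * computes.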
*/ 3321 1.253 msaitoh reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3322 1.253 msaitoh sc->bge_pciecap + PCIE_DCAP); 3323 1.253 msaitoh sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD); 3324 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 3325 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 3326 1.216 msaitoh sc->bge_expmrq = 2048; 3327 1.216 msaitoh else 3328 1.216 msaitoh sc->bge_expmrq = 4096; 3329 1.177 msaitoh bge_set_max_readrq(sc); 3330 1.303 msaitoh } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) { 3331 1.303 msaitoh /* PCIe without PCIe cap */ 3332 1.303 msaitoh sc->bge_flags |= BGEF_PCIE; 3333 1.171 msaitoh } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & 3334 1.171 msaitoh BGE_PCISTATE_PCI_BUSMODE) == 0) { 3335 1.171 msaitoh /* PCI-X */ 3336 1.261 msaitoh sc->bge_flags |= BGEF_PCIX; 3337 1.180 msaitoh if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, 3338 1.180 msaitoh &sc->bge_pcixcap, NULL) == 0) 3339 1.180 msaitoh aprint_error_dev(sc->bge_dev, 3340 1.180 msaitoh "unable to find PCIX capability\n"); 3341 1.171 msaitoh } 3342 1.76 cube 3343 1.216 msaitoh if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) { 3344 1.216 msaitoh /* 3345 1.216 msaitoh * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 3346 1.216 msaitoh * can clobber the chip's PCI config-space power control 3347 1.216 msaitoh * registers, leaving the card in D3 powersave state. We do 3348 1.216 msaitoh * not have memory-mapped registers in this state, so force 3349 1.216 msaitoh * device into D0 state before starting initialization. 3350 1.216 msaitoh */ 3351 1.216 msaitoh pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD); 3352 1.331 msaitoh pm_ctl &= ~(PCI_PWR_D0 | PCI_PWR_D1 | PCI_PWR_D2 | PCI_PWR_D3); 3353 1.216 msaitoh pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 3354 1.216 msaitoh pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 3355 1.348 andvar DELAY(1000); /* 27 usec is allegedly sufficient */ 3356 1.216 msaitoh } 3357 1.216 msaitoh 3358 1.215 msaitoh /* Save chipset family. */ 3359 1.215 msaitoh switch (BGE_ASICREV(sc->bge_chipid)) { 3360 1.215 msaitoh case BGE_ASICREV_BCM5717: 3361 1.216 msaitoh case BGE_ASICREV_BCM5719: 3362 1.216 msaitoh case BGE_ASICREV_BCM5720: 3363 1.261 msaitoh sc->bge_flags |= BGEF_5717_PLUS; 3364 1.257 msaitoh /* FALLTHROUGH */ 3365 1.327 msaitoh case BGE_ASICREV_BCM5762: 3366 1.257 msaitoh case BGE_ASICREV_BCM57765: 3367 1.257 msaitoh case BGE_ASICREV_BCM57766: 3368 1.257 msaitoh if (!BGE_IS_5717_PLUS(sc)) 3369 1.261 msaitoh sc->bge_flags |= BGEF_57765_FAMILY; 3370 1.261 msaitoh sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS | 3371 1.261 msaitoh BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE; 3372 1.327 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 3373 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 3374 1.327 msaitoh /* 3375 1.327 msaitoh * Enable work around for DMA engine miscalculation 3376 1.327 msaitoh * of TXMBUF available space. 3377 1.327 msaitoh */ 3378 1.327 msaitoh sc->bge_flags |= BGEF_RDMA_BUG; 3379 1.327 msaitoh 3380 1.327 msaitoh if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && 3381 1.327 msaitoh (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) { 3382 1.327 msaitoh /* Jumbo frame on BCM5719 A0 does not work. 
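 * Clear BGEF_JUMBO_CAPABLE so the jumbo producer ring is never
 * configured for this revision.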
*/ 3383 1.327 msaitoh sc->bge_flags &= ~BGEF_JUMBO_CAPABLE; 3384 1.327 msaitoh } 3385 1.327 msaitoh } 3386 1.215 msaitoh break; 3387 1.215 msaitoh case BGE_ASICREV_BCM5755: 3388 1.215 msaitoh case BGE_ASICREV_BCM5761: 3389 1.215 msaitoh case BGE_ASICREV_BCM5784: 3390 1.215 msaitoh case BGE_ASICREV_BCM5785: 3391 1.215 msaitoh case BGE_ASICREV_BCM5787: 3392 1.215 msaitoh case BGE_ASICREV_BCM57780: 3393 1.261 msaitoh sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS; 3394 1.215 msaitoh break; 3395 1.215 msaitoh case BGE_ASICREV_BCM5700: 3396 1.215 msaitoh case BGE_ASICREV_BCM5701: 3397 1.215 msaitoh case BGE_ASICREV_BCM5703: 3398 1.215 msaitoh case BGE_ASICREV_BCM5704: 3399 1.261 msaitoh sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE; 3400 1.215 msaitoh break; 3401 1.215 msaitoh case BGE_ASICREV_BCM5714_A0: 3402 1.215 msaitoh case BGE_ASICREV_BCM5780: 3403 1.215 msaitoh case BGE_ASICREV_BCM5714: 3404 1.261 msaitoh sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE; 3405 1.215 msaitoh /* FALLTHROUGH */ 3406 1.215 msaitoh case BGE_ASICREV_BCM5750: 3407 1.215 msaitoh case BGE_ASICREV_BCM5752: 3408 1.215 msaitoh case BGE_ASICREV_BCM5906: 3409 1.261 msaitoh sc->bge_flags |= BGEF_575X_PLUS; 3410 1.215 msaitoh /* FALLTHROUGH */ 3411 1.215 msaitoh case BGE_ASICREV_BCM5705: 3412 1.261 msaitoh sc->bge_flags |= BGEF_5705_PLUS; 3413 1.215 msaitoh break; 3414 1.215 msaitoh } 3415 1.172 msaitoh 3416 1.216 msaitoh /* Identify chips with APE processor. */ 3417 1.216 msaitoh switch (BGE_ASICREV(sc->bge_chipid)) { 3418 1.216 msaitoh case BGE_ASICREV_BCM5717: 3419 1.216 msaitoh case BGE_ASICREV_BCM5719: 3420 1.216 msaitoh case BGE_ASICREV_BCM5720: 3421 1.216 msaitoh case BGE_ASICREV_BCM5761: 3422 1.327 msaitoh case BGE_ASICREV_BCM5762: 3423 1.261 msaitoh sc->bge_flags |= BGEF_APE; 3424 1.216 msaitoh break; 3425 1.216 msaitoh } 3426 1.216 msaitoh 3427 1.262 msaitoh /* 3428 1.262 msaitoh * The 40bit DMA bug applies to the 5714/5715 controllers and is 3429 1.262 msaitoh * not actually a MAC controller bug but an issue with the embedded 3430 1.262 msaitoh * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. 3431 1.262 msaitoh */ 3432 1.262 msaitoh if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0)) 3433 1.262 msaitoh sc->bge_flags |= BGEF_40BIT_BUG; 3434 1.262 msaitoh 3435 1.216 msaitoh /* Chips with APE need BAR2 access for APE registers/memory. */ 3436 1.261 msaitoh if ((sc->bge_flags & BGEF_APE) != 0) { 3437 1.216 msaitoh memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2); 3438 1.273 msaitoh #if 0 3439 1.216 msaitoh if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0, 3440 1.227 msaitoh &sc->bge_apetag, &sc->bge_apehandle, NULL, 3441 1.227 msaitoh &sc->bge_apesize)) { 3442 1.216 msaitoh aprint_error_dev(sc->bge_dev, 3443 1.216 msaitoh "couldn't map BAR2 memory\n"); 3444 1.216 msaitoh return; 3445 1.216 msaitoh } 3446 1.273 msaitoh #else 3447 1.273 msaitoh /* 3448 1.273 msaitoh * Workaround for PCI prefetchable bit. Some BCM5717-5720 based 3449 1.273 msaitoh * system get NMI on boot (PR#48451). This problem might not be 3450 1.273 msaitoh * the driver's bug but our PCI common part's bug. Until we 3451 1.273 msaitoh * find a real reason, we ignore the prefetchable bit. 
3452 1.273 msaitoh */ 3453 1.273 msaitoh if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2, 3454 1.273 msaitoh memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) { 3455 1.273 msaitoh aprint_error_dev(sc->bge_dev, 3456 1.273 msaitoh "couldn't map BAR2 memory\n"); 3457 1.273 msaitoh return; 3458 1.273 msaitoh } 3459 1.273 msaitoh 3460 1.273 msaitoh map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3461 1.273 msaitoh if (bus_space_map(pa->pa_memt, memaddr, 3462 1.273 msaitoh sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) { 3463 1.273 msaitoh aprint_error_dev(sc->bge_dev, 3464 1.273 msaitoh "couldn't map BAR2 memory\n"); 3465 1.273 msaitoh return; 3466 1.273 msaitoh } 3467 1.273 msaitoh sc->bge_apetag = pa->pa_memt; 3468 1.273 msaitoh #endif 3469 1.216 msaitoh 3470 1.216 msaitoh /* Enable APE register/memory access by host driver. */ 3471 1.216 msaitoh reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 3472 1.216 msaitoh reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 3473 1.216 msaitoh BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 3474 1.216 msaitoh BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 3475 1.216 msaitoh pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg); 3476 1.216 msaitoh 3477 1.216 msaitoh bge_ape_lock_init(sc); 3478 1.216 msaitoh bge_ape_read_fw_ver(sc); 3479 1.216 msaitoh } 3480 1.216 msaitoh 3481 1.216 msaitoh /* Identify the chips that use an CPMU. */ 3482 1.216 msaitoh if (BGE_IS_5717_PLUS(sc) || 3483 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3484 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3485 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 3486 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 3487 1.261 msaitoh sc->bge_flags |= BGEF_CPMU_PRESENT; 3488 1.216 msaitoh 3489 1.172 msaitoh /* 3490 1.172 msaitoh * When using the BCM5701 in PCI-X mode, data corruption has 3491 1.172 msaitoh * been observed in the first few bytes of some received packets. 3492 1.172 msaitoh * Aligning the packet buffer in memory eliminates the corruption. 3493 1.172 msaitoh * Unfortunately, this misaligns the packet payloads. On platforms 3494 1.172 msaitoh * which do not support unaligned accesses, we will realign the 3495 1.172 msaitoh * payloads by copying the received packets. 3496 1.172 msaitoh */ 3497 1.172 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 3498 1.261 msaitoh sc->bge_flags & BGEF_PCIX) 3499 1.261 msaitoh sc->bge_flags |= BGEF_RX_ALIGNBUG; 3500 1.172 msaitoh 3501 1.172 msaitoh if (BGE_IS_5700_FAMILY(sc)) 3502 1.261 msaitoh sc->bge_flags |= BGEF_JUMBO_CAPABLE; 3503 1.172 msaitoh 3504 1.172 msaitoh misccfg = CSR_READ_4(sc, BGE_MISC_CFG); 3505 1.172 msaitoh misccfg &= BGE_MISCCFG_BOARD_ID_MASK; 3506 1.172 msaitoh 3507 1.172 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3508 1.172 msaitoh (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 3509 1.172 msaitoh misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 3510 1.261 msaitoh sc->bge_flags |= BGEF_IS_5788; 3511 1.172 msaitoh 3512 1.172 msaitoh /* 3513 1.172 msaitoh * Some controllers seem to require a special firmware to use 3514 1.172 msaitoh * TSO. But the firmware is not available to FreeBSD and Linux 3515 1.172 msaitoh * claims that the TSO performed by the firmware is slower than 3516 1.172 msaitoh * hardware based TSO. Moreover the firmware based TSO has one 3517 1.172 msaitoh * known bug which can't handle TSO if ethernet header + IP/TCP 3518 1.172 msaitoh * header is greater than 80 bytes. 
The workaround for the TSO 3519 1.172 msaitoh * bug exists, but it seems to be more expensive than not using 3520 1.390 andvar * TSO at all. Some hardware also has the TSO bug, so limit 3521 1.172 msaitoh * TSO to the controllers that are not affected by TSO issues 3522 1.172 msaitoh * (e.g. 5755 or higher). 3523 1.172 msaitoh */ 3524 1.172 msaitoh if (BGE_IS_5755_PLUS(sc)) { 3525 1.172 msaitoh /* 3526 1.172 msaitoh * BCM5754 and BCM5787 share the same ASIC id, so an 3527 1.172 msaitoh * explicit device id check is required. 3528 1.172 msaitoh */ 3529 1.172 msaitoh if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 3530 1.172 msaitoh (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 3531 1.261 msaitoh sc->bge_flags |= BGEF_TSO; 3532 1.316 bouyer /* TSO on BCM5719 A0 does not work. */ 3533 1.316 bouyer if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && 3534 1.316 bouyer (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) 3535 1.316 bouyer sc->bge_flags &= ~BGEF_TSO; 3536 1.172 msaitoh } 3537 1.172 msaitoh 3538 1.220 msaitoh capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */ 3539 1.172 msaitoh if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 3540 1.172 msaitoh (misccfg == 0x4000 || misccfg == 0x8000)) || 3541 1.172 msaitoh (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3542 1.172 msaitoh PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3543 1.172 msaitoh (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 3544 1.172 msaitoh PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 3545 1.172 msaitoh PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 3546 1.172 msaitoh (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3547 1.172 msaitoh (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 3548 1.172 msaitoh PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 3549 1.172 msaitoh PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 3550 1.172 msaitoh PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 3551 1.216 msaitoh PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 3552 1.216 msaitoh PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 || 3553 1.220 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3554 1.270 msaitoh /* These chips are 10/100 only. */ 3555 1.220 msaitoh capmask &= ~BMSR_EXTSTAT; 3556 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3557 1.220 msaitoh } 3558 1.172 msaitoh 3559 1.172 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3560 1.172 msaitoh (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3561 1.172 msaitoh (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 3562 1.220 msaitoh sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) 3563 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3564 1.172 msaitoh 3565 1.220 msaitoh /* Set various PHY bug flags. 
*/ 3566 1.162 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 3567 1.162 msaitoh sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 3568 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_CRC_BUG; 3569 1.162 msaitoh if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 3570 1.162 msaitoh BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 3571 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_ADC_BUG; 3572 1.162 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 3573 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG; 3574 1.220 msaitoh if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3575 1.220 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && 3576 1.220 msaitoh PCI_VENDOR(subid) == PCI_VENDOR_DELL) 3577 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_NO_3LED; 3578 1.172 msaitoh if (BGE_IS_5705_PLUS(sc) && 3579 1.172 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 3580 1.172 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 3581 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 && 3582 1.257 msaitoh !BGE_IS_57765_PLUS(sc)) { 3583 1.162 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 3584 1.172 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3585 1.172 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3586 1.162 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 3587 1.162 msaitoh if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 3588 1.162 msaitoh PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 3589 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_JITTER_BUG; 3590 1.162 msaitoh if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 3591 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM; 3592 1.216 msaitoh } else 3593 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_BER_BUG; 3594 1.162 msaitoh } 3595 1.162 msaitoh 3596 1.174 martin /* 3597 1.174 martin * SEEPROM check. 3598 1.174 martin * First check if firmware knows we do not have SEEPROM. 3599 1.174 martin */ 3600 1.180 msaitoh if (prop_dictionary_get_bool(device_properties(self), 3601 1.367 skrll "without-seeprom", &no_seeprom) && no_seeprom) 3602 1.330 msaitoh sc->bge_flags |= BGEF_NO_EEPROM; 3603 1.174 martin 3604 1.228 msaitoh else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 3605 1.261 msaitoh sc->bge_flags |= BGEF_NO_EEPROM; 3606 1.228 msaitoh 3607 1.174 martin /* Now check the 'ROM failed' bit on the RX CPU */ 3608 1.174 martin else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 3609 1.261 msaitoh sc->bge_flags |= BGEF_NO_EEPROM; 3610 1.172 msaitoh 3611 1.177 msaitoh sc->bge_asf_mode = 0; 3612 1.216 msaitoh /* No ASF if APE present. 
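 * On APE-equipped chips the management firmware is expected to
 * run on the APE itself, so the legacy ASF handshake is not
 * enabled here.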
*/ 3613 1.261 msaitoh if ((sc->bge_flags & BGEF_APE) == 0) { 3614 1.216 msaitoh if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3615 1.216 msaitoh BGE_SRAM_DATA_SIG_MAGIC)) { 3616 1.216 msaitoh if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & 3617 1.216 msaitoh BGE_HWCFG_ASF) { 3618 1.216 msaitoh sc->bge_asf_mode |= ASF_ENABLE; 3619 1.216 msaitoh sc->bge_asf_mode |= ASF_STACKUP; 3620 1.216 msaitoh if (BGE_IS_575X_PLUS(sc)) 3621 1.216 msaitoh sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 3622 1.177 msaitoh } 3623 1.177 msaitoh } 3624 1.177 msaitoh } 3625 1.177 msaitoh 3626 1.318 jdolecek int counts[PCI_INTR_TYPE_SIZE] = { 3627 1.318 jdolecek [PCI_INTR_TYPE_INTX] = 1, 3628 1.318 jdolecek [PCI_INTR_TYPE_MSI] = 1, 3629 1.319 jdolecek [PCI_INTR_TYPE_MSIX] = 1, 3630 1.318 jdolecek }; 3631 1.318 jdolecek int max_type = PCI_INTR_TYPE_MSIX; 3632 1.318 jdolecek 3633 1.318 jdolecek if (!bge_can_use_msi(sc)) { 3634 1.318 jdolecek /* MSI broken, allow only INTx */ 3635 1.293 knakahar max_type = PCI_INTR_TYPE_INTX; 3636 1.318 jdolecek } 3637 1.293 knakahar 3638 1.293 knakahar if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) { 3639 1.293 knakahar aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n"); 3640 1.293 knakahar return; 3641 1.288 msaitoh } 3642 1.288 msaitoh 3643 1.293 knakahar DPRINTFN(5, ("pci_intr_string\n")); 3644 1.288 msaitoh intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf, 3645 1.288 msaitoh sizeof(intrbuf)); 3646 1.398 bouyer pci_intr_setattr(pc, &sc->bge_pihp[0], PCI_INTR_MPSAFE, true); 3647 1.288 msaitoh DPRINTFN(5, ("pci_intr_establish\n")); 3648 1.310 msaitoh sc->bge_intrhand = pci_intr_establish_xname(pc, sc->bge_pihp[0], 3649 1.310 msaitoh IPL_NET, bge_intr, sc, device_xname(sc->bge_dev)); 3650 1.293 knakahar if (sc->bge_intrhand == NULL) { 3651 1.293 knakahar pci_intr_release(pc, sc->bge_pihp, 1); 3652 1.318 jdolecek sc->bge_pihp = NULL; 3653 1.288 msaitoh 3654 1.318 jdolecek aprint_error_dev(self, "couldn't establish interrupt"); 3655 1.318 jdolecek if (intrstr != NULL) 3656 1.318 jdolecek aprint_error(" at %s", intrstr); 3657 1.318 jdolecek aprint_error("\n"); 3658 1.288 msaitoh return; 3659 1.288 msaitoh } 3660 1.288 msaitoh aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 3661 1.288 msaitoh 3662 1.318 jdolecek switch (pci_intr_type(pc, sc->bge_pihp[0])) { 3663 1.318 jdolecek case PCI_INTR_TYPE_MSIX: 3664 1.318 jdolecek case PCI_INTR_TYPE_MSI: 3665 1.318 jdolecek KASSERT(bge_can_use_msi(sc)); 3666 1.318 jdolecek sc->bge_flags |= BGEF_MSI; 3667 1.318 jdolecek break; 3668 1.318 jdolecek default: 3669 1.318 jdolecek /* nothing to do */ 3670 1.318 jdolecek break; 3671 1.318 jdolecek } 3672 1.318 jdolecek 3673 1.375 skrll char wqname[MAXCOMLEN]; 3674 1.375 skrll snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->bge_dev)); 3675 1.375 skrll int error = workqueue_create(&sc->sc_reset_wq, wqname, 3676 1.375 skrll bge_handle_reset_work, sc, PRI_NONE, IPL_SOFTCLOCK, 3677 1.375 skrll WQ_MPSAFE); 3678 1.375 skrll if (error) { 3679 1.375 skrll aprint_error_dev(sc->bge_dev, 3680 1.375 skrll "unable to create reset workqueue\n"); 3681 1.375 skrll return; 3682 1.375 skrll } 3683 1.375 skrll 3684 1.375 skrll 3685 1.288 msaitoh /* 3686 1.288 msaitoh * All controllers except BCM5700 supports tagged status but 3687 1.288 msaitoh * we use tagged status only for MSI case on BCM5717. Otherwise 3688 1.288 msaitoh * MSI on BCM5717 does not work. 
3689 1.288 msaitoh */ 3690 1.307 msaitoh if (BGE_IS_57765_PLUS(sc) && sc->bge_flags & BGEF_MSI) 3691 1.288 msaitoh sc->bge_flags |= BGEF_TAGGED_STATUS; 3692 1.288 msaitoh 3693 1.248 msaitoh /* 3694 1.248 msaitoh * Reset NVRAM before bge_reset(). It's required to acquire NVRAM 3695 1.248 msaitoh * lock in bge_reset(). 3696 1.248 msaitoh */ 3697 1.341 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR, 3698 1.248 msaitoh BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 3699 1.248 msaitoh delay(1000); 3700 1.341 msaitoh BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 3701 1.248 msaitoh 3702 1.248 msaitoh bge_stop_fw(sc); 3703 1.353 buhrow bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 3704 1.248 msaitoh if (bge_reset(sc)) 3705 1.248 msaitoh aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 3706 1.243 msaitoh 3707 1.241 msaitoh /* 3708 1.241 msaitoh * Read the hardware config word in the first 32k of NIC internal 3709 1.241 msaitoh * memory, or fall back to the config word in the EEPROM. 3710 1.241 msaitoh * Note: on some BCM5700 cards, this value appears to be unset. 3711 1.241 msaitoh */ 3712 1.267 msaitoh hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0; 3713 1.248 msaitoh if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3714 1.241 msaitoh BGE_SRAM_DATA_SIG_MAGIC) { 3715 1.241 msaitoh uint32_t tmp; 3716 1.241 msaitoh 3717 1.241 msaitoh hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); 3718 1.241 msaitoh tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >> 3719 1.241 msaitoh BGE_SRAM_DATA_VER_SHIFT; 3720 1.241 msaitoh if ((0 < tmp) && (tmp < 0x100)) 3721 1.241 msaitoh hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2); 3722 1.261 msaitoh if (sc->bge_flags & BGEF_PCIE) 3723 1.241 msaitoh hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3); 3724 1.278 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 3725 1.241 msaitoh hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4); 3726 1.267 msaitoh if (BGE_IS_5717_PLUS(sc)) 3727 1.268 msaitoh hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5); 3728 1.261 msaitoh } else if (!(sc->bge_flags & BGEF_NO_EEPROM)) { 3729 1.241 msaitoh bge_read_eeprom(sc, (void *)&hwcfg, 3730 1.241 msaitoh BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 3731 1.241 msaitoh hwcfg = be32toh(hwcfg); 3732 1.241 msaitoh } 3733 1.267 msaitoh aprint_normal_dev(sc->bge_dev, 3734 1.267 msaitoh "HW config %08x, %08x, %08x, %08x %08x\n", 3735 1.267 msaitoh hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5); 3736 1.241 msaitoh 3737 1.353 buhrow bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 3738 1.353 buhrow bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 3739 1.177 msaitoh 3740 1.1 fvdl if (bge_chipinit(sc)) { 3741 1.138 joerg aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 3742 1.1 fvdl bge_release_resources(sc); 3743 1.1 fvdl return; 3744 1.1 fvdl } 3745 1.1 fvdl 3746 1.342 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 3747 1.342 msaitoh BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, 3748 1.342 msaitoh BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUTEN1); 3749 1.342 msaitoh DELAY(100); 3750 1.342 msaitoh } 3751 1.342 msaitoh 3752 1.342 msaitoh /* Set MI_MODE */ 3753 1.342 msaitoh mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 3754 1.342 msaitoh if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) 3755 1.342 msaitoh mimode |= BGE_MIMODE_500KHZ_CONST; 3756 1.342 msaitoh else 3757 1.342 msaitoh mimode |= BGE_MIMODE_BASE; 3758 1.342 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MI_MODE, mimode); 3759 1.342 msaitoh DELAY(80); 3760 1.342 msaitoh 3761 1.1 fvdl /* 3762 1.203 msaitoh * Get station address from the 
EEPROM. 3763 1.1 fvdl */ 3764 1.151 cegger if (bge_get_eaddr(sc, eaddr)) { 3765 1.178 msaitoh aprint_error_dev(sc->bge_dev, 3766 1.178 msaitoh "failed to read station address\n"); 3767 1.1 fvdl bge_release_resources(sc); 3768 1.1 fvdl return; 3769 1.1 fvdl } 3770 1.1 fvdl 3771 1.51 fvdl br = bge_lookup_rev(sc->bge_chipid); 3772 1.51 fvdl 3773 1.16 thorpej if (br == NULL) { 3774 1.172 msaitoh aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 3775 1.172 msaitoh sc->bge_chipid); 3776 1.16 thorpej } else { 3777 1.172 msaitoh aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 3778 1.172 msaitoh br->br_name, sc->bge_chipid); 3779 1.16 thorpej } 3780 1.30 thorpej aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 3781 1.1 fvdl 3782 1.1 fvdl /* Allocate the general information block and ring buffers. */ 3783 1.317 bouyer if (pci_dma64_available(pa)) { 3784 1.41 fvdl sc->bge_dmatag = pa->pa_dmat64; 3785 1.317 bouyer sc->bge_dmatag32 = pa->pa_dmat; 3786 1.317 bouyer sc->bge_dma64 = true; 3787 1.317 bouyer } else { 3788 1.41 fvdl sc->bge_dmatag = pa->pa_dmat; 3789 1.317 bouyer sc->bge_dmatag32 = pa->pa_dmat; 3790 1.317 bouyer sc->bge_dma64 = false; 3791 1.317 bouyer } 3792 1.262 msaitoh 3793 1.262 msaitoh /* 40bit DMA workaround */ 3794 1.262 msaitoh if (sizeof(bus_addr_t) > 4) { 3795 1.262 msaitoh if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) { 3796 1.262 msaitoh bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */ 3797 1.262 msaitoh 3798 1.351 martin if (bus_dmatag_subregion(olddmatag, 0, 3799 1.351 martin (bus_addr_t)__MASK(40), 3800 1.377 skrll &(sc->bge_dmatag), BUS_DMA_WAITOK) != 0) { 3801 1.262 msaitoh aprint_error_dev(self, 3802 1.262 msaitoh "WARNING: failed to restrict dma range," 3803 1.262 msaitoh " falling back to parent bus dma range\n"); 3804 1.262 msaitoh sc->bge_dmatag = olddmatag; 3805 1.262 msaitoh } 3806 1.262 msaitoh } 3807 1.262 msaitoh } 3808 1.320 bouyer SLIST_INIT(&sc->txdma_list); 3809 1.1 fvdl DPRINTFN(5, ("bus_dmamem_alloc\n")); 3810 1.1 fvdl if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 3811 1.227 msaitoh PAGE_SIZE, 0, &sc->bge_ring_seg, 1, 3812 1.377 skrll &sc->bge_ring_rseg, BUS_DMA_WAITOK)) { 3813 1.138 joerg aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 3814 1.1 fvdl return; 3815 1.1 fvdl } 3816 1.1 fvdl DPRINTFN(5, ("bus_dmamem_map\n")); 3817 1.227 msaitoh if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg, 3818 1.227 msaitoh sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva, 3819 1.377 skrll BUS_DMA_WAITOK)) { 3820 1.138 joerg aprint_error_dev(sc->bge_dev, 3821 1.138 joerg "can't map DMA buffers (%zu bytes)\n", 3822 1.138 joerg sizeof(struct bge_ring_data)); 3823 1.227 msaitoh bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3824 1.227 msaitoh sc->bge_ring_rseg); 3825 1.1 fvdl return; 3826 1.1 fvdl } 3827 1.388 andvar DPRINTFN(5, ("bus_dmamap_create\n")); 3828 1.1 fvdl if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 3829 1.1 fvdl sizeof(struct bge_ring_data), 0, 3830 1.377 skrll BUS_DMA_WAITOK, &sc->bge_ring_map)) { 3831 1.138 joerg aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 3832 1.1 fvdl bus_dmamem_unmap(sc->bge_dmatag, kva, 3833 1.1 fvdl sizeof(struct bge_ring_data)); 3834 1.227 msaitoh bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3835 1.227 msaitoh sc->bge_ring_rseg); 3836 1.1 fvdl return; 3837 1.1 fvdl } 3838 1.388 andvar DPRINTFN(5, ("bus_dmamap_load\n")); 3839 1.1 fvdl if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 3840 1.1 fvdl sizeof(struct 
bge_ring_data), NULL, 3841 1.377 skrll BUS_DMA_WAITOK)) { 3842 1.1 fvdl bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 3843 1.1 fvdl bus_dmamem_unmap(sc->bge_dmatag, kva, 3844 1.1 fvdl sizeof(struct bge_ring_data)); 3845 1.227 msaitoh bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3846 1.227 msaitoh sc->bge_ring_rseg); 3847 1.1 fvdl return; 3848 1.1 fvdl } 3849 1.1 fvdl 3850 1.1 fvdl DPRINTFN(5, ("bzero\n")); 3851 1.1 fvdl sc->bge_rdata = (struct bge_ring_data *)kva; 3852 1.1 fvdl 3853 1.19 mjl memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 3854 1.1 fvdl 3855 1.1 fvdl /* Try to allocate memory for jumbo buffers. */ 3856 1.166 msaitoh if (BGE_IS_JUMBO_CAPABLE(sc)) { 3857 1.44 hannken if (bge_alloc_jumbo_mem(sc)) { 3858 1.138 joerg aprint_error_dev(sc->bge_dev, 3859 1.138 joerg "jumbo buffer allocation failed\n"); 3860 1.44 hannken } else 3861 1.44 hannken sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3862 1.44 hannken } 3863 1.1 fvdl 3864 1.1 fvdl /* Set default tuneable values. */ 3865 1.1 fvdl sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 3866 1.1 fvdl sc->bge_rx_coal_ticks = 150; 3867 1.25 jonathan sc->bge_rx_max_coal_bds = 64; 3868 1.25 jonathan sc->bge_tx_coal_ticks = 300; 3869 1.25 jonathan sc->bge_tx_max_coal_bds = 400; 3870 1.172 msaitoh if (BGE_IS_5705_PLUS(sc)) { 3871 1.95 jonathan sc->bge_tx_coal_ticks = (12 * 5); 3872 1.146 mlelstv sc->bge_tx_max_coal_bds = (12 * 5); 3873 1.138 joerg aprint_verbose_dev(sc->bge_dev, 3874 1.138 joerg "setting short Tx thresholds\n"); 3875 1.95 jonathan } 3876 1.1 fvdl 3877 1.216 msaitoh if (BGE_IS_5717_PLUS(sc)) 3878 1.202 tsutsui sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3879 1.202 tsutsui else if (BGE_IS_5705_PLUS(sc)) 3880 1.172 msaitoh sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 3881 1.172 msaitoh else 3882 1.172 msaitoh sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3883 1.172 msaitoh 3884 1.394 skrll sc->sc_mcast_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 3885 1.386 skrll sc->sc_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 3886 1.375 skrll 3887 1.1 fvdl /* Set up ifnet structure */ 3888 1.1 fvdl ifp = &sc->ethercom.ec_if; 3889 1.1 fvdl ifp->if_softc = sc; 3890 1.1 fvdl ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3891 1.375 skrll ifp->if_extflags = IFEF_MPSAFE; 3892 1.1 fvdl ifp->if_ioctl = bge_ioctl; 3893 1.141 jmcneill ifp->if_stop = bge_stop; 3894 1.1 fvdl ifp->if_start = bge_start; 3895 1.1 fvdl ifp->if_init = bge_init; 3896 1.315 riastrad IFQ_SET_MAXLEN(&ifp->if_snd, uimax(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 3897 1.1 fvdl IFQ_SET_READY(&ifp->if_snd); 3898 1.115 tsutsui DPRINTFN(5, ("strcpy if_xname\n")); 3899 1.138 joerg strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 3900 1.1 fvdl 3901 1.157 msaitoh if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 3902 1.18 thorpej sc->ethercom.ec_if.if_capabilities |= 3903 1.172 msaitoh IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 3904 1.172 msaitoh #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 3905 1.172 msaitoh sc->ethercom.ec_if.if_capabilities |= 3906 1.88 yamt IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 3907 1.88 yamt IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 3908 1.172 msaitoh #endif 3909 1.87 perry sc->ethercom.ec_capabilities |= 3910 1.1 fvdl ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 3911 1.335 msaitoh sc->ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; 3912 1.1 fvdl 3913 1.261 msaitoh if (sc->bge_flags & BGEF_TSO) 3914 1.95 jonathan sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 3915 1.95 jonathan 3916 1.1 fvdl 
/* 3917 1.1 fvdl * Do MII setup. 3918 1.1 fvdl */ 3919 1.1 fvdl DPRINTFN(5, ("mii setup\n")); 3920 1.331 msaitoh mii->mii_ifp = ifp; 3921 1.331 msaitoh mii->mii_readreg = bge_miibus_readreg; 3922 1.331 msaitoh mii->mii_writereg = bge_miibus_writereg; 3923 1.331 msaitoh mii->mii_statchg = bge_miibus_statchg; 3924 1.1 fvdl 3925 1.1 fvdl /* 3926 1.203 msaitoh * Figure out what sort of media we have by checking the hardware 3927 1.241 msaitoh * config word. Note: on some BCM5700 cards, this value appears to be 3928 1.241 msaitoh * unset. If that's the case, we have to rely on identifying the NIC 3929 1.241 msaitoh * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41. 3930 1.241 msaitoh * The SysKonnect SK-9D41 is a 1000baseSX card. 3931 1.1 fvdl */ 3932 1.340 msaitoh if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 || 3933 1.161 msaitoh (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 3934 1.270 msaitoh if (BGE_IS_5705_PLUS(sc)) { 3935 1.270 msaitoh sc->bge_flags |= BGEF_FIBER_MII; 3936 1.270 msaitoh sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3937 1.270 msaitoh } else 3938 1.270 msaitoh sc->bge_flags |= BGEF_FIBER_TBI; 3939 1.161 msaitoh } 3940 1.1 fvdl 3941 1.261 msaitoh /* Set bge_phy_flags before prop_dictionary_set_uint32() */ 3942 1.261 msaitoh if (BGE_IS_JUMBO_CAPABLE(sc)) 3943 1.261 msaitoh sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE; 3944 1.261 msaitoh 3945 1.195 jym /* set phyflags and chipid before mii_attach() */ 3946 1.167 msaitoh dict = device_properties(self); 3947 1.261 msaitoh prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags); 3948 1.195 jym prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid); 3949 1.167 msaitoh 3950 1.342 msaitoh macmode = CSR_READ_4(sc, BGE_MAC_MODE); 3951 1.342 msaitoh macmode &= ~BGE_MACMODE_PORTMODE; 3952 1.334 msaitoh /* Initialize ifmedia structures. */ 3953 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) { 3954 1.342 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, 3955 1.342 msaitoh macmode | BGE_PORTMODE_TBI); 3956 1.342 msaitoh DELAY(40); 3957 1.342 msaitoh 3958 1.385 skrll struct ifmedia * const ifm = &sc->bge_ifmedia; 3959 1.385 skrll sc->ethercom.ec_ifmedia = ifm; 3960 1.386 skrll 3961 1.386 skrll ifmedia_init_with_lock(ifm, IFM_IMASK, 3962 1.386 skrll bge_ifmedia_upd, bge_ifmedia_sts, sc->sc_intr_lock); 3963 1.385 skrll ifmedia_add(ifm, IFM_ETHER | IFM_1000_SX, 0, NULL); 3964 1.385 skrll ifmedia_add(ifm, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); 3965 1.385 skrll ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL); 3966 1.385 skrll ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); 3967 1.155 he /* Pretend the user requested this setting */ 3968 1.162 msaitoh sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 3969 1.1 fvdl } else { 3970 1.342 msaitoh uint16_t phyreg; 3971 1.342 msaitoh int rv; 3972 1.1 fvdl /* 3973 1.177 msaitoh * Do transceiver setup and tell the firmware the 3974 1.177 msaitoh * driver is down so we can try to get access the 3975 1.177 msaitoh * probe if ASF is running. Retry a couple of times 3976 1.177 msaitoh * if we get a conflict with the ASF firmware accessing 3977 1.177 msaitoh * the PHY. 
3978 1.1 fvdl */
3979 1.342 msaitoh if (sc->bge_flags & BGEF_FIBER_MII)
3980 1.342 msaitoh macmode |= BGE_PORTMODE_GMII;
3981 1.342 msaitoh else
3982 1.342 msaitoh macmode |= BGE_PORTMODE_MII;
3983 1.342 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, macmode);
3984 1.342 msaitoh DELAY(40);
3985 1.342 msaitoh
3993 1.342 msaitoh trys = 0;
3994 1.177 msaitoh BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3995 1.334 msaitoh sc->ethercom.ec_mii = mii;
3996 1.386 skrll ifmedia_init_with_lock(&mii->mii_media, 0, bge_ifmedia_upd,
3997 1.386 skrll bge_ifmedia_sts, sc->sc_intr_lock);
3998 1.269 msaitoh mii_flags = MIIF_DOPAUSE;
3999 1.269 msaitoh if (sc->bge_flags & BGEF_FIBER_MII)
4000 1.269 msaitoh mii_flags |= MIIF_HAVEFIBER;
4001 1.342 msaitoh again:
4002 1.342 msaitoh bge_asf_driver_up(sc);
4003 1.394 skrll mutex_enter(sc->sc_intr_lock);
4004 1.342 msaitoh rv = bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
4005 1.342 msaitoh MII_BMCR, &phyreg);
4006 1.342 msaitoh if ((rv != 0) || ((phyreg & BMCR_PDOWN) != 0)) {
4007 1.342 msaitoh int i;
4008 1.342 msaitoh
4009 1.342 msaitoh bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
4010 1.342 msaitoh MII_BMCR, BMCR_RESET);
4011 1.342 msaitoh /* Wait up to 500ms for it to complete. */
4012 1.342 msaitoh for (i = 0; i < 500; i++) {
4013 1.342 msaitoh bge_miibus_readreg(sc->bge_dev,
4014 1.342 msaitoh sc->bge_phy_addr, MII_BMCR, &phyreg);
4015 1.342 msaitoh if ((phyreg & BMCR_RESET) == 0)
4016 1.342 msaitoh break;
4017 1.342 msaitoh DELAY(1000);
4018 1.342 msaitoh }
4019 1.342 msaitoh }
4020 1.394 skrll mutex_exit(sc->sc_intr_lock);
4021 1.342 msaitoh
4022 1.331 msaitoh mii_attach(sc->bge_dev, mii, capmask, sc->bge_phy_addr,
4023 1.269 msaitoh MII_OFFSET_ANY, mii_flags);
4024 1.87 perry
4025 1.342 msaitoh if (LIST_EMPTY(&mii->mii_phys) && (trys++ < 4))
4026 1.342 msaitoh goto again;
4027 1.342 msaitoh
4028 1.331 msaitoh if (LIST_EMPTY(&mii->mii_phys)) {
4029 1.138 joerg aprint_error_dev(sc->bge_dev, "no PHY found!\n");
4030 1.331 msaitoh ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
4031 1.331 msaitoh 0, NULL);
4032 1.331 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
4033 1.1 fvdl } else
4034 1.331 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
4035 1.177 msaitoh
4036 1.177 msaitoh /*
4037 1.177 msaitoh * Now tell the firmware we are going up after probing the PHY.
4038 1.177 msaitoh */
4039 1.177 msaitoh if (sc->bge_asf_mode & ASF_STACKUP)
4040 1.177 msaitoh BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4041 1.1 fvdl }
4042 1.1 fvdl
4043 1.1 fvdl /*
4044 1.1 fvdl * Call MI attach routine.
4045 1.1 fvdl */ 4046 1.375 skrll DPRINTFN(5, ("if_initialize\n")); 4047 1.375 skrll if_initialize(ifp); 4048 1.375 skrll ifp->if_percpuq = if_percpuq_create(ifp); 4049 1.299 ozaki if_deferred_start_init(ifp, NULL); 4050 1.375 skrll if_register(ifp); 4051 1.375 skrll 4052 1.1 fvdl DPRINTFN(5, ("ether_ifattach\n")); 4053 1.1 fvdl ether_ifattach(ifp, eaddr); 4054 1.186 msaitoh ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb); 4055 1.375 skrll 4056 1.148 mlelstv rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev), 4057 1.277 tls RND_TYPE_NET, RND_FLAG_DEFAULT); 4058 1.72 thorpej #ifdef BGE_EVENT_COUNTERS 4059 1.72 thorpej /* 4060 1.72 thorpej * Attach event counters. 4061 1.72 thorpej */ 4062 1.72 thorpej evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 4063 1.138 joerg NULL, device_xname(sc->bge_dev), "intr"); 4064 1.302 msaitoh evcnt_attach_dynamic(&sc->bge_ev_intr_spurious, EVCNT_TYPE_INTR, 4065 1.302 msaitoh NULL, device_xname(sc->bge_dev), "intr_spurious"); 4066 1.302 msaitoh evcnt_attach_dynamic(&sc->bge_ev_intr_spurious2, EVCNT_TYPE_INTR, 4067 1.302 msaitoh NULL, device_xname(sc->bge_dev), "intr_spurious2"); 4068 1.72 thorpej evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 4069 1.138 joerg NULL, device_xname(sc->bge_dev), "tx_xoff"); 4070 1.72 thorpej evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 4071 1.138 joerg NULL, device_xname(sc->bge_dev), "tx_xon"); 4072 1.72 thorpej evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 4073 1.138 joerg NULL, device_xname(sc->bge_dev), "rx_xoff"); 4074 1.72 thorpej evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 4075 1.138 joerg NULL, device_xname(sc->bge_dev), "rx_xon"); 4076 1.72 thorpej evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 4077 1.138 joerg NULL, device_xname(sc->bge_dev), "rx_macctl"); 4078 1.72 thorpej evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 4079 1.138 joerg NULL, device_xname(sc->bge_dev), "xoffentered"); 4080 1.72 thorpej #endif /* BGE_EVENT_COUNTERS */ 4081 1.1 fvdl DPRINTFN(5, ("callout_init\n")); 4082 1.375 skrll callout_init(&sc->bge_timeout, CALLOUT_MPSAFE); 4083 1.345 thorpej callout_setfunc(&sc->bge_timeout, bge_tick, sc); 4084 1.82 jmcneill 4085 1.168 tsutsui if (pmf_device_register(self, NULL, NULL)) 4086 1.168 tsutsui pmf_class_network_register(self, ifp); 4087 1.168 tsutsui else 4088 1.141 jmcneill aprint_error_dev(self, "couldn't establish power handler\n"); 4089 1.172 msaitoh 4090 1.207 msaitoh bge_sysctl_init(sc); 4091 1.190 jruoho 4092 1.172 msaitoh #ifdef BGE_DEBUG 4093 1.172 msaitoh bge_debug_info(sc); 4094 1.172 msaitoh #endif 4095 1.394 skrll 4096 1.394 skrll sc->bge_attached = true; 4097 1.1 fvdl } 4098 1.1 fvdl 4099 1.227 msaitoh /* 4100 1.227 msaitoh * Stop all chip I/O so that the kernel's probe routines don't 4101 1.227 msaitoh * get confused by errant DMAs when rebooting. 4102 1.227 msaitoh */ 4103 1.227 msaitoh static int 4104 1.227 msaitoh bge_detach(device_t self, int flags __unused) 4105 1.227 msaitoh { 4106 1.354 skrll struct bge_softc * const sc = device_private(self); 4107 1.354 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 4108 1.227 msaitoh 4109 1.394 skrll if (!sc->bge_attached) 4110 1.394 skrll return 0; 4111 1.394 skrll 4112 1.386 skrll IFNET_LOCK(ifp); 4113 1.386 skrll 4114 1.227 msaitoh /* Stop the interface. Callouts are stopped in it. 
*/ 4115 1.227 msaitoh bge_stop(ifp, 1); 4116 1.386 skrll sc->bge_detaching = true; 4117 1.386 skrll 4118 1.386 skrll IFNET_UNLOCK(ifp); 4119 1.227 msaitoh 4120 1.227 msaitoh mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY); 4121 1.230 christos 4122 1.227 msaitoh ether_ifdetach(ifp); 4123 1.227 msaitoh if_detach(ifp); 4124 1.227 msaitoh 4125 1.344 thorpej /* Delete all remaining media. */ 4126 1.344 thorpej ifmedia_fini(&sc->bge_mii.mii_media); 4127 1.344 thorpej 4128 1.227 msaitoh bge_release_resources(sc); 4129 1.227 msaitoh 4130 1.227 msaitoh return 0; 4131 1.227 msaitoh } 4132 1.227 msaitoh 4133 1.104 thorpej static void 4134 1.104 thorpej bge_release_resources(struct bge_softc *sc) 4135 1.1 fvdl { 4136 1.1 fvdl 4137 1.301 msaitoh /* Detach sysctl */ 4138 1.301 msaitoh if (sc->bge_log != NULL) 4139 1.301 msaitoh sysctl_teardown(&sc->bge_log); 4140 1.301 msaitoh 4141 1.394 skrll callout_destroy(&sc->bge_timeout); 4142 1.394 skrll 4143 1.301 msaitoh #ifdef BGE_EVENT_COUNTERS 4144 1.301 msaitoh /* Detach event counters. */ 4145 1.301 msaitoh evcnt_detach(&sc->bge_ev_intr); 4146 1.301 msaitoh evcnt_detach(&sc->bge_ev_intr_spurious); 4147 1.301 msaitoh evcnt_detach(&sc->bge_ev_intr_spurious2); 4148 1.301 msaitoh evcnt_detach(&sc->bge_ev_tx_xoff); 4149 1.301 msaitoh evcnt_detach(&sc->bge_ev_tx_xon); 4150 1.301 msaitoh evcnt_detach(&sc->bge_ev_rx_xoff); 4151 1.301 msaitoh evcnt_detach(&sc->bge_ev_rx_xon); 4152 1.301 msaitoh evcnt_detach(&sc->bge_ev_rx_macctl); 4153 1.301 msaitoh evcnt_detach(&sc->bge_ev_xoffentered); 4154 1.301 msaitoh #endif /* BGE_EVENT_COUNTERS */ 4155 1.301 msaitoh 4156 1.227 msaitoh /* Disestablish the interrupt handler */ 4157 1.227 msaitoh if (sc->bge_intrhand != NULL) { 4158 1.227 msaitoh pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand); 4159 1.290 msaitoh pci_intr_release(sc->sc_pc, sc->bge_pihp, 1); 4160 1.227 msaitoh sc->bge_intrhand = NULL; 4161 1.227 msaitoh } 4162 1.227 msaitoh 4163 1.373 skrll if (sc->bge_cdata.bge_jumbo_buf != NULL) 4164 1.373 skrll bge_free_jumbo_mem(sc); 4165 1.373 skrll 4166 1.239 msaitoh if (sc->bge_dmatag != NULL) { 4167 1.239 msaitoh bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); 4168 1.239 msaitoh bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 4169 1.239 msaitoh bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata, 4170 1.239 msaitoh sizeof(struct bge_ring_data)); 4171 1.294 msaitoh bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 4172 1.294 msaitoh sc->bge_ring_rseg); 4173 1.239 msaitoh } 4174 1.227 msaitoh 4175 1.227 msaitoh /* Unmap the device registers */ 4176 1.227 msaitoh if (sc->bge_bsize != 0) { 4177 1.227 msaitoh bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); 4178 1.227 msaitoh sc->bge_bsize = 0; 4179 1.227 msaitoh } 4180 1.227 msaitoh 4181 1.227 msaitoh /* Unmap the APE registers */ 4182 1.227 msaitoh if (sc->bge_apesize != 0) { 4183 1.227 msaitoh bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, 4184 1.227 msaitoh sc->bge_apesize); 4185 1.227 msaitoh sc->bge_apesize = 0; 4186 1.227 msaitoh } 4187 1.395 skrll if (sc->sc_intr_lock) { 4188 1.395 skrll mutex_obj_free(sc->sc_intr_lock); 4189 1.395 skrll sc->sc_intr_lock = NULL; 4190 1.395 skrll } 4191 1.395 skrll if (sc->sc_mcast_lock) { 4192 1.395 skrll mutex_obj_free(sc->sc_mcast_lock); 4193 1.395 skrll sc->sc_mcast_lock = NULL; 4194 1.395 skrll } 4195 1.1 fvdl } 4196 1.1 fvdl 4197 1.177 msaitoh static int 4198 1.104 thorpej bge_reset(struct bge_softc *sc) 4199 1.1 fvdl { 4200 1.216 msaitoh uint32_t cachesize, command; 4201 1.216 msaitoh 
uint32_t reset, mac_mode, mac_mode_mask; 4202 1.180 msaitoh pcireg_t devctl, reg; 4203 1.76 cube int i, val; 4204 1.151 cegger void (*write_op)(struct bge_softc *, int, int); 4205 1.151 cegger 4206 1.253 msaitoh /* Make mask for BGE_MAC_MODE register. */ 4207 1.216 msaitoh mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; 4208 1.216 msaitoh if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 4209 1.216 msaitoh mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 4210 1.253 msaitoh /* Keep mac_mode_mask's bits of BGE_MAC_MODE register into mac_mode */ 4211 1.253 msaitoh mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; 4212 1.330 msaitoh 4213 1.216 msaitoh if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 4214 1.216 msaitoh (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { 4215 1.330 msaitoh if (sc->bge_flags & BGEF_PCIE) 4216 1.151 cegger write_op = bge_writemem_direct; 4217 1.178 msaitoh else 4218 1.151 cegger write_op = bge_writemem_ind; 4219 1.178 msaitoh } else 4220 1.151 cegger write_op = bge_writereg_ind; 4221 1.1 fvdl 4222 1.236 msaitoh /* 57XX step 4 */ 4223 1.236 msaitoh /* Acquire the NVM lock */ 4224 1.261 msaitoh if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 && 4225 1.232 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 && 4226 1.216 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) { 4227 1.216 msaitoh CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 4228 1.216 msaitoh for (i = 0; i < 8000; i++) { 4229 1.216 msaitoh if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & 4230 1.216 msaitoh BGE_NVRAMSWARB_GNT1) 4231 1.216 msaitoh break; 4232 1.216 msaitoh DELAY(20); 4233 1.216 msaitoh } 4234 1.216 msaitoh if (i == 8000) { 4235 1.216 msaitoh printf("%s: NVRAM lock timedout!\n", 4236 1.216 msaitoh device_xname(sc->bge_dev)); 4237 1.216 msaitoh } 4238 1.216 msaitoh } 4239 1.243 msaitoh 4240 1.216 msaitoh /* Take APE lock when performing reset. */ 4241 1.216 msaitoh bge_ape_lock(sc, BGE_APE_LOCK_GRC); 4242 1.216 msaitoh 4243 1.236 msaitoh /* 57XX step 3 */ 4244 1.1 fvdl /* Save some important PCI state. */ 4245 1.141 jmcneill cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 4246 1.236 msaitoh /* 5718 reset step 3 */ 4247 1.141 jmcneill command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 4248 1.180 msaitoh 4249 1.236 msaitoh /* 5718 reset step 5, 57XX step 5b-5d */ 4250 1.141 jmcneill pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 4251 1.172 msaitoh BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 4252 1.172 msaitoh BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 4253 1.1 fvdl 4254 1.180 msaitoh /* XXX ???: Disable fastboot on controllers that support it. */ 4255 1.134 markd if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 4256 1.172 msaitoh BGE_IS_5755_PLUS(sc)) 4257 1.119 tsutsui CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 4258 1.119 tsutsui 4259 1.236 msaitoh /* 5718 reset step 2, 57XX step 6 */ 4260 1.177 msaitoh /* 4261 1.236 msaitoh * Write the magic number to SRAM at offset 0xB50. 4262 1.177 msaitoh * When firmware finishes its initialization it will 4263 1.177 msaitoh * write ~BGE_MAGIC_NUMBER to the same location. 
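 * bge_poll_fw(), called further down in this reset path, polls until
 * the firmware signals that its initialization is complete.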
4264 1.177 msaitoh */ 4265 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 4266 1.177 msaitoh 4267 1.304 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) { 4268 1.304 msaitoh val = CSR_READ_4(sc, BGE_PCIE_LINKCTL); 4269 1.304 msaitoh val = (val & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN) 4270 1.304 msaitoh | BGE_PCIE_LINKCTL_L1_PLL_PDDIS; 4271 1.304 msaitoh CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, val); 4272 1.304 msaitoh } 4273 1.304 msaitoh 4274 1.236 msaitoh /* 5718 reset step 6, 57XX step 7 */ 4275 1.216 msaitoh reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 4276 1.76 cube /* 4277 1.76 cube * XXX: from FreeBSD/Linux; no documentation 4278 1.76 cube */ 4279 1.261 msaitoh if (sc->bge_flags & BGEF_PCIE) { 4280 1.278 msaitoh if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) && 4281 1.214 msaitoh !BGE_IS_57765_PLUS(sc) && 4282 1.216 msaitoh (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) == 4283 1.214 msaitoh (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) { 4284 1.157 msaitoh /* PCI Express 1.0 system */ 4285 1.214 msaitoh CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG, 4286 1.214 msaitoh BGE_PHY_PCIE_SCRAM_MODE); 4287 1.214 msaitoh } 4288 1.76 cube if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 4289 1.157 msaitoh /* 4290 1.157 msaitoh * Prevent PCI Express link training 4291 1.157 msaitoh * during global reset. 4292 1.157 msaitoh */ 4293 1.76 cube CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 4294 1.222 msaitoh reset |= (1 << 29); 4295 1.76 cube } 4296 1.76 cube } 4297 1.76 cube 4298 1.180 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 4299 1.180 msaitoh i = CSR_READ_4(sc, BGE_VCPU_STATUS); 4300 1.180 msaitoh CSR_WRITE_4(sc, BGE_VCPU_STATUS, 4301 1.180 msaitoh i | BGE_VCPU_STATUS_DRV_RESET); 4302 1.180 msaitoh i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 4303 1.180 msaitoh CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 4304 1.180 msaitoh i & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 4305 1.180 msaitoh } 4306 1.180 msaitoh 4307 1.161 msaitoh /* 4308 1.161 msaitoh * Set GPHY Power Down Override to leave GPHY 4309 1.161 msaitoh * powered up in D0 uninitialized. 4310 1.161 msaitoh */ 4311 1.216 msaitoh if (BGE_IS_5705_PLUS(sc) && 4312 1.261 msaitoh (sc->bge_flags & BGEF_CPMU_PRESENT) == 0) 4313 1.216 msaitoh reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 4314 1.161 msaitoh 4315 1.1 fvdl /* Issue global reset */ 4316 1.216 msaitoh write_op(sc, BGE_MISC_CFG, reset); 4317 1.151 cegger 4318 1.236 msaitoh /* 5718 reset step 7, 57XX step 8 */ 4319 1.261 msaitoh if (sc->bge_flags & BGEF_PCIE) 4320 1.180 msaitoh delay(100*1000); /* too big */ 4321 1.180 msaitoh else 4322 1.216 msaitoh delay(1000); 4323 1.151 cegger 4324 1.261 msaitoh if (sc->bge_flags & BGEF_PCIE) { 4325 1.76 cube if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 4326 1.76 cube DELAY(500000); 4327 1.76 cube /* XXX: Magic Numbers */ 4328 1.170 msaitoh reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4329 1.170 msaitoh BGE_PCI_UNKNOWN0); 4330 1.170 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4331 1.170 msaitoh BGE_PCI_UNKNOWN0, 4332 1.76 cube reg | (1 << 15)); 4333 1.76 cube } 4334 1.177 msaitoh devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4335 1.238 msaitoh sc->bge_pciecap + PCIE_DCSR); 4336 1.177 msaitoh /* Clear enable no snoop and disable relaxed ordering. 
*/ 4337 1.238 msaitoh devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD | 4338 1.238 msaitoh PCIE_DCSR_ENA_NO_SNOOP); 4339 1.216 msaitoh 4340 1.216 msaitoh /* Set PCIE max payload size to 128 for older PCIe devices */ 4341 1.261 msaitoh if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0) 4342 1.216 msaitoh devctl &= ~(0x00e0); 4343 1.179 msaitoh /* Clear device status register. Write 1b to clear */ 4344 1.238 msaitoh devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED 4345 1.238 msaitoh | PCIE_DCSR_NFED | PCIE_DCSR_CED; 4346 1.177 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4347 1.238 msaitoh sc->bge_pciecap + PCIE_DCSR, devctl); 4348 1.216 msaitoh bge_set_max_readrq(sc); 4349 1.216 msaitoh } 4350 1.216 msaitoh 4351 1.216 msaitoh /* From Linux: dummy read to flush PCI posted writes */ 4352 1.216 msaitoh reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 4353 1.216 msaitoh 4354 1.236 msaitoh /* 4355 1.236 msaitoh * Reset some of the PCI state that got zapped by reset 4356 1.236 msaitoh * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be 4357 1.236 msaitoh * set, too. 4358 1.236 msaitoh */ 4359 1.216 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 4360 1.216 msaitoh BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 4361 1.216 msaitoh BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 4362 1.216 msaitoh val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; 4363 1.216 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && 4364 1.261 msaitoh (sc->bge_flags & BGEF_PCIX) != 0) 4365 1.216 msaitoh val |= BGE_PCISTATE_RETRY_SAME_DMA; 4366 1.216 msaitoh if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 4367 1.216 msaitoh val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 4368 1.216 msaitoh BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 4369 1.216 msaitoh BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 4370 1.216 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val); 4371 1.216 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); 4372 1.216 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 4373 1.216 msaitoh 4374 1.260 msaitoh /* 57xx step 11: disable PCI-X Relaxed Ordering. */ 4375 1.261 msaitoh if (sc->bge_flags & BGEF_PCIX) { 4376 1.216 msaitoh reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 4377 1.238 msaitoh + PCIX_CMD); 4378 1.260 msaitoh /* Set max memory read byte count to 2K */ 4379 1.260 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 4380 1.260 msaitoh reg &= ~PCIX_CMD_BYTECNT_MASK; 4381 1.260 msaitoh reg |= PCIX_CMD_BCNT_2048; 4382 1.260 msaitoh } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){ 4383 1.260 msaitoh /* 4384 1.260 msaitoh * For 5704, set max outstanding split transaction 4385 1.260 msaitoh * field to 0 (0 means it supports 1 request) 4386 1.260 msaitoh */ 4387 1.260 msaitoh reg &= ~(PCIX_CMD_SPLTRANS_MASK 4388 1.260 msaitoh | PCIX_CMD_BYTECNT_MASK); 4389 1.260 msaitoh reg |= PCIX_CMD_BCNT_2048; 4390 1.260 msaitoh } 4391 1.216 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 4392 1.238 msaitoh + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER); 4393 1.76 cube } 4394 1.76 cube 4395 1.236 msaitoh /* 5718 reset step 10, 57XX step 12 */ 4396 1.236 msaitoh /* Enable memory arbiter. 
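 * (The arbiter is assumed to need re-enabling after the global reset
 * before any further accesses to NIC internal memory are made.)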
*/ 4397 1.216 msaitoh if (BGE_IS_5714_FAMILY(sc)) { 4398 1.216 msaitoh val = CSR_READ_4(sc, BGE_MARB_MODE); 4399 1.216 msaitoh CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 4400 1.216 msaitoh } else 4401 1.216 msaitoh CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4402 1.1 fvdl 4403 1.180 msaitoh /* XXX 5721, 5751 and 5752 */ 4404 1.180 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) { 4405 1.180 msaitoh /* Step 19: */ 4406 1.180 msaitoh BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25); 4407 1.180 msaitoh /* Step 20: */ 4408 1.180 msaitoh BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT); 4409 1.44 hannken } 4410 1.1 fvdl 4411 1.274 msaitoh /* 5718 reset step 12, 57XX step 15 and 16 */ 4412 1.274 msaitoh /* Fix up byte swapping */ 4413 1.274 msaitoh CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 4414 1.274 msaitoh 4415 1.253 msaitoh /* 5718 reset step 13, 57XX step 17 */ 4416 1.252 msaitoh /* Poll until the firmware initialization is complete */ 4417 1.252 msaitoh bge_poll_fw(sc); 4418 1.252 msaitoh 4419 1.236 msaitoh /* 57XX step 21 */ 4420 1.181 msaitoh if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) { 4421 1.181 msaitoh pcireg_t msidata; 4422 1.330 msaitoh 4423 1.181 msaitoh msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4424 1.181 msaitoh BGE_PCI_MSI_DATA); 4425 1.181 msaitoh msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16); 4426 1.181 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA, 4427 1.181 msaitoh msidata); 4428 1.181 msaitoh } 4429 1.151 cegger 4430 1.236 msaitoh /* 57XX step 18 */ 4431 1.253 msaitoh /* Write mac mode. */ 4432 1.216 msaitoh val = CSR_READ_4(sc, BGE_MAC_MODE); 4433 1.253 msaitoh /* Restore mac_mode_mask's bits using mac_mode */ 4434 1.216 msaitoh val = (val & ~mac_mode_mask) | mac_mode; 4435 1.216 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); 4436 1.216 msaitoh DELAY(40); 4437 1.1 fvdl 4438 1.216 msaitoh bge_ape_unlock(sc, BGE_APE_LOCK_GRC); 4439 1.1 fvdl 4440 1.161 msaitoh /* 4441 1.161 msaitoh * The 5704 in TBI mode apparently needs some special 4442 1.161 msaitoh * adjustment to insure the SERDES drive level is set 4443 1.161 msaitoh * to 1.2V. 4444 1.161 msaitoh */ 4445 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI && 4446 1.161 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 4447 1.170 msaitoh uint32_t serdescfg; 4448 1.161 msaitoh 4449 1.161 msaitoh serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 4450 1.161 msaitoh serdescfg = (serdescfg & ~0xFFF) | 0x880; 4451 1.161 msaitoh CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 4452 1.161 msaitoh } 4453 1.161 msaitoh 4454 1.261 msaitoh if (sc->bge_flags & BGEF_PCIE && 4455 1.214 msaitoh !BGE_IS_57765_PLUS(sc) && 4456 1.172 msaitoh sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && 4457 1.214 msaitoh BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) { 4458 1.172 msaitoh uint32_t v; 4459 1.172 msaitoh 4460 1.172 msaitoh /* Enable PCI Express bug fix */ 4461 1.217 msaitoh v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG); 4462 1.217 msaitoh CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG, 4463 1.217 msaitoh v | BGE_TLP_DATA_FIFO_PROTECT); 4464 1.172 msaitoh } 4465 1.216 msaitoh 4466 1.216 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 4467 1.216 msaitoh BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, 4468 1.216 msaitoh CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 4469 1.177 msaitoh 4470 1.177 msaitoh return 0; 4471 1.1 fvdl } 4472 1.1 fvdl 4473 1.1 fvdl /* 4474 1.1 fvdl * Frame reception handling. 
This is called if there's a frame 4475 1.1 fvdl * on the receive return list. 4476 1.1 fvdl * 4477 1.1 fvdl * Note: we have to be able to handle two possibilities here: 4478 1.184 njoly * 1) the frame is from the jumbo receive ring 4479 1.1 fvdl * 2) the frame is from the standard receive ring 4480 1.1 fvdl */ 4481 1.1 fvdl 4482 1.104 thorpej static void 4483 1.104 thorpej bge_rxeof(struct bge_softc *sc) 4484 1.1 fvdl { 4485 1.358 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 4486 1.172 msaitoh uint16_t rx_prod, rx_cons; 4487 1.1 fvdl int stdcnt = 0, jumbocnt = 0; 4488 1.1 fvdl bus_dmamap_t dmamap; 4489 1.1 fvdl bus_addr_t offset, toff; 4490 1.1 fvdl bus_size_t tlen; 4491 1.1 fvdl int tosync; 4492 1.1 fvdl 4493 1.394 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 4494 1.394 skrll 4495 1.363 skrll bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4496 1.363 skrll offsetof(struct bge_ring_data, bge_status_block), 4497 1.364 skrll sizeof(struct bge_status_block), 4498 1.363 skrll BUS_DMASYNC_POSTREAD); 4499 1.363 skrll 4500 1.172 msaitoh rx_cons = sc->bge_rx_saved_considx; 4501 1.172 msaitoh rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 4502 1.172 msaitoh 4503 1.172 msaitoh /* Nothing to do */ 4504 1.172 msaitoh if (rx_cons == rx_prod) 4505 1.172 msaitoh return; 4506 1.172 msaitoh 4507 1.1 fvdl offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 4508 1.172 msaitoh tosync = rx_prod - rx_cons; 4509 1.1 fvdl 4510 1.200 tls if (tosync != 0) 4511 1.148 mlelstv rnd_add_uint32(&sc->rnd_source, tosync); 4512 1.148 mlelstv 4513 1.364 skrll toff = offset + (rx_cons * sizeof(struct bge_rx_bd)); 4514 1.1 fvdl 4515 1.1 fvdl if (tosync < 0) { 4516 1.172 msaitoh tlen = (sc->bge_return_ring_cnt - rx_cons) * 4517 1.364 skrll sizeof(struct bge_rx_bd); 4518 1.1 fvdl bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4519 1.1 fvdl toff, tlen, BUS_DMASYNC_POSTREAD); 4520 1.374 skrll tosync = rx_prod; 4521 1.374 skrll toff = offset; 4522 1.1 fvdl } 4523 1.1 fvdl 4524 1.347 jmcneill if (tosync != 0) { 4525 1.347 jmcneill bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4526 1.374 skrll toff, tosync * sizeof(struct bge_rx_bd), 4527 1.347 jmcneill BUS_DMASYNC_POSTREAD); 4528 1.347 jmcneill } 4529 1.1 fvdl 4530 1.172 msaitoh while (rx_cons != rx_prod) { 4531 1.1 fvdl struct bge_rx_bd *cur_rx; 4532 1.170 msaitoh uint32_t rxidx; 4533 1.1 fvdl struct mbuf *m = NULL; 4534 1.1 fvdl 4535 1.172 msaitoh cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 4536 1.1 fvdl 4537 1.1 fvdl rxidx = cur_rx->bge_idx; 4538 1.172 msaitoh BGE_INC(rx_cons, sc->bge_return_ring_cnt); 4539 1.1 fvdl 4540 1.1 fvdl if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 4541 1.1 fvdl BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 4542 1.1 fvdl m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 4543 1.1 fvdl sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 4544 1.1 fvdl jumbocnt++; 4545 1.124 bouyer bus_dmamap_sync(sc->bge_dmatag, 4546 1.124 bouyer sc->bge_cdata.bge_rx_jumbo_map, 4547 1.126 christos mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 4548 1.125 bouyer BGE_JLEN, BUS_DMASYNC_POSTREAD); 4549 1.1 fvdl if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4550 1.343 thorpej if_statinc(ifp, if_ierrors); 4551 1.1 fvdl bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4552 1.1 fvdl continue; 4553 1.1 fvdl } 4554 1.1 fvdl if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 4555 1.367 skrll NULL) == ENOBUFS) { 4556 1.343 thorpej if_statinc(ifp, if_ierrors); 4557 1.1 fvdl bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4558 1.1 fvdl continue; 
4559 1.1 fvdl } 4560 1.1 fvdl } else { 4561 1.1 fvdl m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 4562 1.376 skrll sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 4563 1.124 bouyer 4564 1.1 fvdl stdcnt++; 4565 1.376 skrll sc->bge_std_cnt--; 4566 1.376 skrll 4567 1.1 fvdl dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 4568 1.125 bouyer bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 4569 1.125 bouyer dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 4570 1.125 bouyer bus_dmamap_unload(sc->bge_dmatag, dmamap); 4571 1.376 skrll 4572 1.1 fvdl if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4573 1.376 skrll m_free(m); 4574 1.343 thorpej if_statinc(ifp, if_ierrors); 4575 1.1 fvdl continue; 4576 1.1 fvdl } 4577 1.1 fvdl } 4578 1.1 fvdl 4579 1.37 jonathan #ifndef __NO_STRICT_ALIGNMENT 4580 1.178 msaitoh /* 4581 1.178 msaitoh * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 4582 1.178 msaitoh * the Rx buffer has the layer-2 header unaligned. 4583 1.178 msaitoh * If our CPU requires alignment, re-align by copying. 4584 1.178 msaitoh */ 4585 1.261 msaitoh if (sc->bge_flags & BGEF_RX_ALIGNBUG) { 4586 1.127 tsutsui memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 4587 1.178 msaitoh cur_rx->bge_len); 4588 1.37 jonathan m->m_data += ETHER_ALIGN; 4589 1.37 jonathan } 4590 1.37 jonathan #endif 4591 1.87 perry 4592 1.54 fvdl m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 4593 1.297 ozaki m_set_rcvif(m, ifp); 4594 1.1 fvdl 4595 1.219 msaitoh bge_rxcsum(sc, cur_rx, m); 4596 1.219 msaitoh 4597 1.219 msaitoh /* 4598 1.219 msaitoh * If we received a packet with a vlan tag, pass it 4599 1.219 msaitoh * to vlan_input() instead of ether_input(). 4600 1.219 msaitoh */ 4601 1.332 msaitoh if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 4602 1.313 msaitoh vlan_set_tag(m, cur_rx->bge_vlan_tag); 4603 1.219 msaitoh 4604 1.295 ozaki if_percpuq_enqueue(ifp->if_percpuq, m); 4605 1.219 msaitoh } 4606 1.219 msaitoh 4607 1.219 msaitoh sc->bge_rx_saved_considx = rx_cons; 4608 1.219 msaitoh bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 4609 1.219 msaitoh if (stdcnt) 4610 1.376 skrll bge_fill_rx_ring_std(sc); 4611 1.219 msaitoh if (jumbocnt) 4612 1.219 msaitoh bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 4613 1.219 msaitoh } 4614 1.219 msaitoh 4615 1.219 msaitoh static void 4616 1.219 msaitoh bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) 4617 1.219 msaitoh { 4618 1.46 jonathan 4619 1.257 msaitoh if (BGE_IS_57765_PLUS(sc)) { 4620 1.219 msaitoh if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { 4621 1.219 msaitoh if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4622 1.219 msaitoh m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4623 1.216 msaitoh if ((cur_rx->bge_error_flag & 4624 1.216 msaitoh BGE_RXERRFLAG_IP_CSUM_NOK) != 0) 4625 1.216 msaitoh m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4626 1.219 msaitoh if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 4627 1.219 msaitoh m->m_pkthdr.csum_data = 4628 1.219 msaitoh cur_rx->bge_tcp_udp_csum; 4629 1.219 msaitoh m->m_pkthdr.csum_flags |= 4630 1.331 msaitoh (M_CSUM_TCPv4 | M_CSUM_UDPv4 |M_CSUM_DATA); 4631 1.219 msaitoh } 4632 1.216 msaitoh } 4633 1.219 msaitoh } else { 4634 1.219 msaitoh if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4635 1.219 msaitoh m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4636 1.219 msaitoh if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 4637 1.219 msaitoh m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4638 1.46 jonathan /* 4639 1.46 jonathan * Rx transport checksum-offload may also 4640 1.46 jonathan * have 
bugs with packets which, when transmitted, 4641 1.46 jonathan * were `runts' requiring padding. 4642 1.46 jonathan */ 4643 1.46 jonathan if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 4644 1.46 jonathan (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 4645 1.219 msaitoh m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 4646 1.46 jonathan m->m_pkthdr.csum_data = 4647 1.46 jonathan cur_rx->bge_tcp_udp_csum; 4648 1.46 jonathan m->m_pkthdr.csum_flags |= 4649 1.331 msaitoh (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_DATA); 4650 1.1 fvdl } 4651 1.1 fvdl } 4652 1.1 fvdl } 4653 1.1 fvdl 4654 1.104 thorpej static void 4655 1.104 thorpej bge_txeof(struct bge_softc *sc) 4656 1.1 fvdl { 4657 1.358 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 4658 1.1 fvdl struct bge_tx_bd *cur_tx = NULL; 4659 1.1 fvdl struct txdmamap_pool_entry *dma; 4660 1.1 fvdl bus_addr_t offset, toff; 4661 1.1 fvdl bus_size_t tlen; 4662 1.1 fvdl int tosync; 4663 1.1 fvdl struct mbuf *m; 4664 1.1 fvdl 4665 1.394 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 4666 1.394 skrll 4667 1.1 fvdl bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4668 1.1 fvdl offsetof(struct bge_ring_data, bge_status_block), 4669 1.364 skrll sizeof(struct bge_status_block), 4670 1.1 fvdl BUS_DMASYNC_POSTREAD); 4671 1.1 fvdl 4672 1.374 skrll const uint16_t hw_cons_idx = 4673 1.374 skrll sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx; 4674 1.1 fvdl offset = offsetof(struct bge_ring_data, bge_tx_ring); 4675 1.374 skrll tosync = hw_cons_idx - sc->bge_tx_saved_considx; 4676 1.1 fvdl 4677 1.200 tls if (tosync != 0) 4678 1.148 mlelstv rnd_add_uint32(&sc->rnd_source, tosync); 4679 1.148 mlelstv 4680 1.364 skrll toff = offset + (sc->bge_tx_saved_considx * sizeof(struct bge_tx_bd)); 4681 1.1 fvdl 4682 1.1 fvdl if (tosync < 0) { 4683 1.1 fvdl tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 4684 1.364 skrll sizeof(struct bge_tx_bd); 4685 1.1 fvdl bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4686 1.331 msaitoh toff, tlen, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4687 1.374 skrll tosync = hw_cons_idx; 4688 1.374 skrll toff = offset; 4689 1.1 fvdl } 4690 1.1 fvdl 4691 1.347 jmcneill if (tosync != 0) { 4692 1.347 jmcneill bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4693 1.374 skrll toff, tosync * sizeof(struct bge_tx_bd), 4694 1.347 jmcneill BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4695 1.347 jmcneill } 4696 1.1 fvdl 4697 1.1 fvdl /* 4698 1.1 fvdl * Go through our tx ring and free mbufs for those 4699 1.1 fvdl * frames that have been sent. 
4700 1.1 fvdl */ 4701 1.374 skrll while (sc->bge_tx_saved_considx != hw_cons_idx) { 4702 1.359 skrll uint32_t idx = sc->bge_tx_saved_considx; 4703 1.1 fvdl cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 4704 1.1 fvdl if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 4705 1.343 thorpej if_statinc(ifp, if_opackets); 4706 1.1 fvdl m = sc->bge_cdata.bge_tx_chain[idx]; 4707 1.1 fvdl if (m != NULL) { 4708 1.1 fvdl sc->bge_cdata.bge_tx_chain[idx] = NULL; 4709 1.1 fvdl dma = sc->txdma[idx]; 4710 1.317 bouyer if (dma->is_dma32) { 4711 1.317 bouyer bus_dmamap_sync(sc->bge_dmatag32, dma->dmamap32, 4712 1.317 bouyer 0, dma->dmamap32->dm_mapsize, 4713 1.317 bouyer BUS_DMASYNC_POSTWRITE); 4714 1.317 bouyer bus_dmamap_unload( 4715 1.317 bouyer sc->bge_dmatag32, dma->dmamap32); 4716 1.317 bouyer } else { 4717 1.317 bouyer bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 4718 1.317 bouyer 0, dma->dmamap->dm_mapsize, 4719 1.317 bouyer BUS_DMASYNC_POSTWRITE); 4720 1.317 bouyer bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 4721 1.317 bouyer } 4722 1.1 fvdl SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 4723 1.1 fvdl sc->txdma[idx] = NULL; 4724 1.1 fvdl 4725 1.1 fvdl m_freem(m); 4726 1.1 fvdl } 4727 1.1 fvdl sc->bge_txcnt--; 4728 1.1 fvdl BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 4729 1.375 skrll sc->bge_tx_sending = false; 4730 1.1 fvdl } 4731 1.1 fvdl } 4732 1.1 fvdl 4733 1.104 thorpej static int 4734 1.104 thorpej bge_intr(void *xsc) 4735 1.1 fvdl { 4736 1.354 skrll struct bge_softc * const sc = xsc; 4737 1.354 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 4738 1.288 msaitoh uint32_t pcistate, statusword, statustag; 4739 1.247 msaitoh uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE; 4740 1.1 fvdl 4741 1.247 msaitoh /* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */ 4742 1.247 msaitoh if (BGE_IS_5717_PLUS(sc)) 4743 1.247 msaitoh intrmask = 0; 4744 1.247 msaitoh 4745 1.386 skrll mutex_enter(sc->sc_intr_lock); 4746 1.386 skrll if (sc->bge_txrx_stopping) { 4747 1.386 skrll mutex_exit(sc->sc_intr_lock); 4748 1.386 skrll return 1; 4749 1.386 skrll } 4750 1.375 skrll 4751 1.357 skrll /* 4752 1.357 skrll * It is possible for the interrupt to arrive before 4753 1.161 msaitoh * the status block is updated prior to the interrupt. 4754 1.161 msaitoh * Reading the PCI State register will confirm whether the 4755 1.161 msaitoh * interrupt is ours and will flush the status block. 
4756 1.161 msaitoh */ 4757 1.288 msaitoh pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE); 4758 1.144 mlelstv 4759 1.161 msaitoh /* read status word from status block */ 4760 1.240 msaitoh bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4761 1.240 msaitoh offsetof(struct bge_ring_data, bge_status_block), 4762 1.364 skrll sizeof(struct bge_status_block), 4763 1.240 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4764 1.161 msaitoh statusword = sc->bge_rdata->bge_status_block.bge_status; 4765 1.288 msaitoh statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24; 4766 1.144 mlelstv 4767 1.288 msaitoh if (sc->bge_flags & BGEF_TAGGED_STATUS) { 4768 1.288 msaitoh if (sc->bge_lasttag == statustag && 4769 1.288 msaitoh (~pcistate & intrmask)) { 4770 1.306 msaitoh BGE_EVCNT_INCR(sc->bge_ev_intr_spurious); 4771 1.386 skrll mutex_exit(sc->sc_intr_lock); 4772 1.362 skrll return 0; 4773 1.288 msaitoh } 4774 1.288 msaitoh sc->bge_lasttag = statustag; 4775 1.288 msaitoh } else { 4776 1.288 msaitoh if (!(statusword & BGE_STATFLAG_UPDATED) && 4777 1.288 msaitoh !(~pcistate & intrmask)) { 4778 1.306 msaitoh BGE_EVCNT_INCR(sc->bge_ev_intr_spurious2); 4779 1.386 skrll mutex_exit(sc->sc_intr_lock); 4780 1.362 skrll return 0; 4781 1.288 msaitoh } 4782 1.288 msaitoh statustag = 0; 4783 1.288 msaitoh } 4784 1.288 msaitoh /* Ack interrupt and stop others from occurring. */ 4785 1.288 msaitoh bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); 4786 1.288 msaitoh BGE_EVCNT_INCR(sc->bge_ev_intr); 4787 1.144 mlelstv 4788 1.288 msaitoh /* clear status word */ 4789 1.288 msaitoh sc->bge_rdata->bge_status_block.bge_status = 0; 4790 1.1 fvdl 4791 1.288 msaitoh bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4792 1.288 msaitoh offsetof(struct bge_ring_data, bge_status_block), 4793 1.364 skrll sizeof(struct bge_status_block), 4794 1.288 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4795 1.72 thorpej 4796 1.288 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 4797 1.288 msaitoh statusword & BGE_STATFLAG_LINKSTATE_CHANGED || 4798 1.288 msaitoh BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) 4799 1.288 msaitoh bge_link_upd(sc); 4800 1.1 fvdl 4801 1.386 skrll /* Check RX return ring producer/consumer */ 4802 1.386 skrll bge_rxeof(sc); 4803 1.144 mlelstv 4804 1.386 skrll /* Check TX ring producer/consumer */ 4805 1.386 skrll bge_txeof(sc); 4806 1.1 fvdl 4807 1.288 msaitoh if (sc->bge_pending_rxintr_change) { 4808 1.288 msaitoh uint32_t rx_ticks = sc->bge_rx_coal_ticks; 4809 1.288 msaitoh uint32_t rx_bds = sc->bge_rx_max_coal_bds; 4810 1.1 fvdl 4811 1.288 msaitoh CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 4812 1.288 msaitoh DELAY(10); 4813 1.288 msaitoh (void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 4814 1.1 fvdl 4815 1.288 msaitoh CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 4816 1.288 msaitoh DELAY(10); 4817 1.288 msaitoh (void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 4818 1.58 jonathan 4819 1.384 skrll sc->bge_pending_rxintr_change = false; 4820 1.288 msaitoh } 4821 1.288 msaitoh bge_handle_events(sc); 4822 1.87 perry 4823 1.288 msaitoh /* Re-enable interrupts. 
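 * Writing the saved status tag (zero when tagged status is not in use)
 * back to the IRQ0 mailbox unmasks the interrupt; the earlier write of
 * 1 kept it masked while events were being processed.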
*/ 4824 1.288 msaitoh bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag); 4825 1.58 jonathan 4826 1.386 skrll if_schedule_deferred_start(ifp); 4827 1.1 fvdl 4828 1.386 skrll mutex_exit(sc->sc_intr_lock); 4829 1.375 skrll 4830 1.288 msaitoh return 1; 4831 1.1 fvdl } 4832 1.1 fvdl 4833 1.104 thorpej static void 4834 1.177 msaitoh bge_asf_driver_up(struct bge_softc *sc) 4835 1.177 msaitoh { 4836 1.177 msaitoh if (sc->bge_asf_mode & ASF_STACKUP) { 4837 1.382 skrll /* Send ASF heartbeat approx. every 2s */ 4838 1.177 msaitoh if (sc->bge_asf_count) 4839 1.177 msaitoh sc->bge_asf_count --; 4840 1.177 msaitoh else { 4841 1.180 msaitoh sc->bge_asf_count = 2; 4842 1.216 msaitoh 4843 1.216 msaitoh bge_wait_for_event_ack(sc); 4844 1.216 msaitoh 4845 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, 4846 1.285 msaitoh BGE_FW_CMD_DRV_ALIVE3); 4847 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); 4848 1.216 msaitoh bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 4849 1.216 msaitoh BGE_FW_HB_TIMEOUT_SEC); 4850 1.216 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, 4851 1.216 msaitoh CSR_READ_4(sc, BGE_RX_CPU_EVENT) | 4852 1.216 msaitoh BGE_RX_CPU_DRV_EVENT); 4853 1.177 msaitoh } 4854 1.177 msaitoh } 4855 1.177 msaitoh } 4856 1.177 msaitoh 4857 1.177 msaitoh static void 4858 1.104 thorpej bge_tick(void *xsc) 4859 1.1 fvdl { 4860 1.354 skrll struct bge_softc * const sc = xsc; 4861 1.375 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 4862 1.354 skrll struct mii_data * const mii = &sc->bge_mii; 4863 1.1 fvdl 4864 1.394 skrll mutex_enter(sc->sc_intr_lock); 4865 1.1 fvdl 4866 1.172 msaitoh if (BGE_IS_5705_PLUS(sc)) 4867 1.172 msaitoh bge_stats_update_regs(sc); 4868 1.172 msaitoh else 4869 1.172 msaitoh bge_stats_update(sc); 4870 1.1 fvdl 4871 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) { 4872 1.161 msaitoh /* 4873 1.161 msaitoh * Since in TBI mode auto-polling can't be used we should poll 4874 1.161 msaitoh * link status manually. Here we register pending link event 4875 1.161 msaitoh * and trigger interrupt. 4876 1.161 msaitoh */ 4877 1.161 msaitoh BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 4878 1.161 msaitoh BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4879 1.161 msaitoh } else { 4880 1.161 msaitoh /* 4881 1.161 msaitoh * Do not touch PHY if we have link up. This could break 4882 1.161 msaitoh * IPMI/ASF mode or produce extra input errors. 4883 1.161 msaitoh * (extra input errors was reported for bcm5701 & bcm5704). 
4884 1.161 msaitoh */ 4885 1.386 skrll if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 4886 1.161 msaitoh mii_tick(mii); 4887 1.386 skrll } 4888 1.161 msaitoh } 4889 1.161 msaitoh 4890 1.216 msaitoh bge_asf_driver_up(sc); 4891 1.216 msaitoh 4892 1.386 skrll const bool ok = bge_watchdog_tick(ifp); 4893 1.386 skrll if (ok) 4894 1.345 thorpej callout_schedule(&sc->bge_timeout, hz); 4895 1.394 skrll mutex_exit(sc->sc_intr_lock); 4896 1.1 fvdl } 4897 1.1 fvdl 4898 1.104 thorpej static void 4899 1.172 msaitoh bge_stats_update_regs(struct bge_softc *sc) 4900 1.172 msaitoh { 4901 1.375 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 4902 1.172 msaitoh 4903 1.343 thorpej net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 4904 1.343 thorpej 4905 1.392 riastrad if_statadd_ref(ifp, nsr, if_collisions, 4906 1.343 thorpej CSR_READ_4(sc, BGE_MAC_STATS + 4907 1.343 thorpej offsetof(struct bge_mac_stats_regs, etherStatsCollisions))); 4908 1.172 msaitoh 4909 1.320 bouyer /* 4910 1.320 bouyer * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0, 4911 1.320 bouyer * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames 4912 1.320 bouyer * (silicon bug). There's no reliable workaround so just 4913 1.320 bouyer * ignore the counter 4914 1.320 bouyer */ 4915 1.320 bouyer if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 4916 1.328 bouyer sc->bge_chipid != BGE_CHIPID_BCM5719_A0 && 4917 1.328 bouyer sc->bge_chipid != BGE_CHIPID_BCM5720_A0) { 4918 1.392 riastrad if_statadd_ref(ifp, nsr, if_ierrors, 4919 1.343 thorpej CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS)); 4920 1.320 bouyer } 4921 1.392 riastrad if_statadd_ref(ifp, nsr, if_ierrors, 4922 1.343 thorpej CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS)); 4923 1.392 riastrad if_statadd_ref(ifp, nsr, if_ierrors, 4924 1.343 thorpej CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS)); 4925 1.343 thorpej 4926 1.343 thorpej IF_STAT_PUTREF(ifp); 4927 1.327 msaitoh 4928 1.327 msaitoh if (sc->bge_flags & BGEF_RDMA_BUG) { 4929 1.327 msaitoh uint32_t val, ucast, mcast, bcast; 4930 1.327 msaitoh 4931 1.327 msaitoh ucast = CSR_READ_4(sc, BGE_MAC_STATS + 4932 1.327 msaitoh offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts)); 4933 1.327 msaitoh mcast = CSR_READ_4(sc, BGE_MAC_STATS + 4934 1.327 msaitoh offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts)); 4935 1.327 msaitoh bcast = CSR_READ_4(sc, BGE_MAC_STATS + 4936 1.327 msaitoh offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts)); 4937 1.327 msaitoh 4938 1.327 msaitoh /* 4939 1.327 msaitoh * If controller transmitted more than BGE_NUM_RDMA_CHANNELS 4940 1.327 msaitoh * frames, it's safe to disable workaround for DMA engine's 4941 1.327 msaitoh * miscalculation of TXMBUF space. 
4942 1.327 msaitoh */ 4943 1.327 msaitoh if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) { 4944 1.327 msaitoh val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 4945 1.327 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 4946 1.327 msaitoh val &= ~BGE_RDMA_TX_LENGTH_WA_5719; 4947 1.327 msaitoh else 4948 1.327 msaitoh val &= ~BGE_RDMA_TX_LENGTH_WA_5720; 4949 1.327 msaitoh CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 4950 1.327 msaitoh sc->bge_flags &= ~BGEF_RDMA_BUG; 4951 1.327 msaitoh } 4952 1.327 msaitoh } 4953 1.172 msaitoh } 4954 1.172 msaitoh 4955 1.172 msaitoh static void 4956 1.104 thorpej bge_stats_update(struct bge_softc *sc) 4957 1.1 fvdl { 4958 1.354 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 4959 1.1 fvdl bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 4960 1.44 hannken 4961 1.1 fvdl #define READ_STAT(sc, stats, stat) \ 4962 1.1 fvdl CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 4963 1.1 fvdl 4964 1.343 thorpej uint64_t collisions = 4965 1.1 fvdl (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 4966 1.1 fvdl READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 4967 1.1 fvdl READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 4968 1.343 thorpej READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)); 4969 1.343 thorpej 4970 1.343 thorpej if_statadd(ifp, if_collisions, collisions - sc->bge_if_collisions); 4971 1.343 thorpej sc->bge_if_collisions = collisions; 4972 1.343 thorpej 4973 1.1 fvdl 4974 1.72 thorpej BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 4975 1.72 thorpej READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 4976 1.72 thorpej BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 4977 1.72 thorpej READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 4978 1.72 thorpej BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 4979 1.72 thorpej READ_STAT(sc, stats, 4980 1.330 msaitoh xoffPauseFramesReceived.bge_addr_lo)); 4981 1.72 thorpej BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 4982 1.72 thorpej READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 4983 1.72 thorpej BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 4984 1.72 thorpej READ_STAT(sc, stats, 4985 1.330 msaitoh macControlFramesReceived.bge_addr_lo)); 4986 1.72 thorpej BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 4987 1.72 thorpej READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 4988 1.72 thorpej 4989 1.1 fvdl #undef READ_STAT 4990 1.1 fvdl } 4991 1.1 fvdl 4992 1.46 jonathan /* 4993 1.46 jonathan * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 4994 1.46 jonathan * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 4995 1.46 jonathan * but when such padded frames employ the bge IP/TCP checksum offload, 4996 1.46 jonathan * the hardware checksum assist gives incorrect results (possibly 4997 1.46 jonathan * from incorporating its own padding into the UDP/TCP checksum; who knows). 4998 1.46 jonathan * If we pad such runts with zeros, the onboard checksum comes out correct. 4999 1.46 jonathan */ 5000 1.102 perry static inline int 5001 1.46 jonathan bge_cksum_pad(struct mbuf *pkt) 5002 1.46 jonathan { 5003 1.46 jonathan struct mbuf *last = NULL; 5004 1.46 jonathan int padlen; 5005 1.46 jonathan 5006 1.46 jonathan padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 5007 1.46 jonathan 5008 1.46 jonathan /* if there's only the packet-header and we can pad there, use it. 
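 * For example, a bare TCP ACK of 14 + 20 + 20 = 54 bytes needs
 * padlen = 60 - 54 = 6 zero bytes; such a packet usually sits in a
 * single mbuf with trailing space, so the padding can go right here.
 * Zero padding cannot perturb the ones-complement sum the hardware
 * computes, which is the whole point of the workaround.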
*/ 5009 1.46 jonathan if (pkt->m_pkthdr.len == pkt->m_len && 5010 1.113 tsutsui M_TRAILINGSPACE(pkt) >= padlen) { 5011 1.46 jonathan last = pkt; 5012 1.46 jonathan } else { 5013 1.46 jonathan /* 5014 1.46 jonathan * Walk packet chain to find last mbuf. We will either 5015 1.87 perry * pad there, or append a new mbuf and pad it 5016 1.46 jonathan * (thus perhaps avoiding the bcm5700 dma-min bug). 5017 1.46 jonathan */ 5018 1.46 jonathan for (last = pkt; last->m_next != NULL; last = last->m_next) { 5019 1.367 skrll continue; /* do nothing */ 5020 1.46 jonathan } 5021 1.46 jonathan 5022 1.46 jonathan /* `last' now points to last in chain. */ 5023 1.114 tsutsui if (M_TRAILINGSPACE(last) < padlen) { 5024 1.46 jonathan /* Allocate new empty mbuf, pad it. Compact later. */ 5025 1.46 jonathan struct mbuf *n; 5026 1.46 jonathan MGET(n, M_DONTWAIT, MT_DATA); 5027 1.129 joerg if (n == NULL) 5028 1.129 joerg return ENOBUFS; 5029 1.397 mlelstv MCLAIM(n, last->m_owner); 5030 1.46 jonathan n->m_len = 0; 5031 1.46 jonathan last->m_next = n; 5032 1.46 jonathan last = n; 5033 1.46 jonathan } 5034 1.46 jonathan } 5035 1.46 jonathan 5036 1.114 tsutsui KDASSERT(!M_READONLY(last)); 5037 1.114 tsutsui KDASSERT(M_TRAILINGSPACE(last) >= padlen); 5038 1.114 tsutsui 5039 1.46 jonathan /* Now zero the pad area, to avoid the bge cksum-assist bug */ 5040 1.126 christos memset(mtod(last, char *) + last->m_len, 0, padlen); 5041 1.46 jonathan last->m_len += padlen; 5042 1.46 jonathan pkt->m_pkthdr.len += padlen; 5043 1.46 jonathan return 0; 5044 1.46 jonathan } 5045 1.45 jonathan 5046 1.45 jonathan /* 5047 1.45 jonathan * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 5048 1.45 jonathan */ 5049 1.102 perry static inline int 5050 1.45 jonathan bge_compact_dma_runt(struct mbuf *pkt) 5051 1.45 jonathan { 5052 1.45 jonathan struct mbuf *m, *prev; 5053 1.330 msaitoh int totlen; 5054 1.45 jonathan 5055 1.45 jonathan prev = NULL; 5056 1.45 jonathan totlen = 0; 5057 1.45 jonathan 5058 1.331 msaitoh for (m = pkt; m != NULL; prev = m, m = m->m_next) { 5059 1.45 jonathan int mlen = m->m_len; 5060 1.45 jonathan int shortfall = 8 - mlen ; 5061 1.45 jonathan 5062 1.45 jonathan totlen += mlen; 5063 1.203 msaitoh if (mlen == 0) 5064 1.45 jonathan continue; 5065 1.45 jonathan if (mlen >= 8) 5066 1.45 jonathan continue; 5067 1.45 jonathan 5068 1.357 skrll /* 5069 1.357 skrll * If we get here, mbuf data is too small for DMA engine. 5070 1.45 jonathan * Try to fix by shuffling data to prev or next in chain. 5071 1.45 jonathan * If that fails, do a compacting deep-copy of the whole chain. 5072 1.45 jonathan */ 5073 1.45 jonathan 5074 1.45 jonathan /* Internal frag. If fits in prev, copy it there. */ 5075 1.113 tsutsui if (prev && M_TRAILINGSPACE(prev) >= m->m_len) { 5076 1.330 msaitoh memcpy(prev->m_data + prev->m_len, m->m_data, mlen); 5077 1.45 jonathan prev->m_len += mlen; 5078 1.45 jonathan m->m_len = 0; 5079 1.45 jonathan /* XXX stitch chain */ 5080 1.45 jonathan prev->m_next = m_free(m); 5081 1.45 jonathan m = prev; 5082 1.45 jonathan continue; 5083 1.332 msaitoh } else if (m->m_next != NULL && 5084 1.367 skrll M_TRAILINGSPACE(m) >= shortfall && 5085 1.367 skrll m->m_next->m_len >= (8 + shortfall)) { 5086 1.45 jonathan /* m is writable and have enough data in next, pull up. 
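 * e.g. with mlen = 5 the shortfall is 3 and the test above requires
 * the next mbuf to hold at least 8 + 3 = 11 bytes, so after the copy
 * both mbufs end up >= 8 bytes and no new runt is created.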
*/ 5087 1.45 jonathan 5088 1.330 msaitoh memcpy(m->m_data + m->m_len, m->m_next->m_data, 5089 1.115 tsutsui shortfall); 5090 1.45 jonathan m->m_len += shortfall; 5091 1.45 jonathan m->m_next->m_len -= shortfall; 5092 1.45 jonathan m->m_next->m_data += shortfall; 5093 1.332 msaitoh } else if (m->m_next == NULL || 1) { 5094 1.357 skrll /* 5095 1.357 skrll * Got a runt at the very end of the packet. 5096 1.45 jonathan * borrow data from the tail of the preceding mbuf and 5097 1.332 msaitoh * update its length in-place. (The original data is 5098 1.332 msaitoh * still valid, so we can do this even if prev is not 5099 1.332 msaitoh * writable.) 5100 1.45 jonathan */ 5101 1.45 jonathan 5102 1.332 msaitoh /* 5103 1.332 msaitoh * If we'd make prev a runt, just move all of its data. 5104 1.332 msaitoh */ 5105 1.45 jonathan KASSERT(prev != NULL /*, ("runt but null PREV")*/); 5106 1.45 jonathan KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 5107 1.111 christos 5108 1.45 jonathan if ((prev->m_len - shortfall) < 8) 5109 1.45 jonathan shortfall = prev->m_len; 5110 1.87 perry 5111 1.45 jonathan #ifdef notyet /* just do the safe slow thing for now */ 5112 1.45 jonathan if (!M_READONLY(m)) { 5113 1.45 jonathan if (M_LEADINGSPACE(m) < shorfall) { 5114 1.45 jonathan void *m_dat; 5115 1.338 maxv m_dat = M_BUFADDR(m); 5116 1.332 msaitoh memmove(m_dat, mtod(m, void*), 5117 1.332 msaitoh m->m_len); 5118 1.45 jonathan m->m_data = m_dat; 5119 1.332 msaitoh } 5120 1.45 jonathan } else 5121 1.45 jonathan #endif /* just do the safe slow thing */ 5122 1.45 jonathan { 5123 1.45 jonathan struct mbuf * n = NULL; 5124 1.45 jonathan int newprevlen = prev->m_len - shortfall; 5125 1.45 jonathan 5126 1.45 jonathan MGET(n, M_NOWAIT, MT_DATA); 5127 1.45 jonathan if (n == NULL) 5128 1.45 jonathan return ENOBUFS; 5129 1.397 mlelstv MCLAIM(n, prev->m_owner); 5130 1.45 jonathan KASSERT(m->m_len + shortfall < MLEN 5131 1.45 jonathan /*, 5132 1.45 jonathan ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 5133 1.45 jonathan 5134 1.45 jonathan /* first copy the data we're stealing from prev */ 5135 1.115 tsutsui memcpy(n->m_data, prev->m_data + newprevlen, 5136 1.115 tsutsui shortfall); 5137 1.45 jonathan 5138 1.45 jonathan /* update prev->m_len accordingly */ 5139 1.45 jonathan prev->m_len -= shortfall; 5140 1.45 jonathan 5141 1.45 jonathan /* copy data from runt m */ 5142 1.115 tsutsui memcpy(n->m_data + shortfall, m->m_data, 5143 1.115 tsutsui m->m_len); 5144 1.45 jonathan 5145 1.45 jonathan /* n holds what we stole from prev, plus m */ 5146 1.45 jonathan n->m_len = shortfall + m->m_len; 5147 1.45 jonathan 5148 1.45 jonathan /* stitch n into chain and free m */ 5149 1.45 jonathan n->m_next = m->m_next; 5150 1.45 jonathan prev->m_next = n; 5151 1.45 jonathan /* KASSERT(m->m_next == NULL); */ 5152 1.45 jonathan m->m_next = NULL; 5153 1.45 jonathan m_free(m); 5154 1.45 jonathan m = n; /* for continuing loop */ 5155 1.45 jonathan } 5156 1.45 jonathan } 5157 1.45 jonathan } 5158 1.45 jonathan return 0; 5159 1.45 jonathan } 5160 1.45 jonathan 5161 1.1 fvdl /* 5162 1.207 msaitoh * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 5163 1.1 fvdl * pointers to descriptors. 
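 * Roughly: set the checksum-offload flags, zero-pad short frames for
 * the checksum-assist bug, compact sub-8-byte DMA runts on 5700 Bx,
 * do any TSO setup, load the chain onto a DMA map (retrying once via
 * m_defrag() on EFBIG), then fill one descriptor per DMA segment and
 * mark the last one with BGE_TXBDFLAG_END.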
5164 1.1 fvdl */ 5165 1.104 thorpej static int 5166 1.170 msaitoh bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx) 5167 1.1 fvdl { 5168 1.317 bouyer struct bge_tx_bd *f, *prev_f; 5169 1.170 msaitoh uint32_t frag, cur; 5170 1.170 msaitoh uint16_t csum_flags = 0; 5171 1.170 msaitoh uint16_t txbd_tso_flags = 0; 5172 1.1 fvdl struct txdmamap_pool_entry *dma; 5173 1.1 fvdl bus_dmamap_t dmamap; 5174 1.317 bouyer bus_dma_tag_t dmatag; 5175 1.1 fvdl int i = 0; 5176 1.95 jonathan int use_tso, maxsegsize, error; 5177 1.311 knakahar bool have_vtag; 5178 1.311 knakahar uint16_t vtag; 5179 1.330 msaitoh bool remap; 5180 1.107 blymn 5181 1.394 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 5182 1.394 skrll 5183 1.1 fvdl if (m_head->m_pkthdr.csum_flags) { 5184 1.1 fvdl if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 5185 1.1 fvdl csum_flags |= BGE_TXBDFLAG_IP_CSUM; 5186 1.331 msaitoh if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4 |M_CSUM_UDPv4)) 5187 1.1 fvdl csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 5188 1.1 fvdl } 5189 1.1 fvdl 5190 1.87 perry /* 5191 1.46 jonathan * If we were asked to do an outboard checksum, and the NIC 5192 1.46 jonathan * has the bug where it sometimes adds in the Ethernet padding, 5193 1.46 jonathan * explicitly pad with zeros so the cksum will be correct either way. 5194 1.46 jonathan * (For now, do this for all chip versions, until newer 5195 1.46 jonathan * are confirmed to not require the workaround.) 5196 1.46 jonathan */ 5197 1.46 jonathan if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 5198 1.46 jonathan #ifdef notyet 5199 1.46 jonathan (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 5200 1.87 perry #endif 5201 1.46 jonathan m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 5202 1.46 jonathan goto check_dma_bug; 5203 1.46 jonathan 5204 1.170 msaitoh if (bge_cksum_pad(m_head) != 0) 5205 1.320 bouyer return ENOBUFS; 5206 1.46 jonathan 5207 1.46 jonathan check_dma_bug: 5208 1.157 msaitoh if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)) 5209 1.29 itojun goto doit; 5210 1.157 msaitoh 5211 1.25 jonathan /* 5212 1.25 jonathan * bcm5700 Revision B silicon cannot handle DMA descriptors with 5213 1.87 perry * less than eight bytes. If we encounter a teeny mbuf 5214 1.25 jonathan * at the end of a chain, we can pad. Otherwise, copy. 5215 1.25 jonathan */ 5216 1.45 jonathan if (bge_compact_dma_runt(m_head) != 0) 5217 1.45 jonathan return ENOBUFS; 5218 1.25 jonathan 5219 1.25 jonathan doit: 5220 1.1 fvdl dma = SLIST_FIRST(&sc->txdma_list); 5221 1.320 bouyer if (dma == NULL) { 5222 1.1 fvdl return ENOBUFS; 5223 1.320 bouyer } 5224 1.1 fvdl dmamap = dma->dmamap; 5225 1.317 bouyer dmatag = sc->bge_dmatag; 5226 1.317 bouyer dma->is_dma32 = false; 5227 1.1 fvdl 5228 1.1 fvdl /* 5229 1.95 jonathan * Set up any necessary TSO state before we start packing... 
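 * For TSO this means locating the IP and TCP headers, computing their
 * combined length, and deriving the ASIC-family-specific encoding of
 * that length and the MSS that is later placed in the descriptor
 * flags and bge_rsvd fields.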
5230 1.95 jonathan */ 5231 1.95 jonathan use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 5232 1.95 jonathan if (!use_tso) { 5233 1.95 jonathan maxsegsize = 0; 5234 1.95 jonathan } else { /* TSO setup */ 5235 1.95 jonathan unsigned mss; 5236 1.95 jonathan struct ether_header *eh; 5237 1.95 jonathan unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 5238 1.317 bouyer unsigned bge_hlen; 5239 1.95 jonathan struct mbuf * m0 = m_head; 5240 1.95 jonathan struct ip *ip; 5241 1.95 jonathan struct tcphdr *th; 5242 1.95 jonathan int iphl, hlen; 5243 1.95 jonathan 5244 1.95 jonathan /* 5245 1.95 jonathan * XXX It would be nice if the mbuf pkthdr had offset 5246 1.95 jonathan * fields for the protocol headers. 5247 1.95 jonathan */ 5248 1.95 jonathan 5249 1.95 jonathan eh = mtod(m0, struct ether_header *); 5250 1.95 jonathan switch (htons(eh->ether_type)) { 5251 1.95 jonathan case ETHERTYPE_IP: 5252 1.95 jonathan offset = ETHER_HDR_LEN; 5253 1.95 jonathan break; 5254 1.95 jonathan 5255 1.95 jonathan case ETHERTYPE_VLAN: 5256 1.95 jonathan offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 5257 1.95 jonathan break; 5258 1.95 jonathan 5259 1.95 jonathan default: 5260 1.95 jonathan /* 5261 1.95 jonathan * Don't support this protocol or encapsulation. 5262 1.95 jonathan */ 5263 1.170 msaitoh return ENOBUFS; 5264 1.95 jonathan } 5265 1.95 jonathan 5266 1.95 jonathan /* 5267 1.95 jonathan * TCP/IP headers are in the first mbuf; we can do 5268 1.95 jonathan * this the easy way. 5269 1.95 jonathan */ 5270 1.95 jonathan iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 5271 1.95 jonathan hlen = iphl + offset; 5272 1.95 jonathan if (__predict_false(m0->m_len < 5273 1.95 jonathan (hlen + sizeof(struct tcphdr)))) { 5274 1.95 jonathan 5275 1.316 bouyer aprint_error_dev(sc->bge_dev, 5276 1.138 joerg "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 5277 1.138 joerg "not handled yet\n", 5278 1.367 skrll m0->m_len, hlen+ sizeof(struct tcphdr)); 5279 1.95 jonathan #ifdef NOTYET 5280 1.95 jonathan /* 5281 1.95 jonathan * XXX jonathan (at) NetBSD.org: untested. 5282 1.330 msaitoh * how to force this branch to be taken? 5283 1.95 jonathan */ 5284 1.267 msaitoh BGE_EVCNT_INCR(sc->bge_ev_txtsopain); 5285 1.95 jonathan 5286 1.95 jonathan m_copydata(m0, offset, sizeof(ip), &ip); 5287 1.95 jonathan m_copydata(m0, hlen, sizeof(th), &th); 5288 1.95 jonathan 5289 1.95 jonathan ip.ip_len = 0; 5290 1.95 jonathan 5291 1.95 jonathan m_copyback(m0, hlen + offsetof(struct ip, ip_len), 5292 1.95 jonathan sizeof(ip.ip_len), &ip.ip_len); 5293 1.95 jonathan 5294 1.95 jonathan th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 5295 1.95 jonathan ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 5296 1.95 jonathan 5297 1.95 jonathan m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 5298 1.95 jonathan sizeof(th.th_sum), &th.th_sum); 5299 1.95 jonathan 5300 1.95 jonathan hlen += th.th_off << 2; 5301 1.95 jonathan iptcp_opt_words = hlen; 5302 1.95 jonathan #else 5303 1.95 jonathan /* 5304 1.95 jonathan * if_wm "hard" case not yet supported, can we not 5305 1.95 jonathan * mandate it out of existence? 
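 * Until that is written, a TSO packet whose leading mbuf does not
 * contain the complete IP and TCP headers is simply rejected with
 * ENOBUFS below; the stack normally hands the headers over in one
 * piece, so this path should be rare.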
5306 1.95 jonathan */ 5307 1.95 jonathan (void) ip; (void)th; (void) ip_tcp_hlen; 5308 1.95 jonathan 5309 1.95 jonathan return ENOBUFS; 5310 1.95 jonathan #endif 5311 1.95 jonathan } else { 5312 1.126 christos ip = (struct ip *) (mtod(m0, char *) + offset); 5313 1.126 christos th = (struct tcphdr *) (mtod(m0, char *) + hlen); 5314 1.95 jonathan ip_tcp_hlen = iphl + (th->th_off << 2); 5315 1.95 jonathan 5316 1.95 jonathan /* Total IP/TCP options, in 32-bit words */ 5317 1.95 jonathan iptcp_opt_words = (ip_tcp_hlen 5318 1.95 jonathan - sizeof(struct tcphdr) 5319 1.95 jonathan - sizeof(struct ip)) >> 2; 5320 1.95 jonathan } 5321 1.207 msaitoh if (BGE_IS_575X_PLUS(sc)) { 5322 1.95 jonathan th->th_sum = 0; 5323 1.317 bouyer csum_flags = 0; 5324 1.95 jonathan } else { 5325 1.95 jonathan /* 5326 1.107 blymn * XXX jonathan (at) NetBSD.org: 5705 untested. 5327 1.95 jonathan * Requires TSO firmware patch for 5701/5703/5704. 5328 1.95 jonathan */ 5329 1.95 jonathan th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 5330 1.95 jonathan ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 5331 1.95 jonathan } 5332 1.95 jonathan 5333 1.95 jonathan mss = m_head->m_pkthdr.segsz; 5334 1.107 blymn txbd_tso_flags |= 5335 1.95 jonathan BGE_TXBDFLAG_CPU_PRE_DMA | 5336 1.95 jonathan BGE_TXBDFLAG_CPU_POST_DMA; 5337 1.95 jonathan 5338 1.95 jonathan /* 5339 1.95 jonathan * Our NIC TSO-assist assumes TSO has standard, optionless 5340 1.95 jonathan * IPv4 and TCP headers, which total 40 bytes. By default, 5341 1.95 jonathan * the NIC copies 40 bytes of IP/TCP header from the 5342 1.95 jonathan * supplied header into the IP/TCP header portion of 5343 1.95 jonathan * each post-TSO-segment. If the supplied packet has IP or 5344 1.95 jonathan * TCP options, we need to tell the NIC to copy those extra 5345 1.95 jonathan * bytes into each post-TSO header, in addition to the normal 5346 1.95 jonathan * 40-byte IP/TCP header (and to leave space accordingly). 5347 1.95 jonathan * Unfortunately, the driver encoding of option length 5348 1.95 jonathan * varies across different ASIC families. 5349 1.95 jonathan */ 5350 1.95 jonathan tcp_seg_flags = 0; 5351 1.317 bouyer bge_hlen = ip_tcp_hlen >> 2; 5352 1.317 bouyer if (BGE_IS_5717_PLUS(sc)) { 5353 1.317 bouyer tcp_seg_flags = (bge_hlen & 0x3) << 14; 5354 1.317 bouyer txbd_tso_flags |= 5355 1.317 bouyer ((bge_hlen & 0xF8) << 7) | ((bge_hlen & 0x4) << 2); 5356 1.317 bouyer } else if (BGE_IS_5705_PLUS(sc)) { 5357 1.332 msaitoh tcp_seg_flags = bge_hlen << 11; 5358 1.317 bouyer } else { 5359 1.317 bouyer /* XXX iptcp_opt_words or bge_hlen ? */ 5360 1.332 msaitoh txbd_tso_flags |= iptcp_opt_words << 12; 5361 1.95 jonathan } 5362 1.95 jonathan maxsegsize = mss | tcp_seg_flags; 5363 1.95 jonathan ip->ip_len = htons(mss + ip_tcp_hlen); 5364 1.317 bouyer ip->ip_sum = 0; 5365 1.95 jonathan 5366 1.95 jonathan } /* TSO setup */ 5367 1.95 jonathan 5368 1.317 bouyer have_vtag = vlan_has_tag(m_head); 5369 1.317 bouyer if (have_vtag) 5370 1.317 bouyer vtag = vlan_get_tag(m_head); 5371 1.317 bouyer 5372 1.95 jonathan /* 5373 1.1 fvdl * Start packing the mbufs in this chain into 5374 1.1 fvdl * the fragment pointers. Stop when we run out 5375 1.1 fvdl * of fragments or hit the end of the mbuf chain. 
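 * If bus_dmamap_load_mbuf() fails with EFBIG (too many segments for
 * the map), the chain is compacted once with m_defrag() and the load
 * retried; the 'remap' flag keeps that from looping forever.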
5376 1.1 fvdl */ 5377 1.320 bouyer remap = true; 5378 1.317 bouyer load_again: 5379 1.332 msaitoh error = bus_dmamap_load_mbuf(dmatag, dmamap, m_head, BUS_DMA_NOWAIT); 5380 1.320 bouyer if (__predict_false(error)) { 5381 1.332 msaitoh if (error == EFBIG && remap) { 5382 1.320 bouyer struct mbuf *m; 5383 1.320 bouyer remap = false; 5384 1.320 bouyer m = m_defrag(m_head, M_NOWAIT); 5385 1.320 bouyer if (m != NULL) { 5386 1.320 bouyer KASSERT(m == m_head); 5387 1.320 bouyer goto load_again; 5388 1.320 bouyer } 5389 1.320 bouyer } 5390 1.320 bouyer return error; 5391 1.320 bouyer } 5392 1.118 tsutsui /* 5393 1.118 tsutsui * Sanity check: avoid coming within 16 descriptors 5394 1.118 tsutsui * of the end of the ring. 5395 1.118 tsutsui */ 5396 1.118 tsutsui if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 5397 1.118 tsutsui BGE_TSO_PRINTF(("%s: " 5398 1.118 tsutsui " dmamap_load_mbuf too close to ring wrap\n", 5399 1.138 joerg device_xname(sc->bge_dev))); 5400 1.118 tsutsui goto fail_unload; 5401 1.118 tsutsui } 5402 1.95 jonathan 5403 1.317 bouyer /* Iterate over dmap-map fragments. */ 5404 1.317 bouyer f = prev_f = NULL; 5405 1.317 bouyer cur = frag = *txidx; 5406 1.6 thorpej 5407 1.1 fvdl for (i = 0; i < dmamap->dm_nsegs; i++) { 5408 1.1 fvdl f = &sc->bge_rdata->bge_tx_ring[frag]; 5409 1.1 fvdl if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 5410 1.1 fvdl break; 5411 1.107 blymn 5412 1.172 msaitoh BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr); 5413 1.1 fvdl f->bge_len = dmamap->dm_segs[i].ds_len; 5414 1.320 bouyer if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && ( 5415 1.320 bouyer (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) != 5416 1.320 bouyer ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) || 5417 1.320 bouyer (prev_f != NULL && 5418 1.320 bouyer prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi)) 5419 1.320 bouyer ) { 5420 1.317 bouyer /* 5421 1.317 bouyer * watchdog timeout issue was observed with TSO, 5422 1.317 bouyer * limiting DMA address space to 32bits seems to 5423 1.317 bouyer * address the issue. 5424 1.317 bouyer */ 5425 1.317 bouyer bus_dmamap_unload(dmatag, dmamap); 5426 1.317 bouyer dmatag = sc->bge_dmatag32; 5427 1.317 bouyer dmamap = dma->dmamap32; 5428 1.317 bouyer dma->is_dma32 = true; 5429 1.320 bouyer remap = true; 5430 1.317 bouyer goto load_again; 5431 1.317 bouyer } 5432 1.95 jonathan 5433 1.95 jonathan /* 5434 1.95 jonathan * For 5751 and follow-ons, for TSO we must turn 5435 1.95 jonathan * off checksum-assist flag in the tx-descr, and 5436 1.95 jonathan * supply the ASIC-revision-specific encoding 5437 1.95 jonathan * of TSO flags and segsize. 
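 * Concretely, on 575X-and-later parts every descriptor (and on older
 * parts only the first one) carries the full flag set plus the
 * mss/header-length value in bge_rsvd, while the remaining
 * descriptors on older parts keep only the low 12 bits of the flags.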
5438 1.95 jonathan */ 5439 1.95 jonathan if (use_tso) { 5440 1.207 msaitoh if (BGE_IS_575X_PLUS(sc) || i == 0) { 5441 1.95 jonathan f->bge_rsvd = maxsegsize; 5442 1.95 jonathan f->bge_flags = csum_flags | txbd_tso_flags; 5443 1.95 jonathan } else { 5444 1.95 jonathan f->bge_rsvd = 0; 5445 1.95 jonathan f->bge_flags = 5446 1.95 jonathan (csum_flags | txbd_tso_flags) & 0x0fff; 5447 1.95 jonathan } 5448 1.95 jonathan } else { 5449 1.95 jonathan f->bge_rsvd = 0; 5450 1.95 jonathan f->bge_flags = csum_flags; 5451 1.95 jonathan } 5452 1.1 fvdl 5453 1.311 knakahar if (have_vtag) { 5454 1.1 fvdl f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 5455 1.311 knakahar f->bge_vlan_tag = vtag; 5456 1.1 fvdl } else { 5457 1.1 fvdl f->bge_vlan_tag = 0; 5458 1.1 fvdl } 5459 1.317 bouyer prev_f = f; 5460 1.1 fvdl cur = frag; 5461 1.1 fvdl BGE_INC(frag, BGE_TX_RING_CNT); 5462 1.1 fvdl } 5463 1.1 fvdl 5464 1.95 jonathan if (i < dmamap->dm_nsegs) { 5465 1.95 jonathan BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 5466 1.138 joerg device_xname(sc->bge_dev), i, dmamap->dm_nsegs)); 5467 1.118 tsutsui goto fail_unload; 5468 1.95 jonathan } 5469 1.1 fvdl 5470 1.317 bouyer bus_dmamap_sync(dmatag, dmamap, 0, dmamap->dm_mapsize, 5471 1.1 fvdl BUS_DMASYNC_PREWRITE); 5472 1.1 fvdl 5473 1.95 jonathan if (frag == sc->bge_tx_saved_considx) { 5474 1.95 jonathan BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 5475 1.138 joerg device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx)); 5476 1.95 jonathan 5477 1.118 tsutsui goto fail_unload; 5478 1.95 jonathan } 5479 1.1 fvdl 5480 1.1 fvdl sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 5481 1.1 fvdl sc->bge_cdata.bge_tx_chain[cur] = m_head; 5482 1.1 fvdl SLIST_REMOVE_HEAD(&sc->txdma_list, link); 5483 1.1 fvdl sc->txdma[cur] = dma; 5484 1.118 tsutsui sc->bge_txcnt += dmamap->dm_nsegs; 5485 1.1 fvdl 5486 1.1 fvdl *txidx = frag; 5487 1.1 fvdl 5488 1.170 msaitoh return 0; 5489 1.118 tsutsui 5490 1.158 msaitoh fail_unload: 5491 1.317 bouyer bus_dmamap_unload(dmatag, dmamap); 5492 1.118 tsutsui 5493 1.118 tsutsui return ENOBUFS; 5494 1.1 fvdl } 5495 1.1 fvdl 5496 1.375 skrll 5497 1.375 skrll static void 5498 1.375 skrll bge_start(struct ifnet *ifp) 5499 1.375 skrll { 5500 1.375 skrll struct bge_softc * const sc = ifp->if_softc; 5501 1.375 skrll 5502 1.386 skrll mutex_enter(sc->sc_intr_lock); 5503 1.386 skrll if (!sc->bge_txrx_stopping) 5504 1.386 skrll bge_start_locked(ifp); 5505 1.386 skrll mutex_exit(sc->sc_intr_lock); 5506 1.375 skrll } 5507 1.375 skrll 5508 1.1 fvdl /* 5509 1.1 fvdl * Main transmit routine. To avoid having to do mbuf copies, we put pointers 5510 1.1 fvdl * to the mbuf data regions directly in the transmit descriptors. 5511 1.1 fvdl */ 5512 1.104 thorpej static void 5513 1.375 skrll bge_start_locked(struct ifnet *ifp) 5514 1.1 fvdl { 5515 1.354 skrll struct bge_softc * const sc = ifp->if_softc; 5516 1.1 fvdl struct mbuf *m_head = NULL; 5517 1.320 bouyer struct mbuf *m; 5518 1.170 msaitoh uint32_t prodidx; 5519 1.1 fvdl int pkts = 0; 5520 1.320 bouyer int error; 5521 1.1 fvdl 5522 1.386 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 5523 1.1 fvdl 5524 1.94 jonathan prodidx = sc->bge_tx_prodidx; 5525 1.1 fvdl 5526 1.170 msaitoh while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 5527 1.1 fvdl IFQ_POLL(&ifp->if_snd, m_head); 5528 1.1 fvdl if (m_head == NULL) 5529 1.1 fvdl break; 5530 1.1 fvdl 5531 1.1 fvdl #if 0 5532 1.1 fvdl /* 5533 1.1 fvdl * XXX 5534 1.1 fvdl * safety overkill. 
If this is a fragmented packet chain 5535 1.1 fvdl * with delayed TCP/UDP checksums, then only encapsulate 5536 1.1 fvdl * it if we have enough descriptors to handle the entire 5537 1.1 fvdl * chain at once. 5538 1.1 fvdl * (paranoia -- may not actually be needed) 5539 1.1 fvdl */ 5540 1.1 fvdl if (m_head->m_flags & M_FIRSTFRAG && 5541 1.1 fvdl m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 5542 1.1 fvdl if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 5543 1.86 thorpej M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 5544 1.1 fvdl ifp->if_flags |= IFF_OACTIVE; 5545 1.1 fvdl break; 5546 1.1 fvdl } 5547 1.1 fvdl } 5548 1.1 fvdl #endif 5549 1.1 fvdl 5550 1.1 fvdl /* 5551 1.1 fvdl * Pack the data into the transmit ring. If we 5552 1.1 fvdl * don't have room, set the OACTIVE flag and wait 5553 1.1 fvdl * for the NIC to drain the ring. 5554 1.1 fvdl */ 5555 1.320 bouyer error = bge_encap(sc, m_head, &prodidx); 5556 1.320 bouyer if (__predict_false(error)) { 5557 1.375 skrll if (SLIST_EMPTY(&sc->txdma_list)) { 5558 1.320 bouyer /* just wait for the transmit ring to drain */ 5559 1.320 bouyer break; 5560 1.320 bouyer } 5561 1.320 bouyer IFQ_DEQUEUE(&ifp->if_snd, m); 5562 1.320 bouyer KASSERT(m == m_head); 5563 1.320 bouyer m_freem(m_head); 5564 1.320 bouyer continue; 5565 1.1 fvdl } 5566 1.330 msaitoh 5567 1.1 fvdl /* now we are committed to transmit the packet */ 5568 1.320 bouyer IFQ_DEQUEUE(&ifp->if_snd, m); 5569 1.320 bouyer KASSERT(m == m_head); 5570 1.1 fvdl pkts++; 5571 1.1 fvdl 5572 1.1 fvdl /* 5573 1.1 fvdl * If there's a BPF listener, bounce a copy of this frame 5574 1.1 fvdl * to him. 5575 1.1 fvdl */ 5576 1.314 msaitoh bpf_mtap(ifp, m_head, BPF_D_OUT); 5577 1.1 fvdl } 5578 1.1 fvdl if (pkts == 0) 5579 1.1 fvdl return; 5580 1.1 fvdl 5581 1.1 fvdl /* Transmit */ 5582 1.151 cegger bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5583 1.158 msaitoh /* 5700 b2 errata */ 5584 1.158 msaitoh if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 5585 1.151 cegger bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5586 1.1 fvdl 5587 1.94 jonathan sc->bge_tx_prodidx = prodidx; 5588 1.375 skrll sc->bge_tx_lastsent = time_uptime; 5589 1.375 skrll sc->bge_tx_sending = true; 5590 1.375 skrll } 5591 1.94 jonathan 5592 1.375 skrll static int 5593 1.375 skrll bge_init(struct ifnet *ifp) 5594 1.375 skrll { 5595 1.375 skrll struct bge_softc * const sc = ifp->if_softc; 5596 1.170 msaitoh const uint16_t *m; 5597 1.258 msaitoh uint32_t mode, reg; 5598 1.375 skrll int error = 0; 5599 1.1 fvdl 5600 1.386 skrll ASSERT_SLEEPABLE(); 5601 1.375 skrll KASSERT(IFNET_LOCKED(ifp)); 5602 1.358 skrll KASSERT(ifp == &sc->ethercom.ec_if); 5603 1.1 fvdl 5604 1.394 skrll if (sc->bge_detaching) 5605 1.394 skrll return ENXIO; 5606 1.394 skrll 5607 1.1 fvdl /* Cancel pending I/O and flush buffers. 
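 * From here bge_init() rebuilds the device from scratch: stop and
 * reset the chip with the usual firmware handshake, redo
 * chipinit/blockinit, reload the MTU, MAC address and RX filters,
 * rebuild the RX and TX rings, re-enable the transmitter, receiver
 * and host interrupts, and finally restart the tick callout.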
*/ 5608 1.394 skrll bge_stop(ifp, 0); 5609 1.177 msaitoh 5610 1.177 msaitoh bge_stop_fw(sc); 5611 1.177 msaitoh bge_sig_pre_reset(sc, BGE_RESET_START); 5612 1.1 fvdl bge_reset(sc); 5613 1.177 msaitoh bge_sig_legacy(sc, BGE_RESET_START); 5614 1.287 msaitoh 5615 1.287 msaitoh if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { 5616 1.287 msaitoh reg = CSR_READ_4(sc, BGE_CPMU_CTRL); 5617 1.287 msaitoh reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE | 5618 1.287 msaitoh BGE_CPMU_CTRL_LINK_IDLE_MODE); 5619 1.287 msaitoh CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg); 5620 1.287 msaitoh 5621 1.287 msaitoh reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 5622 1.287 msaitoh reg &= ~BGE_CPMU_LSPD_10MB_CLK; 5623 1.287 msaitoh reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 5624 1.287 msaitoh CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg); 5625 1.287 msaitoh 5626 1.287 msaitoh reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD); 5627 1.287 msaitoh reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK; 5628 1.287 msaitoh reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25; 5629 1.287 msaitoh CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg); 5630 1.287 msaitoh 5631 1.287 msaitoh reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC); 5632 1.287 msaitoh reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK; 5633 1.287 msaitoh reg |= BGE_CPMU_HST_ACC_MACCLK_6_25; 5634 1.287 msaitoh CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg); 5635 1.287 msaitoh } 5636 1.287 msaitoh 5637 1.304 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) { 5638 1.305 msaitoh pcireg_t aercap; 5639 1.305 msaitoh 5640 1.304 msaitoh reg = CSR_READ_4(sc, BGE_PCIE_PWRMNG_THRESH); 5641 1.304 msaitoh reg = (reg & ~BGE_PCIE_PWRMNG_L1THRESH_MASK) 5642 1.304 msaitoh | BGE_PCIE_PWRMNG_L1THRESH_4MS 5643 1.304 msaitoh | BGE_PCIE_PWRMNG_EXTASPMTMR_EN; 5644 1.304 msaitoh CSR_WRITE_4(sc, BGE_PCIE_PWRMNG_THRESH, reg); 5645 1.304 msaitoh 5646 1.304 msaitoh reg = CSR_READ_4(sc, BGE_PCIE_EIDLE_DELAY); 5647 1.304 msaitoh reg = (reg & ~BGE_PCIE_EIDLE_DELAY_MASK) 5648 1.304 msaitoh | BGE_PCIE_EIDLE_DELAY_13CLK; 5649 1.304 msaitoh CSR_WRITE_4(sc, BGE_PCIE_EIDLE_DELAY, reg); 5650 1.304 msaitoh 5651 1.305 msaitoh /* Clear correctable error */ 5652 1.305 msaitoh if (pci_get_ext_capability(sc->sc_pc, sc->sc_pcitag, 5653 1.305 msaitoh PCI_EXTCAP_AER, &aercap, NULL) != 0) 5654 1.305 msaitoh pci_conf_write(sc->sc_pc, sc->sc_pcitag, 5655 1.305 msaitoh aercap + PCI_AER_COR_STATUS, 0xffffffff); 5656 1.304 msaitoh 5657 1.304 msaitoh reg = CSR_READ_4(sc, BGE_PCIE_LINKCTL); 5658 1.304 msaitoh reg = (reg & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN) 5659 1.304 msaitoh | BGE_PCIE_LINKCTL_L1_PLL_PDDIS; 5660 1.304 msaitoh CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, reg); 5661 1.304 msaitoh } 5662 1.304 msaitoh 5663 1.177 msaitoh bge_sig_post_reset(sc, BGE_RESET_START); 5664 1.177 msaitoh 5665 1.1 fvdl bge_chipinit(sc); 5666 1.1 fvdl 5667 1.1 fvdl /* 5668 1.1 fvdl * Init the various state machines, ring 5669 1.1 fvdl * control blocks and firmware. 5670 1.1 fvdl */ 5671 1.1 fvdl error = bge_blockinit(sc); 5672 1.1 fvdl if (error != 0) { 5673 1.138 joerg aprint_error_dev(sc->bge_dev, "initialization error %d\n", 5674 1.1 fvdl error); 5675 1.1 fvdl return error; 5676 1.1 fvdl } 5677 1.1 fvdl 5678 1.236 msaitoh /* 5718 step 25, 57XX step 54 */ 5679 1.1 fvdl /* Specify MTU. */ 5680 1.1 fvdl CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 5681 1.107 blymn ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 5682 1.1 fvdl 5683 1.236 msaitoh /* 5718 step 23 */ 5684 1.1 fvdl /* Load our MAC address. 
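 * The 48-bit station address is written as two registers; for
 * 00:11:22:33:44:55 the writes below are 0x00000011 to
 * BGE_MAC_ADDR1_LO and 0x22334455 to BGE_MAC_ADDR1_HI, i.e. the first
 * two octets in the low register and the remaining four in the high
 * one, with htons() making the result independent of host byte order.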
*/ 5685 1.170 msaitoh m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]); 5686 1.1 fvdl CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 5687 1.336 msaitoh CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, 5688 1.336 msaitoh ((uint32_t)htons(m[1]) << 16) | htons(m[2])); 5689 1.1 fvdl 5690 1.1 fvdl /* Enable or disable promiscuous mode as needed. */ 5691 1.378 skrll if (ifp->if_flags & IFF_PROMISC) 5692 1.1 fvdl BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5693 1.178 msaitoh else 5694 1.1 fvdl BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5695 1.1 fvdl 5696 1.1 fvdl /* Program multicast filter. */ 5697 1.394 skrll mutex_enter(sc->sc_mcast_lock); 5698 1.1 fvdl bge_setmulti(sc); 5699 1.394 skrll mutex_exit(sc->sc_mcast_lock); 5700 1.1 fvdl 5701 1.1 fvdl /* Init RX ring. */ 5702 1.1 fvdl bge_init_rx_ring_std(sc); 5703 1.1 fvdl 5704 1.161 msaitoh /* 5705 1.161 msaitoh * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 5706 1.161 msaitoh * memory to insure that the chip has in fact read the first 5707 1.161 msaitoh * entry of the ring. 5708 1.161 msaitoh */ 5709 1.161 msaitoh if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 5710 1.372 skrll u_int i; 5711 1.161 msaitoh for (i = 0; i < 10; i++) { 5712 1.161 msaitoh DELAY(20); 5713 1.372 skrll uint32_t v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 5714 1.161 msaitoh if (v == (MCLBYTES - ETHER_ALIGN)) 5715 1.161 msaitoh break; 5716 1.161 msaitoh } 5717 1.161 msaitoh if (i == 10) 5718 1.161 msaitoh aprint_error_dev(sc->bge_dev, 5719 1.161 msaitoh "5705 A0 chip failed to load RX ring\n"); 5720 1.161 msaitoh } 5721 1.161 msaitoh 5722 1.1 fvdl /* Init jumbo RX ring. */ 5723 1.1 fvdl if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 5724 1.1 fvdl bge_init_rx_ring_jumbo(sc); 5725 1.1 fvdl 5726 1.1 fvdl /* Init our RX return ring index */ 5727 1.1 fvdl sc->bge_rx_saved_considx = 0; 5728 1.1 fvdl 5729 1.1 fvdl /* Init TX ring. */ 5730 1.1 fvdl bge_init_tx_ring(sc); 5731 1.1 fvdl 5732 1.236 msaitoh /* 5718 step 63, 57XX step 94 */ 5733 1.206 msaitoh /* Enable TX MAC state machine lockup fix. 
*/ 5734 1.206 msaitoh mode = CSR_READ_4(sc, BGE_TX_MODE); 5735 1.206 msaitoh if (BGE_IS_5755_PLUS(sc) || 5736 1.206 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 5737 1.206 msaitoh mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; 5738 1.327 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 5739 1.327 msaitoh BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 5740 1.216 msaitoh mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 5741 1.216 msaitoh mode |= CSR_READ_4(sc, BGE_TX_MODE) & 5742 1.216 msaitoh (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 5743 1.216 msaitoh } 5744 1.206 msaitoh 5745 1.1 fvdl /* Turn on transmitter */ 5746 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); 5747 1.236 msaitoh /* 5718 step 64 */ 5748 1.206 msaitoh DELAY(100); 5749 1.1 fvdl 5750 1.236 msaitoh /* 5718 step 65, 57XX step 95 */ 5751 1.1 fvdl /* Turn on receiver */ 5752 1.216 msaitoh mode = CSR_READ_4(sc, BGE_RX_MODE); 5753 1.216 msaitoh if (BGE_IS_5755_PLUS(sc)) 5754 1.216 msaitoh mode |= BGE_RXMODE_IPV6_ENABLE; 5755 1.327 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 5756 1.327 msaitoh mode |= BGE_RXMODE_IPV4_FRAG_FIX; 5757 1.216 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE); 5758 1.236 msaitoh /* 5718 step 66 */ 5759 1.206 msaitoh DELAY(10); 5760 1.1 fvdl 5761 1.258 msaitoh /* 5718 step 12, 57XX step 37 */ 5762 1.258 msaitoh /* 5763 1.391 andvar * XXX Documents of 5718 series and 577xx say the recommended value 5764 1.258 msaitoh * is 1, but tg3 set 1 only on 57765 series. 5765 1.258 msaitoh */ 5766 1.258 msaitoh if (BGE_IS_57765_PLUS(sc)) 5767 1.258 msaitoh reg = 1; 5768 1.258 msaitoh else 5769 1.258 msaitoh reg = 2; 5770 1.258 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg); 5771 1.71 thorpej 5772 1.1 fvdl /* Tell firmware we're alive. */ 5773 1.1 fvdl BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 5774 1.1 fvdl 5775 1.1 fvdl /* Enable host interrupts. */ 5776 1.226 msaitoh BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 5777 1.226 msaitoh BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 5778 1.211 msaitoh bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0); 5779 1.1 fvdl 5780 1.386 skrll mutex_enter(sc->sc_intr_lock); 5781 1.386 skrll if ((error = bge_ifmedia_upd(ifp)) == 0) { 5782 1.386 skrll sc->bge_txrx_stopping = false; 5783 1.1 fvdl 5784 1.386 skrll /* IFNET_LOCKED asserted above */ 5785 1.386 skrll ifp->if_flags |= IFF_RUNNING; 5786 1.1 fvdl 5787 1.386 skrll callout_schedule(&sc->bge_timeout, hz); 5788 1.386 skrll } 5789 1.386 skrll mutex_exit(sc->sc_intr_lock); 5790 1.142 dyoung 5791 1.394 skrll mutex_enter(sc->sc_mcast_lock); 5792 1.186 msaitoh sc->bge_if_flags = ifp->if_flags; 5793 1.394 skrll mutex_exit(sc->sc_mcast_lock); 5794 1.1 fvdl 5795 1.142 dyoung return error; 5796 1.1 fvdl } 5797 1.1 fvdl 5798 1.1 fvdl /* 5799 1.1 fvdl * Set media options. 5800 1.1 fvdl */ 5801 1.104 thorpej static int 5802 1.104 thorpej bge_ifmedia_upd(struct ifnet *ifp) 5803 1.1 fvdl { 5804 1.354 skrll struct bge_softc * const sc = ifp->if_softc; 5805 1.354 skrll struct mii_data * const mii = &sc->bge_mii; 5806 1.354 skrll struct ifmedia * const ifm = &sc->bge_ifmedia; 5807 1.142 dyoung int rc; 5808 1.1 fvdl 5809 1.386 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 5810 1.386 skrll 5811 1.1 fvdl /* If this is a 1000baseX NIC, enable the TBI port. 
*/ 5812 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) { 5813 1.1 fvdl if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 5814 1.170 msaitoh return EINVAL; 5815 1.170 msaitoh switch (IFM_SUBTYPE(ifm->ifm_media)) { 5816 1.1 fvdl case IFM_AUTO: 5817 1.161 msaitoh /* 5818 1.161 msaitoh * The BCM5704 ASIC appears to have a special 5819 1.161 msaitoh * mechanism for programming the autoneg 5820 1.161 msaitoh * advertisement registers in TBI mode. 5821 1.161 msaitoh */ 5822 1.161 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 5823 1.170 msaitoh uint32_t sgdig; 5824 1.161 msaitoh sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 5825 1.161 msaitoh if (sgdig & BGE_SGDIGSTS_DONE) { 5826 1.161 msaitoh CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 5827 1.161 msaitoh sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 5828 1.161 msaitoh sgdig |= BGE_SGDIGCFG_AUTO | 5829 1.161 msaitoh BGE_SGDIGCFG_PAUSE_CAP | 5830 1.161 msaitoh BGE_SGDIGCFG_ASYM_PAUSE; 5831 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG, 5832 1.161 msaitoh sgdig | BGE_SGDIGCFG_SEND); 5833 1.161 msaitoh DELAY(5); 5834 1.211 msaitoh CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG, 5835 1.211 msaitoh sgdig); 5836 1.161 msaitoh } 5837 1.161 msaitoh } 5838 1.1 fvdl break; 5839 1.1 fvdl case IFM_1000_SX: 5840 1.329 msaitoh if ((ifm->ifm_media & IFM_FDX) != 0) { 5841 1.341 msaitoh BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, 5842 1.1 fvdl BGE_MACMODE_HALF_DUPLEX); 5843 1.1 fvdl } else { 5844 1.341 msaitoh BGE_SETBIT_FLUSH(sc, BGE_MAC_MODE, 5845 1.1 fvdl BGE_MACMODE_HALF_DUPLEX); 5846 1.1 fvdl } 5847 1.216 msaitoh DELAY(40); 5848 1.1 fvdl break; 5849 1.1 fvdl default: 5850 1.170 msaitoh return EINVAL; 5851 1.1 fvdl } 5852 1.69 thorpej /* XXX 802.3x flow control for 1000BASE-SX */ 5853 1.170 msaitoh return 0; 5854 1.1 fvdl } 5855 1.1 fvdl 5856 1.287 msaitoh if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) && 5857 1.287 msaitoh (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) { 5858 1.287 msaitoh uint32_t reg; 5859 1.287 msaitoh 5860 1.287 msaitoh reg = CSR_READ_4(sc, BGE_CPMU_CTRL); 5861 1.287 msaitoh if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) { 5862 1.287 msaitoh reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY; 5863 1.287 msaitoh CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg); 5864 1.287 msaitoh } 5865 1.287 msaitoh } 5866 1.287 msaitoh 5867 1.161 msaitoh BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 5868 1.142 dyoung if ((rc = mii_mediachg(mii)) == ENXIO) 5869 1.142 dyoung return 0; 5870 1.161 msaitoh 5871 1.287 msaitoh if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { 5872 1.287 msaitoh uint32_t reg; 5873 1.287 msaitoh 5874 1.287 msaitoh reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK); 5875 1.287 msaitoh if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK) 5876 1.287 msaitoh == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) { 5877 1.287 msaitoh reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK; 5878 1.287 msaitoh delay(40); 5879 1.287 msaitoh CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg); 5880 1.287 msaitoh } 5881 1.287 msaitoh } 5882 1.287 msaitoh 5883 1.161 msaitoh /* 5884 1.161 msaitoh * Force an interrupt so that we will call bge_link_upd 5885 1.161 msaitoh * if needed and clear any pending link state attention. 5886 1.161 msaitoh * Without this we are not getting any further interrupts 5887 1.161 msaitoh * for link state changes and thus will not UP the link and 5888 1.161 msaitoh * not be able to send in bge_start. The only way to get 5889 1.161 msaitoh * things working was to receive a packet and get a RX intr. 
5890 1.161 msaitoh */ 5891 1.161 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 5892 1.261 msaitoh sc->bge_flags & BGEF_IS_5788) 5893 1.161 msaitoh BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 5894 1.161 msaitoh else 5895 1.161 msaitoh BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 5896 1.161 msaitoh 5897 1.142 dyoung return rc; 5898 1.1 fvdl } 5899 1.1 fvdl 5900 1.1 fvdl /* 5901 1.1 fvdl * Report current media status. 5902 1.1 fvdl */ 5903 1.104 thorpej static void 5904 1.104 thorpej bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 5905 1.1 fvdl { 5906 1.354 skrll struct bge_softc * const sc = ifp->if_softc; 5907 1.354 skrll struct mii_data * const mii = &sc->bge_mii; 5908 1.1 fvdl 5909 1.386 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 5910 1.386 skrll 5911 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) { 5912 1.1 fvdl ifmr->ifm_status = IFM_AVALID; 5913 1.1 fvdl ifmr->ifm_active = IFM_ETHER; 5914 1.1 fvdl if (CSR_READ_4(sc, BGE_MAC_STS) & 5915 1.1 fvdl BGE_MACSTAT_TBI_PCS_SYNCHED) 5916 1.1 fvdl ifmr->ifm_status |= IFM_ACTIVE; 5917 1.1 fvdl ifmr->ifm_active |= IFM_1000_SX; 5918 1.1 fvdl if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 5919 1.1 fvdl ifmr->ifm_active |= IFM_HDX; 5920 1.1 fvdl else 5921 1.1 fvdl ifmr->ifm_active |= IFM_FDX; 5922 1.1 fvdl return; 5923 1.1 fvdl } 5924 1.1 fvdl 5925 1.1 fvdl mii_pollstat(mii); 5926 1.1 fvdl ifmr->ifm_status = mii->mii_media_status; 5927 1.69 thorpej ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 5928 1.69 thorpej sc->bge_flowflags; 5929 1.1 fvdl } 5930 1.1 fvdl 5931 1.104 thorpej static int 5932 1.186 msaitoh bge_ifflags_cb(struct ethercom *ec) 5933 1.186 msaitoh { 5934 1.354 skrll struct ifnet * const ifp = &ec->ec_if; 5935 1.354 skrll struct bge_softc * const sc = ifp->if_softc; 5936 1.375 skrll int ret = 0; 5937 1.375 skrll 5938 1.375 skrll KASSERT(IFNET_LOCKED(ifp)); 5939 1.394 skrll mutex_enter(sc->sc_mcast_lock); 5940 1.375 skrll 5941 1.337 msaitoh u_short change = ifp->if_flags ^ sc->bge_if_flags; 5942 1.396 skrll sc->bge_if_flags = ifp->if_flags; 5943 1.186 msaitoh 5944 1.375 skrll if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 5945 1.375 skrll ret = ENETRESET; 5946 1.375 skrll } else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 5947 1.375 skrll if ((ifp->if_flags & IFF_PROMISC) == 0) 5948 1.375 skrll BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5949 1.375 skrll else 5950 1.375 skrll BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5951 1.186 msaitoh 5952 1.375 skrll bge_setmulti(sc); 5953 1.375 skrll } 5954 1.186 msaitoh 5955 1.394 skrll mutex_exit(sc->sc_mcast_lock); 5956 1.186 msaitoh 5957 1.375 skrll return ret; 5958 1.186 msaitoh } 5959 1.186 msaitoh 5960 1.186 msaitoh static int 5961 1.126 christos bge_ioctl(struct ifnet *ifp, u_long command, void *data) 5962 1.1 fvdl { 5963 1.354 skrll struct bge_softc * const sc = ifp->if_softc; 5964 1.354 skrll struct ifreq * const ifr = (struct ifreq *) data; 5965 1.375 skrll int error = 0; 5966 1.375 skrll 5967 1.375 skrll switch (command) { 5968 1.375 skrll case SIOCADDMULTI: 5969 1.375 skrll case SIOCDELMULTI: 5970 1.375 skrll break; 5971 1.375 skrll default: 5972 1.375 skrll KASSERT(IFNET_LOCKED(ifp)); 5973 1.375 skrll } 5974 1.1 fvdl 5975 1.375 skrll const int s = splnet(); 5976 1.1 fvdl 5977 1.170 msaitoh switch (command) { 5978 1.1 fvdl case SIOCSIFMEDIA: 5979 1.394 skrll mutex_enter(sc->sc_intr_lock); 5980 1.69 thorpej /* XXX Flow control is not supported for 1000BASE-SX */ 5981 1.261 
msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) { 5982 1.69 thorpej ifr->ifr_media &= ~IFM_ETH_FMASK; 5983 1.69 thorpej sc->bge_flowflags = 0; 5984 1.69 thorpej } 5985 1.69 thorpej 5986 1.69 thorpej /* Flow control requires full-duplex mode. */ 5987 1.69 thorpej if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 5988 1.69 thorpej (ifr->ifr_media & IFM_FDX) == 0) { 5989 1.330 msaitoh ifr->ifr_media &= ~IFM_ETH_FMASK; 5990 1.69 thorpej } 5991 1.69 thorpej if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 5992 1.69 thorpej if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 5993 1.157 msaitoh /* We can do both TXPAUSE and RXPAUSE. */ 5994 1.69 thorpej ifr->ifr_media |= 5995 1.69 thorpej IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 5996 1.69 thorpej } 5997 1.69 thorpej sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 5998 1.69 thorpej } 5999 1.394 skrll mutex_exit(sc->sc_intr_lock); 6000 1.334 msaitoh 6001 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) { 6002 1.1 fvdl error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 6003 1.1 fvdl command); 6004 1.1 fvdl } else { 6005 1.375 skrll struct mii_data * const mii = &sc->bge_mii; 6006 1.1 fvdl error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 6007 1.1 fvdl command); 6008 1.1 fvdl } 6009 1.1 fvdl break; 6010 1.1 fvdl default: 6011 1.152 tron if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 6012 1.152 tron break; 6013 1.152 tron 6014 1.152 tron error = 0; 6015 1.152 tron 6016 1.375 skrll if (command == SIOCADDMULTI || command == SIOCDELMULTI) { 6017 1.394 skrll mutex_enter(sc->sc_mcast_lock); 6018 1.375 skrll if (sc->bge_if_flags & IFF_RUNNING) { 6019 1.375 skrll bge_setmulti(sc); 6020 1.375 skrll } 6021 1.394 skrll mutex_exit(sc->sc_mcast_lock); 6022 1.375 skrll } 6023 1.1 fvdl break; 6024 1.1 fvdl } 6025 1.1 fvdl 6026 1.1 fvdl splx(s); 6027 1.1 fvdl 6028 1.170 msaitoh return error; 6029 1.1 fvdl } 6030 1.1 fvdl 6031 1.375 skrll static bool 6032 1.375 skrll bge_watchdog_check(struct bge_softc * const sc) 6033 1.1 fvdl { 6034 1.375 skrll 6035 1.394 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 6036 1.375 skrll 6037 1.375 skrll if (!sc->bge_tx_sending) 6038 1.375 skrll return true; 6039 1.375 skrll 6040 1.375 skrll if (time_uptime - sc->bge_tx_lastsent <= bge_watchdog_timeout) 6041 1.375 skrll return true; 6042 1.1 fvdl 6043 1.330 msaitoh /* If pause frames are active then don't reset the hardware. */ 6044 1.320 bouyer if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) { 6045 1.375 skrll const uint32_t status = CSR_READ_4(sc, BGE_RX_STS); 6046 1.320 bouyer if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) { 6047 1.320 bouyer /* 6048 1.320 bouyer * If link partner has us in XOFF state then wait for 6049 1.320 bouyer * the condition to clear. 6050 1.320 bouyer */ 6051 1.320 bouyer CSR_WRITE_4(sc, BGE_RX_STS, status); 6052 1.375 skrll sc->bge_tx_lastsent = time_uptime; 6053 1.375 skrll return true; 6054 1.320 bouyer } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 && 6055 1.320 bouyer (status & BGE_RXSTAT_RCVD_XON) != 0) { 6056 1.320 bouyer /* 6057 1.320 bouyer * If link partner has us in XOFF state then wait for 6058 1.320 bouyer * the condition to clear. 6059 1.320 bouyer */ 6060 1.320 bouyer CSR_WRITE_4(sc, BGE_RX_STS, status); 6061 1.375 skrll sc->bge_tx_lastsent = time_uptime; 6062 1.375 skrll return true; 6063 1.320 bouyer } 6064 1.320 bouyer /* 6065 1.330 msaitoh * Any other condition is unexpected and the controller 6066 1.330 msaitoh * should be reset. 
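 * bge_watchdog_check() returns true while the TX path looks healthy
 * (or is merely flow-controlled by the link partner); a false return
 * makes bge_watchdog_tick() queue sc_reset_work, and the actual reset
 * then runs from bge_handle_reset_work() with IFNET_LOCK held.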
6067 1.320 bouyer */ 6068 1.320 bouyer } 6069 1.320 bouyer 6070 1.375 skrll return false; 6071 1.375 skrll } 6072 1.375 skrll 6073 1.375 skrll static bool 6074 1.386 skrll bge_watchdog_tick(struct ifnet *ifp) 6075 1.375 skrll { 6076 1.375 skrll struct bge_softc * const sc = ifp->if_softc; 6077 1.375 skrll 6078 1.394 skrll KASSERT(mutex_owned(sc->sc_intr_lock)); 6079 1.375 skrll 6080 1.379 skrll if (!sc->sc_trigger_reset && bge_watchdog_check(sc)) 6081 1.375 skrll return true; 6082 1.375 skrll 6083 1.375 skrll if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0) 6084 1.375 skrll workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL); 6085 1.375 skrll 6086 1.375 skrll return false; 6087 1.375 skrll } 6088 1.375 skrll 6089 1.375 skrll /* 6090 1.375 skrll * Perform an interface watchdog reset. 6091 1.375 skrll */ 6092 1.375 skrll static void 6093 1.375 skrll bge_handle_reset_work(struct work *work, void *arg) 6094 1.375 skrll { 6095 1.375 skrll struct bge_softc * const sc = arg; 6096 1.375 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 6097 1.375 skrll 6098 1.386 skrll printf("%s: watchdog timeout -- resetting\n", ifp->if_xname); 6099 1.386 skrll 6100 1.375 skrll /* Don't want ioctl operations to happen */ 6101 1.375 skrll IFNET_LOCK(ifp); 6102 1.375 skrll 6103 1.375 skrll /* reset the interface. */ 6104 1.1 fvdl bge_init(ifp); 6105 1.1 fvdl 6106 1.375 skrll IFNET_UNLOCK(ifp); 6107 1.375 skrll 6108 1.375 skrll /* 6109 1.375 skrll * There are still some upper layer processing which call 6110 1.375 skrll * ifp->if_start(). e.g. ALTQ or one CPU system 6111 1.375 skrll */ 6112 1.375 skrll /* Try to get more packets going. */ 6113 1.375 skrll ifp->if_start(ifp); 6114 1.375 skrll 6115 1.375 skrll atomic_store_relaxed(&sc->sc_reset_pending, 0); 6116 1.1 fvdl } 6117 1.1 fvdl 6118 1.11 thorpej static void 6119 1.11 thorpej bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 6120 1.11 thorpej { 6121 1.11 thorpej int i; 6122 1.11 thorpej 6123 1.211 msaitoh BGE_CLRBIT_FLUSH(sc, reg, bit); 6124 1.11 thorpej 6125 1.180 msaitoh for (i = 0; i < 1000; i++) { 6126 1.216 msaitoh delay(100); 6127 1.11 thorpej if ((CSR_READ_4(sc, reg) & bit) == 0) 6128 1.11 thorpej return; 6129 1.11 thorpej } 6130 1.11 thorpej 6131 1.165 msaitoh /* 6132 1.165 msaitoh * Doesn't print only when the register is BGE_SRS_MODE. It occurs 6133 1.165 msaitoh * on some environment (and once after boot?) 6134 1.165 msaitoh */ 6135 1.165 msaitoh if (reg != BGE_SRS_MODE) 6136 1.165 msaitoh aprint_error_dev(sc->bge_dev, 6137 1.165 msaitoh "block failed to stop: reg 0x%lx, bit 0x%08x\n", 6138 1.165 msaitoh (u_long)reg, bit); 6139 1.11 thorpej } 6140 1.11 thorpej 6141 1.1 fvdl /* 6142 1.1 fvdl * Stop the adapter and free any mbufs allocated to the 6143 1.1 fvdl * RX and TX lists. 6144 1.1 fvdl */ 6145 1.104 thorpej static void 6146 1.394 skrll bge_stop(struct ifnet *ifp, int disable) 6147 1.1 fvdl { 6148 1.354 skrll struct bge_softc * const sc = ifp->if_softc; 6149 1.1 fvdl 6150 1.386 skrll ASSERT_SLEEPABLE(); 6151 1.386 skrll KASSERT(IFNET_LOCKED(ifp)); 6152 1.386 skrll 6153 1.386 skrll mutex_enter(sc->sc_intr_lock); 6154 1.386 skrll sc->bge_txrx_stopping = true; 6155 1.386 skrll mutex_exit(sc->sc_intr_lock); 6156 1.386 skrll 6157 1.394 skrll callout_halt(&sc->bge_timeout, NULL); 6158 1.1 fvdl 6159 1.216 msaitoh /* Disable host interrupts. 
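 * The rest of bge_stop() then walks the usual shutdown order: notify
 * the firmware, stop the receive and transmit blocks, halt the memory
 * managers, reset the chip, and finally release the RX and TX rings
 * and mark the interface down.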
*/ 6160 1.226 msaitoh BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 6161 1.216 msaitoh bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); 6162 1.216 msaitoh 6163 1.1 fvdl /* 6164 1.177 msaitoh * Tell firmware we're shutting down. 6165 1.177 msaitoh */ 6166 1.177 msaitoh bge_stop_fw(sc); 6167 1.216 msaitoh bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 6168 1.177 msaitoh 6169 1.177 msaitoh /* 6170 1.208 msaitoh * Disable all of the receiver blocks. 6171 1.1 fvdl */ 6172 1.11 thorpej bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 6173 1.11 thorpej bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 6174 1.11 thorpej bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 6175 1.172 msaitoh if (BGE_IS_5700_FAMILY(sc)) 6176 1.44 hannken bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 6177 1.11 thorpej bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 6178 1.11 thorpej bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 6179 1.11 thorpej bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 6180 1.1 fvdl 6181 1.1 fvdl /* 6182 1.208 msaitoh * Disable all of the transmit blocks. 6183 1.1 fvdl */ 6184 1.11 thorpej bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 6185 1.11 thorpej bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 6186 1.11 thorpej bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 6187 1.11 thorpej bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 6188 1.11 thorpej bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 6189 1.172 msaitoh if (BGE_IS_5700_FAMILY(sc)) 6190 1.44 hannken bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 6191 1.11 thorpej bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 6192 1.1 fvdl 6193 1.216 msaitoh BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB); 6194 1.216 msaitoh delay(40); 6195 1.216 msaitoh 6196 1.216 msaitoh bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 6197 1.216 msaitoh 6198 1.1 fvdl /* 6199 1.1 fvdl * Shut down all of the memory managers and related 6200 1.1 fvdl * state machines. 6201 1.1 fvdl */ 6202 1.236 msaitoh /* 5718 step 5a,5b */ 6203 1.11 thorpej bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 6204 1.11 thorpej bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 6205 1.172 msaitoh if (BGE_IS_5700_FAMILY(sc)) 6206 1.44 hannken bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 6207 1.11 thorpej 6208 1.236 msaitoh /* 5718 step 5c,5d */ 6209 1.1 fvdl CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 6210 1.1 fvdl CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 6211 1.11 thorpej 6212 1.172 msaitoh if (BGE_IS_5700_FAMILY(sc)) { 6213 1.44 hannken bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 6214 1.44 hannken bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 6215 1.44 hannken } 6216 1.1 fvdl 6217 1.177 msaitoh bge_reset(sc); 6218 1.216 msaitoh bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 6219 1.216 msaitoh bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 6220 1.1 fvdl 6221 1.1 fvdl /* 6222 1.177 msaitoh * Keep the ASF firmware running if up. 6223 1.1 fvdl */ 6224 1.177 msaitoh if (sc->bge_asf_mode & ASF_STACKUP) 6225 1.177 msaitoh BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 6226 1.177 msaitoh else 6227 1.177 msaitoh BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 6228 1.1 fvdl 6229 1.1 fvdl /* Free the RX lists. */ 6230 1.376 skrll bge_free_rx_ring_std(sc); 6231 1.1 fvdl 6232 1.1 fvdl /* Free jumbo RX list. 
*/ 6233 1.172 msaitoh if (BGE_IS_JUMBO_CAPABLE(sc)) 6234 1.172 msaitoh bge_free_rx_ring_jumbo(sc); 6235 1.1 fvdl 6236 1.1 fvdl /* Free TX buffers. */ 6237 1.320 bouyer bge_free_tx_ring(sc, disable); 6238 1.1 fvdl 6239 1.1 fvdl /* 6240 1.1 fvdl * Isolate/power down the PHY. 6241 1.1 fvdl */ 6242 1.386 skrll if (!(sc->bge_flags & BGEF_FIBER_TBI)) { 6243 1.386 skrll mutex_enter(sc->sc_intr_lock); 6244 1.1 fvdl mii_down(&sc->bge_mii); 6245 1.386 skrll mutex_exit(sc->sc_intr_lock); 6246 1.386 skrll } 6247 1.1 fvdl 6248 1.161 msaitoh sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 6249 1.1 fvdl 6250 1.161 msaitoh /* Clear MAC's link state (PHY may still have link UP). */ 6251 1.161 msaitoh BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6252 1.1 fvdl 6253 1.375 skrll ifp->if_flags &= ~IFF_RUNNING; 6254 1.386 skrll 6255 1.394 skrll mutex_enter(sc->sc_mcast_lock); 6256 1.386 skrll sc->bge_if_flags = ifp->if_flags; 6257 1.394 skrll mutex_exit(sc->sc_mcast_lock); 6258 1.1 fvdl } 6259 1.1 fvdl 6260 1.161 msaitoh static void 6261 1.161 msaitoh bge_link_upd(struct bge_softc *sc) 6262 1.161 msaitoh { 6263 1.354 skrll struct ifnet * const ifp = &sc->ethercom.ec_if; 6264 1.354 skrll struct mii_data * const mii = &sc->bge_mii; 6265 1.170 msaitoh uint32_t status; 6266 1.322 msaitoh uint16_t phyval; 6267 1.161 msaitoh int link; 6268 1.161 msaitoh 6269 1.386 skrll KASSERT(sc->sc_intr_lock); 6270 1.386 skrll 6271 1.161 msaitoh /* Clear 'pending link event' flag */ 6272 1.161 msaitoh BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); 6273 1.161 msaitoh 6274 1.161 msaitoh /* 6275 1.161 msaitoh * Process link state changes. 6276 1.161 msaitoh * Grrr. The link status word in the status block does 6277 1.161 msaitoh * not work correctly on the BCM5700 rev AX and BX chips, 6278 1.161 msaitoh * according to all available information. Hence, we have 6279 1.161 msaitoh * to enable MII interrupts in order to properly obtain 6280 1.161 msaitoh * async link changes. Unfortunately, this also means that 6281 1.161 msaitoh * we have to read the MAC status register to detect link 6282 1.161 msaitoh * changes, thereby adding an additional register access to 6283 1.161 msaitoh * the interrupt handler. 
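 * Hence the separate cases below: BCM5700 relies on the MII
 * interrupt, TBI/fiber boards read the MAC status register, chips
 * with the broken autopoll status bit read BGE_MI_STS directly, and
 * everything else simply polls the PHY with mii_pollstat().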
6284 1.161 msaitoh */ 6285 1.161 msaitoh 6286 1.161 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 6287 1.161 msaitoh status = CSR_READ_4(sc, BGE_MAC_STS); 6288 1.161 msaitoh if (status & BGE_MACSTAT_MI_INTERRUPT) { 6289 1.161 msaitoh mii_pollstat(mii); 6290 1.161 msaitoh 6291 1.161 msaitoh if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 6292 1.161 msaitoh mii->mii_media_status & IFM_ACTIVE && 6293 1.161 msaitoh IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 6294 1.161 msaitoh BGE_STS_SETBIT(sc, BGE_STS_LINK); 6295 1.161 msaitoh else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 6296 1.161 msaitoh (!(mii->mii_media_status & IFM_ACTIVE) || 6297 1.161 msaitoh IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 6298 1.161 msaitoh BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6299 1.161 msaitoh 6300 1.161 msaitoh /* Clear the interrupt */ 6301 1.161 msaitoh CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 6302 1.161 msaitoh BGE_EVTENB_MI_INTERRUPT); 6303 1.216 msaitoh bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, 6304 1.322 msaitoh BRGPHY_MII_ISR, &phyval); 6305 1.216 msaitoh bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, 6306 1.216 msaitoh BRGPHY_MII_IMR, BRGPHY_INTRS); 6307 1.161 msaitoh } 6308 1.161 msaitoh return; 6309 1.161 msaitoh } 6310 1.161 msaitoh 6311 1.261 msaitoh if (sc->bge_flags & BGEF_FIBER_TBI) { 6312 1.161 msaitoh status = CSR_READ_4(sc, BGE_MAC_STS); 6313 1.161 msaitoh if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 6314 1.161 msaitoh if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 6315 1.161 msaitoh BGE_STS_SETBIT(sc, BGE_STS_LINK); 6316 1.219 msaitoh if (BGE_ASICREV(sc->bge_chipid) 6317 1.219 msaitoh == BGE_ASICREV_BCM5704) { 6318 1.341 msaitoh BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, 6319 1.161 msaitoh BGE_MACMODE_TBI_SEND_CFGS); 6320 1.219 msaitoh DELAY(40); 6321 1.219 msaitoh } 6322 1.161 msaitoh CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 6323 1.161 msaitoh if_link_state_change(ifp, LINK_STATE_UP); 6324 1.161 msaitoh } 6325 1.161 msaitoh } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { 6326 1.161 msaitoh BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6327 1.161 msaitoh if_link_state_change(ifp, LINK_STATE_DOWN); 6328 1.161 msaitoh } 6329 1.161 msaitoh } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) { 6330 1.178 msaitoh /* 6331 1.161 msaitoh * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED 6332 1.161 msaitoh * bit in status word always set. Workaround this bug by 6333 1.161 msaitoh * reading PHY link status directly. 6334 1.161 msaitoh */ 6335 1.161 msaitoh link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)? 6336 1.161 msaitoh BGE_STS_LINK : 0; 6337 1.161 msaitoh 6338 1.161 msaitoh if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { 6339 1.161 msaitoh mii_pollstat(mii); 6340 1.161 msaitoh 6341 1.161 msaitoh if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 6342 1.161 msaitoh mii->mii_media_status & IFM_ACTIVE && 6343 1.161 msaitoh IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 6344 1.161 msaitoh BGE_STS_SETBIT(sc, BGE_STS_LINK); 6345 1.161 msaitoh else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 6346 1.161 msaitoh (!(mii->mii_media_status & IFM_ACTIVE) || 6347 1.161 msaitoh IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 6348 1.161 msaitoh BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6349 1.161 msaitoh } 6350 1.256 msaitoh } else { 6351 1.256 msaitoh /* 6352 1.256 msaitoh * For controllers that call mii_tick, we have to poll 6353 1.256 msaitoh * link status. 
6354 1.256 msaitoh 		 */
6355 1.256 msaitoh 		mii_pollstat(mii);
6356 1.161 msaitoh 	}
6357 1.161 msaitoh 
6358 1.287 msaitoh 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
6359 1.287 msaitoh 		uint32_t reg, scale;
6360 1.287 msaitoh 
6361 1.287 msaitoh 		reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) &
6362 1.287 msaitoh 		    BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK;
6363 1.287 msaitoh 		if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5)
6364 1.287 msaitoh 			scale = 65;
6365 1.287 msaitoh 		else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25)
6366 1.287 msaitoh 			scale = 6;
6367 1.287 msaitoh 		else
6368 1.287 msaitoh 			scale = 12;
6369 1.287 msaitoh 
6370 1.287 msaitoh 		reg = CSR_READ_4(sc, BGE_MISC_CFG) &
6371 1.287 msaitoh 		    ~BGE_MISCCFG_TIMER_PRESCALER;
6372 1.287 msaitoh 		reg |= scale << 1;
6373 1.287 msaitoh 		CSR_WRITE_4(sc, BGE_MISC_CFG, reg);
6374 1.287 msaitoh 	}
6375 1.161 msaitoh 	/* Clear the attention */
6376 1.331 msaitoh 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
6377 1.331 msaitoh 	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
6378 1.161 msaitoh 	    BGE_MACSTAT_LINK_CHANGED);
6379 1.161 msaitoh }
6380 1.161 msaitoh 
6381 1.64 jonathan static int
6382 1.207 msaitoh bge_sysctl_verify(SYSCTLFN_ARGS)
6383 1.64 jonathan {
6384 1.64 jonathan 	int error, t;
6385 1.64 jonathan 	struct sysctlnode node;
6386 1.64 jonathan 
6387 1.64 jonathan 	node = *rnode;
6388 1.64 jonathan 	t = *(int*)rnode->sysctl_data;
6389 1.64 jonathan 	node.sysctl_data = &t;
6390 1.64 jonathan 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
6391 1.64 jonathan 	if (error || newp == NULL)
6392 1.170 msaitoh 		return error;
6393 1.64 jonathan 
6394 1.64 jonathan #if 0
6395 1.64 jonathan 	DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
6396 1.64 jonathan 	    node.sysctl_num, rnode->sysctl_num));
6397 1.64 jonathan #endif
6398 1.64 jonathan 
6399 1.64 jonathan 	if (node.sysctl_num == bge_rxthresh_nodenum) {
6400 1.64 jonathan 		if (t < 0 || t >= NBGE_RX_THRESH)
6401 1.170 msaitoh 			return EINVAL;
6402 1.64 jonathan 		bge_update_all_threshes(t);
6403 1.64 jonathan 	} else
6404 1.170 msaitoh 		return EINVAL;
6405 1.64 jonathan 
6406 1.64 jonathan 	*(int*)rnode->sysctl_data = t;
6407 1.64 jonathan 
6408 1.170 msaitoh 	return 0;
6409 1.64 jonathan }
6410 1.64 jonathan 
6411 1.64 jonathan /*
6412 1.65 atatat  * Set up sysctl(3) MIB, hw.bge.*.
6413 1.64 jonathan  */
6414 1.190 jruoho static void
6415 1.207 msaitoh bge_sysctl_init(struct bge_softc *sc)
6416 1.64 jonathan {
6417 1.66 atatat 	int rc, bge_root_num;
6418 1.90 atatat 	const struct sysctlnode *node;
6419 1.64 jonathan 
6420 1.190 jruoho 	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
6421 1.190 jruoho 	    0, CTLTYPE_NODE, "bge",
6422 1.73 atatat 	    SYSCTL_DESCR("BGE interface controls"),
6423 1.64 jonathan 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
6424 1.203 msaitoh 		goto out;
6425 1.64 jonathan 	}
6426 1.64 jonathan 
6427 1.66 atatat 	bge_root_num = node->sysctl_num;
6428 1.66 atatat 
6429 1.64 jonathan 	/* BGE Rx interrupt mitigation level */
6430 1.190 jruoho 	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
6431 1.190 jruoho 	    CTLFLAG_READWRITE,
6432 1.73 atatat 	    CTLTYPE_INT, "rx_lvl",
6433 1.73 atatat 	    SYSCTL_DESCR("BGE receive interrupt mitigation level"),
6434 1.207 msaitoh 	    bge_sysctl_verify, 0,
6435 1.64 jonathan 	    &bge_rx_thresh_lvl,
6436 1.66 atatat 	    0, CTL_HW, bge_root_num, CTL_CREATE,
6437 1.64 jonathan 	    CTL_EOL)) != 0) {
6438 1.203 msaitoh 		goto out;
6439 1.64 jonathan 	}
6440 1.64 jonathan 
6441 1.64 jonathan 	bge_rxthresh_nodenum = node->sysctl_num;
6442 1.64 jonathan 
6443 1.375 skrll #ifdef BGE_DEBUG
6444 1.375 skrll 	if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node,
6445 1.375 skrll 	    CTLFLAG_READWRITE,
6446 1.375 skrll 	    CTLTYPE_BOOL, "trigger_reset",
6447 1.375 skrll 	    SYSCTL_DESCR("Trigger an interface reset"),
6448 1.379 skrll 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE,
6449 1.375 skrll 	    CTL_EOL)) != 0) {
6450 1.375 skrll 		goto out;
6451 1.375 skrll 	}
6452 1.375 skrll #endif
6453 1.64 jonathan 	return;
6454 1.64 jonathan 
6455 1.203 msaitoh out:
6456 1.138 joerg 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
6457 1.64 jonathan }
6458 1.151 cegger 
6459 1.172 msaitoh #ifdef BGE_DEBUG
6460 1.172 msaitoh void
6461 1.172 msaitoh bge_debug_info(struct bge_softc *sc)
6462 1.172 msaitoh {
6463 1.172 msaitoh 
6464 1.172 msaitoh 	printf("Hardware Flags:\n");
6465 1.214 msaitoh 	if (BGE_IS_57765_PLUS(sc))
6466 1.214 msaitoh 		printf(" - 57765 Plus\n");
6467 1.214 msaitoh 	if (BGE_IS_5717_PLUS(sc))
6468 1.214 msaitoh 		printf(" - 5717 Plus\n");
6469 1.172 msaitoh 	if (BGE_IS_5755_PLUS(sc))
6470 1.172 msaitoh 		printf(" - 5755 Plus\n");
6471 1.207 msaitoh 	if (BGE_IS_575X_PLUS(sc))
6472 1.207 msaitoh 		printf(" - 575X Plus\n");
6473 1.172 msaitoh 	if (BGE_IS_5705_PLUS(sc))
6474 1.172 msaitoh 		printf(" - 5705 Plus\n");
6475 1.172 msaitoh 	if (BGE_IS_5714_FAMILY(sc))
6476 1.172 msaitoh 		printf(" - 5714 Family\n");
6477 1.172 msaitoh 	if (BGE_IS_5700_FAMILY(sc))
6478 1.172 msaitoh 		printf(" - 5700 Family\n");
6479 1.261 msaitoh 	if (sc->bge_flags & BGEF_IS_5788)
6480 1.172 msaitoh 		printf(" - 5788\n");
6481 1.261 msaitoh 	if (sc->bge_flags & BGEF_JUMBO_CAPABLE)
6482 1.172 msaitoh 		printf(" - Supports Jumbo Frames\n");
6483 1.261 msaitoh 	if (sc->bge_flags & BGEF_NO_EEPROM)
6484 1.173 msaitoh 		printf(" - No EEPROM\n");
6485 1.261 msaitoh 	if (sc->bge_flags & BGEF_PCIX)
6486 1.172 msaitoh 		printf(" - PCI-X Bus\n");
6487 1.261 msaitoh 	if (sc->bge_flags & BGEF_PCIE)
6488 1.172 msaitoh 		printf(" - PCI Express Bus\n");
6489 1.261 msaitoh 	if (sc->bge_flags & BGEF_RX_ALIGNBUG)
6490 1.172 msaitoh 		printf(" - RX Alignment Bug\n");
6491 1.261 msaitoh 	if (sc->bge_flags & BGEF_APE)
6492 1.216 msaitoh 		printf(" - APE\n");
6493 1.261 msaitoh 	if (sc->bge_flags & BGEF_CPMU_PRESENT)
6494 1.214 msaitoh 		printf(" - CPMU\n");
6495 1.261 msaitoh 	if (sc->bge_flags & BGEF_TSO)
6496 1.172 msaitoh 		printf(" - TSO\n");
printf(" - TSO\n"); 6497 1.288 msaitoh if (sc->bge_flags & BGEF_TAGGED_STATUS) 6498 1.288 msaitoh printf(" - TAGGED_STATUS\n"); 6499 1.220 msaitoh 6500 1.279 msaitoh /* PHY related */ 6501 1.261 msaitoh if (sc->bge_phy_flags & BGEPHYF_NO_3LED) 6502 1.220 msaitoh printf(" - No 3 LEDs\n"); 6503 1.261 msaitoh if (sc->bge_phy_flags & BGEPHYF_CRC_BUG) 6504 1.220 msaitoh printf(" - CRC bug\n"); 6505 1.261 msaitoh if (sc->bge_phy_flags & BGEPHYF_ADC_BUG) 6506 1.220 msaitoh printf(" - ADC bug\n"); 6507 1.261 msaitoh if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG) 6508 1.220 msaitoh printf(" - 5704 A0 bug\n"); 6509 1.261 msaitoh if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG) 6510 1.220 msaitoh printf(" - jitter bug\n"); 6511 1.261 msaitoh if (sc->bge_phy_flags & BGEPHYF_BER_BUG) 6512 1.220 msaitoh printf(" - BER bug\n"); 6513 1.261 msaitoh if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM) 6514 1.220 msaitoh printf(" - adjust trim\n"); 6515 1.261 msaitoh if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED) 6516 1.220 msaitoh printf(" - no wirespeed\n"); 6517 1.279 msaitoh 6518 1.279 msaitoh /* ASF related */ 6519 1.279 msaitoh if (sc->bge_asf_mode & ASF_ENABLE) 6520 1.279 msaitoh printf(" - ASF enable\n"); 6521 1.280 enami if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) 6522 1.279 msaitoh printf(" - ASF new handshake\n"); 6523 1.279 msaitoh if (sc->bge_asf_mode & ASF_STACKUP) 6524 1.279 msaitoh printf(" - ASF stackup\n"); 6525 1.172 msaitoh } 6526 1.172 msaitoh #endif /* BGE_DEBUG */ 6527 1.172 msaitoh 6528 1.172 msaitoh static int 6529 1.172 msaitoh bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 6530 1.172 msaitoh { 6531 1.172 msaitoh prop_dictionary_t dict; 6532 1.172 msaitoh prop_data_t ea; 6533 1.172 msaitoh 6534 1.261 msaitoh if ((sc->bge_flags & BGEF_NO_EEPROM) == 0) 6535 1.172 msaitoh return 1; 6536 1.172 msaitoh 6537 1.172 msaitoh dict = device_properties(sc->bge_dev); 6538 1.172 msaitoh ea = prop_dictionary_get(dict, "mac-address"); 6539 1.172 msaitoh if (ea != NULL) { 6540 1.172 msaitoh KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 6541 1.172 msaitoh KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 6542 1.346 msaitoh memcpy(ether_addr, prop_data_value(ea), ETHER_ADDR_LEN); 6543 1.172 msaitoh return 0; 6544 1.172 msaitoh } 6545 1.172 msaitoh 6546 1.172 msaitoh return 1; 6547 1.172 msaitoh } 6548 1.172 msaitoh 6549 1.178 msaitoh static int 6550 1.170 msaitoh bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 6551 1.151 cegger { 6552 1.170 msaitoh uint32_t mac_addr; 6553 1.151 cegger 6554 1.205 msaitoh mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB); 6555 1.151 cegger if ((mac_addr >> 16) == 0x484b) { 6556 1.151 cegger ether_addr[0] = (uint8_t)(mac_addr >> 8); 6557 1.151 cegger ether_addr[1] = (uint8_t)mac_addr; 6558 1.205 msaitoh mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); 6559 1.151 cegger ether_addr[2] = (uint8_t)(mac_addr >> 24); 6560 1.151 cegger ether_addr[3] = (uint8_t)(mac_addr >> 16); 6561 1.151 cegger ether_addr[4] = (uint8_t)(mac_addr >> 8); 6562 1.151 cegger ether_addr[5] = (uint8_t)mac_addr; 6563 1.170 msaitoh return 0; 6564 1.151 cegger } 6565 1.170 msaitoh return 1; 6566 1.151 cegger } 6567 1.151 cegger 6568 1.151 cegger static int 6569 1.170 msaitoh bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 6570 1.151 cegger { 6571 1.151 cegger int mac_offset = BGE_EE_MAC_OFFSET; 6572 1.151 cegger 6573 1.177 msaitoh if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 6574 1.151 cegger mac_offset = BGE_EE_MAC_OFFSET_5906; 6575 1.151 cegger 
6576 1.151 cegger 	return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6577 1.151 cegger 	    ETHER_ADDR_LEN));
6578 1.151 cegger }
6579 1.151 cegger 
6580 1.151 cegger static int
6581 1.170 msaitoh bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6582 1.151 cegger {
6583 1.151 cegger 
6584 1.170 msaitoh 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
6585 1.170 msaitoh 		return 1;
6586 1.151 cegger 
6587 1.151 cegger 	return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6588 1.151 cegger 	    ETHER_ADDR_LEN));
6589 1.151 cegger }
6590 1.151 cegger 
6591 1.151 cegger static int
6592 1.170 msaitoh bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6593 1.151 cegger {
6594 1.151 cegger 	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6595 1.151 cegger 		/* NOTE: Order is critical */
6596 1.172 msaitoh 		bge_get_eaddr_fw,
6597 1.151 cegger 		bge_get_eaddr_mem,
6598 1.151 cegger 		bge_get_eaddr_nvram,
6599 1.151 cegger 		bge_get_eaddr_eeprom,
6600 1.151 cegger 		NULL
6601 1.151 cegger 	};
6602 1.151 cegger 	const bge_eaddr_fcn_t *func;
6603 1.151 cegger 
6604 1.151 cegger 	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6605 1.151 cegger 		if ((*func)(sc, eaddr) == 0)
6606 1.151 cegger 			break;
6607 1.151 cegger 	}
6608 1.362 skrll 	return *func == NULL ? ENXIO : 0;
6609 1.151 cegger }
6610
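/*
 * Summary of the MAC address lookup above: the sources are tried
 * strictly in the order listed in bge_eaddr_funcs -- device properties
 * ("mac-address", only consulted when BGEF_NO_EEPROM is set), the SRAM
 * mailbox, NVRAM, and finally the EEPROM.  The first function that
 * returns 0 supplies the address and the rest are skipped; if none
 * succeeds, bge_get_eaddr() returns ENXIO.
 *
 * Illustrative sketch, not part of the driver: on boards without an
 * EEPROM, machine-dependent code can hand the address to
 * bge_get_eaddr_fw() by attaching a "mac-address" data property before
 * the driver attaches.  Assuming the newer proplib API used above
 * (prop_data_value()/prop_data_create_copy(); older trees would use
 * prop_data_create_data()), that could look roughly like:
 *
 *	static void
 *	example_set_bge_macaddr(device_t dev,
 *	    const uint8_t enaddr[ETHER_ADDR_LEN])
 *	{
 *		prop_dictionary_t dict = device_properties(dev);
 *		prop_data_t ea;
 *
 *		ea = prop_data_create_copy(enaddr, ETHER_ADDR_LEN);
 *		if (ea == NULL)
 *			return;
 *		prop_dictionary_set(dict, "mac-address", ea);
 *		prop_object_release(ea);
 *	}
 *
 * example_set_bge_macaddr() is a made-up name used only for
 * illustration; the property name and length match what
 * bge_get_eaddr_fw() expects.
 */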