/* $NetBSD: ixgbe.c,v 1.354 2024/07/10 03:26:30 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.354 2024/07/10 03:26:30 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixgbe.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"

#include <sys/cprng.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.24 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_quirks(struct ixgbe_softc *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct ixgbe_softc *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop_locked(void *);
static void	ixgbe_init_device_features(struct ixgbe_softc *);
static int	ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
static void	ixgbe_add_media_types(struct ixgbe_softc *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static int	ixgbe_allocate_pci_resources(struct ixgbe_softc *,
		    const struct pci_attach_args *);
static void	ixgbe_free_deferred_handlers(struct ixgbe_softc *);
static void	ixgbe_get_slot_info(struct ixgbe_softc *);
static int	ixgbe_allocate_msix(struct ixgbe_softc *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct ixgbe_softc *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct ixgbe_softc *);
static void	ixgbe_free_pciintr_resources(struct ixgbe_softc *);
static void	ixgbe_free_pci_resources(struct ixgbe_softc *);
static void	ixgbe_local_timer(void *);
static void	ixgbe_handle_timer(struct work *, void *);
static void	ixgbe_recovery_mode_timer(void *);
static void	ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int	ixgbe_setup_interface(device_t, struct ixgbe_softc *);
static void	ixgbe_config_gpie(struct ixgbe_softc *);
static void	ixgbe_config_dmac(struct ixgbe_softc *);
static void	ixgbe_config_delay_values(struct ixgbe_softc *);
static void	ixgbe_schedule_admin_tasklet(struct ixgbe_softc *);
static void	ixgbe_config_link(struct ixgbe_softc *);
static void	ixgbe_check_wol_support(struct ixgbe_softc *);
static int	ixgbe_setup_low_power_mode(struct ixgbe_softc *);
#if 0
static void	ixgbe_rearm_queues(struct ixgbe_softc *, u64);
#endif

static void	ixgbe_initialize_transmit_units(struct ixgbe_softc *);
static void	ixgbe_initialize_receive_units(struct ixgbe_softc *);
static void	ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void	ixgbe_disable_rx_drop(struct ixgbe_softc *);
static void	ixgbe_initialize_rss_mapping(struct ixgbe_softc *);

static void	ixgbe_enable_intr(struct ixgbe_softc *);
static void	ixgbe_disable_intr(struct ixgbe_softc *);
static void	ixgbe_update_stats_counters(struct ixgbe_softc *);
static void	ixgbe_set_rxfilter(struct ixgbe_softc *);
static void	ixgbe_update_link_status(struct ixgbe_softc *);
static void	ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct ixgbe_softc *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);

static void	ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *);
static void	ixgbe_setup_vlan_hw_support(struct ixgbe_softc *);
static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixgbe_register_vlan(struct ixgbe_softc *, u16);
static int	ixgbe_unregister_vlan(struct ixgbe_softc *, u16);

static void	ixgbe_add_device_sysctls(struct ixgbe_softc *);
static void	ixgbe_add_hw_stats(struct ixgbe_softc *);
static void	ixgbe_clear_evcnt(struct ixgbe_softc *);
static int	ixgbe_set_flowcntl(struct ixgbe_softc *, int);
static int	ixgbe_set_advertise(struct ixgbe_softc *, int);
static int	ixgbe_get_default_advertise(struct ixgbe_softc *);

/* Sysctl handlers */
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Interrupt functions */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);
static void	ixgbe_intr_admin_common(struct ixgbe_softc *, u32, u32 *);
static int	ixgbe_legacy_irq(void *);

/* Event handlers running on workqueue */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *, bool);
static void	ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void	ixgbe_handle_admin(struct work *, void *);
static void	ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct ixgbe_softc),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as it's during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus and number of MSI-X vectors.
 * This can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = DEFAULT_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = DEFAULT_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

/* Interval between reports of errors */
static const struct timeval ixgbe_errlog_intrvl = { 60, 0 };	/* 60s */

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw	*hw = &sc->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == sc->num_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
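			 * (Illustration: with four queues configured, an
			 * indirection bucket id of 6 maps to queue 6 % 4 = 2.)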
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		    | RSS_HASHTYPE_RSS_TCP_IPV4
		    | RSS_HASHTYPE_RSS_IPV6
		    | RSS_HASHTYPE_RSS_TCP_IPV6
		    | RSS_HASHTYPE_RSS_IPV6_EX
		    | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(sc->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct ixgbe_softc *sc)
{
	struct rx_ring	*rxr = sc->rx_rings;
	struct ixgbe_hw	*hw = &sc->hw;
	struct ifnet	*ifp = sc->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < sc->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 8 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if ((sc->num_queues > 1) &&
		    (sc->hw.fc.requested_mode == ixgbe_fc_none))
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		else
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		    | IXGBE_PSRTYPE_UDPHDR
		    | IXGBE_PSRTYPE_IPV4HDR
		    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(sc);

	if (sc->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */

/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct ixgbe_softc *sc)
{
	struct tx_ring	*txr = sc->tx_rings;
	struct ixgbe_hw	*hw = &sc->hw;
	int		i;

	INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < sc->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 8 bits per 1 queue */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
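		 * Each 32-bit register carries the mapping for four queues,
		 * one byte per queue (queue i goes to register i / 4,
		 * byte i % 4).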
		 * Register location is different between 82598 and others.
		 */
		if (sc->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		txr->txr_no_space = false;

		/* Disable relax ordering */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(sc->iov_mode));
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}
} /* ixgbe_initialize_transmit_units */

static void
ixgbe_quirks(struct ixgbe_softc *sc)
{
	device_t	dev = sc->dev;
	struct ixgbe_hw	*hw = &sc->hw;
	const char	*vendor, *product;

	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
		/*
		 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
		 * MA10-ST0.
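		 * (MOD_ABS is the SFP+ cage's module-absent signal; on this
		 * board it reads inverted.)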
		 */
		vendor = pmf_get_platform("system-vendor");
		product = pmf_get_platform("system-product");

		if ((vendor == NULL) || (product == NULL))
			return;

		if ((strcmp(vendor, "GIGABYTE") == 0) &&
		    (strcmp(product, "MA10-ST0") == 0)) {
			aprint_verbose_dev(dev,
			    "Enable SFP+ MOD_ABS inverse quirk\n");
			sc->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
		}
	}
}

/************************************************************************
 * ixgbe_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct ixgbe_softc *sc;
	struct ixgbe_hw *hw;
	int		error = -1;
	u32		ctrl_ext;
	u16		high, low, nvmreg, dev_caps;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	bool		unsupported_sfp = false;
	const char	*str;
	char		wqname[MAXCOMLEN];
	char		buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	sc = device_private(dev);
	sc->hw.back = sc;
	sc->dev = dev;
	hw = &sc->hw;
	sc->osdep.pc = pa->pa_pc;
	sc->osdep.tag = pa->pa_tag;
	if (pci_dma64_available(pa))
		sc->osdep.dmat = pa->pa_dmat64;
	else
		sc->osdep.dmat = pa->pa_dmat;
	sc->osdep.attached = false;
	sc->osdep.detaching = false;

	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));

	/* Set up the timer callout and workqueue */
	callout_init(&sc->timer, CALLOUT_MPSAFE);
	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
	error = workqueue_create(&sc->timer_wq, wqname,
	    ixgbe_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, WQ_MPSAFE);
	if (error) {
		aprint_error_dev(dev,
		    "could not create timer workqueue (%d)\n", error);
		goto err_out;
	}

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* Set quirk flags */
	ixgbe_quirks(sc);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(sc, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev,
		    "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM X";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB)
		hw->phy.smart_speed = ixgbe_smart_speed;

	/* Set the right number of segments */
	KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
	sc->num_segs = IXGBE_SCATTER_DEFAULT;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(sc);

	if (ixgbe_configure_interrupts(sc)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	sc->mta = malloc(sizeof(*sc->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(sc);

	/* Register for VLAN events */
	ether_set_vlan_cb(&sc->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(sc, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "Invalid TX ring size (%d). "
		    "It must be between %d and %d, "
		    "inclusive, and must be a multiple of %zu. "
		    "Using default value of %d instead.\n",
		    ixgbe_txd, MIN_TXD, MAX_TXD,
		    DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
		    DEFAULT_TXD);
		sc->num_tx_desc = DEFAULT_TXD;
	} else
		sc->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "Invalid RX ring size (%d). "
		    "It must be between %d and %d, "
		    "inclusive, and must be a multiple of %zu. "
		    "Using default value of %d instead.\n",
		    ixgbe_rxd, MIN_RXD, MAX_RXD,
		    DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
		    DEFAULT_RXD);
		sc->num_rx_desc = DEFAULT_RXD;
	} else
		sc->num_rx_desc = ixgbe_rxd;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	sc->rx_process_limit
	    = (ixgbe_rx_process_limit <= sc->num_rx_desc)
	    ? ixgbe_rx_process_limit : sc->num_rx_desc;
	sc->tx_process_limit
	    = (ixgbe_tx_process_limit <= sc->num_tx_desc)
	    ? ixgbe_tx_process_limit : sc->num_tx_desc;

	/* Set default high limit of copying mbuf in rxeof */
	sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(sc)) {
		error = ENOMEM;
		goto err_out;
	}

	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT)
		error = IXGBE_SUCCESS;
	else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev,
		    "Unsupported SFP+ module type was detected.\n");
		unsupported_sfp = true;
		error = IXGBE_SUCCESS;
	} else if (error) {
		aprint_error_dev(dev,
		    "Hardware initialization failed(error = %d)\n", error);
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * Print version from the dev starter version (0x29). The
		 * location is the same as newer device's IXGBE_NVM_MAP_VER.
		 */
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		/*
		 * The following output might not be correct. Some 82598 cards
		 * have 0x1070 or 0x2090. 82598 spec update notes about 2.9.0.
		 */
		aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	/* Printed Board Assembly number */
	error = ixgbe_read_pba_string(hw, buf, IXGBE_PBANUM_LENGTH);
	aprint_normal_dev(dev, "PBA number %s\n", error ? "unknown" : buf);

	/* Recovery mode */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	if (sc->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(sc, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_queues(sc);

			/* Fallback to legacy interrupt */
			if (sc->feat_cap & IXGBE_FEATURE_MSI)
				sc->feat_en |= IXGBE_FEATURE_MSI;
			sc->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(sc)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}

	if ((sc->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(sc, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	mutex_init(&(sc)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
	error = workqueue_create(&sc->admin_wq, wqname,
	    ixgbe_handle_admin, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, WQ_MPSAFE);
	if (error) {
		aprint_error_dev(dev,
		    "could not create admin workqueue (%d)\n", error);
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev,
		    "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, sc) != 0)
		goto err_late;

	/*
	 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1196 1.110 msaitoh */ 1197 1.110 msaitoh if (hw->phy.media_type == ixgbe_media_type_copper) { 1198 1.95 msaitoh uint16_t id1, id2; 1199 1.95 msaitoh int oui, model, rev; 1200 1.285 pgoyette char descr[MII_MAX_DESCR_LEN]; 1201 1.95 msaitoh 1202 1.95 msaitoh id1 = hw->phy.id >> 16; 1203 1.95 msaitoh id2 = hw->phy.id & 0xffff; 1204 1.95 msaitoh oui = MII_OUI(id1, id2); 1205 1.95 msaitoh model = MII_MODEL(id2); 1206 1.95 msaitoh rev = MII_REV(id2); 1207 1.285 pgoyette mii_get_descr(descr, sizeof(descr), oui, model); 1208 1.285 pgoyette if (descr[0]) 1209 1.299 msaitoh aprint_normal_dev(dev, "PHY: %s, rev. %d\n", 1210 1.299 msaitoh descr, rev); 1211 1.95 msaitoh else 1212 1.95 msaitoh aprint_normal_dev(dev, 1213 1.95 msaitoh "PHY OUI 0x%06x, model 0x%04x, rev. %d\n", 1214 1.95 msaitoh oui, model, rev); 1215 1.95 msaitoh } 1216 1.95 msaitoh 1217 1.173 msaitoh /* Enable EEE power saving */ 1218 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE) 1219 1.173 msaitoh hw->mac.ops.setup_eee(hw, 1220 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE); 1221 1.173 msaitoh 1222 1.52 msaitoh /* Enable power to the phy. */ 1223 1.219 msaitoh if (!unsupported_sfp) { 1224 1.219 msaitoh /* Enable the optics for 82599 SFP+ fiber */ 1225 1.219 msaitoh ixgbe_enable_tx_laser(hw); 1226 1.219 msaitoh 1227 1.219 msaitoh /* 1228 1.219 msaitoh * XXX Currently, ixgbe_set_phy_power() supports only copper 1229 1.219 msaitoh * PHY, so it's not required to test with !unsupported_sfp. 1230 1.219 msaitoh */ 1231 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE); 1232 1.219 msaitoh } 1233 1.52 msaitoh 1234 1.1 dyoung /* Initialize statistics */ 1235 1.333 msaitoh ixgbe_update_stats_counters(sc); 1236 1.1 dyoung 1237 1.98 msaitoh /* Check PCIE slot type/speed/width */ 1238 1.333 msaitoh ixgbe_get_slot_info(sc); 1239 1.1 dyoung 1240 1.99 msaitoh /* 1241 1.99 msaitoh * Do time init and sysctl init here, but 1242 1.99 msaitoh * only on the first port of a bypass adapter. 
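 * (On adapters without the bypass feature this call is expected to be
 * a no-op, so it should be safe to make unconditionally here.)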
1243 1.99 msaitoh */ 1244 1.333 msaitoh ixgbe_bypass_init(sc); 1245 1.99 msaitoh 1246 1.99 msaitoh /* Set an initial dmac value */ 1247 1.333 msaitoh sc->dmac = 0; 1248 1.99 msaitoh /* Set initial advertised speeds (if applicable) */ 1249 1.333 msaitoh sc->advertise = ixgbe_get_default_advertise(sc); 1250 1.45 msaitoh 1251 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_SRIOV) 1252 1.99 msaitoh ixgbe_define_iov_schemas(dev, &error); 1253 1.44 msaitoh 1254 1.44 msaitoh /* Add sysctls */ 1255 1.333 msaitoh ixgbe_add_device_sysctls(sc); 1256 1.333 msaitoh ixgbe_add_hw_stats(sc); 1257 1.44 msaitoh 1258 1.99 msaitoh /* For Netmap */ 1259 1.333 msaitoh sc->init_locked = ixgbe_init_locked; 1260 1.333 msaitoh sc->stop_locked = ixgbe_stop_locked; 1261 1.99 msaitoh 1262 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP) 1263 1.333 msaitoh ixgbe_netmap_attach(sc); 1264 1.1 dyoung 1265 1.340 msaitoh /* Print some flags */ 1266 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_cap); 1267 1.99 msaitoh aprint_verbose_dev(dev, "feature cap %s\n", buf); 1268 1.333 msaitoh snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, sc->feat_en); 1269 1.99 msaitoh aprint_verbose_dev(dev, "feature ena %s\n", buf); 1270 1.340 msaitoh if (ixgbe_get_device_caps(hw, &dev_caps) == 0) { 1271 1.340 msaitoh snprintb(buf, sizeof(buf), IXGBE_DEVICE_CAPS_FLAGS, dev_caps); 1272 1.340 msaitoh aprint_verbose_dev(dev, "device cap %s\n", buf); 1273 1.340 msaitoh } 1274 1.44 msaitoh 1275 1.44 msaitoh if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume)) 1276 1.333 msaitoh pmf_class_network_register(dev, sc->ifp); 1277 1.44 msaitoh else 1278 1.44 msaitoh aprint_error_dev(dev, "couldn't establish power handler\n"); 1279 1.44 msaitoh 1280 1.169 msaitoh /* Init recovery mode timer and state variable */ 1281 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { 1282 1.333 msaitoh sc->recovery_mode = 0; 1283 1.169 msaitoh 1284 1.169 msaitoh /* Set up the timer callout */ 1285 1.354 msaitoh callout_init(&sc->recovery_mode_timer, CALLOUT_MPSAFE); 1286 1.235 msaitoh snprintf(wqname, sizeof(wqname), "%s-recovery", 1287 1.235 msaitoh device_xname(dev)); 1288 1.333 msaitoh error = workqueue_create(&sc->recovery_mode_timer_wq, 1289 1.333 msaitoh wqname, ixgbe_handle_recovery_mode_timer, sc, 1290 1.354 msaitoh IXGBE_WORKQUEUE_PRI, IPL_NET, WQ_MPSAFE); 1291 1.233 msaitoh if (error) { 1292 1.233 msaitoh aprint_error_dev(dev, "could not create " 1293 1.233 msaitoh "recovery_mode_timer workqueue (%d)\n", error); 1294 1.233 msaitoh goto err_out; 1295 1.233 msaitoh } 1296 1.169 msaitoh 1297 1.169 msaitoh /* Start the task */ 1298 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz, 1299 1.333 msaitoh ixgbe_recovery_mode_timer, sc); 1300 1.169 msaitoh } 1301 1.169 msaitoh 1302 1.1 dyoung INIT_DEBUGOUT("ixgbe_attach: end"); 1303 1.333 msaitoh sc->osdep.attached = true; 1304 1.98 msaitoh 1305 1.1 dyoung return; 1306 1.43 msaitoh 1307 1.1 dyoung err_late: 1308 1.333 msaitoh ixgbe_free_queues(sc); 1309 1.1 dyoung err_out: 1310 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 1311 1.99 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 1312 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 1313 1.333 msaitoh ixgbe_free_deferred_handlers(sc); 1314 1.333 msaitoh ixgbe_free_pci_resources(sc); 1315 1.333 msaitoh if (sc->mta != NULL) 1316 1.333 msaitoh free(sc->mta, M_DEVBUF); 1317 1.333 msaitoh mutex_destroy(&(sc)->admin_mtx); /* XXX appropriate order? 
*/ 1318 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc); 1319 1.99 msaitoh 1320 1.1 dyoung return; 1321 1.99 msaitoh } /* ixgbe_attach */ 1322 1.1 dyoung 1323 1.99 msaitoh /************************************************************************ 1324 1.99 msaitoh * ixgbe_check_wol_support 1325 1.99 msaitoh * 1326 1.99 msaitoh * Checks whether the adapter's ports are capable of 1327 1.99 msaitoh * Wake On LAN by reading the adapter's NVM. 1328 1.1 dyoung * 1329 1.99 msaitoh * Sets each port's hw->wol_enabled value depending 1330 1.99 msaitoh * on the value read here. 1331 1.99 msaitoh ************************************************************************/ 1332 1.98 msaitoh static void 1333 1.333 msaitoh ixgbe_check_wol_support(struct ixgbe_softc *sc) 1334 1.98 msaitoh { 1335 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 1336 1.186 msaitoh u16 dev_caps = 0; 1337 1.1 dyoung 1338 1.98 msaitoh /* Find out WoL support for port */ 1339 1.333 msaitoh sc->wol_support = hw->wol_enabled = 0; 1340 1.98 msaitoh ixgbe_get_device_caps(hw, &dev_caps); 1341 1.98 msaitoh if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 1342 1.98 msaitoh ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 1343 1.99 msaitoh hw->bus.func == 0)) 1344 1.333 msaitoh sc->wol_support = hw->wol_enabled = 1; 1345 1.98 msaitoh 1346 1.98 msaitoh /* Save initial wake up filter configuration */ 1347 1.333 msaitoh sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 1348 1.98 msaitoh 1349 1.98 msaitoh return; 1350 1.99 msaitoh } /* ixgbe_check_wol_support */ 1351 1.98 msaitoh 1352 1.99 msaitoh /************************************************************************ 1353 1.99 msaitoh * ixgbe_setup_interface 1354 1.98 msaitoh * 1355 1.99 msaitoh * Setup networking device structure and register an interface. 1356 1.99 msaitoh ************************************************************************/ 1357 1.1 dyoung static int 1358 1.333 msaitoh ixgbe_setup_interface(device_t dev, struct ixgbe_softc *sc) 1359 1.1 dyoung { 1360 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec; 1361 1.98 msaitoh struct ifnet *ifp; 1362 1.1 dyoung 1363 1.98 msaitoh INIT_DEBUGOUT("ixgbe_setup_interface: begin"); 1364 1.1 dyoung 1365 1.333 msaitoh ifp = sc->ifp = &ec->ec_if; 1366 1.98 msaitoh strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); 1367 1.98 msaitoh ifp->if_baudrate = IF_Gbps(10); 1368 1.98 msaitoh ifp->if_init = ixgbe_init; 1369 1.98 msaitoh ifp->if_stop = ixgbe_ifstop; 1370 1.333 msaitoh ifp->if_softc = sc; 1371 1.98 msaitoh ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1372 1.112 ozaki ifp->if_extflags = IFEF_MPSAFE; 1373 1.98 msaitoh ifp->if_ioctl = ixgbe_ioctl; 1374 1.98 msaitoh #if __FreeBSD_version >= 1100045 1375 1.98 msaitoh /* TSO parameters */ 1376 1.98 msaitoh ifp->if_hw_tsomax = 65518; 1377 1.98 msaitoh ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; 1378 1.98 msaitoh ifp->if_hw_tsomaxsegsize = 2048; 1379 1.98 msaitoh #endif 1380 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_LEGACY_TX) { 1381 1.99 msaitoh #if 0 1382 1.99 msaitoh ixgbe_start_locked = ixgbe_legacy_start_locked; 1383 1.99 msaitoh #endif 1384 1.99 msaitoh } else { 1385 1.99 msaitoh ifp->if_transmit = ixgbe_mq_start; 1386 1.99 msaitoh #if 0 1387 1.99 msaitoh ixgbe_start_locked = ixgbe_mq_start_locked; 1388 1.29 msaitoh #endif 1389 1.99 msaitoh } 1390 1.99 msaitoh ifp->if_start = ixgbe_legacy_start; 1391 1.333 msaitoh IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 2); 1392 1.98 msaitoh IFQ_SET_READY(&ifp->if_snd); 1393 1.98 msaitoh 1394 1.284 riastrad if_initialize(ifp); 1395 1.333 
msaitoh sc->ipq = if_percpuq_create(&sc->osdep.ec.ec_if); 1396 1.98 msaitoh /* 1397 1.98 msaitoh * We use per TX queue softint, so if_deferred_start_init() isn't 1398 1.98 msaitoh * used. 1399 1.98 msaitoh */ 1400 1.98 msaitoh 1401 1.333 msaitoh sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1402 1.98 msaitoh 1403 1.98 msaitoh /* 1404 1.98 msaitoh * Tell the upper layer(s) we support long frames. 1405 1.98 msaitoh */ 1406 1.98 msaitoh ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1407 1.98 msaitoh 1408 1.98 msaitoh /* Set capability flags */ 1409 1.98 msaitoh ifp->if_capabilities |= IFCAP_RXCSUM 1410 1.186 msaitoh | IFCAP_TXCSUM 1411 1.186 msaitoh | IFCAP_TSOv4 1412 1.186 msaitoh | IFCAP_TSOv6; 1413 1.98 msaitoh ifp->if_capenable = 0; 1414 1.98 msaitoh 1415 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING 1416 1.186 msaitoh | ETHERCAP_VLAN_HWCSUM 1417 1.186 msaitoh | ETHERCAP_JUMBO_MTU 1418 1.186 msaitoh | ETHERCAP_VLAN_MTU; 1419 1.98 msaitoh 1420 1.98 msaitoh /* Enable the above capabilities by default */ 1421 1.98 msaitoh ec->ec_capenable = ec->ec_capabilities; 1422 1.98 msaitoh 1423 1.347 yamaguch ether_ifattach(ifp, sc->hw.mac.addr); 1424 1.347 yamaguch aprint_normal_dev(dev, "Ethernet address %s\n", 1425 1.347 yamaguch ether_sprintf(sc->hw.mac.addr)); 1426 1.347 yamaguch ether_set_ifflags_cb(ec, ixgbe_ifflags_cb); 1427 1.347 yamaguch 1428 1.98 msaitoh /* 1429 1.99 msaitoh * Don't turn this on by default, if vlans are 1430 1.99 msaitoh * created on another pseudo device (eg. lagg) 1431 1.99 msaitoh * then vlan events are not passed thru, breaking 1432 1.99 msaitoh * operation, but with HW FILTER off it works. If 1433 1.99 msaitoh * using vlans directly on the ixgbe driver you can 1434 1.99 msaitoh * enable this and get full hardware tag filtering. 
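 * Note that ETHERCAP_VLAN_HWFILTER is only added to ec_capabilities
 * below, after ec_capenable has already been copied from it, so the
 * filter is advertised but left disabled by default.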
1435 1.99 msaitoh */ 1436 1.98 msaitoh ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1437 1.1 dyoung 1438 1.98 msaitoh /* 1439 1.98 msaitoh * Specify the media types supported by this adapter and register 1440 1.98 msaitoh * callbacks to update media and link information 1441 1.98 msaitoh */ 1442 1.333 msaitoh ec->ec_ifmedia = &sc->media; 1443 1.333 msaitoh ifmedia_init_with_lock(&sc->media, IFM_IMASK, ixgbe_media_change, 1444 1.333 msaitoh ixgbe_media_status, &sc->core_mtx); 1445 1.45 msaitoh 1446 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw); 1447 1.333 msaitoh ixgbe_add_media_types(sc); 1448 1.49 msaitoh 1449 1.98 msaitoh /* Set autoselect media by default */ 1450 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 1451 1.1 dyoung 1452 1.156 ozaki if_register(ifp); 1453 1.156 ozaki 1454 1.98 msaitoh return (0); 1455 1.99 msaitoh } /* ixgbe_setup_interface */ 1456 1.1 dyoung 1457 1.99 msaitoh /************************************************************************ 1458 1.99 msaitoh * ixgbe_add_media_types 1459 1.99 msaitoh ************************************************************************/ 1460 1.98 msaitoh static void 1461 1.333 msaitoh ixgbe_add_media_types(struct ixgbe_softc *sc) 1462 1.98 msaitoh { 1463 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 1464 1.186 msaitoh u64 layer; 1465 1.1 dyoung 1466 1.333 msaitoh layer = sc->phy_layer; 1467 1.1 dyoung 1468 1.98 msaitoh #define ADD(mm, dd) \ 1469 1.333 msaitoh ifmedia_add(&sc->media, IFM_ETHER | (mm), (dd), NULL); 1470 1.1 dyoung 1471 1.140 msaitoh ADD(IFM_NONE, 0); 1472 1.140 msaitoh 1473 1.98 msaitoh /* Media types with matching NetBSD media defines */ 1474 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) 1475 1.98 msaitoh ADD(IFM_10G_T | IFM_FDX, 0); 1476 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) 1477 1.98 msaitoh ADD(IFM_1000_T | IFM_FDX, 0); 1478 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) 1479 1.98 msaitoh ADD(IFM_100_TX | IFM_FDX, 0); 1480 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 1481 1.99 msaitoh ADD(IFM_10_T | IFM_FDX, 0); 1482 1.26 msaitoh 1483 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1484 1.319 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 1485 1.98 msaitoh ADD(IFM_10G_TWINAX | IFM_FDX, 0); 1486 1.1 dyoung 1487 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 1488 1.98 msaitoh ADD(IFM_10G_LR | IFM_FDX, 0); 1489 1.319 msaitoh if (hw->phy.multispeed_fiber) 1490 1.98 msaitoh ADD(IFM_1000_LX | IFM_FDX, 0); 1491 1.98 msaitoh } 1492 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 1493 1.98 msaitoh ADD(IFM_10G_SR | IFM_FDX, 0); 1494 1.319 msaitoh if (hw->phy.multispeed_fiber) 1495 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0); 1496 1.319 msaitoh } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 1497 1.98 msaitoh ADD(IFM_1000_SX | IFM_FDX, 0); 1498 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 1499 1.98 msaitoh ADD(IFM_10G_CX4 | IFM_FDX, 0); 1500 1.1 dyoung 1501 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 1502 1.98 msaitoh ADD(IFM_10G_KR | IFM_FDX, 0); 1503 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) 1504 1.180 msaitoh ADD(IFM_10G_KX4 | IFM_FDX, 0); 1505 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 1506 1.98 msaitoh ADD(IFM_1000_KX | IFM_FDX, 0); 1507 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) 1508 1.99 msaitoh ADD(IFM_2500_KX | IFM_FDX, 0); 1509 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) 1510 
1.103 msaitoh ADD(IFM_2500_T | IFM_FDX, 0); 1511 1.319 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) 1512 1.103 msaitoh ADD(IFM_5000_T | IFM_FDX, 0); 1513 1.98 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 1514 1.208 msaitoh ADD(IFM_1000_BX10 | IFM_FDX, 0); 1515 1.98 msaitoh /* XXX no ifmedia_set? */ 1516 1.185 msaitoh 1517 1.98 msaitoh ADD(IFM_AUTO, 0); 1518 1.98 msaitoh 1519 1.98 msaitoh #undef ADD 1520 1.99 msaitoh } /* ixgbe_add_media_types */ 1521 1.1 dyoung 1522 1.99 msaitoh /************************************************************************ 1523 1.99 msaitoh * ixgbe_is_sfp 1524 1.99 msaitoh ************************************************************************/ 1525 1.99 msaitoh static inline bool 1526 1.99 msaitoh ixgbe_is_sfp(struct ixgbe_hw *hw) 1527 1.99 msaitoh { 1528 1.99 msaitoh switch (hw->mac.type) { 1529 1.99 msaitoh case ixgbe_mac_82598EB: 1530 1.99 msaitoh if (hw->phy.type == ixgbe_phy_nl) 1531 1.144 msaitoh return (TRUE); 1532 1.144 msaitoh return (FALSE); 1533 1.99 msaitoh case ixgbe_mac_82599EB: 1534 1.203 msaitoh case ixgbe_mac_X550EM_x: 1535 1.203 msaitoh case ixgbe_mac_X550EM_a: 1536 1.99 msaitoh switch (hw->mac.ops.get_media_type(hw)) { 1537 1.99 msaitoh case ixgbe_media_type_fiber: 1538 1.99 msaitoh case ixgbe_media_type_fiber_qsfp: 1539 1.144 msaitoh return (TRUE); 1540 1.99 msaitoh default: 1541 1.144 msaitoh return (FALSE); 1542 1.99 msaitoh } 1543 1.99 msaitoh default: 1544 1.144 msaitoh return (FALSE); 1545 1.99 msaitoh } 1546 1.99 msaitoh } /* ixgbe_is_sfp */ 1547 1.99 msaitoh 1548 1.226 thorpej static void 1549 1.333 msaitoh ixgbe_schedule_admin_tasklet(struct ixgbe_softc *sc) 1550 1.226 thorpej { 1551 1.243 msaitoh 1552 1.333 msaitoh KASSERT(mutex_owned(&sc->admin_mtx)); 1553 1.260 knakahar 1554 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) { 1555 1.333 msaitoh if (sc->admin_pending == 0) 1556 1.333 msaitoh workqueue_enqueue(sc->admin_wq, 1557 1.333 msaitoh &sc->admin_wc, NULL); 1558 1.333 msaitoh sc->admin_pending = 1; 1559 1.255 msaitoh } 1560 1.226 thorpej } 1561 1.226 thorpej 1562 1.99 msaitoh /************************************************************************ 1563 1.99 msaitoh * ixgbe_config_link 1564 1.99 msaitoh ************************************************************************/ 1565 1.98 msaitoh static void 1566 1.333 msaitoh ixgbe_config_link(struct ixgbe_softc *sc) 1567 1.98 msaitoh { 1568 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 1569 1.186 msaitoh u32 autoneg, err = 0; 1570 1.233 msaitoh u32 task_requests = 0; 1571 1.186 msaitoh bool sfp, negotiate = false; 1572 1.1 dyoung 1573 1.98 msaitoh sfp = ixgbe_is_sfp(hw); 1574 1.1 dyoung 1575 1.185 msaitoh if (sfp) { 1576 1.99 msaitoh if (hw->phy.multispeed_fiber) { 1577 1.99 msaitoh ixgbe_enable_tx_laser(hw); 1578 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF_WOI; 1579 1.99 msaitoh } 1580 1.273 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD_WOI; 1581 1.260 knakahar 1582 1.333 msaitoh mutex_enter(&sc->admin_mtx); 1583 1.333 msaitoh sc->task_requests |= task_requests; 1584 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc); 1585 1.333 msaitoh mutex_exit(&sc->admin_mtx); 1586 1.98 msaitoh } else { 1587 1.333 msaitoh struct ifmedia *ifm = &sc->media; 1588 1.143 msaitoh 1589 1.98 msaitoh if (hw->mac.ops.check_link) 1590 1.333 msaitoh err = ixgbe_check_link(hw, &sc->link_speed, 1591 1.333 msaitoh &sc->link_up, FALSE); 1592 1.98 msaitoh if (err) 1593 1.144 msaitoh return; 1594 1.143 msaitoh 1595 1.143 msaitoh /* 1596 1.143 msaitoh * Check if it's 
the first call. If it's the first call, 1597 1.143 msaitoh * get value for auto negotiation. 1598 1.143 msaitoh */ 1599 1.98 msaitoh autoneg = hw->phy.autoneg_advertised; 1600 1.143 msaitoh if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE) 1601 1.143 msaitoh && ((!autoneg) && (hw->mac.ops.get_link_capabilities))) 1602 1.186 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &autoneg, 1603 1.99 msaitoh &negotiate); 1604 1.98 msaitoh if (err) 1605 1.144 msaitoh return; 1606 1.98 msaitoh if (hw->mac.ops.setup_link) 1607 1.186 msaitoh err = hw->mac.ops.setup_link(hw, autoneg, 1608 1.333 msaitoh sc->link_up); 1609 1.98 msaitoh } 1610 1.99 msaitoh } /* ixgbe_config_link */ 1611 1.98 msaitoh 1612 1.99 msaitoh /************************************************************************ 1613 1.99 msaitoh * ixgbe_update_stats_counters - Update board statistics counters. 1614 1.99 msaitoh ************************************************************************/ 1615 1.98 msaitoh static void 1616 1.333 msaitoh ixgbe_update_stats_counters(struct ixgbe_softc *sc) 1617 1.1 dyoung { 1618 1.333 msaitoh struct ifnet *ifp = sc->ifp; 1619 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 1620 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf; 1621 1.305 msaitoh u32 missed_rx = 0, bprc, lxontxc, lxofftxc; 1622 1.349 msaitoh u64 total, total_missed_rx = 0, total_qprdc = 0; 1623 1.303 msaitoh uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc; 1624 1.186 msaitoh unsigned int queue_counters; 1625 1.176 msaitoh int i; 1626 1.44 msaitoh 1627 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs); 1628 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc); 1629 1.303 msaitoh 1630 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc); 1631 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc); 1632 1.209 msaitoh if (hw->mac.type >= ixgbe_mac_X550) 1633 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc); 1634 1.44 msaitoh 1635 1.176 msaitoh /* 16 registers exist */ 1636 1.333 msaitoh queue_counters = uimin(__arraycount(stats->qprc), sc->num_queues); 1637 1.176 msaitoh for (i = 0; i < queue_counters; i++) { 1638 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]); 1639 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]); 1640 1.329 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) { 1641 1.349 msaitoh uint32_t qprdc; 1642 1.349 msaitoh 1643 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbrc[i], 1644 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)) + 1645 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32)); 1646 1.329 msaitoh IXGBE_EVC_ADD(&stats->qbtc[i], 1647 1.329 msaitoh IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)) + 1648 1.329 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32)); 1649 1.349 msaitoh /* QPRDC will be added to iqdrops. 
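 * (QPRDC counts packets dropped per Rx queue by the hardware; the
 * per-queue values are summed into total_qprdc and reported through
 * if_iqdrops together with total_missed_rx further below.)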
*/ 1650 1.349 msaitoh qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 1651 1.349 msaitoh IXGBE_EVC_ADD(&stats->qprdc[i], qprdc); 1652 1.349 msaitoh total_qprdc += qprdc; 1653 1.329 msaitoh } else { 1654 1.329 msaitoh /* 82598 */ 1655 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBRC(i), qbrc[i]); 1656 1.329 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_QBTC(i), qbtc[i]); 1657 1.329 msaitoh } 1658 1.98 msaitoh } 1659 1.151 msaitoh 1660 1.175 msaitoh /* 8 registers exist */ 1661 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1662 1.98 msaitoh uint32_t mp; 1663 1.44 msaitoh 1664 1.151 msaitoh /* MPC */ 1665 1.98 msaitoh mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 1666 1.98 msaitoh /* global total per queue */ 1667 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpc[i], mp); 1668 1.98 msaitoh /* running comprehensive total for stats display */ 1669 1.98 msaitoh total_missed_rx += mp; 1670 1.44 msaitoh 1671 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) 1672 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]); 1673 1.151 msaitoh 1674 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]); 1675 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]); 1676 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) { 1677 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats, 1678 1.319 msaitoh IXGBE_PXONRXCNT(i), pxonrxc[i]); 1679 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats, 1680 1.319 msaitoh IXGBE_PXOFFRXCNT(i), pxoffrxc[i]); 1681 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats, 1682 1.319 msaitoh IXGBE_PXON2OFFCNT(i), pxon2offc[i]); 1683 1.151 msaitoh } else { 1684 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats, 1685 1.319 msaitoh IXGBE_PXONRXC(i), pxonrxc[i]); 1686 1.319 msaitoh IXGBE_EVC_REGADD(hw, stats, 1687 1.319 msaitoh IXGBE_PXOFFRXC(i), pxoffrxc[i]); 1688 1.151 msaitoh } 1689 1.98 msaitoh } 1690 1.305 msaitoh IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx); 1691 1.44 msaitoh 1692 1.98 msaitoh /* Document says M[LR]FC are valid when link is up and 10Gbps */ 1693 1.333 msaitoh if ((sc->link_active == LINK_STATE_UP) 1694 1.333 msaitoh && (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) { 1695 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc); 1696 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc); 1697 1.98 msaitoh } 1698 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a) 1699 1.326 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LINK_DN_CNT, link_dn_cnt); 1700 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec); 1701 1.44 msaitoh 1702 1.98 msaitoh /* Hardware workaround, gprc counts missed packets */ 1703 1.305 msaitoh IXGBE_EVC_ADD(&stats->gprc, 1704 1.305 msaitoh IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx); 1705 1.44 msaitoh 1706 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc); 1707 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc); 1708 1.305 msaitoh total = lxontxc + lxofftxc; 1709 1.44 msaitoh 1710 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) { 1711 1.305 msaitoh IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) + 1712 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32)); 1713 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1714 1.280 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) 1715 1.305 msaitoh - total * ETHER_MIN_LEN); 1716 1.305 msaitoh IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) + 1717 1.305 msaitoh ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32)); 1718 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc); 1719 1.305 
msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc); 1720 1.98 msaitoh } else { 1721 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc); 1722 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc); 1723 1.98 msaitoh /* 82598 only has a counter in the high register */ 1724 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc); 1725 1.305 msaitoh IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH) 1726 1.305 msaitoh - total * ETHER_MIN_LEN); 1727 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor); 1728 1.98 msaitoh } 1729 1.44 msaitoh 1730 1.98 msaitoh /* 1731 1.98 msaitoh * Workaround: mprc hardware is incorrectly counting 1732 1.98 msaitoh * broadcasts, so for now we subtract those. 1733 1.98 msaitoh */ 1734 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc); 1735 1.305 msaitoh IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC) 1736 1.305 msaitoh - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0)); 1737 1.305 msaitoh 1738 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64); 1739 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127); 1740 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255); 1741 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511); 1742 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023); 1743 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522); 1744 1.305 msaitoh 1745 1.305 msaitoh IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total); 1746 1.305 msaitoh IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total); 1747 1.305 msaitoh IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total); 1748 1.305 msaitoh 1749 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc); 1750 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc); 1751 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc); 1752 1.305 msaitoh IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc); 1753 1.305 msaitoh 1754 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc); 1755 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc); 1756 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc); 1757 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr); 1758 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt); 1759 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127); 1760 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255); 1761 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511); 1762 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023); 1763 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522); 1764 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc); 1765 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec); 1766 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc); 1767 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast); 1768 1.98 msaitoh /* Only read FCOE on 82599 */ 1769 1.98 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) { 1770 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc); 1771 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc); 1772 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc); 1773 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc); 1774 1.305 msaitoh IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc); 1775 1.98 msaitoh } 1776 1.44 msaitoh 1777 1.44 msaitoh /* 1778 1.224 msaitoh * Fill out the OS 
statistics structure. Only RX errors are required 1779 1.224 msaitoh * here because all TX counters are incremented in the TX path and 1780 1.224 msaitoh * normal RX counters are prepared in ether_input(). 1781 1.44 msaitoh */ 1782 1.222 thorpej net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 1783 1.352 riastrad if_statadd_ref(ifp, nsr, if_iqdrops, total_missed_rx + total_qprdc); 1784 1.298 msaitoh 1785 1.298 msaitoh /* 1786 1.298 msaitoh * Aggregate following types of errors as RX errors: 1787 1.298 msaitoh * - CRC error count, 1788 1.298 msaitoh * - illegal byte error count, 1789 1.298 msaitoh * - length error count, 1790 1.298 msaitoh * - undersized packets count, 1791 1.298 msaitoh * - fragmented packets count, 1792 1.298 msaitoh * - oversized packets count, 1793 1.298 msaitoh * - jabber count. 1794 1.298 msaitoh */ 1795 1.352 riastrad if_statadd_ref(ifp, nsr, if_ierrors, 1796 1.303 msaitoh crcerrs + illerrc + rlec + ruc + rfc + roc + rjc); 1797 1.298 msaitoh 1798 1.222 thorpej IF_STAT_PUTREF(ifp); 1799 1.99 msaitoh } /* ixgbe_update_stats_counters */ 1800 1.1 dyoung 1801 1.99 msaitoh /************************************************************************ 1802 1.99 msaitoh * ixgbe_add_hw_stats 1803 1.99 msaitoh * 1804 1.99 msaitoh * Add sysctl variables, one per statistic, to the system. 1805 1.99 msaitoh ************************************************************************/ 1806 1.98 msaitoh static void 1807 1.333 msaitoh ixgbe_add_hw_stats(struct ixgbe_softc *sc) 1808 1.1 dyoung { 1809 1.333 msaitoh device_t dev = sc->dev; 1810 1.98 msaitoh const struct sysctlnode *rnode, *cnode; 1811 1.333 msaitoh struct sysctllog **log = &sc->sysctllog; 1812 1.333 msaitoh struct tx_ring *txr = sc->tx_rings; 1813 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings; 1814 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 1815 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf; 1816 1.98 msaitoh const char *xname = device_xname(dev); 1817 1.144 msaitoh int i; 1818 1.1 dyoung 1819 1.98 msaitoh /* Driver Statistics */ 1820 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig_tx_dma_setup, EVCNT_TYPE_MISC, 1821 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EFBIG"); 1822 1.333 msaitoh evcnt_attach_dynamic(&sc->mbuf_defrag_failed, EVCNT_TYPE_MISC, 1823 1.98 msaitoh NULL, xname, "m_defrag() failed"); 1824 1.333 msaitoh evcnt_attach_dynamic(&sc->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, 1825 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EFBIG"); 1826 1.333 msaitoh evcnt_attach_dynamic(&sc->einval_tx_dma_setup, EVCNT_TYPE_MISC, 1827 1.98 msaitoh NULL, xname, "Driver tx dma hard fail EINVAL"); 1828 1.333 msaitoh evcnt_attach_dynamic(&sc->other_tx_dma_setup, EVCNT_TYPE_MISC, 1829 1.98 msaitoh NULL, xname, "Driver tx dma hard fail other"); 1830 1.333 msaitoh evcnt_attach_dynamic(&sc->eagain_tx_dma_setup, EVCNT_TYPE_MISC, 1831 1.98 msaitoh NULL, xname, "Driver tx dma soft fail EAGAIN"); 1832 1.333 msaitoh evcnt_attach_dynamic(&sc->enomem_tx_dma_setup, EVCNT_TYPE_MISC, 1833 1.98 msaitoh NULL, xname, "Driver tx dma soft fail ENOMEM"); 1834 1.333 msaitoh evcnt_attach_dynamic(&sc->watchdog_events, EVCNT_TYPE_MISC, 1835 1.98 msaitoh NULL, xname, "Watchdog timeouts"); 1836 1.333 msaitoh evcnt_attach_dynamic(&sc->tso_err, EVCNT_TYPE_MISC, 1837 1.98 msaitoh NULL, xname, "TSO errors"); 1838 1.333 msaitoh evcnt_attach_dynamic(&sc->admin_irqev, EVCNT_TYPE_INTR, 1839 1.233 msaitoh NULL, xname, "Admin MSI-X IRQ Handled"); 1840 1.333 msaitoh evcnt_attach_dynamic(&sc->link_workev, EVCNT_TYPE_INTR, 1841 1.233 msaitoh NULL, xname, 
"Link event"); 1842 1.333 msaitoh evcnt_attach_dynamic(&sc->mod_workev, EVCNT_TYPE_INTR, 1843 1.233 msaitoh NULL, xname, "SFP+ module event"); 1844 1.333 msaitoh evcnt_attach_dynamic(&sc->msf_workev, EVCNT_TYPE_INTR, 1845 1.233 msaitoh NULL, xname, "Multispeed event"); 1846 1.333 msaitoh evcnt_attach_dynamic(&sc->phy_workev, EVCNT_TYPE_INTR, 1847 1.233 msaitoh NULL, xname, "External PHY event"); 1848 1.1 dyoung 1849 1.168 msaitoh /* Max number of traffic class is 8 */ 1850 1.168 msaitoh KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8); 1851 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1852 1.333 msaitoh snprintf(sc->tcs[i].evnamebuf, 1853 1.333 msaitoh sizeof(sc->tcs[i].evnamebuf), "%s tc%d", xname, i); 1854 1.168 msaitoh if (i < __arraycount(stats->mpc)) { 1855 1.168 msaitoh evcnt_attach_dynamic(&stats->mpc[i], 1856 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf, 1857 1.168 msaitoh "RX Missed Packet Count"); 1858 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) 1859 1.168 msaitoh evcnt_attach_dynamic(&stats->rnbc[i], 1860 1.168 msaitoh EVCNT_TYPE_MISC, NULL, 1861 1.333 msaitoh sc->tcs[i].evnamebuf, 1862 1.168 msaitoh "Receive No Buffers"); 1863 1.168 msaitoh } 1864 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) { 1865 1.168 msaitoh evcnt_attach_dynamic(&stats->pxontxc[i], 1866 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf, 1867 1.331 msaitoh "Priority XON Transmitted"); 1868 1.168 msaitoh evcnt_attach_dynamic(&stats->pxofftxc[i], 1869 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf, 1870 1.331 msaitoh "Priority XOFF Transmitted"); 1871 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) 1872 1.168 msaitoh evcnt_attach_dynamic(&stats->pxon2offc[i], 1873 1.168 msaitoh EVCNT_TYPE_MISC, NULL, 1874 1.333 msaitoh sc->tcs[i].evnamebuf, 1875 1.331 msaitoh "Priority XON to XOFF"); 1876 1.330 msaitoh evcnt_attach_dynamic(&stats->pxonrxc[i], 1877 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf, 1878 1.331 msaitoh "Priority XON Received"); 1879 1.330 msaitoh evcnt_attach_dynamic(&stats->pxoffrxc[i], 1880 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->tcs[i].evnamebuf, 1881 1.331 msaitoh "Priority XOFF Received"); 1882 1.168 msaitoh } 1883 1.168 msaitoh } 1884 1.168 msaitoh 1885 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) { 1886 1.135 msaitoh #ifdef LRO 1887 1.135 msaitoh struct lro_ctrl *lro = &rxr->lro; 1888 1.327 msaitoh #endif 1889 1.135 msaitoh 1890 1.333 msaitoh snprintf(sc->queues[i].evnamebuf, 1891 1.333 msaitoh sizeof(sc->queues[i].evnamebuf), "%s q%d", xname, i); 1892 1.333 msaitoh snprintf(sc->queues[i].namebuf, 1893 1.333 msaitoh sizeof(sc->queues[i].namebuf), "q%d", i); 1894 1.1 dyoung 1895 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) { 1896 1.319 msaitoh aprint_error_dev(dev, 1897 1.319 msaitoh "could not create sysctl root\n"); 1898 1.98 msaitoh break; 1899 1.98 msaitoh } 1900 1.1 dyoung 1901 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &rnode, 1902 1.98 msaitoh 0, CTLTYPE_NODE, 1903 1.333 msaitoh sc->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), 1904 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 1905 1.98 msaitoh break; 1906 1.23 msaitoh 1907 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 1908 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, 1909 1.98 msaitoh "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), 1910 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler, 0, 1911 1.333 msaitoh (void *)&sc->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) 1912 1.98 msaitoh break; 1913 1.1 
dyoung 1914 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 1915 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, 1916 1.98 msaitoh "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), 1917 1.98 msaitoh ixgbe_sysctl_tdh_handler, 0, (void *)txr, 1918 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0) 1919 1.98 msaitoh break; 1920 1.1 dyoung 1921 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 1922 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, 1923 1.98 msaitoh "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), 1924 1.98 msaitoh ixgbe_sysctl_tdt_handler, 0, (void *)txr, 1925 1.98 msaitoh 0, CTL_CREATE, CTL_EOL) != 0) 1926 1.98 msaitoh break; 1927 1.1 dyoung 1928 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 1929 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck", 1930 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor next to check"), 1931 1.280 msaitoh ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0, 1932 1.154 msaitoh CTL_CREATE, CTL_EOL) != 0) 1933 1.154 msaitoh break; 1934 1.154 msaitoh 1935 1.154 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 1936 1.287 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf", 1937 1.287 msaitoh SYSCTL_DESCR("Receive Descriptor next to refresh"), 1938 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0, 1939 1.287 msaitoh CTL_CREATE, CTL_EOL) != 0) 1940 1.287 msaitoh break; 1941 1.287 msaitoh 1942 1.287 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 1943 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head", 1944 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Head"), 1945 1.98 msaitoh ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0, 1946 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0) 1947 1.33 msaitoh break; 1948 1.98 msaitoh 1949 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 1950 1.280 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail", 1951 1.280 msaitoh SYSCTL_DESCR("Receive Descriptor Tail"), 1952 1.98 msaitoh ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0, 1953 1.98 msaitoh CTL_CREATE, CTL_EOL) != 0) 1954 1.28 msaitoh break; 1955 1.98 msaitoh 1956 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].irqs, EVCNT_TYPE_INTR, 1957 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "IRQs on queue"); 1958 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].handleq, 1959 1.333 msaitoh EVCNT_TYPE_MISC, NULL, sc->queues[i].evnamebuf, 1960 1.327 msaitoh "Handled queue in softint"); 1961 1.333 msaitoh evcnt_attach_dynamic(&sc->queues[i].req, EVCNT_TYPE_MISC, 1962 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Requeued in softint"); 1963 1.327 msaitoh if (i < __arraycount(stats->qbtc)) 1964 1.327 msaitoh evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC, 1965 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1966 1.328 msaitoh "Queue Bytes Transmitted (reg)"); 1967 1.327 msaitoh evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, 1968 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1969 1.328 msaitoh "Queue Packets Transmitted (soft)"); 1970 1.327 msaitoh if (i < __arraycount(stats->qptc)) 1971 1.280 msaitoh evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC, 1972 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1973 1.328 msaitoh "Queue Packets Transmitted (reg)"); 1974 1.327 msaitoh #ifndef IXGBE_LEGACY_TX 1975 1.327 msaitoh evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, 1976 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1977 1.327 msaitoh "Packets dropped in pcq"); 1978 1.327 msaitoh #endif 1979 1.327 msaitoh evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, 1980 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1981 
1.327 msaitoh "TX Queue No Descriptor Available"); 1982 1.327 msaitoh evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, 1983 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "TSO"); 1984 1.327 msaitoh 1985 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, 1986 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1987 1.328 msaitoh "Queue Bytes Received (soft)"); 1988 1.327 msaitoh if (i < __arraycount(stats->qbrc)) 1989 1.280 msaitoh evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC, 1990 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1991 1.328 msaitoh "Queue Bytes Received (reg)"); 1992 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, 1993 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1994 1.328 msaitoh "Queue Packets Received (soft)"); 1995 1.327 msaitoh if (i < __arraycount(stats->qprc)) 1996 1.327 msaitoh evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC, 1997 1.333 msaitoh NULL, sc->queues[i].evnamebuf, 1998 1.328 msaitoh "Queue Packets Received (reg)"); 1999 1.327 msaitoh if ((i < __arraycount(stats->qprdc)) && 2000 1.327 msaitoh (hw->mac.type >= ixgbe_mac_82599EB)) 2001 1.151 msaitoh evcnt_attach_dynamic(&stats->qprdc[i], 2002 1.151 msaitoh EVCNT_TYPE_MISC, NULL, 2003 1.333 msaitoh sc->queues[i].evnamebuf, 2004 1.328 msaitoh "Queue Packets Received Drop"); 2005 1.33 msaitoh 2006 1.290 msaitoh evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC, 2007 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx no mbuf"); 2008 1.98 msaitoh evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, 2009 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Rx discarded"); 2010 1.327 msaitoh evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, 2011 1.333 msaitoh NULL, sc->queues[i].evnamebuf, "Copied RX Frames"); 2012 1.98 msaitoh #ifdef LRO 2013 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", 2014 1.98 msaitoh CTLFLAG_RD, &lro->lro_queued, 0, 2015 1.98 msaitoh "LRO Queued"); 2016 1.98 msaitoh SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", 2017 1.98 msaitoh CTLFLAG_RD, &lro->lro_flushed, 0, 2018 1.98 msaitoh "LRO Flushed"); 2019 1.98 msaitoh #endif /* LRO */ 2020 1.1 dyoung } 2021 1.28 msaitoh 2022 1.99 msaitoh /* MAC stats get their own sub node */ 2023 1.98 msaitoh 2024 1.98 msaitoh snprintf(stats->namebuf, 2025 1.98 msaitoh sizeof(stats->namebuf), "%s MAC Statistics", xname); 2026 1.98 msaitoh 2027 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, 2028 1.98 msaitoh stats->namebuf, "rx csum offload - IP"); 2029 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, 2030 1.98 msaitoh stats->namebuf, "rx csum offload - L4"); 2031 1.98 msaitoh evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, 2032 1.98 msaitoh stats->namebuf, "rx csum offload - IP bad"); 2033 1.98 msaitoh evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, 2034 1.98 msaitoh stats->namebuf, "rx csum offload - L4 bad"); 2035 1.98 msaitoh evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL, 2036 1.98 msaitoh stats->namebuf, "Interrupt conditions zero"); 2037 1.98 msaitoh evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL, 2038 1.98 msaitoh stats->namebuf, "Legacy interrupts"); 2039 1.99 msaitoh 2040 1.98 msaitoh evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL, 2041 1.98 msaitoh stats->namebuf, "CRC Errors"); 2042 1.98 msaitoh evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL, 2043 1.98 msaitoh stats->namebuf, "Illegal Byte Errors"); 2044 1.98 msaitoh 
evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL, 2045 1.98 msaitoh stats->namebuf, "Byte Errors"); 2046 1.98 msaitoh evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL, 2047 1.98 msaitoh stats->namebuf, "MAC Short Packets Discarded"); 2048 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550) 2049 1.98 msaitoh evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL, 2050 1.98 msaitoh stats->namebuf, "Bad SFD"); 2051 1.98 msaitoh evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL, 2052 1.98 msaitoh stats->namebuf, "Total Packets Missed"); 2053 1.98 msaitoh evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL, 2054 1.98 msaitoh stats->namebuf, "MAC Local Faults"); 2055 1.98 msaitoh evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL, 2056 1.98 msaitoh stats->namebuf, "MAC Remote Faults"); 2057 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a) 2058 1.326 msaitoh evcnt_attach_dynamic(&stats->link_dn_cnt, EVCNT_TYPE_MISC, 2059 1.326 msaitoh NULL, stats->namebuf, "Link down event in the MAC"); 2060 1.98 msaitoh evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL, 2061 1.98 msaitoh stats->namebuf, "Receive Length Errors"); 2062 1.98 msaitoh evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL, 2063 1.98 msaitoh stats->namebuf, "Link XON Transmitted"); 2064 1.330 msaitoh evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL, 2065 1.330 msaitoh stats->namebuf, "Link XOFF Transmitted"); 2066 1.98 msaitoh evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL, 2067 1.98 msaitoh stats->namebuf, "Link XON Received"); 2068 1.98 msaitoh evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL, 2069 1.98 msaitoh stats->namebuf, "Link XOFF Received"); 2070 1.98 msaitoh 2071 1.98 msaitoh /* Packet Reception Stats */ 2072 1.98 msaitoh evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL, 2073 1.98 msaitoh stats->namebuf, "Total Octets Received"); 2074 1.98 msaitoh evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL, 2075 1.98 msaitoh stats->namebuf, "Good Octets Received"); 2076 1.98 msaitoh evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL, 2077 1.98 msaitoh stats->namebuf, "Total Packets Received"); 2078 1.98 msaitoh evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL, 2079 1.98 msaitoh stats->namebuf, "Good Packets Received"); 2080 1.98 msaitoh evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL, 2081 1.98 msaitoh stats->namebuf, "Multicast Packets Received"); 2082 1.98 msaitoh evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL, 2083 1.98 msaitoh stats->namebuf, "Broadcast Packets Received"); 2084 1.98 msaitoh evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL, 2085 1.98 msaitoh stats->namebuf, "64 byte frames received "); 2086 1.98 msaitoh evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL, 2087 1.98 msaitoh stats->namebuf, "65-127 byte frames received"); 2088 1.98 msaitoh evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL, 2089 1.98 msaitoh stats->namebuf, "128-255 byte frames received"); 2090 1.98 msaitoh evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL, 2091 1.98 msaitoh stats->namebuf, "256-511 byte frames received"); 2092 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL, 2093 1.98 msaitoh stats->namebuf, "512-1023 byte frames received"); 2094 1.98 msaitoh evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL, 2095 1.98 msaitoh stats->namebuf, "1023-1522 byte frames received"); 2096 1.98 msaitoh evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, 
NULL, 2097 1.98 msaitoh stats->namebuf, "Receive Undersized"); 2098 1.98 msaitoh evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL, 2099 1.98 msaitoh stats->namebuf, "Fragmented Packets Received "); 2100 1.98 msaitoh evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL, 2101 1.98 msaitoh stats->namebuf, "Oversized Packets Received"); 2102 1.98 msaitoh evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL, 2103 1.98 msaitoh stats->namebuf, "Received Jabber"); 2104 1.98 msaitoh evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL, 2105 1.98 msaitoh stats->namebuf, "Management Packets Received"); 2106 1.98 msaitoh evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL, 2107 1.98 msaitoh stats->namebuf, "Management Packets Dropped"); 2108 1.98 msaitoh evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL, 2109 1.98 msaitoh stats->namebuf, "Checksum Errors"); 2110 1.1 dyoung 2111 1.98 msaitoh /* Packet Transmission Stats */ 2112 1.98 msaitoh evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL, 2113 1.98 msaitoh stats->namebuf, "Good Octets Transmitted"); 2114 1.98 msaitoh evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL, 2115 1.98 msaitoh stats->namebuf, "Total Packets Transmitted"); 2116 1.98 msaitoh evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL, 2117 1.98 msaitoh stats->namebuf, "Good Packets Transmitted"); 2118 1.98 msaitoh evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL, 2119 1.98 msaitoh stats->namebuf, "Broadcast Packets Transmitted"); 2120 1.98 msaitoh evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL, 2121 1.98 msaitoh stats->namebuf, "Multicast Packets Transmitted"); 2122 1.98 msaitoh evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL, 2123 1.98 msaitoh stats->namebuf, "Management Packets Transmitted"); 2124 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL, 2125 1.98 msaitoh stats->namebuf, "64 byte frames transmitted "); 2126 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL, 2127 1.98 msaitoh stats->namebuf, "65-127 byte frames transmitted"); 2128 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL, 2129 1.98 msaitoh stats->namebuf, "128-255 byte frames transmitted"); 2130 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL, 2131 1.98 msaitoh stats->namebuf, "256-511 byte frames transmitted"); 2132 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL, 2133 1.98 msaitoh stats->namebuf, "512-1023 byte frames transmitted"); 2134 1.98 msaitoh evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL, 2135 1.98 msaitoh stats->namebuf, "1024-1522 byte frames transmitted"); 2136 1.99 msaitoh } /* ixgbe_add_hw_stats */ 2137 1.48 msaitoh 2138 1.1 dyoung static void 2139 1.333 msaitoh ixgbe_clear_evcnt(struct ixgbe_softc *sc) 2140 1.1 dyoung { 2141 1.333 msaitoh struct tx_ring *txr = sc->tx_rings; 2142 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings; 2143 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 2144 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf; 2145 1.168 msaitoh int i; 2146 1.98 msaitoh 2147 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, 0); 2148 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, 0); 2149 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, 0); 2150 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, 0); 2151 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, 0); 2152 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, 0); 2153 1.333 msaitoh 
IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, 0); 2154 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, 0); 2155 1.333 msaitoh IXGBE_EVC_STORE(&sc->watchdog_events, 0); 2156 1.333 msaitoh IXGBE_EVC_STORE(&sc->admin_irqev, 0); 2157 1.333 msaitoh IXGBE_EVC_STORE(&sc->link_workev, 0); 2158 1.333 msaitoh IXGBE_EVC_STORE(&sc->mod_workev, 0); 2159 1.333 msaitoh IXGBE_EVC_STORE(&sc->msf_workev, 0); 2160 1.333 msaitoh IXGBE_EVC_STORE(&sc->phy_workev, 0); 2161 1.98 msaitoh 2162 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 2163 1.168 msaitoh if (i < __arraycount(stats->mpc)) { 2164 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpc[i], 0); 2165 1.168 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) 2166 1.305 msaitoh IXGBE_EVC_STORE(&stats->rnbc[i], 0); 2167 1.168 msaitoh } 2168 1.168 msaitoh if (i < __arraycount(stats->pxontxc)) { 2169 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxontxc[i], 0); 2170 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxonrxc[i], 0); 2171 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxofftxc[i], 0); 2172 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxoffrxc[i], 0); 2173 1.168 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) 2174 1.305 msaitoh IXGBE_EVC_STORE(&stats->pxon2offc[i], 0); 2175 1.168 msaitoh } 2176 1.168 msaitoh } 2177 1.168 msaitoh 2178 1.333 msaitoh txr = sc->tx_rings; 2179 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) { 2180 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].irqs, 0); 2181 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].handleq, 0); 2182 1.333 msaitoh IXGBE_EVC_STORE(&sc->queues[i].req, 0); 2183 1.305 msaitoh IXGBE_EVC_STORE(&txr->total_packets, 0); 2184 1.98 msaitoh #ifndef IXGBE_LEGACY_TX 2185 1.305 msaitoh IXGBE_EVC_STORE(&txr->pcq_drops, 0); 2186 1.45 msaitoh #endif 2187 1.327 msaitoh IXGBE_EVC_STORE(&txr->no_desc_avail, 0); 2188 1.327 msaitoh IXGBE_EVC_STORE(&txr->tso_tx, 0); 2189 1.134 msaitoh txr->q_efbig_tx_dma_setup = 0; 2190 1.134 msaitoh txr->q_mbuf_defrag_failed = 0; 2191 1.134 msaitoh txr->q_efbig2_tx_dma_setup = 0; 2192 1.134 msaitoh txr->q_einval_tx_dma_setup = 0; 2193 1.134 msaitoh txr->q_other_tx_dma_setup = 0; 2194 1.134 msaitoh txr->q_eagain_tx_dma_setup = 0; 2195 1.134 msaitoh txr->q_enomem_tx_dma_setup = 0; 2196 1.134 msaitoh txr->q_tso_err = 0; 2197 1.1 dyoung 2198 1.98 msaitoh if (i < __arraycount(stats->qprc)) { 2199 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprc[i], 0); 2200 1.305 msaitoh IXGBE_EVC_STORE(&stats->qptc[i], 0); 2201 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbrc[i], 0); 2202 1.305 msaitoh IXGBE_EVC_STORE(&stats->qbtc[i], 0); 2203 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) 2204 1.305 msaitoh IXGBE_EVC_STORE(&stats->qprdc[i], 0); 2205 1.98 msaitoh } 2206 1.98 msaitoh 2207 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_packets, 0); 2208 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_bytes, 0); 2209 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_copies, 0); 2210 1.305 msaitoh IXGBE_EVC_STORE(&rxr->no_mbuf, 0); 2211 1.305 msaitoh IXGBE_EVC_STORE(&rxr->rx_discarded, 0); 2212 1.305 msaitoh } 2213 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs, 0); 2214 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs, 0); 2215 1.305 msaitoh IXGBE_EVC_STORE(&stats->ipcs_bad, 0); 2216 1.305 msaitoh IXGBE_EVC_STORE(&stats->l4cs_bad, 0); 2217 1.305 msaitoh IXGBE_EVC_STORE(&stats->intzero, 0); 2218 1.305 msaitoh IXGBE_EVC_STORE(&stats->legint, 0); 2219 1.305 msaitoh IXGBE_EVC_STORE(&stats->crcerrs, 0); 2220 1.305 msaitoh IXGBE_EVC_STORE(&stats->illerrc, 0); 2221 1.305 msaitoh IXGBE_EVC_STORE(&stats->errbc, 0); 2222 1.305 msaitoh IXGBE_EVC_STORE(&stats->mspdc, 0); 2223 1.209 msaitoh if 
(hw->mac.type >= ixgbe_mac_X550) 2224 1.305 msaitoh IXGBE_EVC_STORE(&stats->mbsdc, 0); 2225 1.305 msaitoh IXGBE_EVC_STORE(&stats->mpctotal, 0); 2226 1.305 msaitoh IXGBE_EVC_STORE(&stats->mlfc, 0); 2227 1.305 msaitoh IXGBE_EVC_STORE(&stats->mrfc, 0); 2228 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a) 2229 1.326 msaitoh IXGBE_EVC_STORE(&stats->link_dn_cnt, 0); 2230 1.305 msaitoh IXGBE_EVC_STORE(&stats->rlec, 0); 2231 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxontxc, 0); 2232 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxonrxc, 0); 2233 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxofftxc, 0); 2234 1.305 msaitoh IXGBE_EVC_STORE(&stats->lxoffrxc, 0); 2235 1.98 msaitoh 2236 1.98 msaitoh /* Packet Reception Stats */ 2237 1.305 msaitoh IXGBE_EVC_STORE(&stats->tor, 0); 2238 1.305 msaitoh IXGBE_EVC_STORE(&stats->gorc, 0); 2239 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpr, 0); 2240 1.305 msaitoh IXGBE_EVC_STORE(&stats->gprc, 0); 2241 1.305 msaitoh IXGBE_EVC_STORE(&stats->mprc, 0); 2242 1.305 msaitoh IXGBE_EVC_STORE(&stats->bprc, 0); 2243 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc64, 0); 2244 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc127, 0); 2245 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc255, 0); 2246 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc511, 0); 2247 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1023, 0); 2248 1.305 msaitoh IXGBE_EVC_STORE(&stats->prc1522, 0); 2249 1.305 msaitoh IXGBE_EVC_STORE(&stats->ruc, 0); 2250 1.305 msaitoh IXGBE_EVC_STORE(&stats->rfc, 0); 2251 1.305 msaitoh IXGBE_EVC_STORE(&stats->roc, 0); 2252 1.305 msaitoh IXGBE_EVC_STORE(&stats->rjc, 0); 2253 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngprc, 0); 2254 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngpdc, 0); 2255 1.305 msaitoh IXGBE_EVC_STORE(&stats->xec, 0); 2256 1.98 msaitoh 2257 1.98 msaitoh /* Packet Transmission Stats */ 2258 1.305 msaitoh IXGBE_EVC_STORE(&stats->gotc, 0); 2259 1.305 msaitoh IXGBE_EVC_STORE(&stats->tpt, 0); 2260 1.305 msaitoh IXGBE_EVC_STORE(&stats->gptc, 0); 2261 1.305 msaitoh IXGBE_EVC_STORE(&stats->bptc, 0); 2262 1.305 msaitoh IXGBE_EVC_STORE(&stats->mptc, 0); 2263 1.305 msaitoh IXGBE_EVC_STORE(&stats->mngptc, 0); 2264 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc64, 0); 2265 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc127, 0); 2266 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc255, 0); 2267 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc511, 0); 2268 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1023, 0); 2269 1.305 msaitoh IXGBE_EVC_STORE(&stats->ptc1522, 0); 2270 1.98 msaitoh } 2271 1.98 msaitoh 2272 1.99 msaitoh /************************************************************************ 2273 1.99 msaitoh * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 2274 1.99 msaitoh * 2275 1.99 msaitoh * Retrieves the TDH value from the hardware 2276 1.99 msaitoh ************************************************************************/ 2277 1.185 msaitoh static int 2278 1.98 msaitoh ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS) 2279 1.98 msaitoh { 2280 1.98 msaitoh struct sysctlnode node = *rnode; 2281 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 2282 1.333 msaitoh struct ixgbe_softc *sc; 2283 1.98 msaitoh uint32_t val; 2284 1.98 msaitoh 2285 1.99 msaitoh if (!txr) 2286 1.99 msaitoh return (0); 2287 1.99 msaitoh 2288 1.333 msaitoh sc = txr->sc; 2289 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 2290 1.169 msaitoh return (EPERM); 2291 1.169 msaitoh 2292 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDH(txr->me)); 2293 1.98 msaitoh node.sysctl_data = &val; 2294 1.98 msaitoh return 
sysctl_lookup(SYSCTLFN_CALL(&node)); 2295 1.99 msaitoh } /* ixgbe_sysctl_tdh_handler */ 2296 1.98 msaitoh 2297 1.99 msaitoh /************************************************************************ 2298 1.99 msaitoh * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 2299 1.99 msaitoh * 2300 1.99 msaitoh * Retrieves the TDT value from the hardware 2301 1.99 msaitoh ************************************************************************/ 2302 1.185 msaitoh static int 2303 1.98 msaitoh ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS) 2304 1.98 msaitoh { 2305 1.98 msaitoh struct sysctlnode node = *rnode; 2306 1.99 msaitoh struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 2307 1.333 msaitoh struct ixgbe_softc *sc; 2308 1.98 msaitoh uint32_t val; 2309 1.1 dyoung 2310 1.99 msaitoh if (!txr) 2311 1.99 msaitoh return (0); 2312 1.99 msaitoh 2313 1.333 msaitoh sc = txr->sc; 2314 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 2315 1.169 msaitoh return (EPERM); 2316 1.169 msaitoh 2317 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_TDT(txr->me)); 2318 1.98 msaitoh node.sysctl_data = &val; 2319 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node)); 2320 1.99 msaitoh } /* ixgbe_sysctl_tdt_handler */ 2321 1.45 msaitoh 2322 1.99 msaitoh /************************************************************************ 2323 1.154 msaitoh * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check 2324 1.154 msaitoh * handler function 2325 1.154 msaitoh * 2326 1.154 msaitoh * Retrieves the next_to_check value 2327 1.154 msaitoh ************************************************************************/ 2328 1.185 msaitoh static int 2329 1.154 msaitoh ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS) 2330 1.154 msaitoh { 2331 1.154 msaitoh struct sysctlnode node = *rnode; 2332 1.154 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2333 1.333 msaitoh struct ixgbe_softc *sc; 2334 1.154 msaitoh uint32_t val; 2335 1.154 msaitoh 2336 1.154 msaitoh if (!rxr) 2337 1.154 msaitoh return (0); 2338 1.154 msaitoh 2339 1.333 msaitoh sc = rxr->sc; 2340 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 2341 1.169 msaitoh return (EPERM); 2342 1.169 msaitoh 2343 1.154 msaitoh val = rxr->next_to_check; 2344 1.154 msaitoh node.sysctl_data = &val; 2345 1.154 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node)); 2346 1.154 msaitoh } /* ixgbe_sysctl_next_to_check_handler */ 2347 1.154 msaitoh 2348 1.154 msaitoh /************************************************************************ 2349 1.287 msaitoh * ixgbe_sysctl_next_to_refresh_handler - Receive Descriptor next to check 2350 1.287 msaitoh * handler function 2351 1.287 msaitoh * 2352 1.287 msaitoh * Retrieves the next_to_refresh value 2353 1.287 msaitoh ************************************************************************/ 2354 1.287 msaitoh static int 2355 1.287 msaitoh ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_ARGS) 2356 1.287 msaitoh { 2357 1.287 msaitoh struct sysctlnode node = *rnode; 2358 1.287 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2359 1.333 msaitoh struct ixgbe_softc *sc; 2360 1.287 msaitoh uint32_t val; 2361 1.287 msaitoh 2362 1.287 msaitoh if (!rxr) 2363 1.287 msaitoh return (0); 2364 1.287 msaitoh 2365 1.333 msaitoh sc = rxr->sc; 2366 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 2367 1.287 msaitoh return (EPERM); 2368 1.287 msaitoh 2369 1.287 msaitoh val = rxr->next_to_refresh; 2370 1.287 msaitoh node.sysctl_data = &val; 2371 1.287 msaitoh return 
sysctl_lookup(SYSCTLFN_CALL(&node)); 2372 1.287 msaitoh } /* ixgbe_sysctl_next_to_refresh_handler */ 2373 1.287 msaitoh 2374 1.287 msaitoh /************************************************************************ 2375 1.99 msaitoh * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 2376 1.99 msaitoh * 2377 1.99 msaitoh * Retrieves the RDH value from the hardware 2378 1.99 msaitoh ************************************************************************/ 2379 1.185 msaitoh static int 2380 1.98 msaitoh ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS) 2381 1.98 msaitoh { 2382 1.98 msaitoh struct sysctlnode node = *rnode; 2383 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2384 1.333 msaitoh struct ixgbe_softc *sc; 2385 1.98 msaitoh uint32_t val; 2386 1.1 dyoung 2387 1.99 msaitoh if (!rxr) 2388 1.99 msaitoh return (0); 2389 1.99 msaitoh 2390 1.333 msaitoh sc = rxr->sc; 2391 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 2392 1.169 msaitoh return (EPERM); 2393 1.169 msaitoh 2394 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDH(rxr->me)); 2395 1.98 msaitoh node.sysctl_data = &val; 2396 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node)); 2397 1.99 msaitoh } /* ixgbe_sysctl_rdh_handler */ 2398 1.1 dyoung 2399 1.99 msaitoh /************************************************************************ 2400 1.99 msaitoh * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 2401 1.99 msaitoh * 2402 1.99 msaitoh * Retrieves the RDT value from the hardware 2403 1.99 msaitoh ************************************************************************/ 2404 1.185 msaitoh static int 2405 1.98 msaitoh ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS) 2406 1.98 msaitoh { 2407 1.98 msaitoh struct sysctlnode node = *rnode; 2408 1.99 msaitoh struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2409 1.333 msaitoh struct ixgbe_softc *sc; 2410 1.98 msaitoh uint32_t val; 2411 1.1 dyoung 2412 1.99 msaitoh if (!rxr) 2413 1.99 msaitoh return (0); 2414 1.99 msaitoh 2415 1.333 msaitoh sc = rxr->sc; 2416 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 2417 1.169 msaitoh return (EPERM); 2418 1.169 msaitoh 2419 1.333 msaitoh val = IXGBE_READ_REG(&sc->hw, IXGBE_RDT(rxr->me)); 2420 1.98 msaitoh node.sysctl_data = &val; 2421 1.98 msaitoh return sysctl_lookup(SYSCTLFN_CALL(&node)); 2422 1.99 msaitoh } /* ixgbe_sysctl_rdt_handler */ 2423 1.1 dyoung 2424 1.193 msaitoh static int 2425 1.193 msaitoh ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 2426 1.193 msaitoh { 2427 1.193 msaitoh struct ifnet *ifp = &ec->ec_if; 2428 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc; 2429 1.193 msaitoh int rv; 2430 1.193 msaitoh 2431 1.193 msaitoh if (set) 2432 1.333 msaitoh rv = ixgbe_register_vlan(sc, vid); 2433 1.193 msaitoh else 2434 1.333 msaitoh rv = ixgbe_unregister_vlan(sc, vid); 2435 1.193 msaitoh 2436 1.200 msaitoh if (rv != 0) 2437 1.200 msaitoh return rv; 2438 1.200 msaitoh 2439 1.200 msaitoh /* 2440 1.200 msaitoh * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0 2441 1.200 msaitoh * or 0 to 1. 
2442 1.200 msaitoh */ 2443 1.200 msaitoh if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0))) 2444 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc); 2445 1.200 msaitoh 2446 1.193 msaitoh return rv; 2447 1.193 msaitoh } 2448 1.193 msaitoh 2449 1.99 msaitoh /************************************************************************ 2450 1.99 msaitoh * ixgbe_register_vlan 2451 1.99 msaitoh * 2452 1.99 msaitoh * Run via vlan config EVENT, it enables us to use the 2453 1.99 msaitoh * HW Filter table since we can get the vlan id. This 2454 1.99 msaitoh * just creates the entry in the soft version of the 2455 1.99 msaitoh * VFTA, init will repopulate the real table. 2456 1.99 msaitoh ************************************************************************/ 2457 1.193 msaitoh static int 2458 1.333 msaitoh ixgbe_register_vlan(struct ixgbe_softc *sc, u16 vtag) 2459 1.98 msaitoh { 2460 1.98 msaitoh u16 index, bit; 2461 1.193 msaitoh int error; 2462 1.48 msaitoh 2463 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2464 1.193 msaitoh return EINVAL; 2465 1.1 dyoung 2466 1.333 msaitoh IXGBE_CORE_LOCK(sc); 2467 1.98 msaitoh index = (vtag >> 5) & 0x7F; 2468 1.98 msaitoh bit = vtag & 0x1F; 2469 1.333 msaitoh sc->shadow_vfta[index] |= ((u32)1 << bit); 2470 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, true, 2471 1.193 msaitoh true); 2472 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 2473 1.193 msaitoh if (error != 0) 2474 1.193 msaitoh error = EACCES; 2475 1.193 msaitoh 2476 1.193 msaitoh return error; 2477 1.99 msaitoh } /* ixgbe_register_vlan */ 2478 1.1 dyoung 2479 1.99 msaitoh /************************************************************************ 2480 1.99 msaitoh * ixgbe_unregister_vlan 2481 1.99 msaitoh * 2482 1.99 msaitoh * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 
2483 1.99 msaitoh ************************************************************************/ 2484 1.193 msaitoh static int 2485 1.333 msaitoh ixgbe_unregister_vlan(struct ixgbe_softc *sc, u16 vtag) 2486 1.98 msaitoh { 2487 1.98 msaitoh u16 index, bit; 2488 1.193 msaitoh int error; 2489 1.1 dyoung 2490 1.98 msaitoh if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2491 1.193 msaitoh return EINVAL; 2492 1.1 dyoung 2493 1.333 msaitoh IXGBE_CORE_LOCK(sc); 2494 1.98 msaitoh index = (vtag >> 5) & 0x7F; 2495 1.98 msaitoh bit = vtag & 0x1F; 2496 1.333 msaitoh sc->shadow_vfta[index] &= ~((u32)1 << bit); 2497 1.333 msaitoh error = sc->hw.mac.ops.set_vfta(&sc->hw, vtag, 0, false, 2498 1.193 msaitoh true); 2499 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 2500 1.193 msaitoh if (error != 0) 2501 1.193 msaitoh error = EACCES; 2502 1.193 msaitoh 2503 1.193 msaitoh return error; 2504 1.99 msaitoh } /* ixgbe_unregister_vlan */ 2505 1.98 msaitoh 2506 1.98 msaitoh static void 2507 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *sc) 2508 1.98 msaitoh { 2509 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec; 2510 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 2511 1.98 msaitoh struct rx_ring *rxr; 2512 1.200 msaitoh u32 ctrl; 2513 1.186 msaitoh int i; 2514 1.177 msaitoh bool hwtagging; 2515 1.98 msaitoh 2516 1.178 msaitoh /* Enable HW tagging only if any vlan is attached */ 2517 1.177 msaitoh hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) 2518 1.178 msaitoh && VLAN_ATTACHED(ec); 2519 1.1 dyoung 2520 1.98 msaitoh /* Setup the queues for vlans */ 2521 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) { 2522 1.333 msaitoh rxr = &sc->rx_rings[i]; 2523 1.178 msaitoh /* 2524 1.178 msaitoh * On 82599 and later, the VLAN enable is per/queue in RXDCTL. 2525 1.178 msaitoh */ 2526 1.177 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) { 2527 1.177 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 2528 1.177 msaitoh if (hwtagging) 2529 1.115 msaitoh ctrl |= IXGBE_RXDCTL_VME; 2530 1.177 msaitoh else 2531 1.177 msaitoh ctrl &= ~IXGBE_RXDCTL_VME; 2532 1.177 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 2533 1.98 msaitoh } 2534 1.177 msaitoh rxr->vtag_strip = hwtagging ? TRUE : FALSE; 2535 1.1 dyoung } 2536 1.1 dyoung 2537 1.200 msaitoh /* VLAN hw tagging for 82598 */ 2538 1.200 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) { 2539 1.200 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2540 1.200 msaitoh if (hwtagging) 2541 1.200 msaitoh ctrl |= IXGBE_VLNCTRL_VME; 2542 1.200 msaitoh else 2543 1.200 msaitoh ctrl &= ~IXGBE_VLNCTRL_VME; 2544 1.200 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2545 1.200 msaitoh } 2546 1.200 msaitoh } /* ixgbe_setup_vlan_hw_tagging */ 2547 1.200 msaitoh 2548 1.200 msaitoh static void 2549 1.333 msaitoh ixgbe_setup_vlan_hw_support(struct ixgbe_softc *sc) 2550 1.200 msaitoh { 2551 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec; 2552 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 2553 1.200 msaitoh int i; 2554 1.200 msaitoh u32 ctrl; 2555 1.200 msaitoh struct vlanid_list *vlanidp; 2556 1.200 msaitoh 2557 1.200 msaitoh /* 2558 1.294 skrll * This function is called from both if_init and ifflags_cb() 2559 1.200 msaitoh * on NetBSD. 
2560 1.200 msaitoh */ 2561 1.200 msaitoh 2562 1.200 msaitoh /* 2563 1.200 msaitoh * Part 1: 2564 1.200 msaitoh * Setup VLAN HW tagging 2565 1.200 msaitoh */ 2566 1.333 msaitoh ixgbe_setup_vlan_hw_tagging(sc); 2567 1.200 msaitoh 2568 1.200 msaitoh /* 2569 1.200 msaitoh * Part 2: 2570 1.200 msaitoh * Setup VLAN HW filter 2571 1.200 msaitoh */ 2572 1.193 msaitoh /* Cleanup shadow_vfta */ 2573 1.193 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++) 2574 1.333 msaitoh sc->shadow_vfta[i] = 0; 2575 1.193 msaitoh /* Generate shadow_vfta from ec_vids */ 2576 1.201 msaitoh ETHER_LOCK(ec); 2577 1.193 msaitoh SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 2578 1.193 msaitoh uint32_t idx; 2579 1.193 msaitoh 2580 1.193 msaitoh idx = vlanidp->vid / 32; 2581 1.193 msaitoh KASSERT(idx < IXGBE_VFTA_SIZE); 2582 1.333 msaitoh sc->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); 2583 1.193 msaitoh } 2584 1.201 msaitoh ETHER_UNLOCK(ec); 2585 1.99 msaitoh for (i = 0; i < IXGBE_VFTA_SIZE; i++) 2586 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), sc->shadow_vfta[i]); 2587 1.22 msaitoh 2588 1.98 msaitoh ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2589 1.98 msaitoh /* Enable the Filter Table if enabled */ 2590 1.177 msaitoh if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) 2591 1.98 msaitoh ctrl |= IXGBE_VLNCTRL_VFE; 2592 1.177 msaitoh else 2593 1.177 msaitoh ctrl &= ~IXGBE_VLNCTRL_VFE; 2594 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2595 1.99 msaitoh } /* ixgbe_setup_vlan_hw_support */ 2596 1.1 dyoung 2597 1.99 msaitoh /************************************************************************ 2598 1.99 msaitoh * ixgbe_get_slot_info 2599 1.99 msaitoh * 2600 1.99 msaitoh * Get the width and transaction speed of 2601 1.99 msaitoh * the slot this adapter is plugged into. 2602 1.99 msaitoh ************************************************************************/ 2603 1.98 msaitoh static void 2604 1.333 msaitoh ixgbe_get_slot_info(struct ixgbe_softc *sc) 2605 1.98 msaitoh { 2606 1.333 msaitoh device_t dev = sc->dev; 2607 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 2608 1.186 msaitoh u32 offset; 2609 1.98 msaitoh u16 link; 2610 1.186 msaitoh int bus_info_valid = TRUE; 2611 1.99 msaitoh 2612 1.99 msaitoh /* Some devices are behind an internal bridge */ 2613 1.99 msaitoh switch (hw->device_id) { 2614 1.99 msaitoh case IXGBE_DEV_ID_82599_SFP_SF_QP: 2615 1.99 msaitoh case IXGBE_DEV_ID_82599_QSFP_SF_QP: 2616 1.99 msaitoh goto get_parent_info; 2617 1.99 msaitoh default: 2618 1.99 msaitoh break; 2619 1.99 msaitoh } 2620 1.1 dyoung 2621 1.99 msaitoh ixgbe_get_bus_info(hw); 2622 1.99 msaitoh 2623 1.99 msaitoh /* 2624 1.99 msaitoh * Some devices don't use PCI-E, but there is no need 2625 1.99 msaitoh * to display "Unknown" for bus speed and width. 2626 1.99 msaitoh */ 2627 1.99 msaitoh switch (hw->mac.type) { 2628 1.99 msaitoh case ixgbe_mac_X550EM_x: 2629 1.99 msaitoh case ixgbe_mac_X550EM_a: 2630 1.99 msaitoh return; 2631 1.99 msaitoh default: 2632 1.99 msaitoh goto display; 2633 1.1 dyoung } 2634 1.1 dyoung 2635 1.99 msaitoh get_parent_info: 2636 1.98 msaitoh /* 2637 1.99 msaitoh * For the Quad port adapter we need to parse back 2638 1.99 msaitoh * up the PCI tree to find the speed of the expansion 2639 1.99 msaitoh * slot into which this adapter is plugged. A bit more work. 
2640 1.99 msaitoh */ 2641 1.98 msaitoh dev = device_parent(device_parent(dev)); 2642 1.99 msaitoh #if 0 2643 1.98 msaitoh #ifdef IXGBE_DEBUG 2644 1.99 msaitoh device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 2645 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev)); 2646 1.98 msaitoh #endif 2647 1.98 msaitoh dev = device_parent(device_parent(dev)); 2648 1.98 msaitoh #ifdef IXGBE_DEBUG 2649 1.99 msaitoh device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 2650 1.99 msaitoh pci_get_slot(dev), pci_get_function(dev)); 2651 1.99 msaitoh #endif 2652 1.1 dyoung #endif 2653 1.98 msaitoh /* Now get the PCI Express Capabilities offset */ 2654 1.333 msaitoh if (pci_get_capability(sc->osdep.pc, sc->osdep.tag, 2655 1.99 msaitoh PCI_CAP_PCIEXPRESS, &offset, NULL)) { 2656 1.99 msaitoh /* 2657 1.99 msaitoh * Hmm...can't get PCI-Express capabilities. 2658 1.99 msaitoh * Falling back to default method. 2659 1.99 msaitoh */ 2660 1.99 msaitoh bus_info_valid = FALSE; 2661 1.99 msaitoh ixgbe_get_bus_info(hw); 2662 1.99 msaitoh goto display; 2663 1.99 msaitoh } 2664 1.98 msaitoh /* ...and read the Link Status Register */ 2665 1.333 msaitoh link = pci_conf_read(sc->osdep.pc, sc->osdep.tag, 2666 1.120 msaitoh offset + PCIE_LCSR) >> 16; 2667 1.120 msaitoh ixgbe_set_pci_config_data_generic(hw, link); 2668 1.52 msaitoh 2669 1.98 msaitoh display: 2670 1.99 msaitoh device_printf(dev, "PCI Express Bus: Speed %s Width %s\n", 2671 1.186 msaitoh ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2672 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2673 1.186 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2674 1.99 msaitoh "Unknown"), 2675 1.99 msaitoh ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" : 2676 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" : 2677 1.99 msaitoh (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" : 2678 1.99 msaitoh "Unknown")); 2679 1.99 msaitoh 2680 1.99 msaitoh if (bus_info_valid) { 2681 1.99 msaitoh if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2682 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2683 1.99 msaitoh (hw->bus.speed == ixgbe_bus_speed_2500))) { 2684 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available" 2685 1.99 msaitoh " for this card\n is not sufficient for" 2686 1.99 msaitoh " optimal performance.\n"); 2687 1.99 msaitoh device_printf(dev, "For optimal performance a x8 " 2688 1.99 msaitoh "PCIE, or x4 PCIE Gen2 slot is required.\n"); 2689 1.99 msaitoh } 2690 1.99 msaitoh if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2691 1.99 msaitoh ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2692 1.99 msaitoh (hw->bus.speed < ixgbe_bus_speed_8000))) { 2693 1.99 msaitoh device_printf(dev, "PCI-Express bandwidth available" 2694 1.99 msaitoh " for this card\n is not sufficient for" 2695 1.99 msaitoh " optimal performance.\n"); 2696 1.99 msaitoh device_printf(dev, "For optimal performance a x8 " 2697 1.99 msaitoh "PCIE Gen3 slot is required.\n"); 2698 1.99 msaitoh } 2699 1.99 msaitoh } else 2700 1.319 msaitoh device_printf(dev, 2701 1.319 msaitoh "Unable to determine slot speed/width. 
The speed/width " 2702 1.319 msaitoh "reported are that of the internal switch.\n"); 2703 1.45 msaitoh 2704 1.45 msaitoh return; 2705 1.99 msaitoh } /* ixgbe_get_slot_info */ 2706 1.1 dyoung 2707 1.99 msaitoh /************************************************************************ 2708 1.321 msaitoh * ixgbe_enable_queue - Queue Interrupt Enabler 2709 1.99 msaitoh ************************************************************************/ 2710 1.1 dyoung static inline void 2711 1.333 msaitoh ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector) 2712 1.1 dyoung { 2713 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 2714 1.333 msaitoh struct ix_queue *que = &sc->queues[vector]; 2715 1.197 msaitoh u64 queue = 1ULL << vector; 2716 1.186 msaitoh u32 mask; 2717 1.1 dyoung 2718 1.139 knakahar mutex_enter(&que->dc_mtx); 2719 1.139 knakahar if (que->disabled_count > 0 && --que->disabled_count > 0) 2720 1.127 knakahar goto out; 2721 1.127 knakahar 2722 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) { 2723 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue); 2724 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 2725 1.1 dyoung } else { 2726 1.98 msaitoh mask = (queue & 0xFFFFFFFF); 2727 1.98 msaitoh if (mask) 2728 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 2729 1.98 msaitoh mask = (queue >> 32); 2730 1.98 msaitoh if (mask) 2731 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 2732 1.1 dyoung } 2733 1.127 knakahar out: 2734 1.139 knakahar mutex_exit(&que->dc_mtx); 2735 1.99 msaitoh } /* ixgbe_enable_queue */ 2736 1.1 dyoung 2737 1.99 msaitoh /************************************************************************ 2738 1.139 knakahar * ixgbe_disable_queue_internal 2739 1.99 msaitoh ************************************************************************/ 2740 1.82 msaitoh static inline void 2741 1.333 msaitoh ixgbe_disable_queue_internal(struct ixgbe_softc *sc, u32 vector, bool nestok) 2742 1.1 dyoung { 2743 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 2744 1.333 msaitoh struct ix_queue *que = &sc->queues[vector]; 2745 1.197 msaitoh u64 queue = 1ULL << vector; 2746 1.186 msaitoh u32 mask; 2747 1.1 dyoung 2748 1.139 knakahar mutex_enter(&que->dc_mtx); 2749 1.139 knakahar 2750 1.139 knakahar if (que->disabled_count > 0) { 2751 1.139 knakahar if (nestok) 2752 1.139 knakahar que->disabled_count++; 2753 1.139 knakahar goto out; 2754 1.139 knakahar } 2755 1.139 knakahar que->disabled_count++; 2756 1.127 knakahar 2757 1.1 dyoung if (hw->mac.type == ixgbe_mac_82598EB) { 2758 1.98 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queue); 2759 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 2760 1.1 dyoung } else { 2761 1.98 msaitoh mask = (queue & 0xFFFFFFFF); 2762 1.98 msaitoh if (mask) 2763 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 2764 1.98 msaitoh mask = (queue >> 32); 2765 1.98 msaitoh if (mask) 2766 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 2767 1.1 dyoung } 2768 1.127 knakahar out: 2769 1.139 knakahar mutex_exit(&que->dc_mtx); 2770 1.139 knakahar } /* ixgbe_disable_queue_internal */ 2771 1.139 knakahar 2772 1.139 knakahar /************************************************************************ 2773 1.139 knakahar * ixgbe_disable_queue 2774 1.139 knakahar ************************************************************************/ 2775 1.139 knakahar static inline void 2776 1.333 msaitoh ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector) 2777 1.139 knakahar { 2778 1.139 knakahar 2779 1.333 msaitoh ixgbe_disable_queue_internal(sc, vector, true); 2780 
1.99 msaitoh } /* ixgbe_disable_queue */ 2781 1.1 dyoung 2782 1.99 msaitoh /************************************************************************ 2783 1.133 knakahar * ixgbe_sched_handle_que - schedule deferred packet processing 2784 1.133 knakahar ************************************************************************/ 2785 1.133 knakahar static inline void 2786 1.333 msaitoh ixgbe_sched_handle_que(struct ixgbe_softc *sc, struct ix_queue *que) 2787 1.133 knakahar { 2788 1.133 knakahar 2789 1.185 msaitoh if (que->txrx_use_workqueue) { 2790 1.133 knakahar /* 2791 1.333 msaitoh * sc->que_wq is bound to each CPU instead of 2792 1.133 knakahar * each NIC queue to reduce workqueue kthreads. As we 2793 1.133 knakahar * should consider interrupt affinity in this 2794 1.133 knakahar * function, the workqueue kthread must be WQ_PERCPU. 2795 1.133 knakahar * If we created a WQ_PERCPU workqueue kthread for each NIC 2796 1.133 knakahar * queue, the number of created workqueue kthreads would be 2797 1.133 knakahar * (number of used NIC queues) * (number of CPUs) = 2798 1.133 knakahar * (number of CPUs) ^ 2 most often. 2799 1.133 knakahar * 2800 1.133 knakahar * The same NIC queue's interrupts are avoided by 2801 1.133 knakahar * masking the queue's interrupt. And different 2802 1.133 knakahar * NIC queue's interrupts use different struct work 2803 1.133 knakahar * (que->wq_cookie). So, an "enqueued flag" to avoid 2804 1.133 knakahar * calling workqueue_enqueue() twice is not required. 2805 1.133 knakahar */ 2806 1.333 msaitoh workqueue_enqueue(sc->que_wq, &que->wq_cookie, curcpu()); 2807 1.319 msaitoh } else 2808 1.133 knakahar softint_schedule(que->que_si); 2809 1.133 knakahar } 2810 1.133 knakahar 2811 1.133 knakahar /************************************************************************ 2812 1.99 msaitoh * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2813 1.99 msaitoh ************************************************************************/ 2814 1.34 msaitoh static int 2815 1.1 dyoung ixgbe_msix_que(void *arg) 2816 1.1 dyoung { 2817 1.1 dyoung struct ix_queue *que = arg; 2818 1.339 msaitoh struct ixgbe_softc *sc = que->sc; 2819 1.333 msaitoh struct ifnet *ifp = sc->ifp; 2820 1.1 dyoung struct tx_ring *txr = que->txr; 2821 1.1 dyoung struct rx_ring *rxr = que->rxr; 2822 1.1 dyoung u32 newitr = 0; 2823 1.1 dyoung 2824 1.33 msaitoh /* Protect against spurious interrupts */ 2825 1.33 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0) 2826 1.34 msaitoh return 0; 2827 1.33 msaitoh 2828 1.333 msaitoh ixgbe_disable_queue(sc, que->msix); 2829 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1); 2830 1.1 dyoung 2831 1.147 knakahar /* 2832 1.147 knakahar * Don't change "que->txrx_use_workqueue" from this point to avoid 2833 1.147 knakahar * flip-flopping softint/workqueue mode in one deferred processing. 2834 1.147 knakahar */ 2835 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue; 2836 1.147 knakahar 2837 1.1 dyoung IXGBE_TX_LOCK(txr); 2838 1.33 msaitoh ixgbe_txeof(txr); 2839 1.1 dyoung IXGBE_TX_UNLOCK(txr); 2840 1.1 dyoung 2841 1.1 dyoung /* Do AIM now? */ 2842 1.1 dyoung 2843 1.333 msaitoh if (sc->enable_aim == false) 2844 1.1 dyoung goto no_calc; 2845 1.1 dyoung /* 2846 1.99 msaitoh * Do Adaptive Interrupt Moderation: 2847 1.99 msaitoh * - Write out last calculated setting 2848 1.99 msaitoh * - Calculate based on average size over 2849 1.99 msaitoh * the last interval.
2850 1.99 msaitoh */ 2851 1.99 msaitoh if (que->eitr_setting) 2852 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, que->eitr_setting); 2853 1.99 msaitoh 2854 1.98 msaitoh que->eitr_setting = 0; 2855 1.1 dyoung 2856 1.98 msaitoh /* Idle, do nothing */ 2857 1.186 msaitoh if ((txr->bytes == 0) && (rxr->bytes == 0)) 2858 1.186 msaitoh goto no_calc; 2859 1.185 msaitoh 2860 1.1 dyoung if ((txr->bytes) && (txr->packets)) 2861 1.98 msaitoh newitr = txr->bytes/txr->packets; 2862 1.1 dyoung if ((rxr->bytes) && (rxr->packets)) 2863 1.165 riastrad newitr = uimax(newitr, (rxr->bytes / rxr->packets)); 2864 1.1 dyoung newitr += 24; /* account for hardware frame, crc */ 2865 1.1 dyoung 2866 1.1 dyoung /* set an upper boundary */ 2867 1.165 riastrad newitr = uimin(newitr, 3000); 2868 1.1 dyoung 2869 1.1 dyoung /* Be nice to the mid range */ 2870 1.1 dyoung if ((newitr > 300) && (newitr < 1200)) 2871 1.1 dyoung newitr = (newitr / 3); 2872 1.1 dyoung else 2873 1.1 dyoung newitr = (newitr / 2); 2874 1.1 dyoung 2875 1.124 msaitoh /* 2876 1.124 msaitoh * When RSC is used, ITR interval must be larger than RSC_DELAY. 2877 1.124 msaitoh * Currently, we use 2us for RSC_DELAY. The minimum value is always 2878 1.124 msaitoh * greater than 2us on 100M (and 10M?(not documented)), but it's not 2879 1.124 msaitoh * on 1G and higher. 2880 1.124 msaitoh */ 2881 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL) 2882 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) 2883 1.124 msaitoh if (newitr < IXGBE_MIN_RSC_EITR_10G1G) 2884 1.124 msaitoh newitr = IXGBE_MIN_RSC_EITR_10G1G; 2885 1.124 msaitoh 2886 1.186 msaitoh /* save for next interrupt */ 2887 1.186 msaitoh que->eitr_setting = newitr; 2888 1.1 dyoung 2889 1.98 msaitoh /* Reset state */ 2890 1.98 msaitoh txr->bytes = 0; 2891 1.98 msaitoh txr->packets = 0; 2892 1.98 msaitoh rxr->bytes = 0; 2893 1.98 msaitoh rxr->packets = 0; 2894 1.1 dyoung 2895 1.1 dyoung no_calc: 2896 1.333 msaitoh ixgbe_sched_handle_que(sc, que); 2897 1.99 msaitoh 2898 1.34 msaitoh return 1; 2899 1.99 msaitoh } /* ixgbe_msix_que */ 2900 1.1 dyoung 2901 1.99 msaitoh /************************************************************************ 2902 1.99 msaitoh * ixgbe_media_status - Media Ioctl callback 2903 1.98 msaitoh * 2904 1.99 msaitoh * Called whenever the user queries the status of 2905 1.99 msaitoh * the interface using ifconfig. 
2906 1.99 msaitoh ************************************************************************/ 2907 1.98 msaitoh static void 2908 1.98 msaitoh ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2909 1.1 dyoung { 2910 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc; 2911 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 2912 1.98 msaitoh int layer; 2913 1.1 dyoung 2914 1.98 msaitoh INIT_DEBUGOUT("ixgbe_media_status: begin"); 2915 1.333 msaitoh ixgbe_update_link_status(sc); 2916 1.1 dyoung 2917 1.1 dyoung ifmr->ifm_status = IFM_AVALID; 2918 1.1 dyoung ifmr->ifm_active = IFM_ETHER; 2919 1.1 dyoung 2920 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) { 2921 1.68 msaitoh ifmr->ifm_active |= IFM_NONE; 2922 1.1 dyoung return; 2923 1.1 dyoung } 2924 1.1 dyoung 2925 1.1 dyoung ifmr->ifm_status |= IFM_ACTIVE; 2926 1.333 msaitoh layer = sc->phy_layer; 2927 1.1 dyoung 2928 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2929 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_5GBASE_T || 2930 1.103 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_T || 2931 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2932 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2933 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2934 1.333 msaitoh switch (sc->link_speed) { 2935 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 2936 1.43 msaitoh ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2937 1.43 msaitoh break; 2938 1.103 msaitoh case IXGBE_LINK_SPEED_5GB_FULL: 2939 1.103 msaitoh ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 2940 1.103 msaitoh break; 2941 1.103 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL: 2942 1.103 msaitoh ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 2943 1.103 msaitoh break; 2944 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL: 2945 1.33 msaitoh ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2946 1.43 msaitoh break; 2947 1.43 msaitoh case IXGBE_LINK_SPEED_100_FULL: 2948 1.24 msaitoh ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2949 1.43 msaitoh break; 2950 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL: 2951 1.99 msaitoh ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2952 1.99 msaitoh break; 2953 1.43 msaitoh } 2954 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2955 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2956 1.333 msaitoh switch (sc->link_speed) { 2957 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 2958 1.43 msaitoh ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2959 1.43 msaitoh break; 2960 1.43 msaitoh } 2961 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2962 1.333 msaitoh switch (sc->link_speed) { 2963 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 2964 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2965 1.43 msaitoh break; 2966 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL: 2967 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2968 1.43 msaitoh break; 2969 1.43 msaitoh } 2970 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2971 1.333 msaitoh switch (sc->link_speed) { 2972 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 2973 1.43 msaitoh ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2974 1.43 msaitoh break; 2975 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL: 2976 1.43 msaitoh ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2977 1.43 msaitoh break; 2978 1.43 msaitoh } 2979 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2980 1.43 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2981 1.333 msaitoh switch (sc->link_speed) { 2982 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 2983 1.43 msaitoh ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2984 1.43 
msaitoh break; 2985 1.43 msaitoh case IXGBE_LINK_SPEED_1GB_FULL: 2986 1.28 msaitoh ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2987 1.43 msaitoh break; 2988 1.43 msaitoh } 2989 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2990 1.333 msaitoh switch (sc->link_speed) { 2991 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 2992 1.43 msaitoh ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2993 1.43 msaitoh break; 2994 1.43 msaitoh } 2995 1.43 msaitoh /* 2996 1.99 msaitoh * XXX: These need to use the proper media types once 2997 1.99 msaitoh * they're added. 2998 1.99 msaitoh */ 2999 1.43 msaitoh if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 3000 1.333 msaitoh switch (sc->link_speed) { 3001 1.43 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 3002 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 3003 1.48 msaitoh break; 3004 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL: 3005 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 3006 1.48 msaitoh break; 3007 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL: 3008 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 3009 1.48 msaitoh break; 3010 1.48 msaitoh } 3011 1.99 msaitoh else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 3012 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 3013 1.99 msaitoh layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 3014 1.333 msaitoh switch (sc->link_speed) { 3015 1.48 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 3016 1.48 msaitoh ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 3017 1.48 msaitoh break; 3018 1.48 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL: 3019 1.48 msaitoh ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 3020 1.48 msaitoh break; 3021 1.48 msaitoh case IXGBE_LINK_SPEED_1GB_FULL: 3022 1.48 msaitoh ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 3023 1.48 msaitoh break; 3024 1.48 msaitoh } 3025 1.98 msaitoh 3026 1.43 msaitoh /* If nothing is recognized... */ 3027 1.43 msaitoh #if 0 3028 1.43 msaitoh if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 3029 1.43 msaitoh ifmr->ifm_active |= IFM_UNKNOWN; 3030 1.43 msaitoh #endif 3031 1.98 msaitoh 3032 1.104 msaitoh ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active); 3033 1.104 msaitoh 3034 1.44 msaitoh /* Display current flow control setting used on link */ 3035 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_rx_pause || 3036 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full) 3037 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_RXPAUSE; 3038 1.44 msaitoh if (hw->fc.current_mode == ixgbe_fc_tx_pause || 3039 1.44 msaitoh hw->fc.current_mode == ixgbe_fc_full) 3040 1.43 msaitoh ifmr->ifm_active |= IFM_ETH_TXPAUSE; 3041 1.1 dyoung 3042 1.1 dyoung return; 3043 1.99 msaitoh } /* ixgbe_media_status */ 3044 1.1 dyoung 3045 1.99 msaitoh /************************************************************************ 3046 1.99 msaitoh * ixgbe_media_change - Media Ioctl callback 3047 1.1 dyoung * 3048 1.99 msaitoh * Called when the user changes speed/duplex using 3049 1.99 msaitoh * media/mediaopt option with ifconfig.
3050 1.99 msaitoh ************************************************************************/ 3051 1.1 dyoung static int 3052 1.98 msaitoh ixgbe_media_change(struct ifnet *ifp) 3053 1.1 dyoung { 3054 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc; 3055 1.333 msaitoh struct ifmedia *ifm = &sc->media; 3056 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 3057 1.43 msaitoh ixgbe_link_speed speed = 0; 3058 1.94 msaitoh ixgbe_link_speed link_caps = 0; 3059 1.94 msaitoh bool negotiate = false; 3060 1.94 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED; 3061 1.1 dyoung 3062 1.1 dyoung INIT_DEBUGOUT("ixgbe_media_change: begin"); 3063 1.1 dyoung 3064 1.1 dyoung if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3065 1.1 dyoung return (EINVAL); 3066 1.1 dyoung 3067 1.44 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane) 3068 1.144 msaitoh return (EPERM); 3069 1.44 msaitoh 3070 1.43 msaitoh /* 3071 1.99 msaitoh * We don't actually need to check against the supported 3072 1.99 msaitoh * media types of the adapter; ifmedia will take care of 3073 1.99 msaitoh * that for us. 3074 1.99 msaitoh */ 3075 1.43 msaitoh switch (IFM_SUBTYPE(ifm->ifm_media)) { 3076 1.98 msaitoh case IFM_AUTO: 3077 1.98 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 3078 1.98 msaitoh &negotiate); 3079 1.98 msaitoh if (err != IXGBE_SUCCESS) { 3080 1.333 msaitoh device_printf(sc->dev, "Unable to determine " 3081 1.98 msaitoh "supported advertise speeds\n"); 3082 1.98 msaitoh return (ENODEV); 3083 1.98 msaitoh } 3084 1.98 msaitoh speed |= link_caps; 3085 1.98 msaitoh break; 3086 1.98 msaitoh case IFM_10G_T: 3087 1.98 msaitoh case IFM_10G_LRM: 3088 1.98 msaitoh case IFM_10G_LR: 3089 1.98 msaitoh case IFM_10G_TWINAX: 3090 1.181 msaitoh case IFM_10G_SR: 3091 1.181 msaitoh case IFM_10G_CX4: 3092 1.98 msaitoh case IFM_10G_KR: 3093 1.98 msaitoh case IFM_10G_KX4: 3094 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL; 3095 1.98 msaitoh break; 3096 1.103 msaitoh case IFM_5000_T: 3097 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL; 3098 1.103 msaitoh break; 3099 1.103 msaitoh case IFM_2500_T: 3100 1.99 msaitoh case IFM_2500_KX: 3101 1.99 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 3102 1.99 msaitoh break; 3103 1.98 msaitoh case IFM_1000_T: 3104 1.98 msaitoh case IFM_1000_LX: 3105 1.98 msaitoh case IFM_1000_SX: 3106 1.98 msaitoh case IFM_1000_KX: 3107 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL; 3108 1.98 msaitoh break; 3109 1.98 msaitoh case IFM_100_TX: 3110 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL; 3111 1.98 msaitoh break; 3112 1.99 msaitoh case IFM_10_T: 3113 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL; 3114 1.99 msaitoh break; 3115 1.140 msaitoh case IFM_NONE: 3116 1.140 msaitoh break; 3117 1.98 msaitoh default: 3118 1.98 msaitoh goto invalid; 3119 1.48 msaitoh } 3120 1.43 msaitoh 3121 1.43 msaitoh hw->mac.autotry_restart = TRUE; 3122 1.43 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE); 3123 1.333 msaitoh sc->advertise = 0; 3124 1.109 msaitoh if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) { 3125 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0) 3126 1.333 msaitoh sc->advertise |= 1 << 2; 3127 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0) 3128 1.333 msaitoh sc->advertise |= 1 << 1; 3129 1.51 msaitoh if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0) 3130 1.333 msaitoh sc->advertise |= 1 << 0; 3131 1.99 msaitoh if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0) 3132 1.333 msaitoh sc->advertise |= 1 << 3; 3133 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0) 3134 1.333 msaitoh sc->advertise 
|= 1 << 4; 3135 1.103 msaitoh if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0) 3136 1.333 msaitoh sc->advertise |= 1 << 5; 3137 1.51 msaitoh } 3138 1.1 dyoung 3139 1.1 dyoung return (0); 3140 1.43 msaitoh 3141 1.43 msaitoh invalid: 3142 1.333 msaitoh device_printf(sc->dev, "Invalid media type!\n"); 3143 1.98 msaitoh 3144 1.43 msaitoh return (EINVAL); 3145 1.99 msaitoh } /* ixgbe_media_change */ 3146 1.1 dyoung 3147 1.99 msaitoh /************************************************************************ 3148 1.320 msaitoh * ixgbe_msix_admin - Link status change ISR (MSI-X) 3149 1.99 msaitoh ************************************************************************/ 3150 1.98 msaitoh static int 3151 1.233 msaitoh ixgbe_msix_admin(void *arg) 3152 1.98 msaitoh { 3153 1.333 msaitoh struct ixgbe_softc *sc = arg; 3154 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 3155 1.277 msaitoh u32 eicr; 3156 1.273 msaitoh u32 eims_orig; 3157 1.273 msaitoh u32 eims_disable = 0; 3158 1.98 msaitoh 3159 1.333 msaitoh IXGBE_EVC_ADD(&sc->admin_irqev, 1); 3160 1.98 msaitoh 3161 1.273 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS); 3162 1.273 msaitoh /* Pause other interrupts */ 3163 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_MSIX_OTHER_CLEAR_MASK); 3164 1.273 msaitoh 3165 1.125 knakahar /* 3166 1.273 msaitoh * First get the cause. 3167 1.273 msaitoh * 3168 1.125 knakahar * The specifications of 82598, 82599, X540 and X550 say EICS register 3169 1.125 knakahar * is write only. However, Linux says it is a workaround for silicon 3170 1.273 msaitoh * errata to read EICS instead of EICR to get interrupt cause. 3171 1.273 msaitoh * At least, reading EICR clears lower 16bits of EIMS on 82598. 3172 1.125 knakahar */ 3173 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 3174 1.98 msaitoh /* Be sure the queue bits are not cleared */ 3175 1.99 msaitoh eicr &= ~IXGBE_EICR_RTX_QUEUE; 3176 1.265 msaitoh /* Clear all OTHER interrupts with write */ 3177 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3178 1.1 dyoung 3179 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable); 3180 1.277 msaitoh 3181 1.277 msaitoh /* Re-enable some OTHER interrupts */ 3182 1.277 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig & ~eims_disable); 3183 1.277 msaitoh 3184 1.277 msaitoh return 1; 3185 1.277 msaitoh } /* ixgbe_msix_admin */ 3186 1.277 msaitoh 3187 1.277 msaitoh static void 3188 1.333 msaitoh ixgbe_intr_admin_common(struct ixgbe_softc *sc, u32 eicr, u32 *eims_disable) 3189 1.277 msaitoh { 3190 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 3191 1.277 msaitoh u32 task_requests = 0; 3192 1.277 msaitoh s32 retval; 3193 1.277 msaitoh 3194 1.266 msaitoh /* Link status change */ 3195 1.266 msaitoh if (eicr & IXGBE_EICR_LSC) { 3196 1.266 msaitoh task_requests |= IXGBE_REQUEST_TASK_LSC; 3197 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC; 3198 1.266 msaitoh } 3199 1.266 msaitoh 3200 1.204 msaitoh if (ixgbe_is_sfp(hw)) { 3201 1.310 msaitoh u32 eicr_mask; 3202 1.310 msaitoh 3203 1.204 msaitoh /* Pluggable optics-related interrupt */ 3204 1.204 msaitoh if (hw->mac.type >= ixgbe_mac_X540) 3205 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 3206 1.204 msaitoh else 3207 1.204 msaitoh eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 3208 1.204 msaitoh 3209 1.204 msaitoh /* 3210 1.204 msaitoh * An interrupt might not arrive when a module is inserted. 
1.204 msaitoh * When a link status change interrupt occurs and the driver 3212 1.204 msaitoh * still regards the SFP as unplugged, issue the module softint 3213 1.204 msaitoh * and then issue the LSC interrupt. 3214 1.204 msaitoh */ 3215 1.204 msaitoh if ((eicr & eicr_mask) 3216 1.204 msaitoh || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present) 3217 1.204 msaitoh && (eicr & IXGBE_EICR_LSC))) { 3218 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MOD; 3219 1.277 msaitoh *eims_disable |= IXGBE_EIMS_LSC; 3220 1.204 msaitoh } 3221 1.204 msaitoh 3222 1.204 msaitoh if ((hw->mac.type == ixgbe_mac_82599EB) && 3223 1.204 msaitoh (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 3224 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MSF; 3225 1.277 msaitoh *eims_disable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw); 3226 1.204 msaitoh } 3227 1.204 msaitoh } 3228 1.204 msaitoh 3229 1.333 msaitoh if (sc->hw.mac.type != ixgbe_mac_82598EB) { 3230 1.311 msaitoh #ifdef IXGBE_FDIR 3231 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_FDIR) && 3232 1.99 msaitoh (eicr & IXGBE_EICR_FLOW_DIR)) { 3233 1.333 msaitoh if (!atomic_cas_uint(&sc->fdir_reinit, 0, 1)) { 3234 1.275 msaitoh task_requests |= IXGBE_REQUEST_TASK_FDIR; 3235 1.275 msaitoh /* Disable the interrupt */ 3236 1.277 msaitoh *eims_disable |= IXGBE_EIMS_FLOW_DIR; 3237 1.275 msaitoh } 3238 1.99 msaitoh } 3239 1.311 msaitoh #endif 3240 1.99 msaitoh 3241 1.99 msaitoh if (eicr & IXGBE_EICR_ECC) { 3242 1.333 msaitoh if (ratecheck(&sc->lasterr_time, 3243 1.312 msaitoh &ixgbe_errlog_intrvl)) 3244 1.333 msaitoh device_printf(sc->dev, 3245 1.312 msaitoh "CRITICAL: ECC ERROR!! Please Reboot!!\n"); 3246 1.98 msaitoh } 3247 1.1 dyoung 3248 1.98 msaitoh /* Check for over temp condition */ 3249 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 3250 1.333 msaitoh switch (sc->hw.mac.type) { 3251 1.99 msaitoh case ixgbe_mac_X550EM_a: 3252 1.99 msaitoh if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 3253 1.99 msaitoh break; 3254 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw); 3255 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP) 3256 1.99 msaitoh break; 3257 1.333 msaitoh if (ratecheck(&sc->lasterr_time, 3258 1.312 msaitoh &ixgbe_errlog_intrvl)) { 3259 1.333 msaitoh device_printf(sc->dev, 3260 1.312 msaitoh "CRITICAL: OVER TEMP!! " 3261 1.312 msaitoh "PHY IS SHUT DOWN!!\n"); 3262 1.333 msaitoh device_printf(sc->dev, 3263 1.312 msaitoh "System shutdown required!\n"); 3264 1.312 msaitoh } 3265 1.99 msaitoh break; 3266 1.99 msaitoh default: 3267 1.99 msaitoh if (!(eicr & IXGBE_EICR_TS)) 3268 1.99 msaitoh break; 3269 1.99 msaitoh retval = hw->phy.ops.check_overtemp(hw); 3270 1.99 msaitoh if (retval != IXGBE_ERR_OVERTEMP) 3271 1.99 msaitoh break; 3272 1.333 msaitoh if (ratecheck(&sc->lasterr_time, 3273 1.312 msaitoh &ixgbe_errlog_intrvl)) { 3274 1.333 msaitoh device_printf(sc->dev, 3275 1.312 msaitoh "CRITICAL: OVER TEMP!!
" 3276 1.312 msaitoh "PHY IS SHUT DOWN!!\n"); 3277 1.333 msaitoh device_printf(sc->dev, 3278 1.312 msaitoh "System shutdown required!\n"); 3279 1.312 msaitoh } 3280 1.99 msaitoh break; 3281 1.99 msaitoh } 3282 1.1 dyoung } 3283 1.99 msaitoh 3284 1.99 msaitoh /* Check for VF message */ 3285 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_SRIOV) && 3286 1.233 msaitoh (eicr & IXGBE_EICR_MAILBOX)) { 3287 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_MBX; 3288 1.277 msaitoh *eims_disable |= IXGBE_EIMS_MAILBOX; 3289 1.233 msaitoh } 3290 1.1 dyoung } 3291 1.1 dyoung 3292 1.98 msaitoh /* Check for fan failure */ 3293 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) 3294 1.333 msaitoh ixgbe_check_fan_failure(sc, eicr, true); 3295 1.1 dyoung 3296 1.98 msaitoh /* External PHY interrupt */ 3297 1.99 msaitoh if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 3298 1.99 msaitoh (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 3299 1.233 msaitoh task_requests |= IXGBE_REQUEST_TASK_PHY; 3300 1.277 msaitoh *eims_disable |= IXGBE_EICR_GPI_SDP0_X540; 3301 1.233 msaitoh } 3302 1.233 msaitoh 3303 1.233 msaitoh if (task_requests != 0) { 3304 1.333 msaitoh mutex_enter(&sc->admin_mtx); 3305 1.333 msaitoh sc->task_requests |= task_requests; 3306 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc); 3307 1.333 msaitoh mutex_exit(&sc->admin_mtx); 3308 1.186 msaitoh } 3309 1.277 msaitoh } 3310 1.1 dyoung 3311 1.124 msaitoh static void 3312 1.333 msaitoh ixgbe_eitr_write(struct ixgbe_softc *sc, uint32_t index, uint32_t itr) 3313 1.124 msaitoh { 3314 1.185 msaitoh 3315 1.333 msaitoh if (sc->hw.mac.type == ixgbe_mac_82598EB) 3316 1.186 msaitoh itr |= itr << 16; 3317 1.186 msaitoh else 3318 1.186 msaitoh itr |= IXGBE_EITR_CNT_WDIS; 3319 1.124 msaitoh 3320 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(index), itr); 3321 1.124 msaitoh } 3322 1.124 msaitoh 3323 1.124 msaitoh 3324 1.99 msaitoh /************************************************************************ 3325 1.99 msaitoh * ixgbe_sysctl_interrupt_rate_handler 3326 1.99 msaitoh ************************************************************************/ 3327 1.98 msaitoh static int 3328 1.98 msaitoh ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) 3329 1.1 dyoung { 3330 1.98 msaitoh struct sysctlnode node = *rnode; 3331 1.99 msaitoh struct ix_queue *que = (struct ix_queue *)node.sysctl_data; 3332 1.339 msaitoh struct ixgbe_softc *sc; 3333 1.98 msaitoh uint32_t reg, usec, rate; 3334 1.98 msaitoh int error; 3335 1.45 msaitoh 3336 1.98 msaitoh if (que == NULL) 3337 1.98 msaitoh return 0; 3338 1.169 msaitoh 3339 1.333 msaitoh sc = que->sc; 3340 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 3341 1.169 msaitoh return (EPERM); 3342 1.169 msaitoh 3343 1.333 msaitoh reg = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(que->msix)); 3344 1.98 msaitoh usec = ((reg & 0x0FF8) >> 3); 3345 1.98 msaitoh if (usec > 0) 3346 1.98 msaitoh rate = 500000 / usec; 3347 1.98 msaitoh else 3348 1.98 msaitoh rate = 0; 3349 1.98 msaitoh node.sysctl_data = &rate; 3350 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 3351 1.98 msaitoh if (error || newp == NULL) 3352 1.98 msaitoh return error; 3353 1.98 msaitoh reg &= ~0xfff; /* default, no limitation */ 3354 1.98 msaitoh if (rate > 0 && rate < 500000) { 3355 1.98 msaitoh if (rate < 1000) 3356 1.98 msaitoh rate = 1000; 3357 1.228 msaitoh reg |= ((4000000 / rate) & 0xff8); 3358 1.124 msaitoh /* 3359 1.124 msaitoh * When RSC is used, ITR interval must be larger than 3360 1.124 msaitoh * RSC_DELAY. Currently, we use 2us for RSC_DELAY. 
3361 1.124 msaitoh * The minimum value is always greater than 2us on 100M 3362 1.124 msaitoh * (and 10M?(not documented)), but it's not on 1G and higher. 3363 1.124 msaitoh */ 3364 1.333 msaitoh if ((sc->link_speed != IXGBE_LINK_SPEED_100_FULL) 3365 1.333 msaitoh && (sc->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 3366 1.333 msaitoh if ((sc->num_queues > 1) 3367 1.124 msaitoh && (reg < IXGBE_MIN_RSC_EITR_10G1G)) 3368 1.124 msaitoh return EINVAL; 3369 1.124 msaitoh } 3370 1.343 msaitoh sc->max_interrupt_rate = rate; 3371 1.124 msaitoh } else 3372 1.343 msaitoh sc->max_interrupt_rate = 0; 3373 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, reg); 3374 1.99 msaitoh 3375 1.99 msaitoh return (0); 3376 1.99 msaitoh } /* ixgbe_sysctl_interrupt_rate_handler */ 3377 1.45 msaitoh 3378 1.98 msaitoh const struct sysctlnode * 3379 1.333 msaitoh ixgbe_sysctl_instance(struct ixgbe_softc *sc) 3380 1.98 msaitoh { 3381 1.98 msaitoh const char *dvname; 3382 1.98 msaitoh struct sysctllog **log; 3383 1.98 msaitoh int rc; 3384 1.98 msaitoh const struct sysctlnode *rnode; 3385 1.1 dyoung 3386 1.333 msaitoh if (sc->sysctltop != NULL) 3387 1.333 msaitoh return sc->sysctltop; 3388 1.1 dyoung 3389 1.333 msaitoh log = &sc->sysctllog; 3390 1.333 msaitoh dvname = device_xname(sc->dev); 3391 1.1 dyoung 3392 1.98 msaitoh if ((rc = sysctl_createv(log, 0, NULL, &rnode, 3393 1.98 msaitoh 0, CTLTYPE_NODE, dvname, 3394 1.98 msaitoh SYSCTL_DESCR("ixgbe information and settings"), 3395 1.98 msaitoh NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 3396 1.98 msaitoh goto err; 3397 1.63 msaitoh 3398 1.98 msaitoh return rnode; 3399 1.98 msaitoh err: 3400 1.333 msaitoh device_printf(sc->dev, 3401 1.207 msaitoh "%s: sysctl_createv failed, rc = %d\n", __func__, rc); 3402 1.98 msaitoh return NULL; 3403 1.63 msaitoh } 3404 1.63 msaitoh 3405 1.99 msaitoh /************************************************************************ 3406 1.99 msaitoh * ixgbe_add_device_sysctls 3407 1.99 msaitoh ************************************************************************/ 3408 1.63 msaitoh static void 3409 1.333 msaitoh ixgbe_add_device_sysctls(struct ixgbe_softc *sc) 3410 1.1 dyoung { 3411 1.333 msaitoh device_t dev = sc->dev; 3412 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 3413 1.98 msaitoh struct sysctllog **log; 3414 1.98 msaitoh const struct sysctlnode *rnode, *cnode; 3415 1.1 dyoung 3416 1.333 msaitoh log = &sc->sysctllog; 3417 1.1 dyoung 3418 1.333 msaitoh if ((rnode = ixgbe_sysctl_instance(sc)) == NULL) { 3419 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl root\n"); 3420 1.98 msaitoh return; 3421 1.98 msaitoh } 3422 1.1 dyoung 3423 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 3424 1.158 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, 3425 1.158 msaitoh "debug", SYSCTL_DESCR("Debug Info"), 3426 1.333 msaitoh ixgbe_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL) 3427 1.280 msaitoh != 0) 3428 1.158 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3429 1.158 msaitoh 3430 1.158 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 3431 1.286 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, 3432 1.286 msaitoh "rx_copy_len", SYSCTL_DESCR("RX Copy Length"), 3433 1.286 msaitoh ixgbe_sysctl_rx_copy_len, 0, 3434 1.333 msaitoh (void *)sc, 0, CTL_CREATE, CTL_EOL) != 0) 3435 1.286 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3436 1.286 msaitoh 3437 1.286 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 3438 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, 3439 1.314 msaitoh "num_tx_desc", SYSCTL_DESCR("Number of TX 
descriptors"), 3440 1.333 msaitoh NULL, 0, &sc->num_tx_desc, 0, CTL_CREATE, CTL_EOL) != 0) 3441 1.314 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3442 1.314 msaitoh 3443 1.314 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 3444 1.314 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, 3445 1.314 msaitoh "num_rx_desc", SYSCTL_DESCR("Number of RX descriptors"), 3446 1.333 msaitoh NULL, 0, &sc->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0) 3447 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3448 1.1 dyoung 3449 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 3450 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "rx_process_limit", 3451 1.313 msaitoh SYSCTL_DESCR("max number of RX packets to process"), 3452 1.333 msaitoh ixgbe_sysctl_rx_process_limit, 0, (void *)sc, 0, CTL_CREATE, 3453 1.313 msaitoh CTL_EOL) != 0) 3454 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3455 1.313 msaitoh 3456 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 3457 1.313 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, "tx_process_limit", 3458 1.313 msaitoh SYSCTL_DESCR("max number of TX packets to process"), 3459 1.333 msaitoh ixgbe_sysctl_tx_process_limit, 0, (void *)sc, 0, CTL_CREATE, 3460 1.313 msaitoh CTL_EOL) != 0) 3461 1.313 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3462 1.313 msaitoh 3463 1.313 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 3464 1.98 msaitoh CTLFLAG_READONLY, CTLTYPE_INT, 3465 1.98 msaitoh "num_queues", SYSCTL_DESCR("Number of queues"), 3466 1.333 msaitoh NULL, 0, &sc->num_queues, 0, CTL_CREATE, CTL_EOL) != 0) 3467 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3468 1.43 msaitoh 3469 1.98 msaitoh /* Sysctls for all devices */ 3470 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3471 1.99 msaitoh CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC), 3472 1.333 msaitoh ixgbe_sysctl_flowcntl, 0, (void *)sc, 0, CTL_CREATE, 3473 1.99 msaitoh CTL_EOL) != 0) 3474 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3475 1.63 msaitoh 3476 1.333 msaitoh sc->enable_aim = ixgbe_enable_aim; 3477 1.343 msaitoh sc->max_interrupt_rate = ixgbe_max_interrupt_rate; 3478 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3479 1.99 msaitoh CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), 3480 1.333 msaitoh NULL, 0, &sc->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) 3481 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3482 1.1 dyoung 3483 1.98 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, 3484 1.98 msaitoh CTLFLAG_READWRITE, CTLTYPE_INT, 3485 1.98 msaitoh "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED), 3486 1.333 msaitoh ixgbe_sysctl_advertise, 0, (void *)sc, 0, CTL_CREATE, 3487 1.99 msaitoh CTL_EOL) != 0) 3488 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3489 1.1 dyoung 3490 1.147 knakahar /* 3491 1.147 knakahar * If each "que->txrx_use_workqueue" is changed in sysctl handler, 3492 1.147 knakahar * it causesflip-flopping softint/workqueue mode in one deferred 3493 1.147 knakahar * processing. Therefore, preempt_disable()/preempt_enable() are 3494 1.147 knakahar * required in ixgbe_sched_handle_que() to avoid 3495 1.147 knakahar * KASSERT(ixgbe_sched_handle_que()) in softint_schedule(). 
3496 1.147 knakahar * I think changing "que->txrx_use_workqueue" in interrupt handler 3497 1.147 knakahar * is lighter than doing preempt_disable()/preempt_enable() in every 3498 1.147 knakahar * ixgbe_sched_handle_que(). 3499 1.147 knakahar */ 3500 1.333 msaitoh sc->txrx_use_workqueue = ixgbe_txrx_workqueue; 3501 1.128 knakahar if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3502 1.280 msaitoh CTLTYPE_BOOL, "txrx_workqueue", 3503 1.280 msaitoh SYSCTL_DESCR("Use workqueue for packet processing"), 3504 1.333 msaitoh NULL, 0, &sc->txrx_use_workqueue, 0, CTL_CREATE, 3505 1.280 msaitoh CTL_EOL) != 0) 3506 1.128 knakahar aprint_error_dev(dev, "could not create sysctl\n"); 3507 1.128 knakahar 3508 1.98 msaitoh #ifdef IXGBE_DEBUG 3509 1.98 msaitoh /* testing sysctls (for all devices) */ 3510 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3511 1.99 msaitoh CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"), 3512 1.333 msaitoh ixgbe_sysctl_power_state, 0, (void *)sc, 0, CTL_CREATE, 3513 1.99 msaitoh CTL_EOL) != 0) 3514 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3515 1.45 msaitoh 3516 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY, 3517 1.99 msaitoh CTLTYPE_STRING, "print_rss_config", 3518 1.99 msaitoh SYSCTL_DESCR("Prints RSS Configuration"), 3519 1.333 msaitoh ixgbe_sysctl_print_rss_config, 0, (void *)sc, 0, CTL_CREATE, 3520 1.99 msaitoh CTL_EOL) != 0) 3521 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3522 1.98 msaitoh #endif 3523 1.98 msaitoh /* for X550 series devices */ 3524 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550) 3525 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3526 1.99 msaitoh CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"), 3527 1.333 msaitoh ixgbe_sysctl_dmac, 0, (void *)sc, 0, CTL_CREATE, 3528 1.99 msaitoh CTL_EOL) != 0) 3529 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3530 1.1 dyoung 3531 1.98 msaitoh /* for WoL-capable devices */ 3532 1.333 msaitoh if (sc->wol_support) { 3533 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3534 1.99 msaitoh CTLTYPE_BOOL, "wol_enable", 3535 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake on LAN"), 3536 1.333 msaitoh ixgbe_sysctl_wol_enable, 0, (void *)sc, 0, CTL_CREATE, 3537 1.99 msaitoh CTL_EOL) != 0) 3538 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3539 1.1 dyoung 3540 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3541 1.99 msaitoh CTLTYPE_INT, "wufc", 3542 1.99 msaitoh SYSCTL_DESCR("Enable/Disable Wake Up Filters"), 3543 1.333 msaitoh ixgbe_sysctl_wufc, 0, (void *)sc, 0, CTL_CREATE, 3544 1.99 msaitoh CTL_EOL) != 0) 3545 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3546 1.98 msaitoh } 3547 1.1 dyoung 3548 1.98 msaitoh /* for X552/X557-AT devices */ 3549 1.325 msaitoh if ((hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) || 3550 1.325 msaitoh (hw->device_id == IXGBE_DEV_ID_X550EM_A_10G_T)) { 3551 1.98 msaitoh const struct sysctlnode *phy_node; 3552 1.1 dyoung 3553 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE, 3554 1.98 msaitoh "phy", SYSCTL_DESCR("External PHY sysctls"), 3555 1.98 msaitoh NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) { 3556 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3557 1.98 msaitoh return; 3558 1.98 msaitoh } 3559 1.1 dyoung 3560 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY, 3561 1.99 
msaitoh CTLTYPE_INT, "temp", 3562 1.99 msaitoh SYSCTL_DESCR("Current External PHY Temperature (Celsius)"), 3563 1.333 msaitoh ixgbe_sysctl_phy_temp, 0, (void *)sc, 0, CTL_CREATE, 3564 1.99 msaitoh CTL_EOL) != 0) 3565 1.99 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3566 1.99 msaitoh 3567 1.99 msaitoh if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY, 3568 1.99 msaitoh CTLTYPE_INT, "overtemp_occurred", 3569 1.280 msaitoh SYSCTL_DESCR( 3570 1.280 msaitoh "External PHY High Temperature Event Occurred"), 3571 1.333 msaitoh ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)sc, 0, 3572 1.99 msaitoh CTL_CREATE, CTL_EOL) != 0) 3573 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3574 1.99 msaitoh } 3575 1.33 msaitoh 3576 1.163 msaitoh if ((hw->mac.type == ixgbe_mac_X550EM_a) 3577 1.163 msaitoh && (hw->phy.type == ixgbe_phy_fw)) 3578 1.163 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3579 1.163 msaitoh CTLTYPE_BOOL, "force_10_100_autonego", 3580 1.163 msaitoh SYSCTL_DESCR("Force autonego on 10M and 100M"), 3581 1.163 msaitoh NULL, 0, &hw->phy.force_10_100_autonego, 0, 3582 1.163 msaitoh CTL_CREATE, CTL_EOL) != 0) 3583 1.163 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3584 1.163 msaitoh 3585 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE) { 3586 1.99 msaitoh if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3587 1.99 msaitoh CTLTYPE_INT, "eee_state", 3588 1.99 msaitoh SYSCTL_DESCR("EEE Power Save State"), 3589 1.333 msaitoh ixgbe_sysctl_eee_state, 0, (void *)sc, 0, CTL_CREATE, 3590 1.99 msaitoh CTL_EOL) != 0) 3591 1.98 msaitoh aprint_error_dev(dev, "could not create sysctl\n"); 3592 1.98 msaitoh } 3593 1.99 msaitoh } /* ixgbe_add_device_sysctls */ 3594 1.1 dyoung 3595 1.99 msaitoh /************************************************************************ 3596 1.99 msaitoh * ixgbe_allocate_pci_resources 3597 1.99 msaitoh ************************************************************************/ 3598 1.98 msaitoh static int 3599 1.333 msaitoh ixgbe_allocate_pci_resources(struct ixgbe_softc *sc, 3600 1.98 msaitoh const struct pci_attach_args *pa) 3601 1.1 dyoung { 3602 1.346 msaitoh pcireg_t memtype, csr; 3603 1.333 msaitoh device_t dev = sc->dev; 3604 1.98 msaitoh bus_addr_t addr; 3605 1.98 msaitoh int flags; 3606 1.1 dyoung 3607 1.98 msaitoh memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); 3608 1.98 msaitoh switch (memtype) { 3609 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3610 1.98 msaitoh case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3611 1.333 msaitoh sc->osdep.mem_bus_space_tag = pa->pa_memt; 3612 1.98 msaitoh if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0), 3613 1.333 msaitoh memtype, &addr, &sc->osdep.mem_size, &flags) != 0) 3614 1.98 msaitoh goto map_err; 3615 1.98 msaitoh if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) { 3616 1.98 msaitoh aprint_normal_dev(dev, "clearing prefetchable bit\n"); 3617 1.98 msaitoh flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3618 1.98 msaitoh } 3619 1.333 msaitoh if (bus_space_map(sc->osdep.mem_bus_space_tag, addr, 3620 1.333 msaitoh sc->osdep.mem_size, flags, 3621 1.333 msaitoh &sc->osdep.mem_bus_space_handle) != 0) { 3622 1.98 msaitoh map_err: 3623 1.333 msaitoh sc->osdep.mem_size = 0; 3624 1.98 msaitoh aprint_error_dev(dev, "unable to map BAR0\n"); 3625 1.98 msaitoh return ENXIO; 3626 1.98 msaitoh } 3627 1.171 msaitoh /* 3628 1.171 msaitoh * Enable address decoding for memory range in case BIOS or 3629 1.171 msaitoh * 
UEFI don't set it. 3630 1.171 msaitoh */ 3631 1.171 msaitoh csr = pci_conf_read(pa->pa_pc, pa->pa_tag, 3632 1.171 msaitoh PCI_COMMAND_STATUS_REG); 3633 1.171 msaitoh csr |= PCI_COMMAND_MEM_ENABLE; 3634 1.171 msaitoh pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 3635 1.171 msaitoh csr); 3636 1.98 msaitoh break; 3637 1.98 msaitoh default: 3638 1.98 msaitoh aprint_error_dev(dev, "unexpected type on BAR0\n"); 3639 1.98 msaitoh return ENXIO; 3640 1.98 msaitoh } 3641 1.1 dyoung 3642 1.98 msaitoh return (0); 3643 1.99 msaitoh } /* ixgbe_allocate_pci_resources */ 3644 1.1 dyoung 3645 1.119 msaitoh static void 3646 1.333 msaitoh ixgbe_free_deferred_handlers(struct ixgbe_softc *sc) 3647 1.119 msaitoh { 3648 1.333 msaitoh struct ix_queue *que = sc->queues; 3649 1.333 msaitoh struct tx_ring *txr = sc->tx_rings; 3650 1.119 msaitoh int i; 3651 1.119 msaitoh 3652 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++, txr++) { 3653 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 3654 1.119 msaitoh if (txr->txr_si != NULL) 3655 1.119 msaitoh softint_disestablish(txr->txr_si); 3656 1.119 msaitoh } 3657 1.119 msaitoh if (que->que_si != NULL) 3658 1.119 msaitoh softint_disestablish(que->que_si); 3659 1.119 msaitoh } 3660 1.333 msaitoh if (sc->txr_wq != NULL) 3661 1.333 msaitoh workqueue_destroy(sc->txr_wq); 3662 1.333 msaitoh if (sc->txr_wq_enqueued != NULL) 3663 1.333 msaitoh percpu_free(sc->txr_wq_enqueued, sizeof(u_int)); 3664 1.333 msaitoh if (sc->que_wq != NULL) 3665 1.333 msaitoh workqueue_destroy(sc->que_wq); 3666 1.333 msaitoh 3667 1.333 msaitoh if (sc->admin_wq != NULL) { 3668 1.333 msaitoh workqueue_destroy(sc->admin_wq); 3669 1.333 msaitoh sc->admin_wq = NULL; 3670 1.333 msaitoh } 3671 1.333 msaitoh if (sc->timer_wq != NULL) { 3672 1.333 msaitoh workqueue_destroy(sc->timer_wq); 3673 1.333 msaitoh sc->timer_wq = NULL; 3674 1.233 msaitoh } 3675 1.333 msaitoh if (sc->recovery_mode_timer_wq != NULL) { 3676 1.236 msaitoh /* 3677 1.236 msaitoh * ixgbe_ifstop() doesn't call the workqueue_wait() for 3678 1.236 msaitoh * the recovery_mode_timer workqueue, so call it here. 3679 1.236 msaitoh */ 3680 1.333 msaitoh workqueue_wait(sc->recovery_mode_timer_wq, 3681 1.333 msaitoh &sc->recovery_mode_timer_wc); 3682 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0); 3683 1.333 msaitoh workqueue_destroy(sc->recovery_mode_timer_wq); 3684 1.333 msaitoh sc->recovery_mode_timer_wq = NULL; 3685 1.119 msaitoh } 3686 1.257 msaitoh } /* ixgbe_free_deferred_handlers */ 3687 1.119 msaitoh 3688 1.99 msaitoh /************************************************************************ 3689 1.99 msaitoh * ixgbe_detach - Device removal routine 3690 1.1 dyoung * 3691 1.99 msaitoh * Called when the driver is being removed. 3692 1.99 msaitoh * Stops the adapter and deallocates all the resources 3693 1.99 msaitoh * that were allocated for driver operation. 
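 *   Teardown is ordered: the interface is first stopped through
 *   ixgbe_setup_low_power_mode(), the callouts are halted and any
 *   pending deferred work is drained, and only then are the event
 *   counters, queues and PCI resources released.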
3694 1.1 dyoung * 3695 1.99 msaitoh * return 0 on success, positive on failure 3696 1.99 msaitoh ************************************************************************/ 3697 1.98 msaitoh static int 3698 1.98 msaitoh ixgbe_detach(device_t dev, int flags) 3699 1.1 dyoung { 3700 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev); 3701 1.333 msaitoh struct rx_ring *rxr = sc->rx_rings; 3702 1.333 msaitoh struct tx_ring *txr = sc->tx_rings; 3703 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 3704 1.333 msaitoh struct ixgbe_hw_stats *stats = &sc->stats.pf; 3705 1.98 msaitoh u32 ctrl_ext; 3706 1.168 msaitoh int i; 3707 1.28 msaitoh 3708 1.98 msaitoh INIT_DEBUGOUT("ixgbe_detach: begin"); 3709 1.333 msaitoh if (sc->osdep.attached == false) 3710 1.98 msaitoh return 0; 3711 1.26 msaitoh 3712 1.99 msaitoh if (ixgbe_pci_iov_detach(dev) != 0) { 3713 1.99 msaitoh device_printf(dev, "SR-IOV in use; detach first.\n"); 3714 1.99 msaitoh return (EBUSY); 3715 1.99 msaitoh } 3716 1.99 msaitoh 3717 1.333 msaitoh if (VLAN_ATTACHED(&sc->osdep.ec) && 3718 1.293 yamaguch (flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == 0) { 3719 1.99 msaitoh aprint_error_dev(dev, "VLANs in use, detach first\n"); 3720 1.99 msaitoh return (EBUSY); 3721 1.26 msaitoh } 3722 1.293 yamaguch 3723 1.333 msaitoh ether_ifdetach(sc->ifp); 3724 1.24 msaitoh 3725 1.333 msaitoh sc->osdep.detaching = true; 3726 1.241 msaitoh /* 3727 1.252 msaitoh * Stop the interface. ixgbe_setup_low_power_mode() calls 3728 1.253 msaitoh * ixgbe_ifstop(), so it's not required to call ixgbe_ifstop() 3729 1.252 msaitoh * directly. 3730 1.241 msaitoh */ 3731 1.333 msaitoh ixgbe_setup_low_power_mode(sc); 3732 1.241 msaitoh 3733 1.333 msaitoh callout_halt(&sc->timer, NULL); 3734 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 3735 1.333 msaitoh callout_halt(&sc->recovery_mode_timer, NULL); 3736 1.333 msaitoh 3737 1.333 msaitoh workqueue_wait(sc->admin_wq, &sc->admin_wc); 3738 1.333 msaitoh atomic_store_relaxed(&sc->admin_pending, 0); 3739 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc); 3740 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0); 3741 1.241 msaitoh 3742 1.98 msaitoh pmf_device_deregister(dev); 3743 1.26 msaitoh 3744 1.333 msaitoh ixgbe_free_deferred_handlers(sc); 3745 1.185 msaitoh 3746 1.98 msaitoh /* let hardware know driver is unloading */ 3747 1.333 msaitoh ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT); 3748 1.98 msaitoh ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 3749 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext); 3750 1.24 msaitoh 3751 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_NETMAP) 3752 1.333 msaitoh netmap_detach(sc->ifp); 3753 1.99 msaitoh 3754 1.333 msaitoh ixgbe_free_pci_resources(sc); 3755 1.98 msaitoh #if 0 /* XXX the NetBSD port is probably missing something here */ 3756 1.98 msaitoh bus_generic_detach(dev); 3757 1.98 msaitoh #endif 3758 1.333 msaitoh if_detach(sc->ifp); 3759 1.333 msaitoh ifmedia_fini(&sc->media); 3760 1.333 msaitoh if_percpuq_destroy(sc->ipq); 3761 1.333 msaitoh 3762 1.333 msaitoh sysctl_teardown(&sc->sysctllog); 3763 1.333 msaitoh evcnt_detach(&sc->efbig_tx_dma_setup); 3764 1.333 msaitoh evcnt_detach(&sc->mbuf_defrag_failed); 3765 1.333 msaitoh evcnt_detach(&sc->efbig2_tx_dma_setup); 3766 1.333 msaitoh evcnt_detach(&sc->einval_tx_dma_setup); 3767 1.333 msaitoh evcnt_detach(&sc->other_tx_dma_setup); 3768 1.333 msaitoh evcnt_detach(&sc->eagain_tx_dma_setup); 3769 1.333 msaitoh evcnt_detach(&sc->enomem_tx_dma_setup); 3770 1.333 msaitoh 
evcnt_detach(&sc->watchdog_events); 3771 1.333 msaitoh evcnt_detach(&sc->tso_err); 3772 1.333 msaitoh evcnt_detach(&sc->admin_irqev); 3773 1.333 msaitoh evcnt_detach(&sc->link_workev); 3774 1.333 msaitoh evcnt_detach(&sc->mod_workev); 3775 1.333 msaitoh evcnt_detach(&sc->msf_workev); 3776 1.333 msaitoh evcnt_detach(&sc->phy_workev); 3777 1.1 dyoung 3778 1.175 msaitoh for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 3779 1.98 msaitoh if (i < __arraycount(stats->mpc)) { 3780 1.98 msaitoh evcnt_detach(&stats->mpc[i]); 3781 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) 3782 1.98 msaitoh evcnt_detach(&stats->rnbc[i]); 3783 1.98 msaitoh } 3784 1.98 msaitoh if (i < __arraycount(stats->pxontxc)) { 3785 1.98 msaitoh evcnt_detach(&stats->pxontxc[i]); 3786 1.98 msaitoh evcnt_detach(&stats->pxonrxc[i]); 3787 1.98 msaitoh evcnt_detach(&stats->pxofftxc[i]); 3788 1.98 msaitoh evcnt_detach(&stats->pxoffrxc[i]); 3789 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) 3790 1.151 msaitoh evcnt_detach(&stats->pxon2offc[i]); 3791 1.98 msaitoh } 3792 1.168 msaitoh } 3793 1.168 msaitoh 3794 1.333 msaitoh txr = sc->tx_rings; 3795 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, rxr++, txr++) { 3796 1.333 msaitoh evcnt_detach(&sc->queues[i].irqs); 3797 1.333 msaitoh evcnt_detach(&sc->queues[i].handleq); 3798 1.333 msaitoh evcnt_detach(&sc->queues[i].req); 3799 1.168 msaitoh evcnt_detach(&txr->total_packets); 3800 1.168 msaitoh #ifndef IXGBE_LEGACY_TX 3801 1.168 msaitoh evcnt_detach(&txr->pcq_drops); 3802 1.168 msaitoh #endif 3803 1.327 msaitoh evcnt_detach(&txr->no_desc_avail); 3804 1.327 msaitoh evcnt_detach(&txr->tso_tx); 3805 1.168 msaitoh 3806 1.98 msaitoh if (i < __arraycount(stats->qprc)) { 3807 1.98 msaitoh evcnt_detach(&stats->qprc[i]); 3808 1.98 msaitoh evcnt_detach(&stats->qptc[i]); 3809 1.98 msaitoh evcnt_detach(&stats->qbrc[i]); 3810 1.98 msaitoh evcnt_detach(&stats->qbtc[i]); 3811 1.151 msaitoh if (hw->mac.type >= ixgbe_mac_82599EB) 3812 1.151 msaitoh evcnt_detach(&stats->qprdc[i]); 3813 1.34 msaitoh } 3814 1.98 msaitoh 3815 1.98 msaitoh evcnt_detach(&rxr->rx_packets); 3816 1.98 msaitoh evcnt_detach(&rxr->rx_bytes); 3817 1.98 msaitoh evcnt_detach(&rxr->rx_copies); 3818 1.290 msaitoh evcnt_detach(&rxr->no_mbuf); 3819 1.98 msaitoh evcnt_detach(&rxr->rx_discarded); 3820 1.1 dyoung } 3821 1.98 msaitoh evcnt_detach(&stats->ipcs); 3822 1.98 msaitoh evcnt_detach(&stats->l4cs); 3823 1.98 msaitoh evcnt_detach(&stats->ipcs_bad); 3824 1.98 msaitoh evcnt_detach(&stats->l4cs_bad); 3825 1.98 msaitoh evcnt_detach(&stats->intzero); 3826 1.98 msaitoh evcnt_detach(&stats->legint); 3827 1.98 msaitoh evcnt_detach(&stats->crcerrs); 3828 1.98 msaitoh evcnt_detach(&stats->illerrc); 3829 1.98 msaitoh evcnt_detach(&stats->errbc); 3830 1.98 msaitoh evcnt_detach(&stats->mspdc); 3831 1.98 msaitoh if (hw->mac.type >= ixgbe_mac_X550) 3832 1.98 msaitoh evcnt_detach(&stats->mbsdc); 3833 1.98 msaitoh evcnt_detach(&stats->mpctotal); 3834 1.98 msaitoh evcnt_detach(&stats->mlfc); 3835 1.98 msaitoh evcnt_detach(&stats->mrfc); 3836 1.326 msaitoh if (hw->mac.type == ixgbe_mac_X550EM_a) 3837 1.326 msaitoh evcnt_detach(&stats->link_dn_cnt); 3838 1.98 msaitoh evcnt_detach(&stats->rlec); 3839 1.98 msaitoh evcnt_detach(&stats->lxontxc); 3840 1.98 msaitoh evcnt_detach(&stats->lxonrxc); 3841 1.98 msaitoh evcnt_detach(&stats->lxofftxc); 3842 1.98 msaitoh evcnt_detach(&stats->lxoffrxc); 3843 1.98 msaitoh 3844 1.98 msaitoh /* Packet Reception Stats */ 3845 1.98 msaitoh evcnt_detach(&stats->tor); 3846 1.98 msaitoh evcnt_detach(&stats->gorc); 
3847 1.98 msaitoh evcnt_detach(&stats->tpr); 3848 1.98 msaitoh evcnt_detach(&stats->gprc); 3849 1.98 msaitoh evcnt_detach(&stats->mprc); 3850 1.98 msaitoh evcnt_detach(&stats->bprc); 3851 1.98 msaitoh evcnt_detach(&stats->prc64); 3852 1.98 msaitoh evcnt_detach(&stats->prc127); 3853 1.98 msaitoh evcnt_detach(&stats->prc255); 3854 1.98 msaitoh evcnt_detach(&stats->prc511); 3855 1.98 msaitoh evcnt_detach(&stats->prc1023); 3856 1.98 msaitoh evcnt_detach(&stats->prc1522); 3857 1.98 msaitoh evcnt_detach(&stats->ruc); 3858 1.98 msaitoh evcnt_detach(&stats->rfc); 3859 1.98 msaitoh evcnt_detach(&stats->roc); 3860 1.98 msaitoh evcnt_detach(&stats->rjc); 3861 1.98 msaitoh evcnt_detach(&stats->mngprc); 3862 1.98 msaitoh evcnt_detach(&stats->mngpdc); 3863 1.98 msaitoh evcnt_detach(&stats->xec); 3864 1.1 dyoung 3865 1.98 msaitoh /* Packet Transmission Stats */ 3866 1.98 msaitoh evcnt_detach(&stats->gotc); 3867 1.98 msaitoh evcnt_detach(&stats->tpt); 3868 1.98 msaitoh evcnt_detach(&stats->gptc); 3869 1.98 msaitoh evcnt_detach(&stats->bptc); 3870 1.98 msaitoh evcnt_detach(&stats->mptc); 3871 1.98 msaitoh evcnt_detach(&stats->mngptc); 3872 1.98 msaitoh evcnt_detach(&stats->ptc64); 3873 1.98 msaitoh evcnt_detach(&stats->ptc127); 3874 1.98 msaitoh evcnt_detach(&stats->ptc255); 3875 1.98 msaitoh evcnt_detach(&stats->ptc511); 3876 1.98 msaitoh evcnt_detach(&stats->ptc1023); 3877 1.98 msaitoh evcnt_detach(&stats->ptc1522); 3878 1.1 dyoung 3879 1.333 msaitoh ixgbe_free_queues(sc); 3880 1.333 msaitoh free(sc->mta, M_DEVBUF); 3881 1.1 dyoung 3882 1.333 msaitoh mutex_destroy(&sc->admin_mtx); /* XXX appropriate order? */ 3883 1.333 msaitoh IXGBE_CORE_LOCK_DESTROY(sc); 3884 1.1 dyoung 3885 1.1 dyoung return (0); 3886 1.99 msaitoh } /* ixgbe_detach */ 3887 1.1 dyoung 3888 1.99 msaitoh /************************************************************************ 3889 1.99 msaitoh * ixgbe_setup_low_power_mode - LPLU/WoL preparation 3890 1.99 msaitoh * 3891 1.99 msaitoh * Prepare the adapter/port for LPLU and/or WoL 3892 1.99 msaitoh ************************************************************************/ 3893 1.1 dyoung static int 3894 1.333 msaitoh ixgbe_setup_low_power_mode(struct ixgbe_softc *sc) 3895 1.1 dyoung { 3896 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 3897 1.333 msaitoh device_t dev = sc->dev; 3898 1.333 msaitoh struct ifnet *ifp = sc->ifp; 3899 1.186 msaitoh s32 error = 0; 3900 1.98 msaitoh 3901 1.98 msaitoh /* Limit power management flow to X550EM baseT */ 3902 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 3903 1.99 msaitoh hw->phy.ops.enter_lplu) { 3904 1.98 msaitoh /* X550EM baseT adapters need a special LPLU flow */ 3905 1.98 msaitoh hw->phy.reset_disable = true; 3906 1.253 msaitoh ixgbe_ifstop(ifp, 1); 3907 1.98 msaitoh error = hw->phy.ops.enter_lplu(hw); 3908 1.98 msaitoh if (error) 3909 1.98 msaitoh device_printf(dev, 3910 1.98 msaitoh "Error entering LPLU: %d\n", error); 3911 1.98 msaitoh hw->phy.reset_disable = false; 3912 1.98 msaitoh } else { 3913 1.98 msaitoh /* Just stop for other adapters */ 3914 1.253 msaitoh ixgbe_ifstop(ifp, 1); 3915 1.33 msaitoh } 3916 1.1 dyoung 3917 1.333 msaitoh IXGBE_CORE_LOCK(sc); 3918 1.253 msaitoh 3919 1.98 msaitoh if (!hw->wol_enabled) { 3920 1.98 msaitoh ixgbe_set_phy_power(hw, FALSE); 3921 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3922 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 3923 1.98 msaitoh } else { 3924 1.98 msaitoh /* Turn off support for APM wakeup. 
(Using ACPI instead) */ 3925 1.166 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw), 3926 1.166 msaitoh IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2); 3927 1.34 msaitoh 3928 1.35 msaitoh /* 3929 1.98 msaitoh * Clear Wake Up Status register to prevent any previous wakeup 3930 1.98 msaitoh * events from waking us up immediately after we suspend. 3931 1.33 msaitoh */ 3932 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3933 1.98 msaitoh 3934 1.1 dyoung /* 3935 1.98 msaitoh * Program the Wakeup Filter Control register with user filter 3936 1.98 msaitoh * settings 3937 1.33 msaitoh */ 3938 1.333 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, sc->wufc); 3939 1.98 msaitoh 3940 1.98 msaitoh /* Enable wakeups and power management in Wakeup Control */ 3941 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUC, 3942 1.98 msaitoh IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 3943 1.1 dyoung } 3944 1.1 dyoung 3945 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 3946 1.253 msaitoh 3947 1.98 msaitoh return error; 3948 1.99 msaitoh } /* ixgbe_setup_low_power_mode */ 3949 1.98 msaitoh 3950 1.99 msaitoh /************************************************************************ 3951 1.99 msaitoh * ixgbe_shutdown - Shutdown entry point 3952 1.99 msaitoh ************************************************************************/ 3953 1.98 msaitoh #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ 3954 1.98 msaitoh static int 3955 1.98 msaitoh ixgbe_shutdown(device_t dev) 3956 1.98 msaitoh { 3957 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev); 3958 1.98 msaitoh int error = 0; 3959 1.34 msaitoh 3960 1.98 msaitoh INIT_DEBUGOUT("ixgbe_shutdown: begin"); 3961 1.34 msaitoh 3962 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc); 3963 1.1 dyoung 3964 1.98 msaitoh return (error); 3965 1.99 msaitoh } /* ixgbe_shutdown */ 3966 1.98 msaitoh #endif 3967 1.1 dyoung 3968 1.99 msaitoh /************************************************************************ 3969 1.99 msaitoh * ixgbe_suspend 3970 1.99 msaitoh * 3971 1.99 msaitoh * From D0 to D3 3972 1.99 msaitoh ************************************************************************/ 3973 1.98 msaitoh static bool 3974 1.98 msaitoh ixgbe_suspend(device_t dev, const pmf_qual_t *qual) 3975 1.1 dyoung { 3976 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev); 3977 1.186 msaitoh int error = 0; 3978 1.98 msaitoh 3979 1.98 msaitoh INIT_DEBUGOUT("ixgbe_suspend: begin"); 3980 1.98 msaitoh 3981 1.333 msaitoh error = ixgbe_setup_low_power_mode(sc); 3982 1.1 dyoung 3983 1.98 msaitoh return (error); 3984 1.99 msaitoh } /* ixgbe_suspend */ 3985 1.1 dyoung 3986 1.99 msaitoh /************************************************************************ 3987 1.99 msaitoh * ixgbe_resume 3988 1.99 msaitoh * 3989 1.99 msaitoh * From D3 to D0 3990 1.99 msaitoh ************************************************************************/ 3991 1.98 msaitoh static bool 3992 1.98 msaitoh ixgbe_resume(device_t dev, const pmf_qual_t *qual) 3993 1.98 msaitoh { 3994 1.333 msaitoh struct ixgbe_softc *sc = device_private(dev); 3995 1.333 msaitoh struct ifnet *ifp = sc->ifp; 3996 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 3997 1.186 msaitoh u32 wus; 3998 1.1 dyoung 3999 1.98 msaitoh INIT_DEBUGOUT("ixgbe_resume: begin"); 4000 1.33 msaitoh 4001 1.333 msaitoh IXGBE_CORE_LOCK(sc); 4002 1.43 msaitoh 4003 1.98 msaitoh /* Read & clear WUS register */ 4004 1.98 msaitoh wus = IXGBE_READ_REG(hw, IXGBE_WUS); 4005 1.98 msaitoh if (wus) 4006 1.98 msaitoh device_printf(dev, "Woken up by (WUS): %#010x\n", 
4007 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_WUS)); 4008 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 4009 1.98 msaitoh /* And clear WUFC until next low-power transition */ 4010 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 4011 1.1 dyoung 4012 1.1 dyoung /* 4013 1.98 msaitoh * Required after D3->D0 transition; 4014 1.98 msaitoh * will re-advertise all previous advertised speeds 4015 1.98 msaitoh */ 4016 1.98 msaitoh if (ifp->if_flags & IFF_UP) 4017 1.333 msaitoh ixgbe_init_locked(sc); 4018 1.34 msaitoh 4019 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 4020 1.1 dyoung 4021 1.98 msaitoh return true; 4022 1.99 msaitoh } /* ixgbe_resume */ 4023 1.1 dyoung 4024 1.98 msaitoh /* 4025 1.98 msaitoh * Set the various hardware offload abilities. 4026 1.98 msaitoh * 4027 1.98 msaitoh * This takes the ifnet's if_capenable flags (e.g. set by the user using 4028 1.98 msaitoh * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what 4029 1.98 msaitoh * mbuf offload flags the driver will understand. 4030 1.98 msaitoh */ 4031 1.1 dyoung static void 4032 1.333 msaitoh ixgbe_set_if_hwassist(struct ixgbe_softc *sc) 4033 1.1 dyoung { 4034 1.98 msaitoh /* XXX */ 4035 1.1 dyoung } 4036 1.1 dyoung 4037 1.99 msaitoh /************************************************************************ 4038 1.99 msaitoh * ixgbe_init_locked - Init entry point 4039 1.99 msaitoh * 4040 1.99 msaitoh * Used in two ways: It is used by the stack as an init 4041 1.99 msaitoh * entry point in network interface structure. It is also 4042 1.99 msaitoh * used by the driver as a hw/sw initialization routine to 4043 1.99 msaitoh * get to a consistent state. 4044 1.1 dyoung * 4045 1.99 msaitoh * return 0 on success, positive on failure 4046 1.99 msaitoh ************************************************************************/ 4047 1.98 msaitoh static void 4048 1.333 msaitoh ixgbe_init_locked(struct ixgbe_softc *sc) 4049 1.1 dyoung { 4050 1.333 msaitoh struct ifnet *ifp = sc->ifp; 4051 1.333 msaitoh device_t dev = sc->dev; 4052 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4053 1.157 msaitoh struct ix_queue *que; 4054 1.186 msaitoh struct tx_ring *txr; 4055 1.186 msaitoh struct rx_ring *rxr; 4056 1.98 msaitoh u32 txdctl, mhadd; 4057 1.98 msaitoh u32 rxdctl, rxctrl; 4058 1.186 msaitoh u32 ctrl_ext; 4059 1.219 msaitoh bool unsupported_sfp = false; 4060 1.283 msaitoh int i, j, error; 4061 1.1 dyoung 4062 1.98 msaitoh /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */ 4063 1.1 dyoung 4064 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx)); 4065 1.98 msaitoh INIT_DEBUGOUT("ixgbe_init_locked: begin"); 4066 1.1 dyoung 4067 1.219 msaitoh hw->need_unsupported_sfp_recovery = false; 4068 1.98 msaitoh hw->adapter_stopped = FALSE; 4069 1.98 msaitoh ixgbe_stop_adapter(hw); 4070 1.333 msaitoh callout_stop(&sc->timer); 4071 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 4072 1.333 msaitoh callout_stop(&sc->recovery_mode_timer); 4073 1.333 msaitoh for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) 4074 1.157 msaitoh que->disabled_count = 0; 4075 1.1 dyoung 4076 1.98 msaitoh /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */ 4077 1.333 msaitoh sc->max_frame_size = 4078 1.98 msaitoh ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 4079 1.1 dyoung 4080 1.98 msaitoh /* Queue indices may change with IOV mode */ 4081 1.333 msaitoh ixgbe_align_all_queue_indices(sc); 4082 1.99 msaitoh 4083 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. 
*/ 4084 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, IXGBE_RAH_AV); 4085 1.1 dyoung 4086 1.98 msaitoh /* Get the latest mac address, User can use a LAA */ 4087 1.98 msaitoh memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), 4088 1.98 msaitoh IXGBE_ETH_LENGTH_OF_ADDRESS); 4089 1.333 msaitoh ixgbe_set_rar(hw, 0, hw->mac.addr, sc->pool, 1); 4090 1.98 msaitoh hw->addr_ctrl.rar_used_count = 1; 4091 1.1 dyoung 4092 1.98 msaitoh /* Set hardware offload abilities from ifnet flags */ 4093 1.333 msaitoh ixgbe_set_if_hwassist(sc); 4094 1.48 msaitoh 4095 1.98 msaitoh /* Prepare transmit descriptors and buffers */ 4096 1.333 msaitoh if (ixgbe_setup_transmit_structures(sc)) { 4097 1.98 msaitoh device_printf(dev, "Could not setup transmit structures\n"); 4098 1.333 msaitoh ixgbe_stop_locked(sc); 4099 1.98 msaitoh return; 4100 1.98 msaitoh } 4101 1.1 dyoung 4102 1.98 msaitoh ixgbe_init_hw(hw); 4103 1.144 msaitoh 4104 1.333 msaitoh ixgbe_initialize_iov(sc); 4105 1.144 msaitoh 4106 1.333 msaitoh ixgbe_initialize_transmit_units(sc); 4107 1.1 dyoung 4108 1.98 msaitoh /* Setup Multicast table */ 4109 1.333 msaitoh ixgbe_set_rxfilter(sc); 4110 1.43 msaitoh 4111 1.289 msaitoh /* Use fixed buffer size, even for jumbo frames */ 4112 1.333 msaitoh sc->rx_mbuf_sz = MCLBYTES; 4113 1.43 msaitoh 4114 1.98 msaitoh /* Prepare receive descriptors and buffers */ 4115 1.333 msaitoh error = ixgbe_setup_receive_structures(sc); 4116 1.283 msaitoh if (error) { 4117 1.283 msaitoh device_printf(dev, 4118 1.283 msaitoh "Could not setup receive structures (err = %d)\n", error); 4119 1.333 msaitoh ixgbe_stop_locked(sc); 4120 1.98 msaitoh return; 4121 1.98 msaitoh } 4122 1.43 msaitoh 4123 1.98 msaitoh /* Configure RX settings */ 4124 1.333 msaitoh ixgbe_initialize_receive_units(sc); 4125 1.43 msaitoh 4126 1.233 msaitoh /* Initialize variable holding task enqueue requests interrupts */ 4127 1.333 msaitoh sc->task_requests = 0; 4128 1.233 msaitoh 4129 1.99 msaitoh /* Enable SDP & MSI-X interrupts based on adapter */ 4130 1.333 msaitoh ixgbe_config_gpie(sc); 4131 1.43 msaitoh 4132 1.98 msaitoh /* Set MTU size */ 4133 1.98 msaitoh if (ifp->if_mtu > ETHERMTU) { 4134 1.98 msaitoh /* aka IXGBE_MAXFRS on 82599 and newer */ 4135 1.98 msaitoh mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 4136 1.98 msaitoh mhadd &= ~IXGBE_MHADD_MFS_MASK; 4137 1.333 msaitoh mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 4138 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 4139 1.55 msaitoh } 4140 1.55 msaitoh 4141 1.98 msaitoh /* Now enable all the queues */ 4142 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) { 4143 1.333 msaitoh txr = &sc->tx_rings[i]; 4144 1.98 msaitoh txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 4145 1.98 msaitoh txdctl |= IXGBE_TXDCTL_ENABLE; 4146 1.98 msaitoh /* Set WTHRESH to 8, burst writeback */ 4147 1.348 msaitoh txdctl &= ~IXGBE_TXDCTL_WTHRESH_MASK; 4148 1.292 msaitoh txdctl |= IXGBE_TX_WTHRESH << IXGBE_TXDCTL_WTHRESH_SHIFT; 4149 1.98 msaitoh /* 4150 1.98 msaitoh * When the internal queue falls below PTHRESH (32), 4151 1.98 msaitoh * start prefetching as long as there are at least 4152 1.98 msaitoh * HTHRESH (1) buffers ready. The values are taken 4153 1.98 msaitoh * from the Intel linux driver 3.8.21. 4154 1.98 msaitoh * Prefetching enables tx line rate even with 1 queue. 
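		 * (Note on the encoding: PTHRESH starts at bit 0,
		 * HTHRESH at bit 8 and WTHRESH at bit 16 of TXDCTL, so
		 * the "(32 << 0) | (1 << 8)" below sets PTHRESH=32 and
		 * HTHRESH=1, while WTHRESH was already programmed via
		 * IXGBE_TXDCTL_WTHRESH_SHIFT above.)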
4155 1.98 msaitoh */ 4156 1.98 msaitoh txdctl |= (32 << 0) | (1 << 8); 4157 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 4158 1.55 msaitoh } 4159 1.43 msaitoh 4160 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) { 4161 1.333 msaitoh rxr = &sc->rx_rings[i]; 4162 1.98 msaitoh rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 4163 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) { 4164 1.98 msaitoh /* 4165 1.99 msaitoh * PTHRESH = 21 4166 1.99 msaitoh * HTHRESH = 4 4167 1.99 msaitoh * WTHRESH = 8 4168 1.99 msaitoh */ 4169 1.98 msaitoh rxdctl &= ~0x3FFFFF; 4170 1.98 msaitoh rxdctl |= 0x080420; 4171 1.98 msaitoh } 4172 1.98 msaitoh rxdctl |= IXGBE_RXDCTL_ENABLE; 4173 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 4174 1.144 msaitoh for (j = 0; j < 10; j++) { 4175 1.98 msaitoh if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 4176 1.98 msaitoh IXGBE_RXDCTL_ENABLE) 4177 1.98 msaitoh break; 4178 1.98 msaitoh else 4179 1.98 msaitoh msec_delay(1); 4180 1.55 msaitoh } 4181 1.217 msaitoh IXGBE_WRITE_BARRIER(hw); 4182 1.99 msaitoh 4183 1.98 msaitoh /* 4184 1.98 msaitoh * In netmap mode, we must preserve the buffers made 4185 1.98 msaitoh * available to userspace before the if_init() 4186 1.98 msaitoh * (this is true by default on the TX side, because 4187 1.98 msaitoh * init makes all buffers available to userspace). 4188 1.98 msaitoh * 4189 1.98 msaitoh * netmap_reset() and the device specific routines 4190 1.98 msaitoh * (e.g. ixgbe_setup_receive_rings()) map these 4191 1.98 msaitoh * buffers at the end of the NIC ring, so here we 4192 1.98 msaitoh * must set the RDT (tail) register to make sure 4193 1.98 msaitoh * they are not overwritten. 4194 1.98 msaitoh * 4195 1.98 msaitoh * In this driver the NIC ring starts at RDH = 0, 4196 1.98 msaitoh * RDT points to the last slot available for reception (?), 4197 1.98 msaitoh * so RDT = num_rx_desc - 1 means the whole ring is available. 
4198 1.98 msaitoh */ 4199 1.99 msaitoh #ifdef DEV_NETMAP 4200 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && 4201 1.99 msaitoh (ifp->if_capenable & IFCAP_NETMAP)) { 4202 1.333 msaitoh struct netmap_adapter *na = NA(sc->ifp); 4203 1.189 msaitoh struct netmap_kring *kring = na->rx_rings[i]; 4204 1.98 msaitoh int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 4205 1.98 msaitoh 4206 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t); 4207 1.98 msaitoh } else 4208 1.98 msaitoh #endif /* DEV_NETMAP */ 4209 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), 4210 1.333 msaitoh sc->num_rx_desc - 1); 4211 1.48 msaitoh } 4212 1.98 msaitoh 4213 1.98 msaitoh /* Enable Receive engine */ 4214 1.98 msaitoh rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4215 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) 4216 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_DMBYPS; 4217 1.98 msaitoh rxctrl |= IXGBE_RXCTRL_RXEN; 4218 1.98 msaitoh ixgbe_enable_rx_dma(hw, rxctrl); 4219 1.98 msaitoh 4220 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc); 4221 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0); 4222 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 4223 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz, 4224 1.333 msaitoh ixgbe_recovery_mode_timer, sc); 4225 1.98 msaitoh 4226 1.144 msaitoh /* Set up MSI/MSI-X routing */ 4227 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) { 4228 1.333 msaitoh ixgbe_configure_ivars(sc); 4229 1.98 msaitoh /* Set up auto-mask */ 4230 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) 4231 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 4232 1.98 msaitoh else { 4233 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 4234 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 4235 1.55 msaitoh } 4236 1.98 msaitoh } else { /* Simple settings for Legacy/MSI */ 4237 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 0); 4238 1.333 msaitoh ixgbe_set_ivar(sc, 0, 0, 1); 4239 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 4240 1.55 msaitoh } 4241 1.43 msaitoh 4242 1.333 msaitoh ixgbe_init_fdir(sc); 4243 1.98 msaitoh 4244 1.98 msaitoh /* 4245 1.98 msaitoh * Check on any SFP devices that 4246 1.98 msaitoh * need to be kick-started 4247 1.98 msaitoh */ 4248 1.98 msaitoh if (hw->phy.type == ixgbe_phy_none) { 4249 1.283 msaitoh error = hw->phy.ops.identify(hw); 4250 1.283 msaitoh if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) 4251 1.219 msaitoh unsupported_sfp = true; 4252 1.219 msaitoh } else if (hw->phy.type == ixgbe_phy_sfp_unsupported) 4253 1.219 msaitoh unsupported_sfp = true; 4254 1.219 msaitoh 4255 1.219 msaitoh if (unsupported_sfp) 4256 1.219 msaitoh device_printf(dev, 4257 1.219 msaitoh "Unsupported SFP+ module type was detected.\n"); 4258 1.98 msaitoh 4259 1.98 msaitoh /* Set moderation on the Link interrupt */ 4260 1.333 msaitoh ixgbe_eitr_write(sc, sc->vector, IXGBE_LINK_ITR); 4261 1.98 msaitoh 4262 1.173 msaitoh /* Enable EEE power saving */ 4263 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE) 4264 1.173 msaitoh hw->mac.ops.setup_eee(hw, 4265 1.333 msaitoh sc->feat_en & IXGBE_FEATURE_EEE); 4266 1.173 msaitoh 4267 1.144 msaitoh /* Enable power to the phy. 
*/ 4268 1.219 msaitoh if (!unsupported_sfp) { 4269 1.219 msaitoh ixgbe_set_phy_power(hw, TRUE); 4270 1.144 msaitoh 4271 1.219 msaitoh /* Config/Enable Link */ 4272 1.333 msaitoh ixgbe_config_link(sc); 4273 1.219 msaitoh } 4274 1.55 msaitoh 4275 1.98 msaitoh /* Hardware Packet Buffer & Flow Control setup */ 4276 1.333 msaitoh ixgbe_config_delay_values(sc); 4277 1.1 dyoung 4278 1.98 msaitoh /* Initialize the FC settings */ 4279 1.98 msaitoh ixgbe_start_hw(hw); 4280 1.1 dyoung 4281 1.98 msaitoh /* Set up VLAN support and filter */ 4282 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc); 4283 1.1 dyoung 4284 1.98 msaitoh /* Setup DMA Coalescing */ 4285 1.333 msaitoh ixgbe_config_dmac(sc); 4286 1.98 msaitoh 4287 1.230 msaitoh /* OK to schedule workqueues. */ 4288 1.333 msaitoh sc->schedule_wqs_ok = true; 4289 1.230 msaitoh 4290 1.98 msaitoh /* Enable the use of the MBX by the VF's */ 4291 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV) { 4292 1.99 msaitoh ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 4293 1.99 msaitoh ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 4294 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 4295 1.1 dyoung } 4296 1.98 msaitoh 4297 1.123 msaitoh /* Update saved flags. See ixgbe_ifflags_cb() */ 4298 1.333 msaitoh sc->if_flags = ifp->if_flags; 4299 1.333 msaitoh sc->ec_capenable = sc->osdep.ec.ec_capenable; 4300 1.123 msaitoh 4301 1.337 msaitoh /* Inform the stack we're ready */ 4302 1.98 msaitoh ifp->if_flags |= IFF_RUNNING; 4303 1.98 msaitoh 4304 1.337 msaitoh /* And now turn on interrupts */ 4305 1.337 msaitoh ixgbe_enable_intr(sc); 4306 1.337 msaitoh 4307 1.1 dyoung return; 4308 1.99 msaitoh } /* ixgbe_init_locked */ 4309 1.1 dyoung 4310 1.99 msaitoh /************************************************************************ 4311 1.99 msaitoh * ixgbe_init 4312 1.99 msaitoh ************************************************************************/ 4313 1.98 msaitoh static int 4314 1.98 msaitoh ixgbe_init(struct ifnet *ifp) 4315 1.98 msaitoh { 4316 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc; 4317 1.98 msaitoh 4318 1.333 msaitoh IXGBE_CORE_LOCK(sc); 4319 1.333 msaitoh ixgbe_init_locked(sc); 4320 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 4321 1.98 msaitoh 4322 1.98 msaitoh return 0; /* XXX ixgbe_init_locked cannot fail? really? 
*/ 4323 1.99 msaitoh } /* ixgbe_init */ 4324 1.43 msaitoh 4325 1.99 msaitoh /************************************************************************ 4326 1.99 msaitoh * ixgbe_set_ivar 4327 1.99 msaitoh * 4328 1.99 msaitoh * Setup the correct IVAR register for a particular MSI-X interrupt 4329 1.99 msaitoh * (yes this is all very magic and confusing :) 4330 1.99 msaitoh * - entry is the register array entry 4331 1.99 msaitoh * - vector is the MSI-X vector for this queue 4332 1.99 msaitoh * - type is RX/TX/MISC 4333 1.99 msaitoh ************************************************************************/ 4334 1.42 msaitoh static void 4335 1.333 msaitoh ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type) 4336 1.1 dyoung { 4337 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4338 1.98 msaitoh u32 ivar, index; 4339 1.98 msaitoh 4340 1.98 msaitoh vector |= IXGBE_IVAR_ALLOC_VAL; 4341 1.98 msaitoh 4342 1.98 msaitoh switch (hw->mac.type) { 4343 1.98 msaitoh case ixgbe_mac_82598EB: 4344 1.98 msaitoh if (type == -1) 4345 1.98 msaitoh entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 4346 1.98 msaitoh else 4347 1.98 msaitoh entry += (type * 64); 4348 1.98 msaitoh index = (entry >> 2) & 0x1F; 4349 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4350 1.198 msaitoh ivar &= ~(0xffUL << (8 * (entry & 0x3))); 4351 1.198 msaitoh ivar |= ((u32)vector << (8 * (entry & 0x3))); 4352 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar); 4353 1.98 msaitoh break; 4354 1.98 msaitoh case ixgbe_mac_82599EB: 4355 1.98 msaitoh case ixgbe_mac_X540: 4356 1.98 msaitoh case ixgbe_mac_X550: 4357 1.98 msaitoh case ixgbe_mac_X550EM_x: 4358 1.99 msaitoh case ixgbe_mac_X550EM_a: 4359 1.98 msaitoh if (type == -1) { /* MISC IVAR */ 4360 1.98 msaitoh index = (entry & 1) * 8; 4361 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4362 1.194 msaitoh ivar &= ~(0xffUL << index); 4363 1.194 msaitoh ivar |= ((u32)vector << index); 4364 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4365 1.98 msaitoh } else { /* RX/TX IVARS */ 4366 1.98 msaitoh index = (16 * (entry & 1)) + (8 * type); 4367 1.98 msaitoh ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 4368 1.194 msaitoh ivar &= ~(0xffUL << index); 4369 1.194 msaitoh ivar |= ((u32)vector << index); 4370 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 4371 1.98 msaitoh } 4372 1.135 msaitoh break; 4373 1.98 msaitoh default: 4374 1.98 msaitoh break; 4375 1.98 msaitoh } 4376 1.99 msaitoh } /* ixgbe_set_ivar */ 4377 1.1 dyoung 4378 1.99 msaitoh /************************************************************************ 4379 1.99 msaitoh * ixgbe_configure_ivars 4380 1.99 msaitoh ************************************************************************/ 4381 1.98 msaitoh static void 4382 1.333 msaitoh ixgbe_configure_ivars(struct ixgbe_softc *sc) 4383 1.98 msaitoh { 4384 1.333 msaitoh struct ix_queue *que = sc->queues; 4385 1.186 msaitoh u32 newitr; 4386 1.1 dyoung 4387 1.343 msaitoh if (sc->max_interrupt_rate > 0) 4388 1.343 msaitoh newitr = (4000000 / sc->max_interrupt_rate) & 0x0FF8; 4389 1.98 msaitoh else { 4390 1.48 msaitoh /* 4391 1.99 msaitoh * Disable DMA coalescing if interrupt moderation is 4392 1.99 msaitoh * disabled. 
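		 * (In the non-zero case above, "4000000 / max_interrupt_rate"
		 * yields the EITR register value directly: the ITR interval
		 * field sits in bits 11:3 and, on 82599 and newer MACs,
		 * counts in roughly 2us units, so e.g. a 31250 irq/s limit
		 * gives 4000000/31250 = 128, i.e. an interval field of 16
		 * and about 32us between interrupts.  The 0x0FF8 mask keeps
		 * only bits 11:3.)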
4393 1.99 msaitoh */ 4394 1.333 msaitoh sc->dmac = 0; 4395 1.98 msaitoh newitr = 0; 4396 1.98 msaitoh } 4397 1.98 msaitoh 4398 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) { 4399 1.333 msaitoh struct rx_ring *rxr = &sc->rx_rings[i]; 4400 1.333 msaitoh struct tx_ring *txr = &sc->tx_rings[i]; 4401 1.98 msaitoh /* First the RX queue entry */ 4402 1.333 msaitoh ixgbe_set_ivar(sc, rxr->me, que->msix, 0); 4403 1.98 msaitoh /* ... and the TX */ 4404 1.333 msaitoh ixgbe_set_ivar(sc, txr->me, que->msix, 1); 4405 1.98 msaitoh /* Set an Initial EITR value */ 4406 1.333 msaitoh ixgbe_eitr_write(sc, que->msix, newitr); 4407 1.138 knakahar /* 4408 1.138 knakahar * To eliminate influence of the previous state. 4409 1.138 knakahar * At this point, Tx/Rx interrupt handler 4410 1.138 knakahar * (ixgbe_msix_que()) cannot be called, so both 4411 1.138 knakahar * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required. 4412 1.138 knakahar */ 4413 1.138 knakahar que->eitr_setting = 0; 4414 1.98 msaitoh } 4415 1.98 msaitoh 4416 1.98 msaitoh /* For the Link interrupt */ 4417 1.333 msaitoh ixgbe_set_ivar(sc, 1, sc->vector, -1); 4418 1.99 msaitoh } /* ixgbe_configure_ivars */ 4419 1.98 msaitoh 4420 1.99 msaitoh /************************************************************************ 4421 1.99 msaitoh * ixgbe_config_gpie 4422 1.99 msaitoh ************************************************************************/ 4423 1.98 msaitoh static void 4424 1.333 msaitoh ixgbe_config_gpie(struct ixgbe_softc *sc) 4425 1.98 msaitoh { 4426 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4427 1.186 msaitoh u32 gpie; 4428 1.98 msaitoh 4429 1.98 msaitoh gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4430 1.98 msaitoh 4431 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_MSIX) { 4432 1.99 msaitoh /* Enable Enhanced MSI-X mode */ 4433 1.99 msaitoh gpie |= IXGBE_GPIE_MSIX_MODE 4434 1.186 msaitoh | IXGBE_GPIE_EIAME 4435 1.186 msaitoh | IXGBE_GPIE_PBA_SUPPORT 4436 1.186 msaitoh | IXGBE_GPIE_OCD; 4437 1.99 msaitoh } 4438 1.99 msaitoh 4439 1.98 msaitoh /* Fan Failure Interrupt */ 4440 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) 4441 1.98 msaitoh gpie |= IXGBE_SDP1_GPIEN; 4442 1.1 dyoung 4443 1.99 msaitoh /* Thermal Sensor Interrupt */ 4444 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 4445 1.99 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540; 4446 1.1 dyoung 4447 1.99 msaitoh /* Link detection */ 4448 1.99 msaitoh switch (hw->mac.type) { 4449 1.99 msaitoh case ixgbe_mac_82599EB: 4450 1.99 msaitoh gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 4451 1.99 msaitoh break; 4452 1.99 msaitoh case ixgbe_mac_X550EM_x: 4453 1.99 msaitoh case ixgbe_mac_X550EM_a: 4454 1.98 msaitoh gpie |= IXGBE_SDP0_GPIEN_X540; 4455 1.99 msaitoh break; 4456 1.99 msaitoh default: 4457 1.99 msaitoh break; 4458 1.1 dyoung } 4459 1.1 dyoung 4460 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4461 1.98 msaitoh 4462 1.99 msaitoh } /* ixgbe_config_gpie */ 4463 1.1 dyoung 4464 1.99 msaitoh /************************************************************************ 4465 1.99 msaitoh * ixgbe_config_delay_values 4466 1.99 msaitoh * 4467 1.333 msaitoh * Requires sc->max_frame_size to be set. 
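 *
 *   The frame size is converted into the packet-buffer headroom (in
 *   KB) needed to absorb traffic still in flight while a PAUSE frame
 *   takes effect, and the result is programmed as the flow-control
 *   high/low watermarks.  As a rough worked example for a standard
 *   1500-byte MTU (so max_frame_size = 1500 + ETHER_HDR_LEN +
 *   ETHER_CRC_LEN = 1518), mirroring the code below:
 *
 *	tmp  = IXGBE_DV(1518, 1518);	delay value in bit times
 *	size = IXGBE_BT2KB(tmp);	rounded up to KB
 *	high_water = rxpb - size;	rxpb = RXPBSIZE(0) >> 10, in KB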
4468 1.99 msaitoh ************************************************************************/ 4469 1.33 msaitoh static void 4470 1.333 msaitoh ixgbe_config_delay_values(struct ixgbe_softc *sc) 4471 1.33 msaitoh { 4472 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4473 1.186 msaitoh u32 rxpb, frame, size, tmp; 4474 1.33 msaitoh 4475 1.333 msaitoh frame = sc->max_frame_size; 4476 1.33 msaitoh 4477 1.98 msaitoh /* Calculate High Water */ 4478 1.98 msaitoh switch (hw->mac.type) { 4479 1.98 msaitoh case ixgbe_mac_X540: 4480 1.44 msaitoh case ixgbe_mac_X550: 4481 1.44 msaitoh case ixgbe_mac_X550EM_x: 4482 1.99 msaitoh case ixgbe_mac_X550EM_a: 4483 1.98 msaitoh tmp = IXGBE_DV_X540(frame, frame); 4484 1.44 msaitoh break; 4485 1.44 msaitoh default: 4486 1.98 msaitoh tmp = IXGBE_DV(frame, frame); 4487 1.44 msaitoh break; 4488 1.44 msaitoh } 4489 1.98 msaitoh size = IXGBE_BT2KB(tmp); 4490 1.98 msaitoh rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 4491 1.98 msaitoh hw->fc.high_water[0] = rxpb - size; 4492 1.44 msaitoh 4493 1.98 msaitoh /* Now calculate Low Water */ 4494 1.98 msaitoh switch (hw->mac.type) { 4495 1.98 msaitoh case ixgbe_mac_X540: 4496 1.98 msaitoh case ixgbe_mac_X550: 4497 1.98 msaitoh case ixgbe_mac_X550EM_x: 4498 1.99 msaitoh case ixgbe_mac_X550EM_a: 4499 1.98 msaitoh tmp = IXGBE_LOW_DV_X540(frame); 4500 1.98 msaitoh break; 4501 1.98 msaitoh default: 4502 1.98 msaitoh tmp = IXGBE_LOW_DV(frame); 4503 1.98 msaitoh break; 4504 1.33 msaitoh } 4505 1.98 msaitoh hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 4506 1.33 msaitoh 4507 1.98 msaitoh hw->fc.pause_time = IXGBE_FC_PAUSE; 4508 1.98 msaitoh hw->fc.send_xon = TRUE; 4509 1.99 msaitoh } /* ixgbe_config_delay_values */ 4510 1.33 msaitoh 4511 1.99 msaitoh /************************************************************************ 4512 1.213 msaitoh * ixgbe_set_rxfilter - Multicast Update 4513 1.1 dyoung * 4514 1.99 msaitoh * Called whenever multicast address list is updated. 
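 *
 *   Besides rewriting the multicast table, this also derives the
 *   FCTRL unicast/multicast promiscuous bits (UPE/MPE) from
 *   IFF_PROMISC and ETHER_F_ALLMULTI, and falls back to ALLMULTI
 *   when more than MAX_NUM_MULTICAST_ADDRESSES addresses or an
 *   address range is requested.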
4515 1.99 msaitoh ************************************************************************/ 4516 1.1 dyoung static void 4517 1.333 msaitoh ixgbe_set_rxfilter(struct ixgbe_softc *sc) 4518 1.1 dyoung { 4519 1.99 msaitoh struct ixgbe_mc_addr *mta; 4520 1.333 msaitoh struct ifnet *ifp = sc->ifp; 4521 1.98 msaitoh u8 *update_ptr; 4522 1.98 msaitoh int mcnt = 0; 4523 1.99 msaitoh u32 fctrl; 4524 1.333 msaitoh struct ethercom *ec = &sc->osdep.ec; 4525 1.98 msaitoh struct ether_multi *enm; 4526 1.98 msaitoh struct ether_multistep step; 4527 1.98 msaitoh 4528 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx)); 4529 1.213 msaitoh IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin"); 4530 1.98 msaitoh 4531 1.333 msaitoh mta = sc->mta; 4532 1.98 msaitoh bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 4533 1.1 dyoung 4534 1.105 msaitoh ETHER_LOCK(ec); 4535 1.183 ozaki ec->ec_flags &= ~ETHER_F_ALLMULTI; 4536 1.98 msaitoh ETHER_FIRST_MULTI(step, ec, enm); 4537 1.98 msaitoh while (enm != NULL) { 4538 1.98 msaitoh if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) || 4539 1.98 msaitoh (memcmp(enm->enm_addrlo, enm->enm_addrhi, 4540 1.98 msaitoh ETHER_ADDR_LEN) != 0)) { 4541 1.183 ozaki ec->ec_flags |= ETHER_F_ALLMULTI; 4542 1.98 msaitoh break; 4543 1.98 msaitoh } 4544 1.98 msaitoh bcopy(enm->enm_addrlo, 4545 1.98 msaitoh mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 4546 1.333 msaitoh mta[mcnt].vmdq = sc->pool; 4547 1.98 msaitoh mcnt++; 4548 1.98 msaitoh ETHER_NEXT_MULTI(step, enm); 4549 1.98 msaitoh } 4550 1.1 dyoung 4551 1.333 msaitoh fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL); 4552 1.98 msaitoh if (ifp->if_flags & IFF_PROMISC) 4553 1.98 msaitoh fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4554 1.183 ozaki else if (ec->ec_flags & ETHER_F_ALLMULTI) { 4555 1.98 msaitoh fctrl |= IXGBE_FCTRL_MPE; 4556 1.212 msaitoh fctrl &= ~IXGBE_FCTRL_UPE; 4557 1.212 msaitoh } else 4558 1.212 msaitoh fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4559 1.1 dyoung 4560 1.211 msaitoh /* Update multicast filter entries only when it's not ALLMULTI */ 4561 1.211 msaitoh if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) { 4562 1.211 msaitoh ETHER_UNLOCK(ec); 4563 1.98 msaitoh update_ptr = (u8 *)mta; 4564 1.333 msaitoh ixgbe_update_mc_addr_list(&sc->hw, update_ptr, mcnt, 4565 1.99 msaitoh ixgbe_mc_array_itr, TRUE); 4566 1.211 msaitoh } else 4567 1.211 msaitoh ETHER_UNLOCK(ec); 4568 1.332 msaitoh 4569 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl); 4570 1.213 msaitoh } /* ixgbe_set_rxfilter */ 4571 1.1 dyoung 4572 1.99 msaitoh /************************************************************************ 4573 1.99 msaitoh * ixgbe_mc_array_itr 4574 1.99 msaitoh * 4575 1.99 msaitoh * An iterator function needed by the multicast shared code. 4576 1.99 msaitoh * It feeds the shared code routine the addresses in the 4577 1.213 msaitoh * array of ixgbe_set_rxfilter() one by one. 
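 *
 *   Each call returns mta[i].addr, stores mta[i].vmdq through *vmdq
 *   and advances *update_ptr to the next entry, so repeated calls
 *   walk the array built by ixgbe_set_rxfilter().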
4578 1.99 msaitoh ************************************************************************/ 4579 1.98 msaitoh static u8 * 4580 1.98 msaitoh ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 4581 1.98 msaitoh { 4582 1.98 msaitoh struct ixgbe_mc_addr *mta; 4583 1.1 dyoung 4584 1.98 msaitoh mta = (struct ixgbe_mc_addr *)*update_ptr; 4585 1.98 msaitoh *vmdq = mta->vmdq; 4586 1.33 msaitoh 4587 1.98 msaitoh *update_ptr = (u8*)(mta + 1); 4588 1.99 msaitoh 4589 1.98 msaitoh return (mta->addr); 4590 1.99 msaitoh } /* ixgbe_mc_array_itr */ 4591 1.82 msaitoh 4592 1.99 msaitoh /************************************************************************ 4593 1.99 msaitoh * ixgbe_local_timer - Timer routine 4594 1.98 msaitoh * 4595 1.99 msaitoh * Checks for link status, updates statistics, 4596 1.99 msaitoh * and runs the watchdog check. 4597 1.99 msaitoh ************************************************************************/ 4598 1.98 msaitoh static void 4599 1.98 msaitoh ixgbe_local_timer(void *arg) 4600 1.98 msaitoh { 4601 1.333 msaitoh struct ixgbe_softc *sc = arg; 4602 1.1 dyoung 4603 1.333 msaitoh if (sc->schedule_wqs_ok) { 4604 1.333 msaitoh if (atomic_cas_uint(&sc->timer_pending, 0, 1) == 0) 4605 1.333 msaitoh workqueue_enqueue(sc->timer_wq, 4606 1.333 msaitoh &sc->timer_wc, NULL); 4607 1.233 msaitoh } 4608 1.98 msaitoh } 4609 1.28 msaitoh 4610 1.98 msaitoh static void 4611 1.233 msaitoh ixgbe_handle_timer(struct work *wk, void *context) 4612 1.98 msaitoh { 4613 1.339 msaitoh struct ixgbe_softc *sc = context; 4614 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4615 1.333 msaitoh device_t dev = sc->dev; 4616 1.333 msaitoh struct ix_queue *que = sc->queues; 4617 1.153 msaitoh u64 queues = 0; 4618 1.134 msaitoh u64 v0, v1, v2, v3, v4, v5, v6, v7; 4619 1.153 msaitoh int hung = 0; 4620 1.134 msaitoh int i; 4621 1.1 dyoung 4622 1.333 msaitoh IXGBE_CORE_LOCK(sc); 4623 1.1 dyoung 4624 1.98 msaitoh /* Check for pluggable optics */ 4625 1.237 msaitoh if (ixgbe_is_sfp(hw)) { 4626 1.249 msaitoh bool sched_mod_task = false; 4627 1.237 msaitoh 4628 1.249 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) { 4629 1.249 msaitoh /* 4630 1.249 msaitoh * On 82598EB, SFP+'s MOD_ABS pin is not connected to 4631 1.250 msaitoh * any GPIO(SDP). So just schedule TASK_MOD. 
4632 1.249 msaitoh */ 4633 1.249 msaitoh sched_mod_task = true; 4634 1.249 msaitoh } else { 4635 1.249 msaitoh bool was_full, is_full; 4636 1.249 msaitoh 4637 1.249 msaitoh was_full = 4638 1.249 msaitoh hw->phy.sfp_type != ixgbe_sfp_type_not_present; 4639 1.251 msaitoh is_full = ixgbe_sfp_cage_full(hw); 4640 1.249 msaitoh 4641 1.249 msaitoh /* Do probe if cage state changed */ 4642 1.249 msaitoh if (was_full ^ is_full) 4643 1.249 msaitoh sched_mod_task = true; 4644 1.249 msaitoh } 4645 1.249 msaitoh if (sched_mod_task) { 4646 1.333 msaitoh mutex_enter(&sc->admin_mtx); 4647 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MOD_WOI; 4648 1.333 msaitoh ixgbe_schedule_admin_tasklet(sc); 4649 1.333 msaitoh mutex_exit(&sc->admin_mtx); 4650 1.239 msaitoh } 4651 1.237 msaitoh } 4652 1.1 dyoung 4653 1.333 msaitoh ixgbe_update_link_status(sc); 4654 1.333 msaitoh ixgbe_update_stats_counters(sc); 4655 1.33 msaitoh 4656 1.134 msaitoh /* Update some event counters */ 4657 1.134 msaitoh v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0; 4658 1.333 msaitoh que = sc->queues; 4659 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) { 4660 1.186 msaitoh struct tx_ring *txr = que->txr; 4661 1.134 msaitoh 4662 1.134 msaitoh v0 += txr->q_efbig_tx_dma_setup; 4663 1.134 msaitoh v1 += txr->q_mbuf_defrag_failed; 4664 1.134 msaitoh v2 += txr->q_efbig2_tx_dma_setup; 4665 1.134 msaitoh v3 += txr->q_einval_tx_dma_setup; 4666 1.134 msaitoh v4 += txr->q_other_tx_dma_setup; 4667 1.134 msaitoh v5 += txr->q_eagain_tx_dma_setup; 4668 1.134 msaitoh v6 += txr->q_enomem_tx_dma_setup; 4669 1.134 msaitoh v7 += txr->q_tso_err; 4670 1.134 msaitoh } 4671 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig_tx_dma_setup, v0); 4672 1.333 msaitoh IXGBE_EVC_STORE(&sc->mbuf_defrag_failed, v1); 4673 1.333 msaitoh IXGBE_EVC_STORE(&sc->efbig2_tx_dma_setup, v2); 4674 1.333 msaitoh IXGBE_EVC_STORE(&sc->einval_tx_dma_setup, v3); 4675 1.333 msaitoh IXGBE_EVC_STORE(&sc->other_tx_dma_setup, v4); 4676 1.333 msaitoh IXGBE_EVC_STORE(&sc->eagain_tx_dma_setup, v5); 4677 1.333 msaitoh IXGBE_EVC_STORE(&sc->enomem_tx_dma_setup, v6); 4678 1.333 msaitoh IXGBE_EVC_STORE(&sc->tso_err, v7); 4679 1.134 msaitoh 4680 1.153 msaitoh /* 4681 1.153 msaitoh * Check the TX queues status 4682 1.186 msaitoh * - mark hung queues so we don't schedule on them 4683 1.186 msaitoh * - watchdog only if all queues show hung 4684 1.153 msaitoh */ 4685 1.333 msaitoh que = sc->queues; 4686 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) { 4687 1.153 msaitoh /* Keep track of queues with work for soft irq */ 4688 1.153 msaitoh if (que->txr->busy) 4689 1.190 msaitoh queues |= 1ULL << que->me; 4690 1.153 msaitoh /* 4691 1.153 msaitoh * Each time txeof runs without cleaning, but there 4692 1.153 msaitoh * are uncleaned descriptors it increments busy. If 4693 1.153 msaitoh * we get to the MAX we declare it hung. 
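		 * Note that a single hung queue is only marked inactive
		 * here; the watchdog reset below fires only when every
		 * queue reports hung.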
4694 1.153 msaitoh */ 4695 1.153 msaitoh if (que->busy == IXGBE_QUEUE_HUNG) { 4696 1.153 msaitoh ++hung; 4697 1.153 msaitoh /* Mark the queue as inactive */ 4698 1.333 msaitoh sc->active_queues &= ~(1ULL << que->me); 4699 1.153 msaitoh continue; 4700 1.153 msaitoh } else { 4701 1.153 msaitoh /* Check if we've come back from hung */ 4702 1.333 msaitoh if ((sc->active_queues & (1ULL << que->me)) == 0) 4703 1.333 msaitoh sc->active_queues |= 1ULL << que->me; 4704 1.153 msaitoh } 4705 1.153 msaitoh if (que->busy >= IXGBE_MAX_TX_BUSY) { 4706 1.153 msaitoh device_printf(dev, 4707 1.153 msaitoh "Warning queue %d appears to be hung!\n", i); 4708 1.153 msaitoh que->txr->busy = IXGBE_QUEUE_HUNG; 4709 1.153 msaitoh ++hung; 4710 1.153 msaitoh } 4711 1.150 msaitoh } 4712 1.150 msaitoh 4713 1.232 msaitoh /* Only truly watchdog if all queues show hung */ 4714 1.333 msaitoh if (hung == sc->num_queues) 4715 1.153 msaitoh goto watchdog; 4716 1.160 msaitoh #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */ 4717 1.153 msaitoh else if (queues != 0) { /* Force an IRQ on queues with work */ 4718 1.333 msaitoh que = sc->queues; 4719 1.333 msaitoh for (i = 0; i < sc->num_queues; i++, que++) { 4720 1.139 knakahar mutex_enter(&que->dc_mtx); 4721 1.153 msaitoh if (que->disabled_count == 0) 4722 1.333 msaitoh ixgbe_rearm_queues(sc, 4723 1.153 msaitoh queues & ((u64)1 << i)); 4724 1.139 knakahar mutex_exit(&que->dc_mtx); 4725 1.131 knakahar } 4726 1.98 msaitoh } 4727 1.160 msaitoh #endif 4728 1.150 msaitoh 4729 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0); 4730 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 4731 1.333 msaitoh callout_reset(&sc->timer, hz, ixgbe_local_timer, sc); 4732 1.153 msaitoh return; 4733 1.1 dyoung 4734 1.153 msaitoh watchdog: 4735 1.333 msaitoh device_printf(sc->dev, "Watchdog timeout -- resetting\n"); 4736 1.333 msaitoh sc->ifp->if_flags &= ~IFF_RUNNING; 4737 1.333 msaitoh IXGBE_EVC_ADD(&sc->watchdog_events, 1); 4738 1.333 msaitoh ixgbe_init_locked(sc); 4739 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 4740 1.233 msaitoh } /* ixgbe_handle_timer */ 4741 1.43 msaitoh 4742 1.99 msaitoh /************************************************************************ 4743 1.169 msaitoh * ixgbe_recovery_mode_timer - Recovery mode timer routine 4744 1.169 msaitoh ************************************************************************/ 4745 1.169 msaitoh static void 4746 1.169 msaitoh ixgbe_recovery_mode_timer(void *arg) 4747 1.169 msaitoh { 4748 1.333 msaitoh struct ixgbe_softc *sc = arg; 4749 1.233 msaitoh 4750 1.333 msaitoh if (__predict_true(sc->osdep.detaching == false)) { 4751 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode_timer_pending, 4752 1.254 msaitoh 0, 1) == 0) { 4753 1.333 msaitoh workqueue_enqueue(sc->recovery_mode_timer_wq, 4754 1.333 msaitoh &sc->recovery_mode_timer_wc, NULL); 4755 1.254 msaitoh } 4756 1.233 msaitoh } 4757 1.233 msaitoh } 4758 1.233 msaitoh 4759 1.233 msaitoh static void 4760 1.233 msaitoh ixgbe_handle_recovery_mode_timer(struct work *wk, void *context) 4761 1.233 msaitoh { 4762 1.333 msaitoh struct ixgbe_softc *sc = context; 4763 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4764 1.169 msaitoh 4765 1.333 msaitoh IXGBE_CORE_LOCK(sc); 4766 1.169 msaitoh if (ixgbe_fw_recovery_mode(hw)) { 4767 1.333 msaitoh if (atomic_cas_uint(&sc->recovery_mode, 0, 1) == 0) { 4768 1.169 msaitoh /* Firmware error detected, entering recovery mode */ 4769 1.333 msaitoh device_printf(sc->dev, 4770 1.319 msaitoh "Firmware recovery mode detected. 
Limiting " 4771 1.319 msaitoh "functionality. Refer to the Intel(R) Ethernet " 4772 1.319 msaitoh "Adapters and Devices User Guide for details on " 4773 1.319 msaitoh "firmware recovery mode.\n"); 4774 1.169 msaitoh 4775 1.169 msaitoh if (hw->adapter_stopped == FALSE) 4776 1.333 msaitoh ixgbe_stop_locked(sc); 4777 1.169 msaitoh } 4778 1.169 msaitoh } else 4779 1.333 msaitoh atomic_cas_uint(&sc->recovery_mode, 1, 0); 4780 1.169 msaitoh 4781 1.333 msaitoh atomic_store_relaxed(&sc->recovery_mode_timer_pending, 0); 4782 1.333 msaitoh callout_reset(&sc->recovery_mode_timer, hz, 4783 1.333 msaitoh ixgbe_recovery_mode_timer, sc); 4784 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 4785 1.233 msaitoh } /* ixgbe_handle_recovery_mode_timer */ 4786 1.169 msaitoh 4787 1.169 msaitoh /************************************************************************ 4788 1.99 msaitoh * ixgbe_handle_mod - Tasklet for SFP module interrupts 4789 1.273 msaitoh * bool int_en: true if it's called when the interrupt is enabled. 4790 1.99 msaitoh ************************************************************************/ 4791 1.1 dyoung static void 4792 1.273 msaitoh ixgbe_handle_mod(void *context, bool int_en) 4793 1.1 dyoung { 4794 1.339 msaitoh struct ixgbe_softc *sc = context; 4795 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4796 1.333 msaitoh device_t dev = sc->dev; 4797 1.249 msaitoh enum ixgbe_sfp_type last_sfp_type; 4798 1.251 msaitoh u32 err; 4799 1.249 msaitoh bool last_unsupported_sfp_recovery; 4800 1.98 msaitoh 4801 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx)); 4802 1.257 msaitoh 4803 1.249 msaitoh last_sfp_type = hw->phy.sfp_type; 4804 1.249 msaitoh last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery; 4805 1.333 msaitoh IXGBE_EVC_ADD(&sc->mod_workev, 1); 4806 1.333 msaitoh if (sc->hw.need_crosstalk_fix) { 4807 1.251 msaitoh if ((hw->mac.type != ixgbe_mac_82598EB) && 4808 1.251 msaitoh !ixgbe_sfp_cage_full(hw)) 4809 1.218 msaitoh goto out; 4810 1.98 msaitoh } 4811 1.98 msaitoh 4812 1.98 msaitoh err = hw->phy.ops.identify_sfp(hw); 4813 1.98 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4814 1.249 msaitoh if (last_unsupported_sfp_recovery == false) 4815 1.249 msaitoh device_printf(dev, 4816 1.249 msaitoh "Unsupported SFP+ module type was detected.\n"); 4817 1.218 msaitoh goto out; 4818 1.33 msaitoh } 4819 1.33 msaitoh 4820 1.219 msaitoh if (hw->need_unsupported_sfp_recovery) { 4821 1.219 msaitoh device_printf(dev, "Recovering from unsupported SFP\n"); 4822 1.219 msaitoh /* 4823 1.219 msaitoh * We could recover the status by calling setup_sfp(), 4824 1.219 msaitoh * setup_link() and some others. It's complex and might not 4825 1.219 msaitoh * work correctly on some unknown cases. To avoid such type of 4826 1.219 msaitoh * problem, call ixgbe_init_locked(). It's simple and safe 4827 1.219 msaitoh * approach. 4828 1.219 msaitoh */ 4829 1.333 msaitoh ixgbe_init_locked(sc); 4830 1.249 msaitoh } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) && 4831 1.249 msaitoh (hw->phy.sfp_type != last_sfp_type)) { 4832 1.249 msaitoh /* A module is inserted and changed. 
*/ 4833 1.249 msaitoh 4834 1.219 msaitoh if (hw->mac.type == ixgbe_mac_82598EB) 4835 1.219 msaitoh err = hw->phy.ops.reset(hw); 4836 1.219 msaitoh else { 4837 1.219 msaitoh err = hw->mac.ops.setup_sfp(hw); 4838 1.219 msaitoh hw->phy.sfp_setup_needed = FALSE; 4839 1.219 msaitoh } 4840 1.219 msaitoh if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4841 1.219 msaitoh device_printf(dev, 4842 1.219 msaitoh "Setup failure - unsupported SFP+ module type.\n"); 4843 1.219 msaitoh goto out; 4844 1.219 msaitoh } 4845 1.1 dyoung } 4846 1.233 msaitoh 4847 1.218 msaitoh out: 4848 1.233 msaitoh /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 4849 1.333 msaitoh sc->phy_layer = ixgbe_get_supported_physical_layer(hw); 4850 1.233 msaitoh 4851 1.233 msaitoh /* Adjust media types shown in ifconfig */ 4852 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 4853 1.333 msaitoh ifmedia_removeall(&sc->media); 4854 1.333 msaitoh ixgbe_add_media_types(sc); 4855 1.333 msaitoh ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 4856 1.333 msaitoh IXGBE_CORE_LOCK(sc); 4857 1.233 msaitoh 4858 1.249 msaitoh /* 4859 1.288 andvar * Don't schedule MSF event if the chip is 82598. 82598 doesn't support 4860 1.249 msaitoh * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link 4861 1.250 msaitoh * flap because the function calls setup_link(). 4862 1.249 msaitoh */ 4863 1.260 knakahar if (hw->mac.type != ixgbe_mac_82598EB) { 4864 1.333 msaitoh mutex_enter(&sc->admin_mtx); 4865 1.273 msaitoh if (int_en) 4866 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF; 4867 1.273 msaitoh else 4868 1.333 msaitoh sc->task_requests |= IXGBE_REQUEST_TASK_MSF_WOI; 4869 1.333 msaitoh mutex_exit(&sc->admin_mtx); 4870 1.260 knakahar } 4871 1.249 msaitoh 4872 1.233 msaitoh /* 4873 1.233 msaitoh * Don't call ixgbe_schedule_admin_tasklet() because we are on 4874 1.233 msaitoh * the workqueue now. 
4875 1.233 msaitoh */ 4876 1.99 msaitoh } /* ixgbe_handle_mod */ 4877 1.1 dyoung 4878 1.1 dyoung 4879 1.99 msaitoh /************************************************************************ 4880 1.99 msaitoh * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 4881 1.99 msaitoh ************************************************************************/ 4882 1.33 msaitoh static void 4883 1.233 msaitoh ixgbe_handle_msf(void *context) 4884 1.33 msaitoh { 4885 1.339 msaitoh struct ixgbe_softc *sc = context; 4886 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4887 1.186 msaitoh u32 autoneg; 4888 1.186 msaitoh bool negotiate; 4889 1.33 msaitoh 4890 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx)); 4891 1.257 msaitoh 4892 1.333 msaitoh IXGBE_EVC_ADD(&sc->msf_workev, 1); 4893 1.33 msaitoh 4894 1.98 msaitoh autoneg = hw->phy.autoneg_advertised; 4895 1.98 msaitoh if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 4896 1.98 msaitoh hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 4897 1.98 msaitoh if (hw->mac.ops.setup_link) 4898 1.98 msaitoh hw->mac.ops.setup_link(hw, autoneg, TRUE); 4899 1.99 msaitoh } /* ixgbe_handle_msf */ 4900 1.33 msaitoh 4901 1.99 msaitoh /************************************************************************ 4902 1.99 msaitoh * ixgbe_handle_phy - Tasklet for external PHY interrupts 4903 1.99 msaitoh ************************************************************************/ 4904 1.1 dyoung static void 4905 1.98 msaitoh ixgbe_handle_phy(void *context) 4906 1.1 dyoung { 4907 1.339 msaitoh struct ixgbe_softc *sc = context; 4908 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4909 1.98 msaitoh int error; 4910 1.1 dyoung 4911 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx)); 4912 1.257 msaitoh 4913 1.333 msaitoh IXGBE_EVC_ADD(&sc->phy_workev, 1); 4914 1.98 msaitoh error = hw->phy.ops.handle_lasi(hw); 4915 1.98 msaitoh if (error == IXGBE_ERR_OVERTEMP) 4916 1.333 msaitoh device_printf(sc->dev, 4917 1.98 msaitoh "CRITICAL: EXTERNAL PHY OVER TEMP!! " 4918 1.98 msaitoh " PHY will downshift to lower power state!\n"); 4919 1.98 msaitoh else if (error) 4920 1.333 msaitoh device_printf(sc->dev, 4921 1.99 msaitoh "Error handling LASI interrupt: %d\n", error); 4922 1.99 msaitoh } /* ixgbe_handle_phy */ 4923 1.1 dyoung 4924 1.98 msaitoh static void 4925 1.233 msaitoh ixgbe_handle_admin(struct work *wk, void *context) 4926 1.233 msaitoh { 4927 1.339 msaitoh struct ixgbe_softc *sc = context; 4928 1.333 msaitoh struct ifnet *ifp = sc->ifp; 4929 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 4930 1.260 knakahar u32 task_requests; 4931 1.273 msaitoh u32 eims_enable = 0; 4932 1.260 knakahar 4933 1.333 msaitoh mutex_enter(&sc->admin_mtx); 4934 1.333 msaitoh sc->admin_pending = 0; 4935 1.333 msaitoh task_requests = sc->task_requests; 4936 1.333 msaitoh sc->task_requests = 0; 4937 1.333 msaitoh mutex_exit(&sc->admin_mtx); 4938 1.233 msaitoh 4939 1.233 msaitoh /* 4940 1.233 msaitoh * Hold the IFNET_LOCK across this entire call. This will 4941 1.333 msaitoh * prevent additional changes to sc->phy_layer 4942 1.233 msaitoh * and serialize calls to this tasklet. We cannot hold the 4943 1.233 msaitoh * CORE_LOCK while calling into the ifmedia functions as 4944 1.233 msaitoh * they call ifmedia_lock() and the lock is CORE_LOCK. 
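	 * In other words the lock order here is IFNET_LOCK ->
	 * CORE_LOCK; ixgbe_handle_mod() likewise drops CORE_LOCK
	 * around ifmedia_removeall()/ixgbe_add_media_types() for the
	 * same reason.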
4945 1.233 msaitoh */ 4946 1.233 msaitoh IFNET_LOCK(ifp); 4947 1.333 msaitoh IXGBE_CORE_LOCK(sc); 4948 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_LSC) != 0) { 4949 1.333 msaitoh ixgbe_handle_link(sc); 4950 1.273 msaitoh eims_enable |= IXGBE_EIMS_LSC; 4951 1.273 msaitoh } 4952 1.319 msaitoh if ((task_requests & IXGBE_REQUEST_TASK_MOD_WOI) != 0) 4953 1.333 msaitoh ixgbe_handle_mod(sc, false); 4954 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MOD) != 0) { 4955 1.333 msaitoh ixgbe_handle_mod(sc, true); 4956 1.273 msaitoh if (hw->mac.type >= ixgbe_mac_X540) 4957 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540; 4958 1.273 msaitoh else 4959 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 4960 1.260 knakahar } 4961 1.273 msaitoh if ((task_requests 4962 1.273 msaitoh & (IXGBE_REQUEST_TASK_MSF_WOI | IXGBE_REQUEST_TASK_MSF)) != 0) { 4963 1.333 msaitoh ixgbe_handle_msf(sc); 4964 1.273 msaitoh if (((task_requests & IXGBE_REQUEST_TASK_MSF) != 0) && 4965 1.273 msaitoh (hw->mac.type == ixgbe_mac_82599EB)) 4966 1.273 msaitoh eims_enable |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw); 4967 1.260 knakahar } 4968 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_PHY) != 0) { 4969 1.333 msaitoh ixgbe_handle_phy(sc); 4970 1.273 msaitoh eims_enable |= IXGBE_EICR_GPI_SDP0_X540; 4971 1.260 knakahar } 4972 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_FDIR) != 0) { 4973 1.333 msaitoh ixgbe_reinit_fdir(sc); 4974 1.273 msaitoh eims_enable |= IXGBE_EIMS_FLOW_DIR; 4975 1.260 knakahar } 4976 1.233 msaitoh #if 0 /* notyet */ 4977 1.260 knakahar if ((task_requests & IXGBE_REQUEST_TASK_MBX) != 0) { 4978 1.333 msaitoh ixgbe_handle_mbx(sc); 4979 1.273 msaitoh eims_enable |= IXGBE_EIMS_MAILBOX; 4980 1.260 knakahar } 4981 1.233 msaitoh #endif 4982 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_enable); 4983 1.233 msaitoh 4984 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 4985 1.233 msaitoh IFNET_UNLOCK(ifp); 4986 1.233 msaitoh } /* ixgbe_handle_admin */ 4987 1.233 msaitoh 4988 1.233 msaitoh static void 4989 1.98 msaitoh ixgbe_ifstop(struct ifnet *ifp, int disable) 4990 1.98 msaitoh { 4991 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc; 4992 1.1 dyoung 4993 1.333 msaitoh IXGBE_CORE_LOCK(sc); 4994 1.333 msaitoh ixgbe_stop_locked(sc); 4995 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 4996 1.223 thorpej 4997 1.333 msaitoh workqueue_wait(sc->timer_wq, &sc->timer_wc); 4998 1.333 msaitoh atomic_store_relaxed(&sc->timer_pending, 0); 4999 1.98 msaitoh } 5000 1.1 dyoung 5001 1.99 msaitoh /************************************************************************ 5002 1.252 msaitoh * ixgbe_stop_locked - Stop the hardware 5003 1.98 msaitoh * 5004 1.99 msaitoh * Disables all traffic on the adapter by issuing a 5005 1.99 msaitoh * global reset on the MAC and deallocates TX/RX buffers. 5006 1.99 msaitoh ************************************************************************/ 5007 1.1 dyoung static void 5008 1.252 msaitoh ixgbe_stop_locked(void *arg) 5009 1.1 dyoung { 5010 1.186 msaitoh struct ifnet *ifp; 5011 1.339 msaitoh struct ixgbe_softc *sc = arg; 5012 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5013 1.99 msaitoh 5014 1.333 msaitoh ifp = sc->ifp; 5015 1.98 msaitoh 5016 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx)); 5017 1.98 msaitoh 5018 1.252 msaitoh INIT_DEBUGOUT("ixgbe_stop_locked: begin\n"); 5019 1.333 msaitoh ixgbe_disable_intr(sc); 5020 1.333 msaitoh callout_stop(&sc->timer); 5021 1.98 msaitoh 5022 1.223 thorpej /* Don't schedule workqueues. 
*/ 5023 1.333 msaitoh sc->schedule_wqs_ok = false; 5024 1.223 thorpej 5025 1.98 msaitoh /* Let the stack know...*/ 5026 1.98 msaitoh ifp->if_flags &= ~IFF_RUNNING; 5027 1.98 msaitoh 5028 1.98 msaitoh ixgbe_reset_hw(hw); 5029 1.98 msaitoh hw->adapter_stopped = FALSE; 5030 1.98 msaitoh ixgbe_stop_adapter(hw); 5031 1.98 msaitoh if (hw->mac.type == ixgbe_mac_82599EB) 5032 1.98 msaitoh ixgbe_stop_mac_link_on_d3_82599(hw); 5033 1.98 msaitoh /* Turn off the laser - noop with no optics */ 5034 1.98 msaitoh ixgbe_disable_tx_laser(hw); 5035 1.1 dyoung 5036 1.98 msaitoh /* Update the stack */ 5037 1.333 msaitoh sc->link_up = FALSE; 5038 1.333 msaitoh ixgbe_update_link_status(sc); 5039 1.1 dyoung 5040 1.98 msaitoh /* reprogram the RAR[0] in case user changed it. */ 5041 1.333 msaitoh ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV); 5042 1.1 dyoung 5043 1.98 msaitoh return; 5044 1.252 msaitoh } /* ixgbe_stop_locked */ 5045 1.1 dyoung 5046 1.99 msaitoh /************************************************************************ 5047 1.99 msaitoh * ixgbe_update_link_status - Update OS on link state 5048 1.99 msaitoh * 5049 1.99 msaitoh * Note: Only updates the OS on the cached link state. 5050 1.186 msaitoh * The real check of the hardware only happens with 5051 1.186 msaitoh * a link interrupt. 5052 1.99 msaitoh ************************************************************************/ 5053 1.98 msaitoh static void 5054 1.333 msaitoh ixgbe_update_link_status(struct ixgbe_softc *sc) 5055 1.1 dyoung { 5056 1.333 msaitoh struct ifnet *ifp = sc->ifp; 5057 1.333 msaitoh device_t dev = sc->dev; 5058 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5059 1.98 msaitoh 5060 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx)); 5061 1.136 knakahar 5062 1.333 msaitoh if (sc->link_up) { 5063 1.333 msaitoh if (sc->link_active != LINK_STATE_UP) { 5064 1.138 knakahar /* 5065 1.138 knakahar * To eliminate influence of the previous state 5066 1.138 knakahar * in the same way as ixgbe_init_locked(). 5067 1.138 knakahar */ 5068 1.333 msaitoh struct ix_queue *que = sc->queues; 5069 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) 5070 1.138 knakahar que->eitr_setting = 0; 5071 1.138 knakahar 5072 1.344 msaitoh if (sc->link_speed == IXGBE_LINK_SPEED_10GB_FULL) { 5073 1.98 msaitoh /* 5074 1.98 msaitoh * Discard count for both MAC Local Fault and 5075 1.98 msaitoh * Remote Fault because those registers are 5076 1.98 msaitoh * valid only when the link speed is up and 5077 1.98 msaitoh * 10Gbps. 
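				 * (These statistics registers are
				 * clear-on-read, so the dummy reads below
				 * simply discard the stale counts.)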
5078 1.98 msaitoh */ 5079 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MLFC); 5080 1.98 msaitoh IXGBE_READ_REG(hw, IXGBE_MRFC); 5081 1.98 msaitoh } 5082 1.98 msaitoh 5083 1.98 msaitoh if (bootverbose) { 5084 1.98 msaitoh const char *bpsmsg; 5085 1.1 dyoung 5086 1.333 msaitoh switch (sc->link_speed) { 5087 1.98 msaitoh case IXGBE_LINK_SPEED_10GB_FULL: 5088 1.98 msaitoh bpsmsg = "10 Gbps"; 5089 1.98 msaitoh break; 5090 1.98 msaitoh case IXGBE_LINK_SPEED_5GB_FULL: 5091 1.98 msaitoh bpsmsg = "5 Gbps"; 5092 1.98 msaitoh break; 5093 1.98 msaitoh case IXGBE_LINK_SPEED_2_5GB_FULL: 5094 1.98 msaitoh bpsmsg = "2.5 Gbps"; 5095 1.98 msaitoh break; 5096 1.98 msaitoh case IXGBE_LINK_SPEED_1GB_FULL: 5097 1.98 msaitoh bpsmsg = "1 Gbps"; 5098 1.98 msaitoh break; 5099 1.98 msaitoh case IXGBE_LINK_SPEED_100_FULL: 5100 1.98 msaitoh bpsmsg = "100 Mbps"; 5101 1.98 msaitoh break; 5102 1.99 msaitoh case IXGBE_LINK_SPEED_10_FULL: 5103 1.99 msaitoh bpsmsg = "10 Mbps"; 5104 1.99 msaitoh break; 5105 1.98 msaitoh default: 5106 1.98 msaitoh bpsmsg = "unknown speed"; 5107 1.98 msaitoh break; 5108 1.98 msaitoh } 5109 1.98 msaitoh device_printf(dev, "Link is up %s %s \n", 5110 1.98 msaitoh bpsmsg, "Full Duplex"); 5111 1.98 msaitoh } 5112 1.333 msaitoh sc->link_active = LINK_STATE_UP; 5113 1.98 msaitoh /* Update any Flow Control changes */ 5114 1.333 msaitoh ixgbe_fc_enable(&sc->hw); 5115 1.98 msaitoh /* Update DMA coalescing config */ 5116 1.333 msaitoh ixgbe_config_dmac(sc); 5117 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_UP); 5118 1.144 msaitoh 5119 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV) 5120 1.333 msaitoh ixgbe_ping_all_vfs(sc); 5121 1.98 msaitoh } 5122 1.174 msaitoh } else { 5123 1.174 msaitoh /* 5124 1.174 msaitoh * Do it when link active changes to DOWN. i.e. 5125 1.174 msaitoh * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN 5126 1.186 msaitoh * b) LINK_STATE_UP -> LINK_STATE_DOWN 5127 1.174 msaitoh */ 5128 1.333 msaitoh if (sc->link_active != LINK_STATE_DOWN) { 5129 1.98 msaitoh if (bootverbose) 5130 1.98 msaitoh device_printf(dev, "Link is Down\n"); 5131 1.98 msaitoh if_link_state_change(ifp, LINK_STATE_DOWN); 5132 1.333 msaitoh sc->link_active = LINK_STATE_DOWN; 5133 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV) 5134 1.333 msaitoh ixgbe_ping_all_vfs(sc); 5135 1.333 msaitoh ixgbe_drain_all(sc); 5136 1.98 msaitoh } 5137 1.1 dyoung } 5138 1.99 msaitoh } /* ixgbe_update_link_status */ 5139 1.1 dyoung 5140 1.99 msaitoh /************************************************************************ 5141 1.99 msaitoh * ixgbe_config_dmac - Configure DMA Coalescing 5142 1.99 msaitoh ************************************************************************/ 5143 1.1 dyoung static void 5144 1.333 msaitoh ixgbe_config_dmac(struct ixgbe_softc *sc) 5145 1.1 dyoung { 5146 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5147 1.98 msaitoh struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 5148 1.1 dyoung 5149 1.99 msaitoh if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) 5150 1.98 msaitoh return; 5151 1.65 msaitoh 5152 1.333 msaitoh if (dcfg->watchdog_timer ^ sc->dmac || 5153 1.333 msaitoh dcfg->link_speed ^ sc->link_speed) { 5154 1.333 msaitoh dcfg->watchdog_timer = sc->dmac; 5155 1.98 msaitoh dcfg->fcoe_en = false; 5156 1.333 msaitoh dcfg->link_speed = sc->link_speed; 5157 1.98 msaitoh dcfg->num_tcs = 1; 5158 1.51 msaitoh 5159 1.98 msaitoh INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 5160 1.98 msaitoh dcfg->watchdog_timer, dcfg->link_speed); 5161 1.51 msaitoh 5162 1.98 msaitoh 
hw->mac.ops.dmac_config(hw); 5163 1.98 msaitoh } 5164 1.99 msaitoh } /* ixgbe_config_dmac */ 5165 1.51 msaitoh 5166 1.99 msaitoh /************************************************************************ 5167 1.99 msaitoh * ixgbe_enable_intr 5168 1.99 msaitoh ************************************************************************/ 5169 1.98 msaitoh static void 5170 1.333 msaitoh ixgbe_enable_intr(struct ixgbe_softc *sc) 5171 1.98 msaitoh { 5172 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5173 1.333 msaitoh struct ix_queue *que = sc->queues; 5174 1.98 msaitoh u32 mask, fwsm; 5175 1.51 msaitoh 5176 1.98 msaitoh mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 5177 1.45 msaitoh 5178 1.333 msaitoh switch (sc->hw.mac.type) { 5179 1.99 msaitoh case ixgbe_mac_82599EB: 5180 1.99 msaitoh mask |= IXGBE_EIMS_ECC; 5181 1.99 msaitoh /* Temperature sensor on some adapters */ 5182 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0; 5183 1.99 msaitoh /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 5184 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1; 5185 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP2; 5186 1.99 msaitoh break; 5187 1.99 msaitoh case ixgbe_mac_X540: 5188 1.99 msaitoh /* Detect if Thermal Sensor is enabled */ 5189 1.99 msaitoh fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 5190 1.99 msaitoh if (fwsm & IXGBE_FWSM_TS_ENABLED) 5191 1.98 msaitoh mask |= IXGBE_EIMS_TS; 5192 1.99 msaitoh mask |= IXGBE_EIMS_ECC; 5193 1.99 msaitoh break; 5194 1.99 msaitoh case ixgbe_mac_X550: 5195 1.99 msaitoh /* MAC thermal sensor is automatically enabled */ 5196 1.99 msaitoh mask |= IXGBE_EIMS_TS; 5197 1.99 msaitoh mask |= IXGBE_EIMS_ECC; 5198 1.99 msaitoh break; 5199 1.99 msaitoh case ixgbe_mac_X550EM_x: 5200 1.99 msaitoh case ixgbe_mac_X550EM_a: 5201 1.99 msaitoh /* Some devices use SDP0 for important information */ 5202 1.99 msaitoh if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 5203 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 5204 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 5205 1.99 msaitoh hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 5206 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 5207 1.99 msaitoh if (hw->phy.type == ixgbe_phy_x550em_ext_t) 5208 1.99 msaitoh mask |= IXGBE_EICR_GPI_SDP0_X540; 5209 1.99 msaitoh mask |= IXGBE_EIMS_ECC; 5210 1.99 msaitoh break; 5211 1.99 msaitoh default: 5212 1.99 msaitoh break; 5213 1.1 dyoung } 5214 1.51 msaitoh 5215 1.99 msaitoh /* Enable Fan Failure detection */ 5216 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) 5217 1.99 msaitoh mask |= IXGBE_EIMS_GPI_SDP1; 5218 1.99 msaitoh /* Enable SR-IOV */ 5219 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_SRIOV) 5220 1.99 msaitoh mask |= IXGBE_EIMS_MAILBOX; 5221 1.99 msaitoh /* Enable Flow Director */ 5222 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_FDIR) 5223 1.99 msaitoh mask |= IXGBE_EIMS_FLOW_DIR; 5224 1.99 msaitoh 5225 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 5226 1.64 msaitoh 5227 1.98 msaitoh /* With MSI-X we use auto clear */ 5228 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0) { 5229 1.270 msaitoh /* 5230 1.309 msaitoh * We use auto clear for RTX_QUEUE only. Don't use other 5231 1.309 msaitoh * interrupts (e.g. link interrupt). BTW, we don't use 5232 1.309 msaitoh * TCP_TIMER interrupt itself. 
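		 * Bits set in EIAC are cleared from EICR automatically when
		 * the corresponding MSI-X vector fires; all other causes
		 * stay set until they are explicitly cleared (e.g. by the
		 * EICR read in the admin interrupt path).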
5233 1.270 msaitoh */ 5234 1.270 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIAC, IXGBE_EIMS_RTX_QUEUE); 5235 1.98 msaitoh } 5236 1.1 dyoung 5237 1.98 msaitoh /* 5238 1.99 msaitoh * Now enable all queues, this is done separately to 5239 1.99 msaitoh * allow for handling the extended (beyond 32) MSI-X 5240 1.99 msaitoh * vectors that can be used by 82599 5241 1.99 msaitoh */ 5242 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) 5243 1.333 msaitoh ixgbe_enable_queue(sc, que->msix); 5244 1.1 dyoung 5245 1.98 msaitoh IXGBE_WRITE_FLUSH(hw); 5246 1.43 msaitoh 5247 1.99 msaitoh } /* ixgbe_enable_intr */ 5248 1.1 dyoung 5249 1.99 msaitoh /************************************************************************ 5250 1.139 knakahar * ixgbe_disable_intr_internal 5251 1.99 msaitoh ************************************************************************/ 5252 1.44 msaitoh static void 5253 1.333 msaitoh ixgbe_disable_intr_internal(struct ixgbe_softc *sc, bool nestok) 5254 1.44 msaitoh { 5255 1.333 msaitoh struct ix_queue *que = sc->queues; 5256 1.127 knakahar 5257 1.127 knakahar /* disable interrupts other than queues */ 5258 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE); 5259 1.127 knakahar 5260 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_MSIX) != 0) 5261 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0); 5262 1.127 knakahar 5263 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) 5264 1.333 msaitoh ixgbe_disable_queue_internal(sc, que->msix, nestok); 5265 1.127 knakahar 5266 1.333 msaitoh IXGBE_WRITE_FLUSH(&sc->hw); 5267 1.99 msaitoh 5268 1.139 knakahar } /* ixgbe_do_disable_intr_internal */ 5269 1.139 knakahar 5270 1.139 knakahar /************************************************************************ 5271 1.139 knakahar * ixgbe_disable_intr 5272 1.139 knakahar ************************************************************************/ 5273 1.139 knakahar static void 5274 1.333 msaitoh ixgbe_disable_intr(struct ixgbe_softc *sc) 5275 1.139 knakahar { 5276 1.139 knakahar 5277 1.333 msaitoh ixgbe_disable_intr_internal(sc, true); 5278 1.99 msaitoh } /* ixgbe_disable_intr */ 5279 1.98 msaitoh 5280 1.99 msaitoh /************************************************************************ 5281 1.139 knakahar * ixgbe_ensure_disabled_intr 5282 1.139 knakahar ************************************************************************/ 5283 1.139 knakahar void 5284 1.333 msaitoh ixgbe_ensure_disabled_intr(struct ixgbe_softc *sc) 5285 1.139 knakahar { 5286 1.139 knakahar 5287 1.333 msaitoh ixgbe_disable_intr_internal(sc, false); 5288 1.139 knakahar } /* ixgbe_ensure_disabled_intr */ 5289 1.139 knakahar 5290 1.139 knakahar /************************************************************************ 5291 1.99 msaitoh * ixgbe_legacy_irq - Legacy Interrupt Service routine 5292 1.99 msaitoh ************************************************************************/ 5293 1.98 msaitoh static int 5294 1.98 msaitoh ixgbe_legacy_irq(void *arg) 5295 1.1 dyoung { 5296 1.98 msaitoh struct ix_queue *que = arg; 5297 1.333 msaitoh struct ixgbe_softc *sc = que->sc; 5298 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5299 1.333 msaitoh struct ifnet *ifp = sc->ifp; 5300 1.341 msaitoh struct tx_ring *txr = sc->tx_rings; 5301 1.277 msaitoh u32 eicr; 5302 1.269 msaitoh u32 eims_orig; 5303 1.273 msaitoh u32 eims_enable = 0; 5304 1.273 msaitoh u32 eims_disable = 0; 5305 1.98 msaitoh 5306 1.269 msaitoh eims_orig = IXGBE_READ_REG(hw, IXGBE_EIMS); 5307 1.269 msaitoh /* 5308 1.269 msaitoh * Silicon errata 
#26 on 82598. Disable all interrupts before reading 5309 1.269 msaitoh * EICR. 5310 1.269 msaitoh */ 5311 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 5312 1.98 msaitoh 5313 1.268 msaitoh /* Read and clear EICR */ 5314 1.99 msaitoh eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 5315 1.44 msaitoh 5316 1.99 msaitoh if (eicr == 0) { 5317 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.intzero, 1); 5318 1.269 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig); 5319 1.98 msaitoh return 0; 5320 1.98 msaitoh } 5321 1.333 msaitoh IXGBE_EVC_ADD(&sc->stats.pf.legint, 1); 5322 1.44 msaitoh 5323 1.272 msaitoh /* Queue (0) intr */ 5324 1.308 msaitoh if (((ifp->if_flags & IFF_RUNNING) != 0) && 5325 1.308 msaitoh (eicr & IXGBE_EIMC_RTX_QUEUE) != 0) { 5326 1.305 msaitoh IXGBE_EVC_ADD(&que->irqs, 1); 5327 1.272 msaitoh 5328 1.147 knakahar /* 5329 1.265 msaitoh * The same as ixgbe_msix_que() about 5330 1.265 msaitoh * "que->txrx_use_workqueue". 5331 1.147 knakahar */ 5332 1.333 msaitoh que->txrx_use_workqueue = sc->txrx_use_workqueue; 5333 1.147 knakahar 5334 1.98 msaitoh IXGBE_TX_LOCK(txr); 5335 1.98 msaitoh ixgbe_txeof(txr); 5336 1.99 msaitoh #ifdef notyet 5337 1.99 msaitoh if (!ixgbe_ring_empty(ifp, txr->br)) 5338 1.99 msaitoh ixgbe_start_locked(ifp, txr); 5339 1.99 msaitoh #endif 5340 1.98 msaitoh IXGBE_TX_UNLOCK(txr); 5341 1.271 msaitoh 5342 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1); 5343 1.333 msaitoh ixgbe_sched_handle_que(sc, que); 5344 1.273 msaitoh /* Disable queue 0 interrupt */ 5345 1.273 msaitoh eims_disable |= 1UL << 0; 5346 1.273 msaitoh } else 5347 1.317 msaitoh eims_enable |= eims_orig & IXGBE_EIMC_RTX_QUEUE; 5348 1.44 msaitoh 5349 1.333 msaitoh ixgbe_intr_admin_common(sc, eicr, &eims_disable); 5350 1.233 msaitoh 5351 1.273 msaitoh /* Re-enable some interrupts */ 5352 1.273 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, 5353 1.273 msaitoh (eims_orig & ~eims_disable) | eims_enable); 5354 1.99 msaitoh 5355 1.98 msaitoh return 1; 5356 1.99 msaitoh } /* ixgbe_legacy_irq */ 5357 1.98 msaitoh 5358 1.99 msaitoh /************************************************************************ 5359 1.119 msaitoh * ixgbe_free_pciintr_resources 5360 1.99 msaitoh ************************************************************************/ 5361 1.98 msaitoh static void 5362 1.333 msaitoh ixgbe_free_pciintr_resources(struct ixgbe_softc *sc) 5363 1.44 msaitoh { 5364 1.333 msaitoh struct ix_queue *que = sc->queues; 5365 1.98 msaitoh int rid; 5366 1.44 msaitoh 5367 1.98 msaitoh /* 5368 1.99 msaitoh * Release all msix queue resources: 5369 1.99 msaitoh */ 5370 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, que++) { 5371 1.119 msaitoh if (que->res != NULL) { 5372 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[i]); 5373 1.333 msaitoh sc->osdep.ihs[i] = NULL; 5374 1.119 msaitoh } 5375 1.58 msaitoh } 5376 1.58 msaitoh 5377 1.98 msaitoh /* Clean the Legacy or Link interrupt last */ 5378 1.333 msaitoh if (sc->vector) /* we are doing MSIX */ 5379 1.333 msaitoh rid = sc->vector; 5380 1.98 msaitoh else 5381 1.98 msaitoh rid = 0; 5382 1.44 msaitoh 5383 1.333 msaitoh if (sc->osdep.ihs[rid] != NULL) { 5384 1.333 msaitoh pci_intr_disestablish(sc->osdep.pc, sc->osdep.ihs[rid]); 5385 1.333 msaitoh sc->osdep.ihs[rid] = NULL; 5386 1.98 msaitoh } 5387 1.44 msaitoh 5388 1.333 msaitoh if (sc->osdep.intrs != NULL) { 5389 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 5390 1.333 msaitoh sc->osdep.nintrs); 5391 1.333 msaitoh sc->osdep.intrs = NULL; 5392 1.119 msaitoh } 5393 1.119 msaitoh } /* 
ixgbe_free_pciintr_resources */ 5394 1.119 msaitoh 5395 1.119 msaitoh /************************************************************************ 5396 1.119 msaitoh * ixgbe_free_pci_resources 5397 1.119 msaitoh ************************************************************************/ 5398 1.119 msaitoh static void 5399 1.333 msaitoh ixgbe_free_pci_resources(struct ixgbe_softc *sc) 5400 1.119 msaitoh { 5401 1.119 msaitoh 5402 1.333 msaitoh ixgbe_free_pciintr_resources(sc); 5403 1.44 msaitoh 5404 1.333 msaitoh if (sc->osdep.mem_size != 0) { 5405 1.333 msaitoh bus_space_unmap(sc->osdep.mem_bus_space_tag, 5406 1.333 msaitoh sc->osdep.mem_bus_space_handle, 5407 1.333 msaitoh sc->osdep.mem_size); 5408 1.44 msaitoh } 5409 1.99 msaitoh } /* ixgbe_free_pci_resources */ 5410 1.44 msaitoh 5411 1.99 msaitoh /************************************************************************ 5412 1.99 msaitoh * ixgbe_sysctl_flowcntl 5413 1.99 msaitoh * 5414 1.99 msaitoh * SYSCTL wrapper around setting Flow Control 5415 1.99 msaitoh ************************************************************************/ 5416 1.98 msaitoh static int 5417 1.98 msaitoh ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS) 5418 1.98 msaitoh { 5419 1.98 msaitoh struct sysctlnode node = *rnode; 5420 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 5421 1.99 msaitoh int error, fc; 5422 1.82 msaitoh 5423 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 5424 1.169 msaitoh return (EPERM); 5425 1.169 msaitoh 5426 1.333 msaitoh fc = sc->hw.fc.current_mode; 5427 1.98 msaitoh node.sysctl_data = &fc; 5428 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5429 1.98 msaitoh if (error != 0 || newp == NULL) 5430 1.98 msaitoh return error; 5431 1.82 msaitoh 5432 1.98 msaitoh /* Don't bother if it's not changed */ 5433 1.333 msaitoh if (fc == sc->hw.fc.current_mode) 5434 1.98 msaitoh return (0); 5435 1.83 msaitoh 5436 1.333 msaitoh return ixgbe_set_flowcntl(sc, fc); 5437 1.99 msaitoh } /* ixgbe_sysctl_flowcntl */ 5438 1.1 dyoung 5439 1.99 msaitoh /************************************************************************ 5440 1.99 msaitoh * ixgbe_set_flowcntl - Set flow control 5441 1.99 msaitoh * 5442 1.99 msaitoh * Flow control values: 5443 1.99 msaitoh * 0 - off 5444 1.99 msaitoh * 1 - rx pause 5445 1.99 msaitoh * 2 - tx pause 5446 1.99 msaitoh * 3 - full 5447 1.99 msaitoh ************************************************************************/ 5448 1.98 msaitoh static int 5449 1.333 msaitoh ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc) 5450 1.98 msaitoh { 5451 1.98 msaitoh switch (fc) { 5452 1.98 msaitoh case ixgbe_fc_rx_pause: 5453 1.98 msaitoh case ixgbe_fc_tx_pause: 5454 1.98 msaitoh case ixgbe_fc_full: 5455 1.333 msaitoh sc->hw.fc.requested_mode = fc; 5456 1.333 msaitoh if (sc->num_queues > 1) 5457 1.333 msaitoh ixgbe_disable_rx_drop(sc); 5458 1.98 msaitoh break; 5459 1.98 msaitoh case ixgbe_fc_none: 5460 1.333 msaitoh sc->hw.fc.requested_mode = ixgbe_fc_none; 5461 1.333 msaitoh if (sc->num_queues > 1) 5462 1.333 msaitoh ixgbe_enable_rx_drop(sc); 5463 1.98 msaitoh break; 5464 1.98 msaitoh default: 5465 1.98 msaitoh return (EINVAL); 5466 1.1 dyoung } 5467 1.99 msaitoh 5468 1.98 msaitoh #if 0 /* XXX NetBSD */ 5469 1.98 msaitoh /* Don't autoneg if forcing a value */ 5470 1.333 msaitoh sc->hw.fc.disable_fc_autoneg = TRUE; 5471 1.98 msaitoh #endif 5472 1.333 msaitoh ixgbe_fc_enable(&sc->hw); 5473 1.99 msaitoh 5474 1.98 msaitoh return (0); 5475 1.99 msaitoh } /* ixgbe_set_flowcntl */ 5476 1.1 dyoung 5477 1.99 msaitoh 
/************************************************************************ 5478 1.99 msaitoh * ixgbe_enable_rx_drop 5479 1.99 msaitoh * 5480 1.99 msaitoh * Enable the hardware to drop packets when the buffer is 5481 1.99 msaitoh * full. This is useful with multiqueue, so that no single 5482 1.99 msaitoh * queue being full stalls the entire RX engine. We only 5483 1.99 msaitoh * enable this when Multiqueue is enabled AND Flow Control 5484 1.99 msaitoh * is disabled. 5485 1.99 msaitoh ************************************************************************/ 5486 1.98 msaitoh static void 5487 1.333 msaitoh ixgbe_enable_rx_drop(struct ixgbe_softc *sc) 5488 1.98 msaitoh { 5489 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5490 1.186 msaitoh struct rx_ring *rxr; 5491 1.186 msaitoh u32 srrctl; 5492 1.1 dyoung 5493 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) { 5494 1.333 msaitoh rxr = &sc->rx_rings[i]; 5495 1.99 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 5496 1.99 msaitoh srrctl |= IXGBE_SRRCTL_DROP_EN; 5497 1.99 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 5498 1.98 msaitoh } 5499 1.99 msaitoh 5500 1.98 msaitoh /* enable drop for each vf */ 5501 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) { 5502 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE, 5503 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 5504 1.98 msaitoh IXGBE_QDE_ENABLE)); 5505 1.98 msaitoh } 5506 1.99 msaitoh } /* ixgbe_enable_rx_drop */ 5507 1.43 msaitoh 5508 1.99 msaitoh /************************************************************************ 5509 1.99 msaitoh * ixgbe_disable_rx_drop 5510 1.99 msaitoh ************************************************************************/ 5511 1.98 msaitoh static void 5512 1.333 msaitoh ixgbe_disable_rx_drop(struct ixgbe_softc *sc) 5513 1.98 msaitoh { 5514 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5515 1.186 msaitoh struct rx_ring *rxr; 5516 1.186 msaitoh u32 srrctl; 5517 1.43 msaitoh 5518 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++) { 5519 1.333 msaitoh rxr = &sc->rx_rings[i]; 5520 1.186 msaitoh srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 5521 1.186 msaitoh srrctl &= ~IXGBE_SRRCTL_DROP_EN; 5522 1.186 msaitoh IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 5523 1.98 msaitoh } 5524 1.99 msaitoh 5525 1.98 msaitoh /* disable drop for each vf */ 5526 1.333 msaitoh for (int i = 0; i < sc->num_vfs; i++) { 5527 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_QDE, 5528 1.98 msaitoh (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 5529 1.1 dyoung } 5530 1.99 msaitoh } /* ixgbe_disable_rx_drop */ 5531 1.98 msaitoh 5532 1.99 msaitoh /************************************************************************ 5533 1.99 msaitoh * ixgbe_sysctl_advertise 5534 1.99 msaitoh * 5535 1.99 msaitoh * SYSCTL wrapper around setting advertised speed 5536 1.99 msaitoh ************************************************************************/ 5537 1.98 msaitoh static int 5538 1.98 msaitoh ixgbe_sysctl_advertise(SYSCTLFN_ARGS) 5539 1.98 msaitoh { 5540 1.99 msaitoh struct sysctlnode node = *rnode; 5541 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 5542 1.186 msaitoh int error = 0, advertise; 5543 1.1 dyoung 5544 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 5545 1.169 msaitoh return (EPERM); 5546 1.169 msaitoh 5547 1.333 msaitoh advertise = sc->advertise; 5548 1.98 msaitoh node.sysctl_data = &advertise; 5549 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5550 1.98 msaitoh if (error != 0 || newp == NULL) 5551 
1.98 msaitoh return error; 5552 1.28 msaitoh 5553 1.333 msaitoh return ixgbe_set_advertise(sc, advertise); 5554 1.99 msaitoh } /* ixgbe_sysctl_advertise */ 5555 1.1 dyoung 5556 1.99 msaitoh /************************************************************************ 5557 1.99 msaitoh * ixgbe_set_advertise - Control advertised link speed 5558 1.99 msaitoh * 5559 1.99 msaitoh * Flags: 5560 1.103 msaitoh * 0x00 - Default (all capable link speed) 5561 1.296 msaitoh * 0x1 - advertise 100 Mb 5562 1.296 msaitoh * 0x2 - advertise 1G 5563 1.296 msaitoh * 0x4 - advertise 10G 5564 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb) 5565 1.103 msaitoh * 0x10 - advertise 2.5G 5566 1.103 msaitoh * 0x20 - advertise 5G 5567 1.99 msaitoh ************************************************************************/ 5568 1.98 msaitoh static int 5569 1.333 msaitoh ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise) 5570 1.1 dyoung { 5571 1.186 msaitoh device_t dev; 5572 1.186 msaitoh struct ixgbe_hw *hw; 5573 1.99 msaitoh ixgbe_link_speed speed = 0; 5574 1.99 msaitoh ixgbe_link_speed link_caps = 0; 5575 1.186 msaitoh s32 err = IXGBE_NOT_IMPLEMENTED; 5576 1.186 msaitoh bool negotiate = FALSE; 5577 1.98 msaitoh 5578 1.98 msaitoh /* Checks to validate new value */ 5579 1.333 msaitoh if (sc->advertise == advertise) /* no change */ 5580 1.98 msaitoh return (0); 5581 1.98 msaitoh 5582 1.333 msaitoh dev = sc->dev; 5583 1.333 msaitoh hw = &sc->hw; 5584 1.98 msaitoh 5585 1.98 msaitoh /* No speed changes for backplane media */ 5586 1.98 msaitoh if (hw->phy.media_type == ixgbe_media_type_backplane) 5587 1.98 msaitoh return (ENODEV); 5588 1.98 msaitoh 5589 1.98 msaitoh if (!((hw->phy.media_type == ixgbe_media_type_copper) || 5590 1.98 msaitoh (hw->phy.multispeed_fiber))) { 5591 1.98 msaitoh device_printf(dev, 5592 1.98 msaitoh "Advertised speed can only be set on copper or " 5593 1.98 msaitoh "multispeed fiber media types.\n"); 5594 1.98 msaitoh return (EINVAL); 5595 1.98 msaitoh } 5596 1.98 msaitoh 5597 1.259 msaitoh if (advertise < 0x0 || advertise > 0x3f) { 5598 1.319 msaitoh device_printf(dev, "Invalid advertised speed; " 5599 1.319 msaitoh "valid modes are 0x0 through 0x3f\n"); 5600 1.98 msaitoh return (EINVAL); 5601 1.98 msaitoh } 5602 1.1 dyoung 5603 1.99 msaitoh if (hw->mac.ops.get_link_capabilities) { 5604 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 5605 1.99 msaitoh &negotiate); 5606 1.99 msaitoh if (err != IXGBE_SUCCESS) { 5607 1.319 msaitoh device_printf(dev, "Unable to determine supported " 5608 1.319 msaitoh "advertise speeds\n"); 5609 1.99 msaitoh return (ENODEV); 5610 1.99 msaitoh } 5611 1.99 msaitoh } 5612 1.99 msaitoh 5613 1.98 msaitoh /* Set new value and report new advertised mode */ 5614 1.99 msaitoh if (advertise & 0x1) { 5615 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { 5616 1.319 msaitoh device_printf(dev, "Interface does not support 100Mb " 5617 1.319 msaitoh "advertised speed\n"); 5618 1.98 msaitoh return (EINVAL); 5619 1.98 msaitoh } 5620 1.98 msaitoh speed |= IXGBE_LINK_SPEED_100_FULL; 5621 1.99 msaitoh } 5622 1.99 msaitoh if (advertise & 0x2) { 5623 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 5624 1.319 msaitoh device_printf(dev, "Interface does not support 1Gb " 5625 1.319 msaitoh "advertised speed\n"); 5626 1.99 msaitoh return (EINVAL); 5627 1.99 msaitoh } 5628 1.98 msaitoh speed |= IXGBE_LINK_SPEED_1GB_FULL; 5629 1.99 msaitoh } 5630 1.99 msaitoh if (advertise & 0x4) { 5631 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 
5632 1.319 msaitoh device_printf(dev, "Interface does not support 10Gb " 5633 1.319 msaitoh "advertised speed\n"); 5634 1.99 msaitoh return (EINVAL); 5635 1.99 msaitoh } 5636 1.98 msaitoh speed |= IXGBE_LINK_SPEED_10GB_FULL; 5637 1.99 msaitoh } 5638 1.99 msaitoh if (advertise & 0x8) { 5639 1.99 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 5640 1.319 msaitoh device_printf(dev, "Interface does not support 10Mb " 5641 1.319 msaitoh "advertised speed\n"); 5642 1.99 msaitoh return (EINVAL); 5643 1.99 msaitoh } 5644 1.99 msaitoh speed |= IXGBE_LINK_SPEED_10_FULL; 5645 1.99 msaitoh } 5646 1.103 msaitoh if (advertise & 0x10) { 5647 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) { 5648 1.319 msaitoh device_printf(dev, "Interface does not support 2.5Gb " 5649 1.319 msaitoh "advertised speed\n"); 5650 1.103 msaitoh return (EINVAL); 5651 1.103 msaitoh } 5652 1.103 msaitoh speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 5653 1.103 msaitoh } 5654 1.103 msaitoh if (advertise & 0x20) { 5655 1.103 msaitoh if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) { 5656 1.319 msaitoh device_printf(dev, "Interface does not support 5Gb " 5657 1.319 msaitoh "advertised speed\n"); 5658 1.103 msaitoh return (EINVAL); 5659 1.103 msaitoh } 5660 1.103 msaitoh speed |= IXGBE_LINK_SPEED_5GB_FULL; 5661 1.103 msaitoh } 5662 1.99 msaitoh if (advertise == 0) 5663 1.99 msaitoh speed = link_caps; /* All capable link speed */ 5664 1.1 dyoung 5665 1.98 msaitoh hw->mac.autotry_restart = TRUE; 5666 1.98 msaitoh hw->mac.ops.setup_link(hw, speed, TRUE); 5667 1.333 msaitoh sc->advertise = advertise; 5668 1.1 dyoung 5669 1.99 msaitoh return (0); 5670 1.99 msaitoh } /* ixgbe_set_advertise */ 5671 1.1 dyoung 5672 1.99 msaitoh /************************************************************************ 5673 1.296 msaitoh * ixgbe_get_default_advertise - Get default advertised speed settings 5674 1.99 msaitoh * 5675 1.99 msaitoh * Formatted for sysctl usage. 5676 1.99 msaitoh * Flags: 5677 1.296 msaitoh * 0x1 - advertise 100 Mb 5678 1.296 msaitoh * 0x2 - advertise 1G 5679 1.296 msaitoh * 0x4 - advertise 10G 5680 1.296 msaitoh * 0x8 - advertise 10 Mb (yes, Mb) 5681 1.103 msaitoh * 0x10 - advertise 2.5G 5682 1.103 msaitoh * 0x20 - advertise 5G 5683 1.99 msaitoh ************************************************************************/ 5684 1.98 msaitoh static int 5685 1.333 msaitoh ixgbe_get_default_advertise(struct ixgbe_softc *sc) 5686 1.1 dyoung { 5687 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5688 1.186 msaitoh int speed; 5689 1.99 msaitoh ixgbe_link_speed link_caps = 0; 5690 1.186 msaitoh s32 err; 5691 1.186 msaitoh bool negotiate = FALSE; 5692 1.98 msaitoh 5693 1.99 msaitoh /* 5694 1.99 msaitoh * Advertised speed means nothing unless it's copper or 5695 1.99 msaitoh * multi-speed fiber 5696 1.99 msaitoh */ 5697 1.99 msaitoh if (!(hw->phy.media_type == ixgbe_media_type_copper) && 5698 1.99 msaitoh !(hw->phy.multispeed_fiber)) 5699 1.99 msaitoh return (0); 5700 1.1 dyoung 5701 1.99 msaitoh err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 5702 1.99 msaitoh if (err != IXGBE_SUCCESS) 5703 1.99 msaitoh return (0); 5704 1.1 dyoung 5705 1.99 msaitoh speed = 5706 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x4 : 0) | 5707 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0) | 5708 1.103 msaitoh ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 5709 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x2 : 0) | 5710 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 
0x1 : 0) | 5711 1.296 msaitoh ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x8 : 0); 5712 1.99 msaitoh 5713 1.99 msaitoh return speed; 5714 1.296 msaitoh } /* ixgbe_get_default_advertise */ 5715 1.99 msaitoh 5716 1.99 msaitoh /************************************************************************ 5717 1.99 msaitoh * ixgbe_sysctl_dmac - Manage DMA Coalescing 5718 1.99 msaitoh * 5719 1.99 msaitoh * Control values: 5720 1.99 msaitoh * 0/1 - off / on (use default value of 1000) 5721 1.99 msaitoh * 5722 1.99 msaitoh * Legal timer values are: 5723 1.99 msaitoh * 50,100,250,500,1000,2000,5000,10000 5724 1.99 msaitoh * 5725 1.99 msaitoh * Turning off interrupt moderation will also turn this off. 5726 1.99 msaitoh ************************************************************************/ 5727 1.1 dyoung static int 5728 1.98 msaitoh ixgbe_sysctl_dmac(SYSCTLFN_ARGS) 5729 1.1 dyoung { 5730 1.44 msaitoh struct sysctlnode node = *rnode; 5731 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 5732 1.333 msaitoh struct ifnet *ifp = sc->ifp; 5733 1.186 msaitoh int error; 5734 1.186 msaitoh int newval; 5735 1.1 dyoung 5736 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 5737 1.169 msaitoh return (EPERM); 5738 1.169 msaitoh 5739 1.333 msaitoh newval = sc->dmac; 5740 1.98 msaitoh node.sysctl_data = &newval; 5741 1.22 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5742 1.98 msaitoh if ((error) || (newp == NULL)) 5743 1.98 msaitoh return (error); 5744 1.98 msaitoh 5745 1.98 msaitoh switch (newval) { 5746 1.98 msaitoh case 0: 5747 1.98 msaitoh /* Disabled */ 5748 1.333 msaitoh sc->dmac = 0; 5749 1.98 msaitoh break; 5750 1.98 msaitoh case 1: 5751 1.98 msaitoh /* Enable and use default */ 5752 1.333 msaitoh sc->dmac = 1000; 5753 1.98 msaitoh break; 5754 1.98 msaitoh case 50: 5755 1.98 msaitoh case 100: 5756 1.98 msaitoh case 250: 5757 1.98 msaitoh case 500: 5758 1.98 msaitoh case 1000: 5759 1.98 msaitoh case 2000: 5760 1.98 msaitoh case 5000: 5761 1.98 msaitoh case 10000: 5762 1.98 msaitoh /* Legal values - allow */ 5763 1.333 msaitoh sc->dmac = newval; 5764 1.98 msaitoh break; 5765 1.98 msaitoh default: 5766 1.98 msaitoh /* Do nothing, illegal value */ 5767 1.98 msaitoh return (EINVAL); 5768 1.22 msaitoh } 5769 1.1 dyoung 5770 1.98 msaitoh /* Re-initialize hardware if it's already running */ 5771 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING) 5772 1.302 riastrad if_init(ifp); 5773 1.1 dyoung 5774 1.98 msaitoh return (0); 5775 1.1 dyoung } 5776 1.1 dyoung 5777 1.98 msaitoh #ifdef IXGBE_DEBUG 5778 1.99 msaitoh /************************************************************************ 5779 1.99 msaitoh * ixgbe_sysctl_power_state 5780 1.99 msaitoh * 5781 1.99 msaitoh * Sysctl to test power states 5782 1.99 msaitoh * Values: 5783 1.99 msaitoh * 0 - set device to D0 5784 1.99 msaitoh * 3 - set device to D3 5785 1.99 msaitoh * (none) - get current device power state 5786 1.99 msaitoh ************************************************************************/ 5787 1.98 msaitoh static int 5788 1.98 msaitoh ixgbe_sysctl_power_state(SYSCTLFN_ARGS) 5789 1.44 msaitoh { 5790 1.99 msaitoh #ifdef notyet 5791 1.98 msaitoh struct sysctlnode node = *rnode; 5792 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 5793 1.333 msaitoh device_t dev = sc->dev; 5794 1.186 msaitoh int curr_ps, new_ps, error = 0; 5795 1.44 msaitoh 5796 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 5797 1.169 msaitoh return (EPERM); 5798 1.169 msaitoh 5799 1.98 msaitoh curr_ps = new_ps = 
pci_get_powerstate(dev); 5800 1.44 msaitoh 5801 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5802 1.98 msaitoh if ((error) || (req->newp == NULL)) 5803 1.98 msaitoh return (error); 5804 1.44 msaitoh 5805 1.98 msaitoh if (new_ps == curr_ps) 5806 1.98 msaitoh return (0); 5807 1.44 msaitoh 5808 1.98 msaitoh if (new_ps == 3 && curr_ps == 0) 5809 1.98 msaitoh error = DEVICE_SUSPEND(dev); 5810 1.98 msaitoh else if (new_ps == 0 && curr_ps == 3) 5811 1.98 msaitoh error = DEVICE_RESUME(dev); 5812 1.98 msaitoh else 5813 1.98 msaitoh return (EINVAL); 5814 1.44 msaitoh 5815 1.98 msaitoh device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 5816 1.44 msaitoh 5817 1.98 msaitoh return (error); 5818 1.98 msaitoh #else 5819 1.98 msaitoh return 0; 5820 1.98 msaitoh #endif 5821 1.99 msaitoh } /* ixgbe_sysctl_power_state */ 5822 1.98 msaitoh #endif 5823 1.99 msaitoh 5824 1.99 msaitoh /************************************************************************ 5825 1.99 msaitoh * ixgbe_sysctl_wol_enable 5826 1.99 msaitoh * 5827 1.99 msaitoh * Sysctl to enable/disable the WoL capability, 5828 1.99 msaitoh * if supported by the adapter. 5829 1.99 msaitoh * 5830 1.99 msaitoh * Values: 5831 1.99 msaitoh * 0 - disabled 5832 1.99 msaitoh * 1 - enabled 5833 1.99 msaitoh ************************************************************************/ 5834 1.98 msaitoh static int 5835 1.98 msaitoh ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS) 5836 1.98 msaitoh { 5837 1.98 msaitoh struct sysctlnode node = *rnode; 5838 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 5839 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5840 1.186 msaitoh bool new_wol_enabled; 5841 1.186 msaitoh int error = 0; 5842 1.44 msaitoh 5843 1.169 msaitoh /* 5844 1.169 msaitoh * It's not required to check recovery mode because this function never 5845 1.169 msaitoh * touches hardware. 5846 1.169 msaitoh */ 5847 1.98 msaitoh new_wol_enabled = hw->wol_enabled; 5848 1.98 msaitoh node.sysctl_data = &new_wol_enabled; 5849 1.98 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5850 1.98 msaitoh if ((error) || (newp == NULL)) 5851 1.98 msaitoh return (error); 5852 1.98 msaitoh if (new_wol_enabled == hw->wol_enabled) 5853 1.98 msaitoh return (0); 5854 1.44 msaitoh 5855 1.333 msaitoh if (new_wol_enabled && !sc->wol_support) 5856 1.98 msaitoh return (ENODEV); 5857 1.98 msaitoh else 5858 1.98 msaitoh hw->wol_enabled = new_wol_enabled; 5859 1.44 msaitoh 5860 1.98 msaitoh return (0); 5861 1.99 msaitoh } /* ixgbe_sysctl_wol_enable */ 5862 1.48 msaitoh 5863 1.99 msaitoh /************************************************************************ 5864 1.99 msaitoh * ixgbe_sysctl_wufc - Wake Up Filter Control 5865 1.99 msaitoh * 5866 1.99 msaitoh * Sysctl to enable/disable the types of packets that the 5867 1.99 msaitoh * adapter will wake up on upon receipt. 5868 1.99 msaitoh * Flags: 5869 1.99 msaitoh * 0x1 - Link Status Change 5870 1.99 msaitoh * 0x2 - Magic Packet 5871 1.99 msaitoh * 0x4 - Direct Exact 5872 1.99 msaitoh * 0x8 - Directed Multicast 5873 1.99 msaitoh * 0x10 - Broadcast 5874 1.99 msaitoh * 0x20 - ARP/IPv4 Request Packet 5875 1.99 msaitoh * 0x40 - Direct IPv4 Packet 5876 1.99 msaitoh * 0x80 - Direct IPv6 Packet 5877 1.98 msaitoh * 5878 1.99 msaitoh * Settings not listed above will cause the sysctl to return an error. 
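 *   Example: a value of 0x12 (Magic Packet | Broadcast) wakes the
 *   system on either of those packet types.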
5879 1.99 msaitoh ************************************************************************/ 5880 1.1 dyoung static int 5881 1.98 msaitoh ixgbe_sysctl_wufc(SYSCTLFN_ARGS) 5882 1.1 dyoung { 5883 1.98 msaitoh struct sysctlnode node = *rnode; 5884 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 5885 1.98 msaitoh int error = 0; 5886 1.98 msaitoh u32 new_wufc; 5887 1.52 msaitoh 5888 1.169 msaitoh /* 5889 1.169 msaitoh * It's not required to check recovery mode because this function never 5890 1.169 msaitoh * touches hardware. 5891 1.169 msaitoh */ 5892 1.333 msaitoh new_wufc = sc->wufc; 5893 1.98 msaitoh node.sysctl_data = &new_wufc; 5894 1.52 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5895 1.98 msaitoh if ((error) || (newp == NULL)) 5896 1.98 msaitoh return (error); 5897 1.333 msaitoh if (new_wufc == sc->wufc) 5898 1.98 msaitoh return (0); 5899 1.98 msaitoh 5900 1.98 msaitoh if (new_wufc & 0xffffff00) 5901 1.98 msaitoh return (EINVAL); 5902 1.99 msaitoh 5903 1.99 msaitoh new_wufc &= 0xff; 5904 1.333 msaitoh new_wufc |= (0xffffff & sc->wufc); 5905 1.333 msaitoh sc->wufc = new_wufc; 5906 1.52 msaitoh 5907 1.98 msaitoh return (0); 5908 1.99 msaitoh } /* ixgbe_sysctl_wufc */ 5909 1.52 msaitoh 5910 1.98 msaitoh #ifdef IXGBE_DEBUG 5911 1.99 msaitoh /************************************************************************ 5912 1.99 msaitoh * ixgbe_sysctl_print_rss_config 5913 1.99 msaitoh ************************************************************************/ 5914 1.52 msaitoh static int 5915 1.98 msaitoh ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS) 5916 1.52 msaitoh { 5917 1.99 msaitoh #ifdef notyet 5918 1.99 msaitoh struct sysctlnode node = *rnode; 5919 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 5920 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5921 1.333 msaitoh device_t dev = sc->dev; 5922 1.186 msaitoh struct sbuf *buf; 5923 1.186 msaitoh int error = 0, reta_size; 5924 1.186 msaitoh u32 reg; 5925 1.1 dyoung 5926 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 5927 1.169 msaitoh return (EPERM); 5928 1.169 msaitoh 5929 1.98 msaitoh buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5930 1.98 msaitoh if (!buf) { 5931 1.98 msaitoh device_printf(dev, "Could not allocate sbuf for output.\n"); 5932 1.98 msaitoh return (ENOMEM); 5933 1.98 msaitoh } 5934 1.52 msaitoh 5935 1.98 msaitoh // TODO: use sbufs to make a string to print out 5936 1.98 msaitoh /* Set multiplier for RETA setup and table size based on MAC */ 5937 1.333 msaitoh switch (sc->hw.mac.type) { 5938 1.98 msaitoh case ixgbe_mac_X550: 5939 1.98 msaitoh case ixgbe_mac_X550EM_x: 5940 1.99 msaitoh case ixgbe_mac_X550EM_a: 5941 1.98 msaitoh reta_size = 128; 5942 1.98 msaitoh break; 5943 1.98 msaitoh default: 5944 1.98 msaitoh reta_size = 32; 5945 1.98 msaitoh break; 5946 1.43 msaitoh } 5947 1.1 dyoung 5948 1.98 msaitoh /* Print out the redirection table */ 5949 1.98 msaitoh sbuf_cat(buf, "\n"); 5950 1.98 msaitoh for (int i = 0; i < reta_size; i++) { 5951 1.98 msaitoh if (i < 32) { 5952 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 5953 1.98 msaitoh sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 5954 1.98 msaitoh } else { 5955 1.98 msaitoh reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 5956 1.98 msaitoh sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); 5957 1.98 msaitoh } 5958 1.28 msaitoh } 5959 1.1 dyoung 5960 1.98 msaitoh // TODO: print more config 5961 1.43 msaitoh 5962 1.98 msaitoh error = sbuf_finish(buf); 5963 1.98 msaitoh if (error) 5964 1.98 
msaitoh device_printf(dev, "Error finishing sbuf: %d\n", error); 5965 1.1 dyoung 5966 1.98 msaitoh sbuf_delete(buf); 5967 1.99 msaitoh #endif 5968 1.98 msaitoh return (0); 5969 1.99 msaitoh } /* ixgbe_sysctl_print_rss_config */ 5970 1.98 msaitoh #endif /* IXGBE_DEBUG */ 5971 1.24 msaitoh 5972 1.99 msaitoh /************************************************************************ 5973 1.99 msaitoh * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY 5974 1.99 msaitoh * 5975 1.99 msaitoh * For X552/X557-AT devices using an external PHY 5976 1.99 msaitoh ************************************************************************/ 5977 1.44 msaitoh static int 5978 1.44 msaitoh ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS) 5979 1.44 msaitoh { 5980 1.44 msaitoh struct sysctlnode node = *rnode; 5981 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 5982 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 5983 1.44 msaitoh int val; 5984 1.44 msaitoh u16 reg; 5985 1.44 msaitoh int error; 5986 1.44 msaitoh 5987 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 5988 1.169 msaitoh return (EPERM); 5989 1.169 msaitoh 5990 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) && 5991 1.325 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) { 5992 1.333 msaitoh device_printf(sc->dev, 5993 1.44 msaitoh "Device has no supported external thermal sensor.\n"); 5994 1.44 msaitoh return (ENODEV); 5995 1.44 msaitoh } 5996 1.44 msaitoh 5997 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 5998 1.99 msaitoh IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 5999 1.333 msaitoh device_printf(sc->dev, 6000 1.44 msaitoh "Error reading from PHY's current temperature register\n"); 6001 1.44 msaitoh return (EAGAIN); 6002 1.44 msaitoh } 6003 1.44 msaitoh 6004 1.44 msaitoh node.sysctl_data = &val; 6005 1.44 msaitoh 6006 1.44 msaitoh /* Shift temp for output */ 6007 1.44 msaitoh val = reg >> 8; 6008 1.44 msaitoh 6009 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6010 1.44 msaitoh if ((error) || (newp == NULL)) 6011 1.44 msaitoh return (error); 6012 1.44 msaitoh 6013 1.44 msaitoh return (0); 6014 1.99 msaitoh } /* ixgbe_sysctl_phy_temp */ 6015 1.44 msaitoh 6016 1.99 msaitoh /************************************************************************ 6017 1.99 msaitoh * ixgbe_sysctl_phy_overtemp_occurred 6018 1.99 msaitoh * 6019 1.99 msaitoh * Reports (directly from the PHY) whether the current PHY 6020 1.99 msaitoh * temperature is over the overtemp threshold. 
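 * Returns 1 if the overtemp status bit (bit 14 of the PHY's
 * OVERTEMP_STATUS register) is set, 0 otherwise.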
6021 1.99 msaitoh ************************************************************************/ 6022 1.44 msaitoh static int 6023 1.44 msaitoh ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS) 6024 1.44 msaitoh { 6025 1.44 msaitoh struct sysctlnode node = *rnode; 6026 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6027 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 6028 1.44 msaitoh int val, error; 6029 1.44 msaitoh u16 reg; 6030 1.44 msaitoh 6031 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 6032 1.169 msaitoh return (EPERM); 6033 1.169 msaitoh 6034 1.325 msaitoh if ((hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) && 6035 1.344 msaitoh (hw->device_id != IXGBE_DEV_ID_X550EM_A_10G_T)) { 6036 1.333 msaitoh device_printf(sc->dev, 6037 1.44 msaitoh "Device has no supported external thermal sensor.\n"); 6038 1.44 msaitoh return (ENODEV); 6039 1.44 msaitoh } 6040 1.44 msaitoh 6041 1.44 msaitoh if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 6042 1.99 msaitoh IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®)) { 6043 1.333 msaitoh device_printf(sc->dev, 6044 1.44 msaitoh "Error reading from PHY's temperature status register\n"); 6045 1.44 msaitoh return (EAGAIN); 6046 1.44 msaitoh } 6047 1.44 msaitoh 6048 1.44 msaitoh node.sysctl_data = &val; 6049 1.44 msaitoh 6050 1.44 msaitoh /* Get occurrence bit */ 6051 1.44 msaitoh val = !!(reg & 0x4000); 6052 1.44 msaitoh 6053 1.44 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6054 1.44 msaitoh if ((error) || (newp == NULL)) 6055 1.44 msaitoh return (error); 6056 1.44 msaitoh 6057 1.44 msaitoh return (0); 6058 1.99 msaitoh } /* ixgbe_sysctl_phy_overtemp_occurred */ 6059 1.99 msaitoh 6060 1.99 msaitoh /************************************************************************ 6061 1.99 msaitoh * ixgbe_sysctl_eee_state 6062 1.99 msaitoh * 6063 1.99 msaitoh * Sysctl to set EEE power saving feature 6064 1.99 msaitoh * Values: 6065 1.99 msaitoh * 0 - disable EEE 6066 1.99 msaitoh * 1 - enable EEE 6067 1.99 msaitoh * (none) - get current device EEE state 6068 1.99 msaitoh ************************************************************************/ 6069 1.99 msaitoh static int 6070 1.99 msaitoh ixgbe_sysctl_eee_state(SYSCTLFN_ARGS) 6071 1.99 msaitoh { 6072 1.99 msaitoh struct sysctlnode node = *rnode; 6073 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6074 1.333 msaitoh struct ifnet *ifp = sc->ifp; 6075 1.333 msaitoh device_t dev = sc->dev; 6076 1.186 msaitoh int curr_eee, new_eee, error = 0; 6077 1.186 msaitoh s32 retval; 6078 1.99 msaitoh 6079 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 6080 1.169 msaitoh return (EPERM); 6081 1.169 msaitoh 6082 1.333 msaitoh curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE); 6083 1.99 msaitoh node.sysctl_data = &new_eee; 6084 1.99 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6085 1.99 msaitoh if ((error) || (newp == NULL)) 6086 1.99 msaitoh return (error); 6087 1.99 msaitoh 6088 1.99 msaitoh /* Nothing to do */ 6089 1.99 msaitoh if (new_eee == curr_eee) 6090 1.99 msaitoh return (0); 6091 1.99 msaitoh 6092 1.99 msaitoh /* Not supported */ 6093 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_EEE)) 6094 1.99 msaitoh return (EINVAL); 6095 1.99 msaitoh 6096 1.99 msaitoh /* Bounds checking */ 6097 1.99 msaitoh if ((new_eee < 0) || (new_eee > 1)) 6098 1.99 msaitoh return (EINVAL); 6099 1.99 msaitoh 6100 1.333 msaitoh retval = ixgbe_setup_eee(&sc->hw, new_eee); 6101 1.99 msaitoh if (retval) { 6102 1.99 msaitoh device_printf(dev, "Error in EEE 
setup: 0x%08X\n", retval); 6103 1.99 msaitoh return (EINVAL); 6104 1.99 msaitoh } 6105 1.99 msaitoh 6106 1.99 msaitoh /* Restart auto-neg */ 6107 1.302 riastrad if_init(ifp); 6108 1.99 msaitoh 6109 1.99 msaitoh device_printf(dev, "New EEE state: %d\n", new_eee); 6110 1.99 msaitoh 6111 1.99 msaitoh /* Cache new value */ 6112 1.99 msaitoh if (new_eee) 6113 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE; 6114 1.99 msaitoh else 6115 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_EEE; 6116 1.99 msaitoh 6117 1.99 msaitoh return (error); 6118 1.99 msaitoh } /* ixgbe_sysctl_eee_state */ 6119 1.99 msaitoh 6120 1.333 msaitoh #define PRINTQS(sc, regname) \ 6121 1.158 msaitoh do { \ 6122 1.333 msaitoh struct ixgbe_hw *_hw = &(sc)->hw; \ 6123 1.158 msaitoh int _i; \ 6124 1.158 msaitoh \ 6125 1.333 msaitoh printf("%s: %s", device_xname((sc)->dev), #regname); \ 6126 1.333 msaitoh for (_i = 0; _i < (sc)->num_queues; _i++) { \ 6127 1.158 msaitoh printf((_i == 0) ? "\t" : " "); \ 6128 1.158 msaitoh printf("%08x", IXGBE_READ_REG(_hw, \ 6129 1.158 msaitoh IXGBE_##regname(_i))); \ 6130 1.158 msaitoh } \ 6131 1.158 msaitoh printf("\n"); \ 6132 1.158 msaitoh } while (0) 6133 1.158 msaitoh 6134 1.158 msaitoh /************************************************************************ 6135 1.158 msaitoh * ixgbe_print_debug_info 6136 1.158 msaitoh * 6137 1.158 msaitoh * Called only when em_display_debug_stats is enabled. 6138 1.158 msaitoh * Provides a way to take a look at important statistics 6139 1.158 msaitoh * maintained by the driver and hardware. 6140 1.158 msaitoh ************************************************************************/ 6141 1.158 msaitoh static void 6142 1.333 msaitoh ixgbe_print_debug_info(struct ixgbe_softc *sc) 6143 1.158 msaitoh { 6144 1.333 msaitoh device_t dev = sc->dev; 6145 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 6146 1.158 msaitoh int table_size; 6147 1.158 msaitoh int i; 6148 1.158 msaitoh 6149 1.333 msaitoh switch (sc->hw.mac.type) { 6150 1.158 msaitoh case ixgbe_mac_X550: 6151 1.158 msaitoh case ixgbe_mac_X550EM_x: 6152 1.158 msaitoh case ixgbe_mac_X550EM_a: 6153 1.158 msaitoh table_size = 128; 6154 1.158 msaitoh break; 6155 1.158 msaitoh default: 6156 1.158 msaitoh table_size = 32; 6157 1.158 msaitoh break; 6158 1.158 msaitoh } 6159 1.185 msaitoh 6160 1.158 msaitoh device_printf(dev, "[E]RETA:\n"); 6161 1.158 msaitoh for (i = 0; i < table_size; i++) { 6162 1.158 msaitoh if (i < 32) 6163 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw, 6164 1.158 msaitoh IXGBE_RETA(i))); 6165 1.158 msaitoh else 6166 1.158 msaitoh printf("%02x: %08x\n", i, IXGBE_READ_REG(hw, 6167 1.158 msaitoh IXGBE_ERETA(i - 32))); 6168 1.158 msaitoh } 6169 1.158 msaitoh 6170 1.158 msaitoh device_printf(dev, "queue:"); 6171 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) { 6172 1.158 msaitoh printf((i == 0) ? "\t" : " "); 6173 1.158 msaitoh printf("%8d", i); 6174 1.158 msaitoh } 6175 1.158 msaitoh printf("\n"); 6176 1.333 msaitoh PRINTQS(sc, RDBAL); 6177 1.333 msaitoh PRINTQS(sc, RDBAH); 6178 1.333 msaitoh PRINTQS(sc, RDLEN); 6179 1.333 msaitoh PRINTQS(sc, SRRCTL); 6180 1.333 msaitoh PRINTQS(sc, RDH); 6181 1.333 msaitoh PRINTQS(sc, RDT); 6182 1.333 msaitoh PRINTQS(sc, RXDCTL); 6183 1.158 msaitoh 6184 1.158 msaitoh device_printf(dev, "RQSMR:"); 6185 1.333 msaitoh for (i = 0; i < sc->num_queues / 4; i++) { 6186 1.158 msaitoh printf((i == 0) ? 
"\t" : " "); 6187 1.158 msaitoh printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i))); 6188 1.158 msaitoh } 6189 1.158 msaitoh printf("\n"); 6190 1.158 msaitoh 6191 1.158 msaitoh device_printf(dev, "disabled_count:"); 6192 1.333 msaitoh for (i = 0; i < sc->num_queues; i++) { 6193 1.158 msaitoh printf((i == 0) ? "\t" : " "); 6194 1.333 msaitoh printf("%8d", sc->queues[i].disabled_count); 6195 1.158 msaitoh } 6196 1.158 msaitoh printf("\n"); 6197 1.185 msaitoh 6198 1.158 msaitoh device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS)); 6199 1.158 msaitoh if (hw->mac.type != ixgbe_mac_82598EB) { 6200 1.158 msaitoh device_printf(dev, "EIMS_EX(0):\t%08x\n", 6201 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0))); 6202 1.158 msaitoh device_printf(dev, "EIMS_EX(1):\t%08x\n", 6203 1.158 msaitoh IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1))); 6204 1.158 msaitoh } 6205 1.265 msaitoh device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM)); 6206 1.265 msaitoh device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC)); 6207 1.158 msaitoh } /* ixgbe_print_debug_info */ 6208 1.158 msaitoh 6209 1.158 msaitoh /************************************************************************ 6210 1.158 msaitoh * ixgbe_sysctl_debug 6211 1.158 msaitoh ************************************************************************/ 6212 1.158 msaitoh static int 6213 1.158 msaitoh ixgbe_sysctl_debug(SYSCTLFN_ARGS) 6214 1.158 msaitoh { 6215 1.158 msaitoh struct sysctlnode node = *rnode; 6216 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6217 1.186 msaitoh int error, result = 0; 6218 1.158 msaitoh 6219 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 6220 1.169 msaitoh return (EPERM); 6221 1.169 msaitoh 6222 1.158 msaitoh node.sysctl_data = &result; 6223 1.158 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6224 1.158 msaitoh 6225 1.158 msaitoh if (error || newp == NULL) 6226 1.158 msaitoh return error; 6227 1.158 msaitoh 6228 1.158 msaitoh if (result == 1) 6229 1.333 msaitoh ixgbe_print_debug_info(sc); 6230 1.158 msaitoh 6231 1.158 msaitoh return 0; 6232 1.158 msaitoh } /* ixgbe_sysctl_debug */ 6233 1.158 msaitoh 6234 1.99 msaitoh /************************************************************************ 6235 1.286 msaitoh * ixgbe_sysctl_rx_copy_len 6236 1.286 msaitoh ************************************************************************/ 6237 1.286 msaitoh static int 6238 1.286 msaitoh ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS) 6239 1.286 msaitoh { 6240 1.286 msaitoh struct sysctlnode node = *rnode; 6241 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6242 1.286 msaitoh int error; 6243 1.333 msaitoh int result = sc->rx_copy_len; 6244 1.286 msaitoh 6245 1.286 msaitoh node.sysctl_data = &result; 6246 1.286 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6247 1.286 msaitoh 6248 1.286 msaitoh if (error || newp == NULL) 6249 1.286 msaitoh return error; 6250 1.286 msaitoh 6251 1.286 msaitoh if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX)) 6252 1.286 msaitoh return EINVAL; 6253 1.286 msaitoh 6254 1.333 msaitoh sc->rx_copy_len = result; 6255 1.286 msaitoh 6256 1.286 msaitoh return 0; 6257 1.286 msaitoh } /* ixgbe_sysctl_rx_copy_len */ 6258 1.286 msaitoh 6259 1.286 msaitoh /************************************************************************ 6260 1.313 msaitoh * ixgbe_sysctl_tx_process_limit 6261 1.313 msaitoh ************************************************************************/ 6262 1.313 msaitoh static int 6263 1.313 msaitoh 
ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS) 6264 1.313 msaitoh { 6265 1.313 msaitoh struct sysctlnode node = *rnode; 6266 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6267 1.313 msaitoh int error; 6268 1.333 msaitoh int result = sc->tx_process_limit; 6269 1.313 msaitoh 6270 1.313 msaitoh node.sysctl_data = &result; 6271 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6272 1.313 msaitoh 6273 1.313 msaitoh if (error || newp == NULL) 6274 1.313 msaitoh return error; 6275 1.313 msaitoh 6276 1.333 msaitoh if ((result <= 0) || (result > sc->num_tx_desc)) 6277 1.313 msaitoh return EINVAL; 6278 1.313 msaitoh 6279 1.333 msaitoh sc->tx_process_limit = result; 6280 1.313 msaitoh 6281 1.313 msaitoh return 0; 6282 1.313 msaitoh } /* ixgbe_sysctl_tx_process_limit */ 6283 1.313 msaitoh 6284 1.313 msaitoh /************************************************************************ 6285 1.313 msaitoh * ixgbe_sysctl_rx_process_limit 6286 1.313 msaitoh ************************************************************************/ 6287 1.313 msaitoh static int 6288 1.313 msaitoh ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS) 6289 1.313 msaitoh { 6290 1.313 msaitoh struct sysctlnode node = *rnode; 6291 1.333 msaitoh struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6292 1.313 msaitoh int error; 6293 1.333 msaitoh int result = sc->rx_process_limit; 6294 1.313 msaitoh 6295 1.313 msaitoh node.sysctl_data = &result; 6296 1.313 msaitoh error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6297 1.313 msaitoh 6298 1.313 msaitoh if (error || newp == NULL) 6299 1.313 msaitoh return error; 6300 1.313 msaitoh 6301 1.333 msaitoh if ((result <= 0) || (result > sc->num_rx_desc)) 6302 1.313 msaitoh return EINVAL; 6303 1.313 msaitoh 6304 1.333 msaitoh sc->rx_process_limit = result; 6305 1.313 msaitoh 6306 1.313 msaitoh return 0; 6307 1.313 msaitoh } /* ixgbe_sysctl_rx_process_limit */ 6308 1.313 msaitoh 6309 1.313 msaitoh /************************************************************************ 6310 1.99 msaitoh * ixgbe_init_device_features 6311 1.99 msaitoh ************************************************************************/ 6312 1.99 msaitoh static void 6313 1.333 msaitoh ixgbe_init_device_features(struct ixgbe_softc *sc) 6314 1.99 msaitoh { 6315 1.333 msaitoh sc->feat_cap = IXGBE_FEATURE_NETMAP 6316 1.186 msaitoh | IXGBE_FEATURE_RSS 6317 1.186 msaitoh | IXGBE_FEATURE_MSI 6318 1.186 msaitoh | IXGBE_FEATURE_MSIX 6319 1.186 msaitoh | IXGBE_FEATURE_LEGACY_IRQ 6320 1.186 msaitoh | IXGBE_FEATURE_LEGACY_TX; 6321 1.99 msaitoh 6322 1.99 msaitoh /* Set capabilities first... */ 6323 1.333 msaitoh switch (sc->hw.mac.type) { 6324 1.99 msaitoh case ixgbe_mac_82598EB: 6325 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82598AT) 6326 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 6327 1.99 msaitoh break; 6328 1.99 msaitoh case ixgbe_mac_X540: 6329 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6330 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR; 6331 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 6332 1.333 msaitoh (sc->hw.bus.func == 0)) 6333 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS; 6334 1.99 msaitoh break; 6335 1.99 msaitoh case ixgbe_mac_X550: 6336 1.169 msaitoh /* 6337 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6338 1.169 msaitoh * NVM Image version. 
6339 1.169 msaitoh */ 6340 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 6341 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6342 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR; 6343 1.99 msaitoh break; 6344 1.99 msaitoh case ixgbe_mac_X550EM_x: 6345 1.169 msaitoh /* 6346 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6347 1.169 msaitoh * NVM Image version. 6348 1.169 msaitoh */ 6349 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6350 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR; 6351 1.99 msaitoh break; 6352 1.99 msaitoh case ixgbe_mac_X550EM_a: 6353 1.169 msaitoh /* 6354 1.169 msaitoh * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6355 1.169 msaitoh * NVM Image version. 6356 1.169 msaitoh */ 6357 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6358 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR; 6359 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 6360 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 6361 1.333 msaitoh (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 6362 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 6363 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_EEE; 6364 1.99 msaitoh } 6365 1.99 msaitoh break; 6366 1.99 msaitoh case ixgbe_mac_82599EB: 6367 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6368 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_FDIR; 6369 1.333 msaitoh if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 6370 1.333 msaitoh (sc->hw.bus.func == 0)) 6371 1.333 msaitoh sc->feat_cap |= IXGBE_FEATURE_BYPASS; 6372 1.333 msaitoh if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 6373 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 6374 1.99 msaitoh break; 6375 1.99 msaitoh default: 6376 1.99 msaitoh break; 6377 1.99 msaitoh } 6378 1.99 msaitoh 6379 1.99 msaitoh /* Enabled by default... */ 6380 1.99 msaitoh /* Fan failure detection */ 6381 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL) 6382 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FAN_FAIL; 6383 1.99 msaitoh /* Netmap */ 6384 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_NETMAP) 6385 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_NETMAP; 6386 1.99 msaitoh /* EEE */ 6387 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_EEE) 6388 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_EEE; 6389 1.99 msaitoh /* Thermal Sensor */ 6390 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 6391 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 6392 1.169 msaitoh /* 6393 1.169 msaitoh * Recovery mode: 6394 1.169 msaitoh * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading 6395 1.169 msaitoh * NVM Image version. 6396 1.169 msaitoh */ 6397 1.99 msaitoh 6398 1.99 msaitoh /* Enabled via global sysctl... */ 6399 1.99 msaitoh /* Flow Director */ 6400 1.99 msaitoh if (ixgbe_enable_fdir) { 6401 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_FDIR) 6402 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_FDIR; 6403 1.99 msaitoh else 6404 1.333 msaitoh device_printf(sc->dev, "Device does not support " 6405 1.320 msaitoh "Flow Director. Leaving disabled."); 6406 1.99 msaitoh } 6407 1.99 msaitoh /* Legacy (single queue) transmit */ 6408 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) && 6409 1.99 msaitoh ixgbe_enable_legacy_tx) 6410 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_TX; 6411 1.99 msaitoh /* 6412 1.99 msaitoh * Message Signal Interrupts - Extended (MSI-X) 6413 1.99 msaitoh * Normal MSI is only enabled if MSI-X calls fail. 
6414 1.99 msaitoh */ 6415 1.99 msaitoh if (!ixgbe_enable_msix) 6416 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_MSIX; 6417 1.99 msaitoh /* Receive-Side Scaling (RSS) */ 6418 1.333 msaitoh if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 6419 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_RSS; 6420 1.99 msaitoh 6421 1.99 msaitoh /* Disable features with unmet dependencies... */ 6422 1.99 msaitoh /* No MSI-X */ 6423 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) { 6424 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS; 6425 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV; 6426 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS; 6427 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV; 6428 1.99 msaitoh } 6429 1.99 msaitoh } /* ixgbe_init_device_features */ 6430 1.44 msaitoh 6431 1.99 msaitoh /************************************************************************ 6432 1.99 msaitoh * ixgbe_probe - Device identification routine 6433 1.98 msaitoh * 6434 1.99 msaitoh * Determines if the driver should be loaded on 6435 1.99 msaitoh * adapter based on its PCI vendor/device ID. 6436 1.98 msaitoh * 6437 1.99 msaitoh * return BUS_PROBE_DEFAULT on success, positive on failure 6438 1.99 msaitoh ************************************************************************/ 6439 1.98 msaitoh static int 6440 1.98 msaitoh ixgbe_probe(device_t dev, cfdata_t cf, void *aux) 6441 1.98 msaitoh { 6442 1.98 msaitoh const struct pci_attach_args *pa = aux; 6443 1.98 msaitoh 6444 1.98 msaitoh return (ixgbe_lookup(pa) != NULL) ? 1 : 0; 6445 1.98 msaitoh } 6446 1.98 msaitoh 6447 1.159 maxv static const ixgbe_vendor_info_t * 6448 1.98 msaitoh ixgbe_lookup(const struct pci_attach_args *pa) 6449 1.98 msaitoh { 6450 1.159 maxv const ixgbe_vendor_info_t *ent; 6451 1.98 msaitoh pcireg_t subid; 6452 1.98 msaitoh 6453 1.98 msaitoh INIT_DEBUGOUT("ixgbe_lookup: begin"); 6454 1.98 msaitoh 6455 1.98 msaitoh if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) 6456 1.98 msaitoh return NULL; 6457 1.98 msaitoh 6458 1.98 msaitoh subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 6459 1.98 msaitoh 6460 1.98 msaitoh for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) { 6461 1.99 msaitoh if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && 6462 1.99 msaitoh (PCI_PRODUCT(pa->pa_id) == ent->device_id) && 6463 1.99 msaitoh ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || 6464 1.99 msaitoh (ent->subvendor_id == 0)) && 6465 1.99 msaitoh ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || 6466 1.99 msaitoh (ent->subdevice_id == 0))) { 6467 1.98 msaitoh return ent; 6468 1.98 msaitoh } 6469 1.98 msaitoh } 6470 1.98 msaitoh return NULL; 6471 1.98 msaitoh } 6472 1.98 msaitoh 6473 1.98 msaitoh static int 6474 1.98 msaitoh ixgbe_ifflags_cb(struct ethercom *ec) 6475 1.98 msaitoh { 6476 1.98 msaitoh struct ifnet *ifp = &ec->ec_if; 6477 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc; 6478 1.210 msaitoh u_short change; 6479 1.210 msaitoh int rv = 0; 6480 1.98 msaitoh 6481 1.333 msaitoh IXGBE_CORE_LOCK(sc); 6482 1.98 msaitoh 6483 1.333 msaitoh change = ifp->if_flags ^ sc->if_flags; 6484 1.98 msaitoh if (change != 0) 6485 1.333 msaitoh sc->if_flags = ifp->if_flags; 6486 1.98 msaitoh 6487 1.192 msaitoh if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 6488 1.192 msaitoh rv = ENETRESET; 6489 1.192 msaitoh goto out; 6490 1.192 msaitoh } else if ((change & IFF_PROMISC) != 0) 6491 1.333 msaitoh ixgbe_set_rxfilter(sc); 6492 1.98 msaitoh 6493 1.193 msaitoh /* Check for ec_capenable. 
*/ 6494 1.333 msaitoh change = ec->ec_capenable ^ sc->ec_capenable; 6495 1.333 msaitoh sc->ec_capenable = ec->ec_capenable; 6496 1.193 msaitoh if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 6497 1.193 msaitoh | ETHERCAP_VLAN_HWFILTER)) != 0) { 6498 1.193 msaitoh rv = ENETRESET; 6499 1.193 msaitoh goto out; 6500 1.193 msaitoh } 6501 1.193 msaitoh 6502 1.193 msaitoh /* 6503 1.193 msaitoh * Special handling is not required for ETHERCAP_VLAN_MTU. 6504 1.193 msaitoh * MAXFRS(MHADD) does not include the 4bytes of the VLAN header. 6505 1.193 msaitoh */ 6506 1.193 msaitoh 6507 1.98 msaitoh /* Set up VLAN support and filter */ 6508 1.193 msaitoh if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0) 6509 1.333 msaitoh ixgbe_setup_vlan_hw_support(sc); 6510 1.98 msaitoh 6511 1.192 msaitoh out: 6512 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 6513 1.98 msaitoh 6514 1.192 msaitoh return rv; 6515 1.98 msaitoh } 6516 1.98 msaitoh 6517 1.99 msaitoh /************************************************************************ 6518 1.99 msaitoh * ixgbe_ioctl - Ioctl entry point 6519 1.98 msaitoh * 6520 1.99 msaitoh * Called when the user wants to configure the interface. 6521 1.98 msaitoh * 6522 1.99 msaitoh * return 0 on success, positive on failure 6523 1.99 msaitoh ************************************************************************/ 6524 1.98 msaitoh static int 6525 1.232 msaitoh ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data) 6526 1.98 msaitoh { 6527 1.333 msaitoh struct ixgbe_softc *sc = ifp->if_softc; 6528 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 6529 1.98 msaitoh struct ifcapreq *ifcr = data; 6530 1.98 msaitoh struct ifreq *ifr = data; 6531 1.186 msaitoh int error = 0; 6532 1.98 msaitoh int l4csum_en; 6533 1.185 msaitoh const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 6534 1.185 msaitoh IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 6535 1.98 msaitoh 6536 1.333 msaitoh if (ixgbe_fw_recovery_mode_swflag(sc)) 6537 1.169 msaitoh return (EPERM); 6538 1.169 msaitoh 6539 1.98 msaitoh switch (command) { 6540 1.98 msaitoh case SIOCSIFFLAGS: 6541 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 6542 1.98 msaitoh break; 6543 1.98 msaitoh case SIOCADDMULTI: 6544 1.98 msaitoh case SIOCDELMULTI: 6545 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 6546 1.98 msaitoh break; 6547 1.98 msaitoh case SIOCSIFMEDIA: 6548 1.98 msaitoh case SIOCGIFMEDIA: 6549 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 6550 1.98 msaitoh break; 6551 1.98 msaitoh case SIOCSIFCAP: 6552 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 6553 1.98 msaitoh break; 6554 1.98 msaitoh case SIOCSIFMTU: 6555 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 6556 1.98 msaitoh break; 6557 1.98 msaitoh #ifdef __NetBSD__ 6558 1.98 msaitoh case SIOCINITIFADDR: 6559 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR"); 6560 1.98 msaitoh break; 6561 1.98 msaitoh case SIOCGIFFLAGS: 6562 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS"); 6563 1.98 msaitoh break; 6564 1.98 msaitoh case SIOCGIFAFLAG_IN: 6565 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN"); 6566 1.98 msaitoh break; 6567 1.98 msaitoh case SIOCGIFADDR: 6568 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR"); 6569 1.98 msaitoh break; 6570 1.98 msaitoh case SIOCGIFMTU: 6571 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)"); 6572 1.98 msaitoh break; 6573 1.98 msaitoh case SIOCGIFCAP: 6574 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: 
SIOCGIFCAP (Get IF cap)"); 6575 1.98 msaitoh break; 6576 1.98 msaitoh case SIOCGETHERCAP: 6577 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)"); 6578 1.98 msaitoh break; 6579 1.98 msaitoh case SIOCGLIFADDR: 6580 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)"); 6581 1.98 msaitoh break; 6582 1.98 msaitoh case SIOCZIFDATA: 6583 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)"); 6584 1.98 msaitoh hw->mac.ops.clear_hw_cntrs(hw); 6585 1.333 msaitoh ixgbe_clear_evcnt(sc); 6586 1.98 msaitoh break; 6587 1.98 msaitoh case SIOCAIFADDR: 6588 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)"); 6589 1.98 msaitoh break; 6590 1.98 msaitoh #endif 6591 1.98 msaitoh default: 6592 1.98 msaitoh IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); 6593 1.98 msaitoh break; 6594 1.98 msaitoh } 6595 1.24 msaitoh 6596 1.98 msaitoh switch (command) { 6597 1.98 msaitoh case SIOCGI2C: 6598 1.98 msaitoh { 6599 1.98 msaitoh struct ixgbe_i2c_req i2c; 6600 1.24 msaitoh 6601 1.98 msaitoh IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); 6602 1.98 msaitoh error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 6603 1.98 msaitoh if (error != 0) 6604 1.98 msaitoh break; 6605 1.98 msaitoh if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 6606 1.98 msaitoh error = EINVAL; 6607 1.98 msaitoh break; 6608 1.98 msaitoh } 6609 1.98 msaitoh if (i2c.len > sizeof(i2c.data)) { 6610 1.98 msaitoh error = EINVAL; 6611 1.98 msaitoh break; 6612 1.98 msaitoh } 6613 1.24 msaitoh 6614 1.98 msaitoh hw->phy.ops.read_i2c_byte(hw, i2c.offset, 6615 1.98 msaitoh i2c.dev_addr, i2c.data); 6616 1.98 msaitoh error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 6617 1.98 msaitoh break; 6618 1.98 msaitoh } 6619 1.98 msaitoh case SIOCSIFCAP: 6620 1.98 msaitoh /* Layer-4 Rx checksum offload has to be turned on and 6621 1.98 msaitoh * off as a unit. 6622 1.98 msaitoh */ 6623 1.98 msaitoh l4csum_en = ifcr->ifcr_capenable & l4csum; 6624 1.98 msaitoh if (l4csum_en != l4csum && l4csum_en != 0) 6625 1.98 msaitoh return EINVAL; 6626 1.98 msaitoh /*FALLTHROUGH*/ 6627 1.98 msaitoh case SIOCADDMULTI: 6628 1.98 msaitoh case SIOCDELMULTI: 6629 1.98 msaitoh case SIOCSIFFLAGS: 6630 1.98 msaitoh case SIOCSIFMTU: 6631 1.98 msaitoh default: 6632 1.98 msaitoh if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 6633 1.98 msaitoh return error; 6634 1.98 msaitoh if ((ifp->if_flags & IFF_RUNNING) == 0) 6635 1.98 msaitoh ; 6636 1.98 msaitoh else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { 6637 1.333 msaitoh IXGBE_CORE_LOCK(sc); 6638 1.135 msaitoh if ((ifp->if_flags & IFF_RUNNING) != 0) 6639 1.333 msaitoh ixgbe_init_locked(sc); 6640 1.333 msaitoh ixgbe_recalculate_max_frame(sc); 6641 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 6642 1.98 msaitoh } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { 6643 1.98 msaitoh /* 6644 1.98 msaitoh * Multicast list has changed; set the hardware filter 6645 1.98 msaitoh * accordingly. 
6646 1.98 msaitoh */ 6647 1.333 msaitoh IXGBE_CORE_LOCK(sc); 6648 1.333 msaitoh ixgbe_disable_intr(sc); 6649 1.333 msaitoh ixgbe_set_rxfilter(sc); 6650 1.333 msaitoh ixgbe_enable_intr(sc); 6651 1.333 msaitoh IXGBE_CORE_UNLOCK(sc); 6652 1.98 msaitoh } 6653 1.98 msaitoh return 0; 6654 1.24 msaitoh } 6655 1.24 msaitoh 6656 1.98 msaitoh return error; 6657 1.99 msaitoh } /* ixgbe_ioctl */ 6658 1.99 msaitoh 6659 1.99 msaitoh /************************************************************************ 6660 1.99 msaitoh * ixgbe_check_fan_failure 6661 1.99 msaitoh ************************************************************************/ 6662 1.274 msaitoh static int 6663 1.333 msaitoh ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt) 6664 1.99 msaitoh { 6665 1.99 msaitoh u32 mask; 6666 1.99 msaitoh 6667 1.333 msaitoh mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) : 6668 1.99 msaitoh IXGBE_ESDP_SDP1; 6669 1.26 msaitoh 6670 1.312 msaitoh if ((reg & mask) == 0) 6671 1.312 msaitoh return IXGBE_SUCCESS; 6672 1.312 msaitoh 6673 1.312 msaitoh /* 6674 1.312 msaitoh * Use ratecheck() just in case interrupt occur frequently. 6675 1.312 msaitoh * When EXPX9501AT's fan stopped, interrupt occurred only once, 6676 1.312 msaitoh * an red LED on the board turned on and link never up until 6677 1.312 msaitoh * power off. 6678 1.312 msaitoh */ 6679 1.333 msaitoh if (ratecheck(&sc->lasterr_time, &ixgbe_errlog_intrvl)) 6680 1.333 msaitoh device_printf(sc->dev, 6681 1.280 msaitoh "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 6682 1.274 msaitoh 6683 1.312 msaitoh return IXGBE_ERR_FAN_FAILURE; 6684 1.99 msaitoh } /* ixgbe_check_fan_failure */ 6685 1.99 msaitoh 6686 1.99 msaitoh /************************************************************************ 6687 1.99 msaitoh * ixgbe_handle_que 6688 1.99 msaitoh ************************************************************************/ 6689 1.98 msaitoh static void 6690 1.98 msaitoh ixgbe_handle_que(void *context) 6691 1.44 msaitoh { 6692 1.98 msaitoh struct ix_queue *que = context; 6693 1.333 msaitoh struct ixgbe_softc *sc = que->sc; 6694 1.186 msaitoh struct tx_ring *txr = que->txr; 6695 1.333 msaitoh struct ifnet *ifp = sc->ifp; 6696 1.121 msaitoh bool more = false; 6697 1.44 msaitoh 6698 1.305 msaitoh IXGBE_EVC_ADD(&que->handleq, 1); 6699 1.44 msaitoh 6700 1.98 msaitoh if (ifp->if_flags & IFF_RUNNING) { 6701 1.98 msaitoh IXGBE_TX_LOCK(txr); 6702 1.323 msaitoh more = ixgbe_txeof(txr); 6703 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) 6704 1.99 msaitoh if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq)) 6705 1.99 msaitoh ixgbe_mq_start_locked(ifp, txr); 6706 1.98 msaitoh /* Only for queue 0 */ 6707 1.99 msaitoh /* NetBSD still needs this for CBQ */ 6708 1.333 msaitoh if ((&sc->queues[0] == que) 6709 1.99 msaitoh && (!ixgbe_legacy_ring_empty(ifp, NULL))) 6710 1.99 msaitoh ixgbe_legacy_start_locked(ifp, txr); 6711 1.98 msaitoh IXGBE_TX_UNLOCK(txr); 6712 1.323 msaitoh more |= ixgbe_rxeof(que); 6713 1.44 msaitoh } 6714 1.44 msaitoh 6715 1.128 knakahar if (more) { 6716 1.305 msaitoh IXGBE_EVC_ADD(&que->req, 1); 6717 1.333 msaitoh ixgbe_sched_handle_que(sc, que); 6718 1.128 knakahar } else if (que->res != NULL) { 6719 1.265 msaitoh /* MSIX: Re-enable this interrupt */ 6720 1.333 msaitoh ixgbe_enable_queue(sc, que->msix); 6721 1.265 msaitoh } else { 6722 1.265 msaitoh /* INTx or MSI */ 6723 1.333 msaitoh ixgbe_enable_queue(sc, 0); 6724 1.265 msaitoh } 6725 1.99 msaitoh 6726 1.98 msaitoh return; 6727 1.99 msaitoh } /* 
ixgbe_handle_que */ 6728 1.44 msaitoh 6729 1.99 msaitoh /************************************************************************ 6730 1.128 knakahar * ixgbe_handle_que_work 6731 1.128 knakahar ************************************************************************/ 6732 1.128 knakahar static void 6733 1.128 knakahar ixgbe_handle_que_work(struct work *wk, void *context) 6734 1.128 knakahar { 6735 1.128 knakahar struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie); 6736 1.128 knakahar 6737 1.128 knakahar /* 6738 1.128 knakahar * "enqueued flag" is not required here. 6739 1.128 knakahar * See ixgbe_msix_que(). 6740 1.128 knakahar */ 6741 1.128 knakahar ixgbe_handle_que(que); 6742 1.128 knakahar } 6743 1.128 knakahar 6744 1.128 knakahar /************************************************************************ 6745 1.99 msaitoh * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler 6746 1.99 msaitoh ************************************************************************/ 6747 1.48 msaitoh static int 6748 1.333 msaitoh ixgbe_allocate_legacy(struct ixgbe_softc *sc, 6749 1.98 msaitoh const struct pci_attach_args *pa) 6750 1.48 msaitoh { 6751 1.333 msaitoh device_t dev = sc->dev; 6752 1.333 msaitoh struct ix_queue *que = sc->queues; 6753 1.333 msaitoh struct tx_ring *txr = sc->tx_rings; 6754 1.98 msaitoh int counts[PCI_INTR_TYPE_SIZE]; 6755 1.98 msaitoh pci_intr_type_t intr_type, max_type; 6756 1.186 msaitoh char intrbuf[PCI_INTRSTR_LEN]; 6757 1.206 knakahar char wqname[MAXCOMLEN]; 6758 1.98 msaitoh const char *intrstr = NULL; 6759 1.206 knakahar int defertx_error = 0, error; 6760 1.185 msaitoh 6761 1.99 msaitoh /* We allocate a single interrupt resource */ 6762 1.98 msaitoh max_type = PCI_INTR_TYPE_MSI; 6763 1.98 msaitoh counts[PCI_INTR_TYPE_MSIX] = 0; 6764 1.99 msaitoh counts[PCI_INTR_TYPE_MSI] = 6765 1.333 msaitoh (sc->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0; 6766 1.118 msaitoh /* Check not feat_en but feat_cap to fallback to INTx */ 6767 1.99 msaitoh counts[PCI_INTR_TYPE_INTX] = 6768 1.333 msaitoh (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0; 6769 1.48 msaitoh 6770 1.98 msaitoh alloc_retry: 6771 1.333 msaitoh if (pci_intr_alloc(pa, &sc->osdep.intrs, counts, max_type) != 0) { 6772 1.98 msaitoh aprint_error_dev(dev, "couldn't alloc interrupt\n"); 6773 1.98 msaitoh return ENXIO; 6774 1.98 msaitoh } 6775 1.333 msaitoh sc->osdep.nintrs = 1; 6776 1.333 msaitoh intrstr = pci_intr_string(sc->osdep.pc, sc->osdep.intrs[0], 6777 1.98 msaitoh intrbuf, sizeof(intrbuf)); 6778 1.333 msaitoh sc->osdep.ihs[0] = pci_intr_establish_xname(sc->osdep.pc, 6779 1.333 msaitoh sc->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que, 6780 1.98 msaitoh device_xname(dev)); 6781 1.333 msaitoh intr_type = pci_intr_type(sc->osdep.pc, sc->osdep.intrs[0]); 6782 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) { 6783 1.98 msaitoh aprint_error_dev(dev,"unable to establish %s\n", 6784 1.98 msaitoh (intr_type == PCI_INTR_TYPE_MSI) ? 
"MSI" : "INTx"); 6785 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1); 6786 1.333 msaitoh sc->osdep.intrs = NULL; 6787 1.98 msaitoh switch (intr_type) { 6788 1.98 msaitoh case PCI_INTR_TYPE_MSI: 6789 1.98 msaitoh /* The next try is for INTx: Disable MSI */ 6790 1.98 msaitoh max_type = PCI_INTR_TYPE_INTX; 6791 1.98 msaitoh counts[PCI_INTR_TYPE_INTX] = 1; 6792 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI; 6793 1.333 msaitoh if (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) { 6794 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ; 6795 1.118 msaitoh goto alloc_retry; 6796 1.118 msaitoh } else 6797 1.118 msaitoh break; 6798 1.98 msaitoh case PCI_INTR_TYPE_INTX: 6799 1.98 msaitoh default: 6800 1.98 msaitoh /* See below */ 6801 1.98 msaitoh break; 6802 1.98 msaitoh } 6803 1.98 msaitoh } 6804 1.119 msaitoh if (intr_type == PCI_INTR_TYPE_INTX) { 6805 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSI; 6806 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ; 6807 1.119 msaitoh } 6808 1.333 msaitoh if (sc->osdep.ihs[0] == NULL) { 6809 1.98 msaitoh aprint_error_dev(dev, 6810 1.98 msaitoh "couldn't establish interrupt%s%s\n", 6811 1.98 msaitoh intrstr ? " at " : "", intrstr ? intrstr : ""); 6812 1.333 msaitoh pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1); 6813 1.333 msaitoh sc->osdep.intrs = NULL; 6814 1.98 msaitoh return ENXIO; 6815 1.98 msaitoh } 6816 1.98 msaitoh aprint_normal_dev(dev, "interrupting at %s\n", intrstr); 6817 1.98 msaitoh /* 6818 1.98 msaitoh * Try allocating a fast interrupt and the associated deferred 6819 1.98 msaitoh * processing contexts. 6820 1.98 msaitoh */ 6821 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 6822 1.99 msaitoh txr->txr_si = 6823 1.354 msaitoh softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 6824 1.99 msaitoh ixgbe_deferred_mq_start, txr); 6825 1.206 knakahar 6826 1.280 msaitoh snprintf(wqname, sizeof(wqname), "%sdeferTx", 6827 1.280 msaitoh device_xname(dev)); 6828 1.333 msaitoh defertx_error = workqueue_create(&sc->txr_wq, wqname, 6829 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, 6830 1.354 msaitoh IPL_NET, WQ_PERCPU | WQ_MPSAFE); 6831 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); 6832 1.206 knakahar } 6833 1.354 msaitoh que->que_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 6834 1.98 msaitoh ixgbe_handle_que, que); 6835 1.206 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); 6836 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname, 6837 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, 6838 1.354 msaitoh WQ_PERCPU | WQ_MPSAFE); 6839 1.48 msaitoh 6840 1.333 msaitoh if ((!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX) 6841 1.206 knakahar && ((txr->txr_si == NULL) || defertx_error != 0)) 6842 1.206 knakahar || (que->que_si == NULL) || error != 0) { 6843 1.98 msaitoh aprint_error_dev(dev, 6844 1.185 msaitoh "could not establish software interrupts\n"); 6845 1.99 msaitoh 6846 1.98 msaitoh return ENXIO; 6847 1.98 msaitoh } 6848 1.98 msaitoh /* For simplicity in the handlers */ 6849 1.333 msaitoh sc->active_queues = IXGBE_EIMS_ENABLE_MASK; 6850 1.44 msaitoh 6851 1.44 msaitoh return (0); 6852 1.99 msaitoh } /* ixgbe_allocate_legacy */ 6853 1.44 msaitoh 6854 1.99 msaitoh /************************************************************************ 6855 1.99 msaitoh * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers 6856 1.99 msaitoh ************************************************************************/ 6857 1.44 msaitoh 
static int 6858 1.333 msaitoh ixgbe_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa) 6859 1.44 msaitoh { 6860 1.333 msaitoh device_t dev = sc->dev; 6861 1.341 msaitoh struct ix_queue *que = sc->queues; 6862 1.341 msaitoh struct tx_ring *txr = sc->tx_rings; 6863 1.98 msaitoh pci_chipset_tag_t pc; 6864 1.98 msaitoh char intrbuf[PCI_INTRSTR_LEN]; 6865 1.98 msaitoh char intr_xname[32]; 6866 1.128 knakahar char wqname[MAXCOMLEN]; 6867 1.98 msaitoh const char *intrstr = NULL; 6868 1.186 msaitoh int error, vector = 0; 6869 1.98 msaitoh int cpu_id = 0; 6870 1.98 msaitoh kcpuset_t *affinity; 6871 1.99 msaitoh #ifdef RSS 6872 1.186 msaitoh unsigned int rss_buckets = 0; 6873 1.99 msaitoh kcpuset_t cpu_mask; 6874 1.98 msaitoh #endif 6875 1.98 msaitoh 6876 1.333 msaitoh pc = sc->osdep.pc; 6877 1.98 msaitoh #ifdef RSS 6878 1.98 msaitoh /* 6879 1.98 msaitoh * If we're doing RSS, the number of queues needs to 6880 1.98 msaitoh * match the number of RSS buckets that are configured. 6881 1.98 msaitoh * 6882 1.98 msaitoh * + If there's more queues than RSS buckets, we'll end 6883 1.98 msaitoh * up with queues that get no traffic. 6884 1.98 msaitoh * 6885 1.98 msaitoh * + If there's more RSS buckets than queues, we'll end 6886 1.98 msaitoh * up having multiple RSS buckets map to the same queue, 6887 1.98 msaitoh * so there'll be some contention. 6888 1.98 msaitoh */ 6889 1.99 msaitoh rss_buckets = rss_getnumbuckets(); 6890 1.333 msaitoh if ((sc->feat_en & IXGBE_FEATURE_RSS) && 6891 1.333 msaitoh (sc->num_queues != rss_buckets)) { 6892 1.98 msaitoh device_printf(dev, 6893 1.98 msaitoh "%s: number of queues (%d) != number of RSS buckets (%d)" 6894 1.98 msaitoh "; performance will be impacted.\n", 6895 1.333 msaitoh __func__, sc->num_queues, rss_buckets); 6896 1.98 msaitoh } 6897 1.98 msaitoh #endif 6898 1.98 msaitoh 6899 1.333 msaitoh sc->osdep.nintrs = sc->num_queues + 1; 6900 1.333 msaitoh if (pci_msix_alloc_exact(pa, &sc->osdep.intrs, 6901 1.333 msaitoh sc->osdep.nintrs) != 0) { 6902 1.98 msaitoh aprint_error_dev(dev, 6903 1.98 msaitoh "failed to allocate MSI-X interrupt\n"); 6904 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX; 6905 1.98 msaitoh return (ENXIO); 6906 1.98 msaitoh } 6907 1.98 msaitoh 6908 1.98 msaitoh kcpuset_create(&affinity, false); 6909 1.333 msaitoh for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) { 6910 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d", 6911 1.98 msaitoh device_xname(dev), i); 6912 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf, 6913 1.98 msaitoh sizeof(intrbuf)); 6914 1.333 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE, 6915 1.98 msaitoh true); 6916 1.353 msaitoh 6917 1.98 msaitoh /* Set the handler function */ 6918 1.333 msaitoh que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc, 6919 1.333 msaitoh sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que, 6920 1.98 msaitoh intr_xname); 6921 1.98 msaitoh if (que->res == NULL) { 6922 1.98 msaitoh aprint_error_dev(dev, 6923 1.98 msaitoh "Failed to register QUE handler\n"); 6924 1.119 msaitoh error = ENXIO; 6925 1.119 msaitoh goto err_out; 6926 1.98 msaitoh } 6927 1.98 msaitoh que->msix = vector; 6928 1.333 msaitoh sc->active_queues |= 1ULL << que->msix; 6929 1.99 msaitoh 6930 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) { 6931 1.98 msaitoh #ifdef RSS 6932 1.99 msaitoh /* 6933 1.99 msaitoh * The queue ID is used as the RSS layer bucket ID. 
6934 1.99 msaitoh * We look up the queue ID -> RSS CPU ID and select 6935 1.99 msaitoh * that. 6936 1.99 msaitoh */ 6937 1.99 msaitoh cpu_id = rss_getcpu(i % rss_getnumbuckets()); 6938 1.99 msaitoh CPU_SETOF(cpu_id, &cpu_mask); 6939 1.98 msaitoh #endif 6940 1.99 msaitoh } else { 6941 1.99 msaitoh /* 6942 1.99 msaitoh * Bind the MSI-X vector, and thus the 6943 1.99 msaitoh * rings to the corresponding CPU. 6944 1.99 msaitoh * 6945 1.99 msaitoh * This just happens to match the default RSS 6946 1.99 msaitoh * round-robin bucket -> queue -> CPU allocation. 6947 1.99 msaitoh */ 6948 1.333 msaitoh if (sc->num_queues > 1) 6949 1.99 msaitoh cpu_id = i; 6950 1.99 msaitoh } 6951 1.98 msaitoh /* Round-robin affinity */ 6952 1.98 msaitoh kcpuset_zero(affinity); 6953 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu); 6954 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[i], affinity, 6955 1.98 msaitoh NULL); 6956 1.98 msaitoh aprint_normal_dev(dev, "for TX/RX, interrupting at %s", 6957 1.98 msaitoh intrstr); 6958 1.98 msaitoh if (error == 0) { 6959 1.98 msaitoh #if 1 /* def IXGBE_DEBUG */ 6960 1.98 msaitoh #ifdef RSS 6961 1.322 skrll aprint_normal(", bound RSS bucket %d to CPU %d", i, 6962 1.99 msaitoh cpu_id % ncpu); 6963 1.98 msaitoh #else 6964 1.99 msaitoh aprint_normal(", bound queue %d to cpu %d", i, 6965 1.99 msaitoh cpu_id % ncpu); 6966 1.98 msaitoh #endif 6967 1.98 msaitoh #endif /* IXGBE_DEBUG */ 6968 1.98 msaitoh } 6969 1.98 msaitoh aprint_normal("\n"); 6970 1.99 msaitoh 6971 1.333 msaitoh if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 6972 1.99 msaitoh txr->txr_si = softint_establish( 6973 1.354 msaitoh SOFTINT_NET | SOFTINT_MPSAFE, 6974 1.99 msaitoh ixgbe_deferred_mq_start, txr); 6975 1.119 msaitoh if (txr->txr_si == NULL) { 6976 1.119 msaitoh aprint_error_dev(dev, 6977 1.119 msaitoh "couldn't establish software interrupt\n"); 6978 1.119 msaitoh error = ENXIO; 6979 1.119 msaitoh goto err_out; 6980 1.119 msaitoh } 6981 1.119 msaitoh } 6982 1.98 msaitoh que->que_si 6983 1.354 msaitoh = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 6984 1.98 msaitoh ixgbe_handle_que, que); 6985 1.98 msaitoh if (que->que_si == NULL) { 6986 1.98 msaitoh aprint_error_dev(dev, 6987 1.185 msaitoh "couldn't establish software interrupt\n"); 6988 1.119 msaitoh error = ENXIO; 6989 1.119 msaitoh goto err_out; 6990 1.98 msaitoh } 6991 1.98 msaitoh } 6992 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev)); 6993 1.333 msaitoh error = workqueue_create(&sc->txr_wq, wqname, 6994 1.333 msaitoh ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, 6995 1.354 msaitoh WQ_PERCPU | WQ_MPSAFE); 6996 1.128 knakahar if (error) { 6997 1.280 msaitoh aprint_error_dev(dev, 6998 1.280 msaitoh "couldn't create workqueue for deferred Tx\n"); 6999 1.128 knakahar goto err_out; 7000 1.128 knakahar } 7001 1.333 msaitoh sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); 7002 1.128 knakahar 7003 1.128 knakahar snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); 7004 1.333 msaitoh error = workqueue_create(&sc->que_wq, wqname, 7005 1.333 msaitoh ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, 7006 1.354 msaitoh WQ_PERCPU | WQ_MPSAFE); 7007 1.128 knakahar if (error) { 7008 1.128 knakahar aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n"); 7009 1.128 knakahar goto err_out; 7010 1.128 knakahar } 7011 1.44 msaitoh 7012 1.98 msaitoh /* and Link */ 7013 1.98 msaitoh cpu_id++; 7014 1.98 msaitoh snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev)); 7015 
1.333 msaitoh sc->vector = vector; 7016 1.333 msaitoh intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf, 7017 1.98 msaitoh sizeof(intrbuf)); 7018 1.353 msaitoh pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE, true); 7019 1.353 msaitoh 7020 1.98 msaitoh /* Set the link handler function */ 7021 1.333 msaitoh sc->osdep.ihs[vector] = pci_intr_establish_xname(pc, 7022 1.333 msaitoh sc->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, sc, 7023 1.98 msaitoh intr_xname); 7024 1.333 msaitoh if (sc->osdep.ihs[vector] == NULL) { 7025 1.98 msaitoh aprint_error_dev(dev, "Failed to register LINK handler\n"); 7026 1.119 msaitoh error = ENXIO; 7027 1.119 msaitoh goto err_out; 7028 1.98 msaitoh } 7029 1.98 msaitoh /* Round-robin affinity */ 7030 1.98 msaitoh kcpuset_zero(affinity); 7031 1.98 msaitoh kcpuset_set(affinity, cpu_id % ncpu); 7032 1.333 msaitoh error = interrupt_distribute(sc->osdep.ihs[vector], affinity, 7033 1.119 msaitoh NULL); 7034 1.44 msaitoh 7035 1.98 msaitoh aprint_normal_dev(dev, 7036 1.98 msaitoh "for link, interrupting at %s", intrstr); 7037 1.98 msaitoh if (error == 0) 7038 1.98 msaitoh aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu); 7039 1.44 msaitoh else 7040 1.98 msaitoh aprint_normal("\n"); 7041 1.44 msaitoh 7042 1.98 msaitoh kcpuset_destroy(affinity); 7043 1.119 msaitoh aprint_normal_dev(dev, 7044 1.119 msaitoh "Using MSI-X interrupts with %d vectors\n", vector + 1); 7045 1.99 msaitoh 7046 1.44 msaitoh return (0); 7047 1.119 msaitoh 7048 1.119 msaitoh err_out: 7049 1.119 msaitoh kcpuset_destroy(affinity); 7050 1.333 msaitoh ixgbe_free_deferred_handlers(sc); 7051 1.333 msaitoh ixgbe_free_pciintr_resources(sc); 7052 1.119 msaitoh return (error); 7053 1.99 msaitoh } /* ixgbe_allocate_msix */ 7054 1.44 msaitoh 7055 1.99 msaitoh /************************************************************************ 7056 1.99 msaitoh * ixgbe_configure_interrupts 7057 1.99 msaitoh * 7058 1.99 msaitoh * Setup MSI-X, MSI, or legacy interrupts (in that order). 7059 1.99 msaitoh * This will also depend on user settings. 7060 1.99 msaitoh ************************************************************************/ 7061 1.44 msaitoh static int 7062 1.333 msaitoh ixgbe_configure_interrupts(struct ixgbe_softc *sc) 7063 1.44 msaitoh { 7064 1.333 msaitoh device_t dev = sc->dev; 7065 1.333 msaitoh struct ixgbe_mac_info *mac = &sc->hw.mac; 7066 1.98 msaitoh int want, queues, msgs; 7067 1.44 msaitoh 7068 1.99 msaitoh /* Default to 1 queue if MSI-X setup fails */ 7069 1.333 msaitoh sc->num_queues = 1; 7070 1.99 msaitoh 7071 1.98 msaitoh /* Override by tuneable */ 7072 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) 7073 1.98 msaitoh goto msi; 7074 1.44 msaitoh 7075 1.118 msaitoh /* 7076 1.118 msaitoh * NetBSD only: Use single vector MSI when number of CPU is 1 to save 7077 1.118 msaitoh * interrupt slot. 7078 1.118 msaitoh */ 7079 1.118 msaitoh if (ncpu == 1) 7080 1.118 msaitoh goto msi; 7081 1.185 msaitoh 7082 1.99 msaitoh /* First try MSI-X */ 7083 1.333 msaitoh msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag); 7084 1.98 msaitoh msgs = MIN(msgs, IXG_MAX_NINTR); 7085 1.98 msaitoh if (msgs < 2) 7086 1.98 msaitoh goto msi; 7087 1.44 msaitoh 7088 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX; 7089 1.44 msaitoh 7090 1.98 msaitoh /* Figure out a reasonable auto config value */ 7091 1.98 msaitoh queues = (ncpu > (msgs - 1)) ? 
(msgs - 1) : ncpu; 7092 1.44 msaitoh 7093 1.98 msaitoh #ifdef RSS 7094 1.98 msaitoh /* If we're doing RSS, clamp at the number of RSS buckets */ 7095 1.333 msaitoh if (sc->feat_en & IXGBE_FEATURE_RSS) 7096 1.165 riastrad queues = uimin(queues, rss_getnumbuckets()); 7097 1.98 msaitoh #endif 7098 1.99 msaitoh if (ixgbe_num_queues > queues) { 7099 1.333 msaitoh aprint_error_dev(sc->dev, 7100 1.319 msaitoh "ixgbe_num_queues (%d) is too large, " 7101 1.319 msaitoh "using reduced amount (%d).\n", ixgbe_num_queues, queues); 7102 1.99 msaitoh ixgbe_num_queues = queues; 7103 1.99 msaitoh } 7104 1.44 msaitoh 7105 1.98 msaitoh if (ixgbe_num_queues != 0) 7106 1.98 msaitoh queues = ixgbe_num_queues; 7107 1.98 msaitoh else 7108 1.165 riastrad queues = uimin(queues, 7109 1.165 riastrad uimin(mac->max_tx_queues, mac->max_rx_queues)); 7110 1.44 msaitoh 7111 1.98 msaitoh /* 7112 1.99 msaitoh * Want one vector (RX/TX pair) per queue 7113 1.99 msaitoh * plus an additional for Link. 7114 1.99 msaitoh */ 7115 1.98 msaitoh want = queues + 1; 7116 1.98 msaitoh if (msgs >= want) 7117 1.98 msaitoh msgs = want; 7118 1.44 msaitoh else { 7119 1.186 msaitoh aprint_error_dev(dev, "MSI-X Configuration Problem, " 7120 1.319 msaitoh "%d vectors but %d queues wanted!\n", msgs, want); 7121 1.98 msaitoh goto msi; 7122 1.44 msaitoh } 7123 1.333 msaitoh sc->num_queues = queues; 7124 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSIX; 7125 1.99 msaitoh return (0); 7126 1.44 msaitoh 7127 1.98 msaitoh /* 7128 1.99 msaitoh * MSI-X allocation failed or provided us with 7129 1.99 msaitoh * less vectors than needed. Free MSI-X resources 7130 1.99 msaitoh * and we'll try enabling MSI. 7131 1.99 msaitoh */ 7132 1.98 msaitoh msi: 7133 1.99 msaitoh /* Without MSI-X, some features are no longer supported */ 7134 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_RSS; 7135 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_RSS; 7136 1.333 msaitoh sc->feat_cap &= ~IXGBE_FEATURE_SRIOV; 7137 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV; 7138 1.99 msaitoh 7139 1.333 msaitoh msgs = pci_msi_count(sc->osdep.pc, sc->osdep.tag); 7140 1.333 msaitoh sc->feat_en &= ~IXGBE_FEATURE_MSIX; 7141 1.99 msaitoh if (msgs > 1) 7142 1.99 msaitoh msgs = 1; 7143 1.99 msaitoh if (msgs != 0) { 7144 1.99 msaitoh msgs = 1; 7145 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_MSI; 7146 1.99 msaitoh return (0); 7147 1.99 msaitoh } 7148 1.99 msaitoh 7149 1.333 msaitoh if (!(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) { 7150 1.99 msaitoh aprint_error_dev(dev, 7151 1.99 msaitoh "Device does not support legacy interrupts.\n"); 7152 1.99 msaitoh return 1; 7153 1.99 msaitoh } 7154 1.99 msaitoh 7155 1.333 msaitoh sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ; 7156 1.99 msaitoh 7157 1.99 msaitoh return (0); 7158 1.99 msaitoh } /* ixgbe_configure_interrupts */ 7159 1.44 msaitoh 7160 1.48 msaitoh 7161 1.99 msaitoh /************************************************************************ 7162 1.99 msaitoh * ixgbe_handle_link - Tasklet for MSI-X Link interrupts 7163 1.99 msaitoh * 7164 1.99 msaitoh * Done outside of interrupt context since the driver might sleep 7165 1.99 msaitoh ************************************************************************/ 7166 1.26 msaitoh static void 7167 1.98 msaitoh ixgbe_handle_link(void *context) 7168 1.26 msaitoh { 7169 1.333 msaitoh struct ixgbe_softc *sc = context; 7170 1.333 msaitoh struct ixgbe_hw *hw = &sc->hw; 7171 1.26 msaitoh 7172 1.333 msaitoh KASSERT(mutex_owned(&sc->core_mtx)); 7173 1.257 msaitoh 7174 1.333 msaitoh IXGBE_EVC_ADD(&sc->link_workev, 1); 7175 
1.333 msaitoh ixgbe_check_link(hw, &sc->link_speed, &sc->link_up, 0); 7176 1.333 msaitoh ixgbe_update_link_status(sc); 7177 1.26 msaitoh 7178 1.98 msaitoh /* Re-enable link interrupts */ 7179 1.98 msaitoh IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC); 7180 1.99 msaitoh } /* ixgbe_handle_link */ 7181 1.45 msaitoh 7182 1.161 kamil #if 0 7183 1.99 msaitoh /************************************************************************ 7184 1.99 msaitoh * ixgbe_rearm_queues 7185 1.99 msaitoh ************************************************************************/ 7186 1.160 msaitoh static __inline void 7187 1.333 msaitoh ixgbe_rearm_queues(struct ixgbe_softc *sc, u64 queues) 7188 1.63 msaitoh { 7189 1.63 msaitoh u32 mask; 7190 1.63 msaitoh 7191 1.333 msaitoh switch (sc->hw.mac.type) { 7192 1.63 msaitoh case ixgbe_mac_82598EB: 7193 1.63 msaitoh mask = (IXGBE_EIMS_RTX_QUEUE & queues); 7194 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask); 7195 1.63 msaitoh break; 7196 1.63 msaitoh case ixgbe_mac_82599EB: 7197 1.63 msaitoh case ixgbe_mac_X540: 7198 1.63 msaitoh case ixgbe_mac_X550: 7199 1.63 msaitoh case ixgbe_mac_X550EM_x: 7200 1.99 msaitoh case ixgbe_mac_X550EM_a: 7201 1.63 msaitoh mask = (queues & 0xFFFFFFFF); 7202 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask); 7203 1.63 msaitoh mask = (queues >> 32); 7204 1.333 msaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask); 7205 1.63 msaitoh break; 7206 1.63 msaitoh default: 7207 1.63 msaitoh break; 7208 1.63 msaitoh } 7209 1.99 msaitoh } /* ixgbe_rearm_queues */ 7210 1.161 kamil #endif 7211
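/*
 * Editor's note (not part of the driver source): the #if 0'd
 * ixgbe_rearm_queues() above shows how a 64-bit per-queue bitmap is
 * split across the two 32-bit EICS_EX registers on 82599 and newer
 * MACs.  The stand-alone sketch below illustrates only that mask
 * split; the names split_queue_mask() and struct eics_pair are
 * hypothetical and do not exist in ixgbe.c.
 */
#if 0	/* illustrative sketch only; never compiled into the driver */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct eics_pair {
	uint32_t eics_ex0;	/* queues  0..31 -> IXGBE_EICS_EX(0) */
	uint32_t eics_ex1;	/* queues 32..63 -> IXGBE_EICS_EX(1) */
};

/* Split a 64-bit queue bitmap the same way the disabled code above does. */
static struct eics_pair
split_queue_mask(uint64_t queues)
{
	struct eics_pair p;

	p.eics_ex0 = (uint32_t)(queues & 0xFFFFFFFF);
	p.eics_ex1 = (uint32_t)(queues >> 32);
	return p;
}

int
main(void)
{
	/* Example: rearm queues 1, 3 and 40. */
	uint64_t queues = (1ULL << 1) | (1ULL << 3) | (1ULL << 40);
	struct eics_pair p = split_queue_mask(queues);

	/* Prints: EICS_EX(0)=0000000a EICS_EX(1)=00000100 */
	printf("EICS_EX(0)=%08" PRIx32 " EICS_EX(1)=%08" PRIx32 "\n",
	    p.eics_ex0, p.eics_ex1);
	return 0;
}
#endif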