Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe.c revision 1.221
      1 /* $NetBSD: ixgbe.c,v 1.221 2020/01/21 14:55:55 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
     80 /************************************************************************
     81  * Driver version
     82  ************************************************************************/
     83 static const char ixgbe_driver_version[] = "4.0.1-k";
     84 /* XXX NetBSD: + 3.3.10 */
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
     95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     96 {
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    140 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    141 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    142 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    143 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    144 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    145 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    146 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    147 	/* required last entry */
    148 	{0, 0, 0, 0, 0}
    149 };
    150 
    151 /************************************************************************
    152  * Table of branding strings
    153  ************************************************************************/
    154 static const char    *ixgbe_strings[] = {
    155 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    156 };
    157 
    158 /************************************************************************
    159  * Function prototypes
    160  ************************************************************************/
    161 static int	ixgbe_probe(device_t, cfdata_t, void *);
    162 static void	ixgbe_attach(device_t, device_t, void *);
    163 static int	ixgbe_detach(device_t, int);
    164 #if 0
    165 static int	ixgbe_shutdown(device_t);
    166 #endif
    167 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    168 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    169 static int	ixgbe_ifflags_cb(struct ethercom *);
    170 static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
    171 static void	ixgbe_ifstop(struct ifnet *, int);
    172 static int	ixgbe_init(struct ifnet *);
    173 static void	ixgbe_init_locked(struct adapter *);
    174 static void	ixgbe_stop(void *);
    175 static void	ixgbe_init_device_features(struct adapter *);
    176 static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
    177 static void	ixgbe_add_media_types(struct adapter *);
    178 static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    179 static int	ixgbe_media_change(struct ifnet *);
    180 static int	ixgbe_allocate_pci_resources(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static void	ixgbe_free_softint(struct adapter *);
    183 static void	ixgbe_get_slot_info(struct adapter *);
    184 static int	ixgbe_allocate_msix(struct adapter *,
    185 		    const struct pci_attach_args *);
    186 static int	ixgbe_allocate_legacy(struct adapter *,
    187 		    const struct pci_attach_args *);
    188 static int	ixgbe_configure_interrupts(struct adapter *);
    189 static void	ixgbe_free_pciintr_resources(struct adapter *);
    190 static void	ixgbe_free_pci_resources(struct adapter *);
    191 static void	ixgbe_local_timer(void *);
    192 static void	ixgbe_local_timer1(void *);
    193 static void	ixgbe_recovery_mode_timer(void *);
    194 static int	ixgbe_setup_interface(device_t, struct adapter *);
    195 static void	ixgbe_config_gpie(struct adapter *);
    196 static void	ixgbe_config_dmac(struct adapter *);
    197 static void	ixgbe_config_delay_values(struct adapter *);
    198 static void	ixgbe_config_link(struct adapter *);
    199 static void	ixgbe_check_wol_support(struct adapter *);
    200 static int	ixgbe_setup_low_power_mode(struct adapter *);
    201 #if 0
    202 static void	ixgbe_rearm_queues(struct adapter *, u64);
    203 #endif
    204 
    205 static void	ixgbe_initialize_transmit_units(struct adapter *);
    206 static void	ixgbe_initialize_receive_units(struct adapter *);
    207 static void	ixgbe_enable_rx_drop(struct adapter *);
    208 static void	ixgbe_disable_rx_drop(struct adapter *);
    209 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    210 
    211 static void	ixgbe_enable_intr(struct adapter *);
    212 static void	ixgbe_disable_intr(struct adapter *);
    213 static void	ixgbe_update_stats_counters(struct adapter *);
    214 static void	ixgbe_set_rxfilter(struct adapter *);
    215 static void	ixgbe_update_link_status(struct adapter *);
    216 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    217 static void	ixgbe_configure_ivars(struct adapter *);
    218 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    219 static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
    220 
    221 static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
    222 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    223 static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
    224 static int	ixgbe_register_vlan(struct adapter *, u16);
    225 static int	ixgbe_unregister_vlan(struct adapter *, u16);
    226 
    227 static void	ixgbe_add_device_sysctls(struct adapter *);
    228 static void	ixgbe_add_hw_stats(struct adapter *);
    229 static void	ixgbe_clear_evcnt(struct adapter *);
    230 static int	ixgbe_set_flowcntl(struct adapter *, int);
    231 static int	ixgbe_set_advertise(struct adapter *, int);
    232 static int	ixgbe_get_advertise(struct adapter *);
    233 
    234 /* Sysctl handlers */
    235 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    236 		     const char *, int *, int);
    237 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    238 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    239 static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    242 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    243 #ifdef IXGBE_DEBUG
    244 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    245 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    246 #endif
    247 static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    248 static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    249 static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    250 static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    251 static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    252 static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    253 static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
    254 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    255 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    256 
    257 /* Support for pluggable optic modules */
    258 static bool	ixgbe_sfp_probe(struct adapter *);
    259 
    260 /* Legacy (single vector) interrupt handler */
    261 static int	ixgbe_legacy_irq(void *);
    262 
    263 /* The MSI/MSI-X Interrupt handlers */
    264 static int	ixgbe_msix_que(void *);
    265 static int	ixgbe_msix_link(void *);
    266 
    267 /* Software interrupts for deferred work */
    268 static void	ixgbe_handle_que(void *);
    269 static void	ixgbe_handle_link(void *);
    270 static void	ixgbe_handle_msf(void *);
    271 static void	ixgbe_handle_mod(void *);
    272 static void	ixgbe_handle_phy(void *);
    273 
    274 /* Workqueue handler for deferred work */
    275 static void	ixgbe_handle_que_work(struct work *, void *);
    276 
    277 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    278 
    279 /************************************************************************
    280  *  NetBSD Device Interface Entry Points
    281  ************************************************************************/
    282 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    283     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    284     DVF_DETACH_SHUTDOWN);
    285 
    286 #if 0
    287 devclass_t ix_devclass;
    288 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    289 
    290 MODULE_DEPEND(ix, pci, 1, 1, 1);
    291 MODULE_DEPEND(ix, ether, 1, 1, 1);
    292 #ifdef DEV_NETMAP
    293 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    294 #endif
    295 #endif
    296 
    297 /*
    298  * TUNEABLE PARAMETERS:
    299  */
    300 
    301 /*
    302  * AIM: Adaptive Interrupt Moderation
    303  * which means that the interrupt rate
    304  * is varied over time based on the
    305  * traffic for that interrupt vector
    306  */
    307 static bool ixgbe_enable_aim = true;
    308 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    309 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    310     "Enable adaptive interrupt moderation");
    311 
    312 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    313 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    314     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    315 
    316 /* How many packets rxeof tries to clean at a time */
    317 static int ixgbe_rx_process_limit = 256;
    318 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    319     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    320 
    321 /* How many packets txeof tries to clean at a time */
    322 static int ixgbe_tx_process_limit = 256;
    323 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    324     &ixgbe_tx_process_limit, 0,
    325     "Maximum number of sent packets to process at a time, -1 means unlimited");
    326 
    327 /* Flow control setting, default to full */
    328 static int ixgbe_flow_control = ixgbe_fc_full;
    329 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    330     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    331 
    332 /* Which packet processing uses workqueue or softint */
    333 static bool ixgbe_txrx_workqueue = false;
    334 
    335 /*
    336  * Smart speed setting, default to on
    337  * this only works as a compile option
    338  * right now as its during attach, set
    339  * this to 'ixgbe_smart_speed_off' to
    340  * disable.
    341  */
    342 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    343 
    344 /*
    345  * MSI-X should be the default for best performance,
    346  * but this allows it to be forced off for testing.
    347  */
    348 static int ixgbe_enable_msix = 1;
    349 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    350     "Enable MSI-X interrupts");
    351 
    352 /*
    353  * Number of Queues, can be set to 0,
    354  * it then autoconfigures based on the
    355  * number of cpus with a max of 8. This
    356  * can be overridden manually here.
    357  */
    358 static int ixgbe_num_queues = 0;
    359 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    360     "Number of queues to configure, 0 indicates autoconfigure");
    361 
    362 /*
    363  * Number of TX descriptors per ring,
    364  * setting higher than RX as this seems
    365  * the better performing choice.
    366  */
    367 static int ixgbe_txd = PERFORM_TXD;
    368 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    369     "Number of transmit descriptors per queue");
    370 
    371 /* Number of RX descriptors per ring */
    372 static int ixgbe_rxd = PERFORM_RXD;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    374     "Number of receive descriptors per queue");
    375 
    376 /*
    377  * Defining this on will allow the use
    378  * of unsupported SFP+ modules, note that
    379  * doing so you are on your own :)
    380  */
    381 static int allow_unsupported_sfp = false;
    382 #define TUNABLE_INT(__x, __y)
    383 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    384 
    385 /*
    386  * Not sure if Flow Director is fully baked,
    387  * so we'll default to turning it off.
    388  */
    389 static int ixgbe_enable_fdir = 0;
    390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    391     "Enable Flow Director");
    392 
    393 /* Legacy Transmit (single queue) */
    394 static int ixgbe_enable_legacy_tx = 0;
    395 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    396     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    397 
    398 /* Receive-Side Scaling */
    399 static int ixgbe_enable_rss = 1;
    400 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    401     "Enable Receive-Side Scaling (RSS)");
    402 
    403 #if 0
    404 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    405 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    406 #endif
    407 
    408 #ifdef NET_MPSAFE
    409 #define IXGBE_MPSAFE		1
    410 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    411 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    412 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    413 #else
    414 #define IXGBE_CALLOUT_FLAGS	0
    415 #define IXGBE_SOFTINFT_FLAGS	0
    416 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    417 #endif
    418 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    419 
    420 /************************************************************************
    421  * ixgbe_initialize_rss_mapping
    422  ************************************************************************/
    423 static void
    424 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    425 {
    426 	struct ixgbe_hw	*hw = &adapter->hw;
    427 	u32		reta = 0, mrqc, rss_key[10];
    428 	int		queue_id, table_size, index_mult;
    429 	int		i, j;
    430 	u32		rss_hash_config;
    431 
    432 	/* force use default RSS key. */
    433 #ifdef __NetBSD__
    434 	rss_getkey((uint8_t *) &rss_key);
    435 #else
    436 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    437 		/* Fetch the configured RSS key */
    438 		rss_getkey((uint8_t *) &rss_key);
    439 	} else {
    440 		/* set up random bits */
    441 		cprng_fast(&rss_key, sizeof(rss_key));
    442 	}
    443 #endif
    444 
    445 	/* Set multiplier for RETA setup and table size based on MAC */
    446 	index_mult = 0x1;
    447 	table_size = 128;
    448 	switch (adapter->hw.mac.type) {
    449 	case ixgbe_mac_82598EB:
    450 		index_mult = 0x11;
    451 		break;
    452 	case ixgbe_mac_X550:
    453 	case ixgbe_mac_X550EM_x:
    454 	case ixgbe_mac_X550EM_a:
    455 		table_size = 512;
    456 		break;
    457 	default:
    458 		break;
    459 	}
    460 
    461 	/* Set up the redirection table */
    462 	for (i = 0, j = 0; i < table_size; i++, j++) {
    463 		if (j == adapter->num_queues)
    464 			j = 0;
    465 
    466 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    467 			/*
    468 			 * Fetch the RSS bucket id for the given indirection
    469 			 * entry. Cap it at the number of configured buckets
    470 			 * (which is num_queues.)
    471 			 */
    472 			queue_id = rss_get_indirection_to_bucket(i);
    473 			queue_id = queue_id % adapter->num_queues;
    474 		} else
    475 			queue_id = (j * index_mult);
    476 
    477 		/*
    478 		 * The low 8 bits are for hash value (n+0);
    479 		 * The next 8 bits are for hash value (n+1), etc.
    480 		 */
    481 		reta = reta >> 8;
    482 		reta = reta | (((uint32_t) queue_id) << 24);
    483 		if ((i & 3) == 3) {
    484 			if (i < 128)
    485 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    486 			else
    487 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    488 				    reta);
    489 			reta = 0;
    490 		}
    491 	}
    492 
    493 	/* Now fill our hash function seeds */
    494 	for (i = 0; i < 10; i++)
    495 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    496 
    497 	/* Perform hash on these packet types */
    498 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    499 		rss_hash_config = rss_gethashconfig();
    500 	else {
    501 		/*
    502 		 * Disable UDP - IP fragments aren't currently being handled
    503 		 * and so we end up with a mix of 2-tuple and 4-tuple
    504 		 * traffic.
    505 		 */
    506 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    507 				| RSS_HASHTYPE_RSS_TCP_IPV4
    508 				| RSS_HASHTYPE_RSS_IPV6
    509 				| RSS_HASHTYPE_RSS_TCP_IPV6
    510 				| RSS_HASHTYPE_RSS_IPV6_EX
    511 				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    512 	}
    513 
    514 	mrqc = IXGBE_MRQC_RSSEN;
    515 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    516 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    517 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    518 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    519 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    520 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    521 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    522 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    523 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    524 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    525 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    526 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    527 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    528 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    529 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    530 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    531 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    532 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    533 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    534 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    535 } /* ixgbe_initialize_rss_mapping */
    536 
    537 /************************************************************************
    538  * ixgbe_initialize_receive_units - Setup receive registers and features.
    539  ************************************************************************/
    540 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    541 
/*
 * Program the RX side of the device: per-queue descriptor rings
 * (base/length/head/tail), buffer sizing (SRRCTL), statistics
 * mapping (RQSMR), jumbo/CRC handling (HLREG0), RSS redirection
 * and the RXCSUM checksum-offload bits.  The RX unit is kept
 * disabled for the whole setup and is NOT re-enabled here.
 */
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	/* 82598 only: discard pause frames and pass MAC control frames */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Round the mbuf buffer size up to a 1KB SRRCTL unit */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware ring index; i is the software index */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register: buffer size + advanced
		 * one-buffer descriptor format */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	/* Packet-split receive types; not applicable to 82598 */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	/* Program the RSS key and redirection table */
	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
    666 
    667 /************************************************************************
    668  * ixgbe_initialize_transmit_units - Enable transmit units.
    669  ************************************************************************/
/*
 * Program the TX side of the device: per-queue descriptor rings
 * (base/length/head/tail), statistics mapping (TQSMR/TQSM),
 * head-writeback disable, and — on 82599 and later — the DMA TX
 * engine enable plus the MTQC queue layout (written with the TX
 * arbiter briefly disabled, as the datasheet requires).
 */
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		/* j is the hardware ring index; i is the software index */
		int j = txr->me;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		/* Read-modify-write only this queue's 8-bit field */
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Ring is empty after the pointer reset above */
		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	/* 82599 and later: enable the TX DMA engine and set MTQC */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter once MTQC is programmed */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
    758 
    759 /************************************************************************
    760  * ixgbe_attach - Device initialization routine
    761  *
    762  *   Called when the driver is being loaded.
    763  *   Identifies the type of hardware, allocates all resources
    764  *   and initializes the hardware.
    765  *
    766  *   return 0 on success, positive on failure
    767  ************************************************************************/
    768 static void
    769 ixgbe_attach(device_t parent, device_t dev, void *aux)
    770 {
    771 	struct adapter	*adapter;
    772 	struct ixgbe_hw *hw;
    773 	int		error = -1;
    774 	u32		ctrl_ext;
    775 	u16		high, low, nvmreg;
    776 	pcireg_t	id, subid;
    777 	const ixgbe_vendor_info_t *ent;
    778 	struct pci_attach_args *pa = aux;
    779 	bool unsupported_sfp = false;
    780 	const char *str;
    781 	char buf[256];
    782 
    783 	INIT_DEBUGOUT("ixgbe_attach: begin");
    784 
    785 	/* Allocate, clear, and link in our adapter structure */
    786 	adapter = device_private(dev);
    787 	adapter->hw.back = adapter;
    788 	adapter->dev = dev;
    789 	hw = &adapter->hw;
    790 	adapter->osdep.pc = pa->pa_pc;
    791 	adapter->osdep.tag = pa->pa_tag;
    792 	if (pci_dma64_available(pa))
    793 		adapter->osdep.dmat = pa->pa_dmat64;
    794 	else
    795 		adapter->osdep.dmat = pa->pa_dmat;
    796 	adapter->osdep.attached = false;
    797 
    798 	ent = ixgbe_lookup(pa);
    799 
    800 	KASSERT(ent != NULL);
    801 
    802 	aprint_normal(": %s, Version - %s\n",
    803 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    804 
    805 	/* Core Lock Init*/
    806 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    807 
    808 	/* Set up the timer callout */
    809 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    810 
    811 	/* Determine hardware revision */
    812 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    813 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    814 
    815 	hw->vendor_id = PCI_VENDOR(id);
    816 	hw->device_id = PCI_PRODUCT(id);
    817 	hw->revision_id =
    818 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    819 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    820 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    821 
    822 	/*
    823 	 * Make sure BUSMASTER is set
    824 	 */
    825 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    826 
    827 	/* Do base PCI setup - map BAR0 */
    828 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    829 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    830 		error = ENXIO;
    831 		goto err_out;
    832 	}
    833 
    834 	/* let hardware know driver is loaded */
    835 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    836 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    837 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    838 
    839 	/*
    840 	 * Initialize the shared code
    841 	 */
    842 	if (ixgbe_init_shared_code(hw) != 0) {
    843 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    844 		error = ENXIO;
    845 		goto err_out;
    846 	}
    847 
    848 	switch (hw->mac.type) {
    849 	case ixgbe_mac_82598EB:
    850 		str = "82598EB";
    851 		break;
    852 	case ixgbe_mac_82599EB:
    853 		str = "82599EB";
    854 		break;
    855 	case ixgbe_mac_X540:
    856 		str = "X540";
    857 		break;
    858 	case ixgbe_mac_X550:
    859 		str = "X550";
    860 		break;
    861 	case ixgbe_mac_X550EM_x:
    862 		str = "X550EM";
    863 		break;
    864 	case ixgbe_mac_X550EM_a:
    865 		str = "X550EM A";
    866 		break;
    867 	default:
    868 		str = "Unknown";
    869 		break;
    870 	}
    871 	aprint_normal_dev(dev, "device %s\n", str);
    872 
    873 	if (hw->mbx.ops.init_params)
    874 		hw->mbx.ops.init_params(hw);
    875 
    876 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    877 
    878 	/* Pick up the 82599 settings */
    879 	if (hw->mac.type != ixgbe_mac_82598EB) {
    880 		hw->phy.smart_speed = ixgbe_smart_speed;
    881 		adapter->num_segs = IXGBE_82599_SCATTER;
    882 	} else
    883 		adapter->num_segs = IXGBE_82598_SCATTER;
    884 
    885 	/* Ensure SW/FW semaphore is free */
    886 	ixgbe_init_swfw_semaphore(hw);
    887 
    888 	hw->mac.ops.set_lan_id(hw);
    889 	ixgbe_init_device_features(adapter);
    890 
    891 	if (ixgbe_configure_interrupts(adapter)) {
    892 		error = ENXIO;
    893 		goto err_out;
    894 	}
    895 
    896 	/* Allocate multicast array memory. */
    897 	adapter->mta = malloc(sizeof(*adapter->mta) *
    898 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
    899 
    900 	/* Enable WoL (if supported) */
    901 	ixgbe_check_wol_support(adapter);
    902 
    903 	/* Register for VLAN events */
    904 	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
    905 
    906 	/* Verify adapter fan is still functional (if applicable) */
    907 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    908 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    909 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    910 	}
    911 
    912 	/* Set an initial default flow control value */
    913 	hw->fc.requested_mode = ixgbe_flow_control;
    914 
    915 	/* Sysctls for limiting the amount of work done in the taskqueues */
    916 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    917 	    "max number of rx packets to process",
    918 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    919 
    920 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    921 	    "max number of tx packets to process",
    922 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    923 
    924 	/* Do descriptor calc and sanity checks */
    925 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    926 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    927 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    928 		adapter->num_tx_desc = DEFAULT_TXD;
    929 	} else
    930 		adapter->num_tx_desc = ixgbe_txd;
    931 
    932 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    933 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    934 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    935 		adapter->num_rx_desc = DEFAULT_RXD;
    936 	} else
    937 		adapter->num_rx_desc = ixgbe_rxd;
    938 
    939 	/* Allocate our TX/RX Queues */
    940 	if (ixgbe_allocate_queues(adapter)) {
    941 		error = ENOMEM;
    942 		goto err_out;
    943 	}
    944 
    945 	hw->phy.reset_if_overtemp = TRUE;
    946 	error = ixgbe_reset_hw(hw);
    947 	hw->phy.reset_if_overtemp = FALSE;
    948 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    949 		/*
    950 		 * No optics in this port, set up
    951 		 * so the timer routine will probe
    952 		 * for later insertion.
    953 		 */
    954 		adapter->sfp_probe = TRUE;
    955 		error = IXGBE_SUCCESS;
    956 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    957 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    958 		unsupported_sfp = true;
    959 		error = IXGBE_SUCCESS;
    960 	} else if (error) {
    961 		aprint_error_dev(dev, "Hardware initialization failed\n");
    962 		error = EIO;
    963 		goto err_late;
    964 	}
    965 
    966 	/* Make sure we have a good EEPROM before we read from it */
    967 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    968 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    969 		error = EIO;
    970 		goto err_late;
    971 	}
    972 
    973 	aprint_normal("%s:", device_xname(dev));
    974 	/* NVM Image Version */
    975 	high = low = 0;
    976 	switch (hw->mac.type) {
    977 	case ixgbe_mac_X540:
    978 	case ixgbe_mac_X550EM_a:
    979 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    980 		if (nvmreg == 0xffff)
    981 			break;
    982 		high = (nvmreg >> 12) & 0x0f;
    983 		low = (nvmreg >> 4) & 0xff;
    984 		id = nvmreg & 0x0f;
    985 		aprint_normal(" NVM Image Version %u.", high);
    986 		if (hw->mac.type == ixgbe_mac_X540)
    987 			str = "%x";
    988 		else
    989 			str = "%02x";
    990 		aprint_normal(str, low);
    991 		aprint_normal(" ID 0x%x,", id);
    992 		break;
    993 	case ixgbe_mac_X550EM_x:
    994 	case ixgbe_mac_X550:
    995 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    996 		if (nvmreg == 0xffff)
    997 			break;
    998 		high = (nvmreg >> 12) & 0x0f;
    999 		low = nvmreg & 0xff;
   1000 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
   1001 		break;
   1002 	default:
   1003 		break;
   1004 	}
   1005 	hw->eeprom.nvm_image_ver_high = high;
   1006 	hw->eeprom.nvm_image_ver_low = low;
   1007 
   1008 	/* PHY firmware revision */
   1009 	switch (hw->mac.type) {
   1010 	case ixgbe_mac_X540:
   1011 	case ixgbe_mac_X550:
   1012 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1013 		if (nvmreg == 0xffff)
   1014 			break;
   1015 		high = (nvmreg >> 12) & 0x0f;
   1016 		low = (nvmreg >> 4) & 0xff;
   1017 		id = nvmreg & 0x000f;
   1018 		aprint_normal(" PHY FW Revision %u.", high);
   1019 		if (hw->mac.type == ixgbe_mac_X540)
   1020 			str = "%x";
   1021 		else
   1022 			str = "%02x";
   1023 		aprint_normal(str, low);
   1024 		aprint_normal(" ID 0x%x,", id);
   1025 		break;
   1026 	default:
   1027 		break;
   1028 	}
   1029 
   1030 	/* NVM Map version & OEM NVM Image version */
   1031 	switch (hw->mac.type) {
   1032 	case ixgbe_mac_X550:
   1033 	case ixgbe_mac_X550EM_x:
   1034 	case ixgbe_mac_X550EM_a:
   1035 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1036 		if (nvmreg != 0xffff) {
   1037 			high = (nvmreg >> 12) & 0x0f;
   1038 			low = nvmreg & 0x00ff;
   1039 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1040 		}
   1041 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1042 		if (nvmreg != 0xffff) {
   1043 			high = (nvmreg >> 12) & 0x0f;
   1044 			low = nvmreg & 0x00ff;
   1045 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1046 			    low);
   1047 		}
   1048 		break;
   1049 	default:
   1050 		break;
   1051 	}
   1052 
   1053 	/* Print the ETrackID */
   1054 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1055 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1056 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1057 
   1058 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1059 		error = ixgbe_allocate_msix(adapter, pa);
   1060 		if (error) {
   1061 			/* Free allocated queue structures first */
   1062 			ixgbe_free_queues(adapter);
   1063 
   1064 			/* Fallback to legacy interrupt */
   1065 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1066 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1067 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1068 			adapter->num_queues = 1;
   1069 
   1070 			/* Allocate our TX/RX Queues again */
   1071 			if (ixgbe_allocate_queues(adapter)) {
   1072 				error = ENOMEM;
   1073 				goto err_out;
   1074 			}
   1075 		}
   1076 	}
   1077 	/* Recovery mode */
   1078 	switch (adapter->hw.mac.type) {
   1079 	case ixgbe_mac_X550:
   1080 	case ixgbe_mac_X550EM_x:
   1081 	case ixgbe_mac_X550EM_a:
   1082 		/* >= 2.00 */
   1083 		if (hw->eeprom.nvm_image_ver_high >= 2) {
   1084 			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
   1085 			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
   1086 		}
   1087 		break;
   1088 	default:
   1089 		break;
   1090 	}
   1091 
   1092 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1093 		error = ixgbe_allocate_legacy(adapter, pa);
   1094 	if (error)
   1095 		goto err_late;
   1096 
   1097 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1098 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1099 	    ixgbe_handle_link, adapter);
   1100 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1101 	    ixgbe_handle_mod, adapter);
   1102 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1103 	    ixgbe_handle_msf, adapter);
   1104 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1105 	    ixgbe_handle_phy, adapter);
   1106 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1107 		adapter->fdir_si =
   1108 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1109 			ixgbe_reinit_fdir, adapter);
   1110 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1111 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1112 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1113 		&& (adapter->fdir_si == NULL))) {
   1114 		aprint_error_dev(dev,
   1115 		    "could not establish software interrupts ()\n");
   1116 		goto err_out;
   1117 	}
   1118 
   1119 	error = ixgbe_start_hw(hw);
   1120 	switch (error) {
   1121 	case IXGBE_ERR_EEPROM_VERSION:
   1122 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1123 		    "LOM.  Please be aware there may be issues associated "
   1124 		    "with your hardware.\nIf you are experiencing problems "
   1125 		    "please contact your Intel or hardware representative "
   1126 		    "who provided you with this hardware.\n");
   1127 		break;
   1128 	default:
   1129 		break;
   1130 	}
   1131 
   1132 	/* Setup OS specific network interface */
   1133 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1134 		goto err_late;
   1135 
   1136 	/*
   1137 	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
   1138 	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
   1139 	 */
   1140 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1141 		uint16_t id1, id2;
   1142 		int oui, model, rev;
   1143 		const char *descr;
   1144 
   1145 		id1 = hw->phy.id >> 16;
   1146 		id2 = hw->phy.id & 0xffff;
   1147 		oui = MII_OUI(id1, id2);
   1148 		model = MII_MODEL(id2);
   1149 		rev = MII_REV(id2);
   1150 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1151 			aprint_normal_dev(dev,
   1152 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1153 			    descr, oui, model, rev);
   1154 		else
   1155 			aprint_normal_dev(dev,
   1156 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1157 			    oui, model, rev);
   1158 	}
   1159 
   1160 	/* Enable EEE power saving */
   1161 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   1162 		hw->mac.ops.setup_eee(hw,
   1163 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   1164 
   1165 	/* Enable power to the phy. */
   1166 	if (!unsupported_sfp) {
   1167 		/* Enable the optics for 82599 SFP+ fiber */
   1168 		ixgbe_enable_tx_laser(hw);
   1169 
   1170 		/*
   1171 		 * XXX Currently, ixgbe_set_phy_power() supports only copper
   1172 		 * PHY, so it's not required to test with !unsupported_sfp.
   1173 		 */
   1174 		ixgbe_set_phy_power(hw, TRUE);
   1175 	}
   1176 
   1177 	/* Initialize statistics */
   1178 	ixgbe_update_stats_counters(adapter);
   1179 
   1180 	/* Check PCIE slot type/speed/width */
   1181 	ixgbe_get_slot_info(adapter);
   1182 
   1183 	/*
   1184 	 * Do time init and sysctl init here, but
   1185 	 * only on the first port of a bypass adapter.
   1186 	 */
   1187 	ixgbe_bypass_init(adapter);
   1188 
   1189 	/* Set an initial dmac value */
   1190 	adapter->dmac = 0;
   1191 	/* Set initial advertised speeds (if applicable) */
   1192 	adapter->advertise = ixgbe_get_advertise(adapter);
   1193 
   1194 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1195 		ixgbe_define_iov_schemas(dev, &error);
   1196 
   1197 	/* Add sysctls */
   1198 	ixgbe_add_device_sysctls(adapter);
   1199 	ixgbe_add_hw_stats(adapter);
   1200 
   1201 	/* For Netmap */
   1202 	adapter->init_locked = ixgbe_init_locked;
   1203 	adapter->stop_locked = ixgbe_stop;
   1204 
   1205 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1206 		ixgbe_netmap_attach(adapter);
   1207 
   1208 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1209 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1210 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1211 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1212 
   1213 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1214 		pmf_class_network_register(dev, adapter->ifp);
   1215 	else
   1216 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1217 
   1218 	/* Init recovery mode timer and state variable */
   1219 	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
   1220 		adapter->recovery_mode = 0;
   1221 
   1222 		/* Set up the timer callout */
   1223 		callout_init(&adapter->recovery_mode_timer,
   1224 		    IXGBE_CALLOUT_FLAGS);
   1225 
   1226 		/* Start the task */
   1227 		callout_reset(&adapter->recovery_mode_timer, hz,
   1228 		    ixgbe_recovery_mode_timer, adapter);
   1229 	}
   1230 
   1231 	INIT_DEBUGOUT("ixgbe_attach: end");
   1232 	adapter->osdep.attached = true;
   1233 
   1234 	return;
   1235 
   1236 err_late:
   1237 	ixgbe_free_queues(adapter);
   1238 err_out:
   1239 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1240 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1241 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1242 	ixgbe_free_softint(adapter);
   1243 	ixgbe_free_pci_resources(adapter);
   1244 	if (adapter->mta != NULL)
   1245 		free(adapter->mta, M_DEVBUF);
   1246 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1247 
   1248 	return;
   1249 } /* ixgbe_attach */
   1250 
   1251 /************************************************************************
   1252  * ixgbe_check_wol_support
   1253  *
   1254  *   Checks whether the adapter's ports are capable of
   1255  *   Wake On LAN by reading the adapter's NVM.
   1256  *
   1257  *   Sets each port's hw->wol_enabled value depending
   1258  *   on the value read here.
   1259  ************************************************************************/
   1260 static void
   1261 ixgbe_check_wol_support(struct adapter *adapter)
   1262 {
   1263 	struct ixgbe_hw *hw = &adapter->hw;
   1264 	u16		dev_caps = 0;
   1265 
   1266 	/* Find out WoL support for port */
   1267 	adapter->wol_support = hw->wol_enabled = 0;
   1268 	ixgbe_get_device_caps(hw, &dev_caps);
   1269 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1270 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1271 	     hw->bus.func == 0))
   1272 		adapter->wol_support = hw->wol_enabled = 1;
   1273 
   1274 	/* Save initial wake up filter configuration */
   1275 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1276 
   1277 	return;
   1278 } /* ixgbe_check_wol_support */
   1279 
   1280 /************************************************************************
   1281  * ixgbe_setup_interface
   1282  *
   1283  *   Setup networking device structure and register an interface.
   1284  ************************************************************************/
   1285 static int
   1286 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1287 {
   1288 	struct ethercom *ec = &adapter->osdep.ec;
   1289 	struct ifnet   *ifp;
   1290 	int rv;
   1291 
   1292 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1293 
   1294 	ifp = adapter->ifp = &ec->ec_if;
   1295 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1296 	ifp->if_baudrate = IF_Gbps(10);
   1297 	ifp->if_init = ixgbe_init;
   1298 	ifp->if_stop = ixgbe_ifstop;
   1299 	ifp->if_softc = adapter;
   1300 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1301 #ifdef IXGBE_MPSAFE
   1302 	ifp->if_extflags = IFEF_MPSAFE;
   1303 #endif
   1304 	ifp->if_ioctl = ixgbe_ioctl;
   1305 #if __FreeBSD_version >= 1100045
   1306 	/* TSO parameters */
   1307 	ifp->if_hw_tsomax = 65518;
   1308 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1309 	ifp->if_hw_tsomaxsegsize = 2048;
   1310 #endif
   1311 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1312 #if 0
   1313 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1314 #endif
   1315 	} else {
   1316 		ifp->if_transmit = ixgbe_mq_start;
   1317 #if 0
   1318 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1319 #endif
   1320 	}
   1321 	ifp->if_start = ixgbe_legacy_start;
   1322 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1323 	IFQ_SET_READY(&ifp->if_snd);
   1324 
   1325 	rv = if_initialize(ifp);
   1326 	if (rv != 0) {
   1327 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1328 		return rv;
   1329 	}
   1330 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1331 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1332 	aprint_normal_dev(dev, "Ethernet address %s\n",
   1333 	    ether_sprintf(adapter->hw.mac.addr));
   1334 	/*
   1335 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1336 	 * used.
   1337 	 */
   1338 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1339 
   1340 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1341 
   1342 	/*
   1343 	 * Tell the upper layer(s) we support long frames.
   1344 	 */
   1345 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1346 
   1347 	/* Set capability flags */
   1348 	ifp->if_capabilities |= IFCAP_RXCSUM
   1349 			     |	IFCAP_TXCSUM
   1350 			     |	IFCAP_TSOv4
   1351 			     |	IFCAP_TSOv6;
   1352 	ifp->if_capenable = 0;
   1353 
   1354 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1355 			    |  ETHERCAP_VLAN_HWCSUM
   1356 			    |  ETHERCAP_JUMBO_MTU
   1357 			    |  ETHERCAP_VLAN_MTU;
   1358 
   1359 	/* Enable the above capabilities by default */
   1360 	ec->ec_capenable = ec->ec_capabilities;
   1361 
   1362 	/*
   1363 	 * Don't turn this on by default, if vlans are
   1364 	 * created on another pseudo device (eg. lagg)
   1365 	 * then vlan events are not passed thru, breaking
   1366 	 * operation, but with HW FILTER off it works. If
   1367 	 * using vlans directly on the ixgbe driver you can
   1368 	 * enable this and get full hardware tag filtering.
   1369 	 */
   1370 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1371 
   1372 	/*
   1373 	 * Specify the media types supported by this adapter and register
   1374 	 * callbacks to update media and link information
   1375 	 */
   1376 	ec->ec_ifmedia = &adapter->media;
   1377 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1378 	    ixgbe_media_status);
   1379 
   1380 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1381 	ixgbe_add_media_types(adapter);
   1382 
   1383 	/* Set autoselect media by default */
   1384 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1385 
   1386 	if_register(ifp);
   1387 
   1388 	return (0);
   1389 } /* ixgbe_setup_interface */
   1390 
   1391 /************************************************************************
   1392  * ixgbe_add_media_types
   1393  ************************************************************************/
   1394 static void
   1395 ixgbe_add_media_types(struct adapter *adapter)
   1396 {
   1397 	struct ixgbe_hw *hw = &adapter->hw;
   1398 	u64		layer;
   1399 
   1400 	layer = adapter->phy_layer;
   1401 
   1402 #define	ADD(mm, dd)							\
   1403 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1404 
   1405 	ADD(IFM_NONE, 0);
   1406 
   1407 	/* Media types with matching NetBSD media defines */
   1408 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1409 		ADD(IFM_10G_T | IFM_FDX, 0);
   1410 	}
   1411 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1412 		ADD(IFM_1000_T | IFM_FDX, 0);
   1413 	}
   1414 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1415 		ADD(IFM_100_TX | IFM_FDX, 0);
   1416 	}
   1417 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1418 		ADD(IFM_10_T | IFM_FDX, 0);
   1419 	}
   1420 
   1421 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1422 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1423 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1424 	}
   1425 
   1426 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1427 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1428 		if (hw->phy.multispeed_fiber) {
   1429 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1430 		}
   1431 	}
   1432 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1433 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1434 		if (hw->phy.multispeed_fiber) {
   1435 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1436 		}
   1437 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1438 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1439 	}
   1440 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1441 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1442 	}
   1443 
   1444 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1445 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1446 	}
   1447 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1448 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1449 	}
   1450 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1451 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1452 	}
   1453 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1454 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1455 	}
   1456 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1457 		ADD(IFM_2500_T | IFM_FDX, 0);
   1458 	}
   1459 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1460 		ADD(IFM_5000_T | IFM_FDX, 0);
   1461 	}
   1462 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1463 		ADD(IFM_1000_BX10 | IFM_FDX, 0);
   1464 	/* XXX no ifmedia_set? */
   1465 
   1466 	ADD(IFM_AUTO, 0);
   1467 
   1468 #undef ADD
   1469 } /* ixgbe_add_media_types */
   1470 
   1471 /************************************************************************
   1472  * ixgbe_is_sfp
   1473  ************************************************************************/
   1474 static inline bool
   1475 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1476 {
   1477 	switch (hw->mac.type) {
   1478 	case ixgbe_mac_82598EB:
   1479 		if (hw->phy.type == ixgbe_phy_nl)
   1480 			return (TRUE);
   1481 		return (FALSE);
   1482 	case ixgbe_mac_82599EB:
   1483 	case ixgbe_mac_X550EM_x:
   1484 	case ixgbe_mac_X550EM_a:
   1485 		switch (hw->mac.ops.get_media_type(hw)) {
   1486 		case ixgbe_media_type_fiber:
   1487 		case ixgbe_media_type_fiber_qsfp:
   1488 			return (TRUE);
   1489 		default:
   1490 			return (FALSE);
   1491 		}
   1492 	default:
   1493 		return (FALSE);
   1494 	}
   1495 } /* ixgbe_is_sfp */
   1496 
   1497 /************************************************************************
   1498  * ixgbe_config_link
   1499  ************************************************************************/
   1500 static void
   1501 ixgbe_config_link(struct adapter *adapter)
   1502 {
   1503 	struct ixgbe_hw *hw = &adapter->hw;
   1504 	u32		autoneg, err = 0;
   1505 	bool		sfp, negotiate = false;
   1506 
   1507 	sfp = ixgbe_is_sfp(hw);
   1508 
   1509 	if (sfp) {
   1510 		if (hw->phy.multispeed_fiber) {
   1511 			ixgbe_enable_tx_laser(hw);
   1512 			kpreempt_disable();
   1513 			softint_schedule(adapter->msf_si);
   1514 			kpreempt_enable();
   1515 		}
   1516 		kpreempt_disable();
   1517 		softint_schedule(adapter->mod_si);
   1518 		kpreempt_enable();
   1519 	} else {
   1520 		struct ifmedia	*ifm = &adapter->media;
   1521 
   1522 		if (hw->mac.ops.check_link)
   1523 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1524 			    &adapter->link_up, FALSE);
   1525 		if (err)
   1526 			return;
   1527 
   1528 		/*
   1529 		 * Check if it's the first call. If it's the first call,
   1530 		 * get value for auto negotiation.
   1531 		 */
   1532 		autoneg = hw->phy.autoneg_advertised;
   1533 		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
   1534 		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
   1535 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1536 			    &negotiate);
   1537 		if (err)
   1538 			return;
   1539 		if (hw->mac.ops.setup_link)
   1540 			err = hw->mac.ops.setup_link(hw, autoneg,
   1541 			    adapter->link_up);
   1542 	}
   1543 
   1544 } /* ixgbe_config_link */
   1545 
   1546 /************************************************************************
   1547  * ixgbe_update_stats_counters - Update board statistics counters.
   1548  ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet	      *ifp = adapter->ifp;
	struct ixgbe_hw	      *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32		      missed_rx = 0, bprc, lxon, lxoff, total;
	u64		      total_missed_rx = 0;
	uint64_t	      crcerrs, rlec;
	unsigned int	      queue_counters;
	int		      i;

	/*
	 * Most of the statistics registers read below are read-to-clear
	 * in hardware: each register must be read exactly once per
	 * update and its value accumulated into the matching event
	 * counter.  Do not reorder or duplicate the reads.
	 */
	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->crcerrs.ev_count += crcerrs;
	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* MBSDC (bad SFD count) exists on X550 and newer only. */
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);

	/* 16 registers exist */
	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
	for (i = 0; i < queue_counters; i++) {
		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		/* Per-queue RX drop counter is 82599 and newer. */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->qprdc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		}
	}

	/* 8 registers exist */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		uint32_t mp;

		/* MPC */
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* global total per queue */
		stats->mpc[i].ev_count += mp;
		/* running comprehensive total for stats display */
		total_missed_rx += mp;

		if (hw->mac.type == ixgbe_mac_82598EB)
			stats->rnbc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));

		stats->pxontxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		stats->pxofftxc[i].ev_count
		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		/* The PFC RX counter registers moved on 82599 and newer. */
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			stats->pxon2offc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		} else {
			stats->pxonrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			stats->pxoffrxc[i].ev_count
			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	stats->mpctotal.ev_count += total_missed_rx;

	/* Document says M[LR]FC are valid when link is up and 10Gbps */
	if ((adapter->link_active == LINK_STATE_UP)
	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
	}
	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
	stats->rlec.ev_count += rlec;

	/* Hardware workaround, gprc counts missed packets */
	/*
	 * NOTE(review): missed_rx is initialized to 0 and never updated
	 * in this function (FreeBSD accumulates the per-queue MPC values
	 * into it), so this subtraction is currently a no-op -- confirm
	 * whether that divergence is intentional.
	 */
	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc.ev_count += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc.ev_count += lxoff;
	/* Number of XON + XOFF pause frames we transmitted. */
	total = lxon + lxoff;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/*
		 * 64-bit octet counters are split over a low/high register
		 * pair.  The transmitted pause frames (64 bytes each) are
		 * excluded from the good-octet count below.
		 */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	} else {
		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc.ev_count += bprc;
	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);

	/* Packet-size histogram counters (RX). */
	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* Exclude the pause frames we sent from the TX packet counts. */
	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;

	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_iqdrops += total_missed_rx;
	ifp->if_ierrors += crcerrs + rlec;
} /* ixgbe_update_stats_counters */
   1710 
   1711 /************************************************************************
   1712  * ixgbe_add_hw_stats
   1713  *
   1714  *   Add sysctl variables, one per statistic, to the system.
   1715  ************************************************************************/
   1716 static void
   1717 ixgbe_add_hw_stats(struct adapter *adapter)
   1718 {
   1719 	device_t dev = adapter->dev;
   1720 	const struct sysctlnode *rnode, *cnode;
   1721 	struct sysctllog **log = &adapter->sysctllog;
   1722 	struct tx_ring *txr = adapter->tx_rings;
   1723 	struct rx_ring *rxr = adapter->rx_rings;
   1724 	struct ixgbe_hw *hw = &adapter->hw;
   1725 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1726 	const char *xname = device_xname(dev);
   1727 	int i;
   1728 
   1729 	/* Driver Statistics */
   1730 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1731 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1732 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1733 	    NULL, xname, "m_defrag() failed");
   1734 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1735 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1736 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1737 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1738 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1739 	    NULL, xname, "Driver tx dma hard fail other");
   1740 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1741 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1742 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1743 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1744 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1745 	    NULL, xname, "Watchdog timeouts");
   1746 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1747 	    NULL, xname, "TSO errors");
   1748 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1749 	    NULL, xname, "Link MSI-X IRQ Handled");
   1750 	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
   1751 	    NULL, xname, "Link softint");
   1752 	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
   1753 	    NULL, xname, "module softint");
   1754 	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
   1755 	    NULL, xname, "multimode softint");
   1756 	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
   1757 	    NULL, xname, "external PHY softint");
   1758 
   1759 	/* Max number of traffic class is 8 */
   1760 	KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
   1761 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1762 		snprintf(adapter->tcs[i].evnamebuf,
   1763 		    sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
   1764 		    xname, i);
   1765 		if (i < __arraycount(stats->mpc)) {
   1766 			evcnt_attach_dynamic(&stats->mpc[i],
   1767 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1768 			    "RX Missed Packet Count");
   1769 			if (hw->mac.type == ixgbe_mac_82598EB)
   1770 				evcnt_attach_dynamic(&stats->rnbc[i],
   1771 				    EVCNT_TYPE_MISC, NULL,
   1772 				    adapter->tcs[i].evnamebuf,
   1773 				    "Receive No Buffers");
   1774 		}
   1775 		if (i < __arraycount(stats->pxontxc)) {
   1776 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1777 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1778 			    "pxontxc");
   1779 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1780 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1781 			    "pxonrxc");
   1782 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1783 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1784 			    "pxofftxc");
   1785 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1786 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1787 			    "pxoffrxc");
   1788 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1789 				evcnt_attach_dynamic(&stats->pxon2offc[i],
   1790 				    EVCNT_TYPE_MISC, NULL,
   1791 				    adapter->tcs[i].evnamebuf,
   1792 			    "pxon2offc");
   1793 		}
   1794 	}
   1795 
   1796 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1797 #ifdef LRO
   1798 		struct lro_ctrl *lro = &rxr->lro;
   1799 #endif /* LRO */
   1800 
   1801 		snprintf(adapter->queues[i].evnamebuf,
   1802 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1803 		    xname, i);
   1804 		snprintf(adapter->queues[i].namebuf,
   1805 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1806 
   1807 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1808 			aprint_error_dev(dev, "could not create sysctl root\n");
   1809 			break;
   1810 		}
   1811 
   1812 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1813 		    0, CTLTYPE_NODE,
   1814 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1815 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1816 			break;
   1817 
   1818 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1819 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1820 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1821 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1822 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1823 			break;
   1824 
   1825 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1826 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1827 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1828 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1829 		    0, CTL_CREATE, CTL_EOL) != 0)
   1830 			break;
   1831 
   1832 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1833 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1834 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1835 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1836 		    0, CTL_CREATE, CTL_EOL) != 0)
   1837 			break;
   1838 
   1839 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1840 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1841 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   1842 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1843 		    "Handled queue in softint");
   1844 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   1845 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   1846 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1847 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1848 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1849 		    NULL, adapter->queues[i].evnamebuf,
   1850 		    "Queue No Descriptor Available");
   1851 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1852 		    NULL, adapter->queues[i].evnamebuf,
   1853 		    "Queue Packets Transmitted");
   1854 #ifndef IXGBE_LEGACY_TX
   1855 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1856 		    NULL, adapter->queues[i].evnamebuf,
   1857 		    "Packets dropped in pcq");
   1858 #endif
   1859 
   1860 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1861 		    CTLFLAG_READONLY,
   1862 		    CTLTYPE_INT,
   1863 		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
   1864 			ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   1865 		    CTL_CREATE, CTL_EOL) != 0)
   1866 			break;
   1867 
   1868 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1869 		    CTLFLAG_READONLY,
   1870 		    CTLTYPE_INT,
   1871 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1872 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1873 		    CTL_CREATE, CTL_EOL) != 0)
   1874 			break;
   1875 
   1876 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1877 		    CTLFLAG_READONLY,
   1878 		    CTLTYPE_INT,
   1879 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1880 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1881 		    CTL_CREATE, CTL_EOL) != 0)
   1882 			break;
   1883 
   1884 		if (i < __arraycount(stats->qprc)) {
   1885 			evcnt_attach_dynamic(&stats->qprc[i],
   1886 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1887 			    "qprc");
   1888 			evcnt_attach_dynamic(&stats->qptc[i],
   1889 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1890 			    "qptc");
   1891 			evcnt_attach_dynamic(&stats->qbrc[i],
   1892 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1893 			    "qbrc");
   1894 			evcnt_attach_dynamic(&stats->qbtc[i],
   1895 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1896 			    "qbtc");
   1897 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1898 				evcnt_attach_dynamic(&stats->qprdc[i],
   1899 				    EVCNT_TYPE_MISC, NULL,
   1900 				    adapter->queues[i].evnamebuf, "qprdc");
   1901 		}
   1902 
   1903 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1904 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1905 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1906 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1907 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1908 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1909 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1910 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1911 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1912 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1913 #ifdef LRO
   1914 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1915 				CTLFLAG_RD, &lro->lro_queued, 0,
   1916 				"LRO Queued");
   1917 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1918 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1919 				"LRO Flushed");
   1920 #endif /* LRO */
   1921 	}
   1922 
   1923 	/* MAC stats get their own sub node */
   1924 
   1925 	snprintf(stats->namebuf,
   1926 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1927 
   1928 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1929 	    stats->namebuf, "rx csum offload - IP");
   1930 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1931 	    stats->namebuf, "rx csum offload - L4");
   1932 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1933 	    stats->namebuf, "rx csum offload - IP bad");
   1934 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1935 	    stats->namebuf, "rx csum offload - L4 bad");
   1936 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1937 	    stats->namebuf, "Interrupt conditions zero");
   1938 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1939 	    stats->namebuf, "Legacy interrupts");
   1940 
   1941 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "CRC Errors");
   1943 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "Illegal Byte Errors");
   1945 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "Byte Errors");
   1947 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "MAC Short Packets Discarded");
   1949 	if (hw->mac.type >= ixgbe_mac_X550)
   1950 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1951 		    stats->namebuf, "Bad SFD");
   1952 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1953 	    stats->namebuf, "Total Packets Missed");
   1954 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1955 	    stats->namebuf, "MAC Local Faults");
   1956 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1957 	    stats->namebuf, "MAC Remote Faults");
   1958 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1959 	    stats->namebuf, "Receive Length Errors");
   1960 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1961 	    stats->namebuf, "Link XON Transmitted");
   1962 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1963 	    stats->namebuf, "Link XON Received");
   1964 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1965 	    stats->namebuf, "Link XOFF Transmitted");
   1966 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1967 	    stats->namebuf, "Link XOFF Received");
   1968 
   1969 	/* Packet Reception Stats */
   1970 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1971 	    stats->namebuf, "Total Octets Received");
   1972 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1973 	    stats->namebuf, "Good Octets Received");
   1974 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1975 	    stats->namebuf, "Total Packets Received");
   1976 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1977 	    stats->namebuf, "Good Packets Received");
   1978 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1979 	    stats->namebuf, "Multicast Packets Received");
   1980 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1981 	    stats->namebuf, "Broadcast Packets Received");
   1982 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1983 	    stats->namebuf, "64 byte frames received ");
   1984 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1985 	    stats->namebuf, "65-127 byte frames received");
   1986 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1987 	    stats->namebuf, "128-255 byte frames received");
   1988 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1989 	    stats->namebuf, "256-511 byte frames received");
   1990 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1991 	    stats->namebuf, "512-1023 byte frames received");
   1992 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   1993 	    stats->namebuf, "1023-1522 byte frames received");
   1994 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1995 	    stats->namebuf, "Receive Undersized");
   1996 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1997 	    stats->namebuf, "Fragmented Packets Received ");
   1998 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1999 	    stats->namebuf, "Oversized Packets Received");
   2000 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   2001 	    stats->namebuf, "Received Jabber");
   2002 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   2003 	    stats->namebuf, "Management Packets Received");
   2004 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   2005 	    stats->namebuf, "Management Packets Dropped");
   2006 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   2007 	    stats->namebuf, "Checksum Errors");
   2008 
   2009 	/* Packet Transmission Stats */
   2010 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   2011 	    stats->namebuf, "Good Octets Transmitted");
   2012 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   2013 	    stats->namebuf, "Total Packets Transmitted");
   2014 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   2015 	    stats->namebuf, "Good Packets Transmitted");
   2016 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   2017 	    stats->namebuf, "Broadcast Packets Transmitted");
   2018 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   2019 	    stats->namebuf, "Multicast Packets Transmitted");
   2020 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   2021 	    stats->namebuf, "Management Packets Transmitted");
   2022 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   2023 	    stats->namebuf, "64 byte frames transmitted ");
   2024 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   2025 	    stats->namebuf, "65-127 byte frames transmitted");
   2026 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   2027 	    stats->namebuf, "128-255 byte frames transmitted");
   2028 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   2029 	    stats->namebuf, "256-511 byte frames transmitted");
   2030 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   2031 	    stats->namebuf, "512-1023 byte frames transmitted");
   2032 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   2033 	    stats->namebuf, "1024-1522 byte frames transmitted");
   2034 } /* ixgbe_add_hw_stats */
   2035 
   2036 static void
   2037 ixgbe_clear_evcnt(struct adapter *adapter)
   2038 {
   2039 	struct tx_ring *txr = adapter->tx_rings;
   2040 	struct rx_ring *rxr = adapter->rx_rings;
   2041 	struct ixgbe_hw *hw = &adapter->hw;
   2042 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   2043 	int i;
   2044 
   2045 	adapter->efbig_tx_dma_setup.ev_count = 0;
   2046 	adapter->mbuf_defrag_failed.ev_count = 0;
   2047 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   2048 	adapter->einval_tx_dma_setup.ev_count = 0;
   2049 	adapter->other_tx_dma_setup.ev_count = 0;
   2050 	adapter->eagain_tx_dma_setup.ev_count = 0;
   2051 	adapter->enomem_tx_dma_setup.ev_count = 0;
   2052 	adapter->tso_err.ev_count = 0;
   2053 	adapter->watchdog_events.ev_count = 0;
   2054 	adapter->link_irq.ev_count = 0;
   2055 	adapter->link_sicount.ev_count = 0;
   2056 	adapter->mod_sicount.ev_count = 0;
   2057 	adapter->msf_sicount.ev_count = 0;
   2058 	adapter->phy_sicount.ev_count = 0;
   2059 
   2060 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   2061 		if (i < __arraycount(stats->mpc)) {
   2062 			stats->mpc[i].ev_count = 0;
   2063 			if (hw->mac.type == ixgbe_mac_82598EB)
   2064 				stats->rnbc[i].ev_count = 0;
   2065 		}
   2066 		if (i < __arraycount(stats->pxontxc)) {
   2067 			stats->pxontxc[i].ev_count = 0;
   2068 			stats->pxonrxc[i].ev_count = 0;
   2069 			stats->pxofftxc[i].ev_count = 0;
   2070 			stats->pxoffrxc[i].ev_count = 0;
   2071 			if (hw->mac.type >= ixgbe_mac_82599EB)
   2072 				stats->pxon2offc[i].ev_count = 0;
   2073 		}
   2074 	}
   2075 
   2076 	txr = adapter->tx_rings;
   2077 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2078 		adapter->queues[i].irqs.ev_count = 0;
   2079 		adapter->queues[i].handleq.ev_count = 0;
   2080 		adapter->queues[i].req.ev_count = 0;
   2081 		txr->no_desc_avail.ev_count = 0;
   2082 		txr->total_packets.ev_count = 0;
   2083 		txr->tso_tx.ev_count = 0;
   2084 #ifndef IXGBE_LEGACY_TX
   2085 		txr->pcq_drops.ev_count = 0;
   2086 #endif
   2087 		txr->q_efbig_tx_dma_setup = 0;
   2088 		txr->q_mbuf_defrag_failed = 0;
   2089 		txr->q_efbig2_tx_dma_setup = 0;
   2090 		txr->q_einval_tx_dma_setup = 0;
   2091 		txr->q_other_tx_dma_setup = 0;
   2092 		txr->q_eagain_tx_dma_setup = 0;
   2093 		txr->q_enomem_tx_dma_setup = 0;
   2094 		txr->q_tso_err = 0;
   2095 
   2096 		if (i < __arraycount(stats->qprc)) {
   2097 			stats->qprc[i].ev_count = 0;
   2098 			stats->qptc[i].ev_count = 0;
   2099 			stats->qbrc[i].ev_count = 0;
   2100 			stats->qbtc[i].ev_count = 0;
   2101 			if (hw->mac.type >= ixgbe_mac_82599EB)
   2102 				stats->qprdc[i].ev_count = 0;
   2103 		}
   2104 
   2105 		rxr->rx_packets.ev_count = 0;
   2106 		rxr->rx_bytes.ev_count = 0;
   2107 		rxr->rx_copies.ev_count = 0;
   2108 		rxr->no_jmbuf.ev_count = 0;
   2109 		rxr->rx_discarded.ev_count = 0;
   2110 	}
   2111 	stats->ipcs.ev_count = 0;
   2112 	stats->l4cs.ev_count = 0;
   2113 	stats->ipcs_bad.ev_count = 0;
   2114 	stats->l4cs_bad.ev_count = 0;
   2115 	stats->intzero.ev_count = 0;
   2116 	stats->legint.ev_count = 0;
   2117 	stats->crcerrs.ev_count = 0;
   2118 	stats->illerrc.ev_count = 0;
   2119 	stats->errbc.ev_count = 0;
   2120 	stats->mspdc.ev_count = 0;
   2121 	if (hw->mac.type >= ixgbe_mac_X550)
   2122 		stats->mbsdc.ev_count = 0;
   2123 	stats->mpctotal.ev_count = 0;
   2124 	stats->mlfc.ev_count = 0;
   2125 	stats->mrfc.ev_count = 0;
   2126 	stats->rlec.ev_count = 0;
   2127 	stats->lxontxc.ev_count = 0;
   2128 	stats->lxonrxc.ev_count = 0;
   2129 	stats->lxofftxc.ev_count = 0;
   2130 	stats->lxoffrxc.ev_count = 0;
   2131 
   2132 	/* Packet Reception Stats */
   2133 	stats->tor.ev_count = 0;
   2134 	stats->gorc.ev_count = 0;
   2135 	stats->tpr.ev_count = 0;
   2136 	stats->gprc.ev_count = 0;
   2137 	stats->mprc.ev_count = 0;
   2138 	stats->bprc.ev_count = 0;
   2139 	stats->prc64.ev_count = 0;
   2140 	stats->prc127.ev_count = 0;
   2141 	stats->prc255.ev_count = 0;
   2142 	stats->prc511.ev_count = 0;
   2143 	stats->prc1023.ev_count = 0;
   2144 	stats->prc1522.ev_count = 0;
   2145 	stats->ruc.ev_count = 0;
   2146 	stats->rfc.ev_count = 0;
   2147 	stats->roc.ev_count = 0;
   2148 	stats->rjc.ev_count = 0;
   2149 	stats->mngprc.ev_count = 0;
   2150 	stats->mngpdc.ev_count = 0;
   2151 	stats->xec.ev_count = 0;
   2152 
   2153 	/* Packet Transmission Stats */
   2154 	stats->gotc.ev_count = 0;
   2155 	stats->tpt.ev_count = 0;
   2156 	stats->gptc.ev_count = 0;
   2157 	stats->bptc.ev_count = 0;
   2158 	stats->mptc.ev_count = 0;
   2159 	stats->mngptc.ev_count = 0;
   2160 	stats->ptc64.ev_count = 0;
   2161 	stats->ptc127.ev_count = 0;
   2162 	stats->ptc255.ev_count = 0;
   2163 	stats->ptc511.ev_count = 0;
   2164 	stats->ptc1023.ev_count = 0;
   2165 	stats->ptc1522.ev_count = 0;
   2166 }
   2167 
   2168 /************************************************************************
   2169  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2170  *
   2171  *   Retrieves the TDH value from the hardware
   2172  ************************************************************************/
   2173 static int
   2174 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2175 {
   2176 	struct sysctlnode node = *rnode;
   2177 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2178 	struct adapter *adapter;
   2179 	uint32_t val;
   2180 
   2181 	if (!txr)
   2182 		return (0);
   2183 
   2184 	adapter = txr->adapter;
   2185 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2186 		return (EPERM);
   2187 
   2188 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
   2189 	node.sysctl_data = &val;
   2190 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2191 } /* ixgbe_sysctl_tdh_handler */
   2192 
   2193 /************************************************************************
   2194  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2195  *
   2196  *   Retrieves the TDT value from the hardware
   2197  ************************************************************************/
   2198 static int
   2199 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2200 {
   2201 	struct sysctlnode node = *rnode;
   2202 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2203 	struct adapter *adapter;
   2204 	uint32_t val;
   2205 
   2206 	if (!txr)
   2207 		return (0);
   2208 
   2209 	adapter = txr->adapter;
   2210 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2211 		return (EPERM);
   2212 
   2213 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
   2214 	node.sysctl_data = &val;
   2215 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2216 } /* ixgbe_sysctl_tdt_handler */
   2217 
   2218 /************************************************************************
   2219  * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
   2220  * handler function
   2221  *
   2222  *   Retrieves the next_to_check value
   2223  ************************************************************************/
   2224 static int
   2225 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2226 {
   2227 	struct sysctlnode node = *rnode;
   2228 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2229 	struct adapter *adapter;
   2230 	uint32_t val;
   2231 
   2232 	if (!rxr)
   2233 		return (0);
   2234 
   2235 	adapter = rxr->adapter;
   2236 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2237 		return (EPERM);
   2238 
   2239 	val = rxr->next_to_check;
   2240 	node.sysctl_data = &val;
   2241 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2242 } /* ixgbe_sysctl_next_to_check_handler */
   2243 
   2244 /************************************************************************
   2245  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2246  *
   2247  *   Retrieves the RDH value from the hardware
   2248  ************************************************************************/
   2249 static int
   2250 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2251 {
   2252 	struct sysctlnode node = *rnode;
   2253 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2254 	struct adapter *adapter;
   2255 	uint32_t val;
   2256 
   2257 	if (!rxr)
   2258 		return (0);
   2259 
   2260 	adapter = rxr->adapter;
   2261 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2262 		return (EPERM);
   2263 
   2264 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
   2265 	node.sysctl_data = &val;
   2266 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2267 } /* ixgbe_sysctl_rdh_handler */
   2268 
   2269 /************************************************************************
   2270  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2271  *
   2272  *   Retrieves the RDT value from the hardware
   2273  ************************************************************************/
   2274 static int
   2275 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2276 {
   2277 	struct sysctlnode node = *rnode;
   2278 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2279 	struct adapter *adapter;
   2280 	uint32_t val;
   2281 
   2282 	if (!rxr)
   2283 		return (0);
   2284 
   2285 	adapter = rxr->adapter;
   2286 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2287 		return (EPERM);
   2288 
   2289 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
   2290 	node.sysctl_data = &val;
   2291 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2292 } /* ixgbe_sysctl_rdt_handler */
   2293 
   2294 static int
   2295 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
   2296 {
   2297 	struct ifnet *ifp = &ec->ec_if;
   2298 	struct adapter *adapter = ifp->if_softc;
   2299 	int rv;
   2300 
   2301 	if (set)
   2302 		rv = ixgbe_register_vlan(adapter, vid);
   2303 	else
   2304 		rv = ixgbe_unregister_vlan(adapter, vid);
   2305 
   2306 	if (rv != 0)
   2307 		return rv;
   2308 
   2309 	/*
   2310 	 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
   2311 	 * or 0 to 1.
   2312 	 */
   2313 	if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
   2314 		ixgbe_setup_vlan_hw_tagging(adapter);
   2315 
   2316 	return rv;
   2317 }
   2318 
   2319 /************************************************************************
   2320  * ixgbe_register_vlan
   2321  *
   2322  *   Run via vlan config EVENT, it enables us to use the
   2323  *   HW Filter table since we can get the vlan id. This
   2324  *   just creates the entry in the soft version of the
   2325  *   VFTA, init will repopulate the real table.
   2326  ************************************************************************/
   2327 static int
   2328 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
   2329 {
   2330 	u16		index, bit;
   2331 	int		error;
   2332 
   2333 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2334 		return EINVAL;
   2335 
   2336 	IXGBE_CORE_LOCK(adapter);
   2337 	index = (vtag >> 5) & 0x7F;
   2338 	bit = vtag & 0x1F;
   2339 	adapter->shadow_vfta[index] |= ((u32)1 << bit);
   2340 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
   2341 	    true);
   2342 	IXGBE_CORE_UNLOCK(adapter);
   2343 	if (error != 0)
   2344 		error = EACCES;
   2345 
   2346 	return error;
   2347 } /* ixgbe_register_vlan */
   2348 
   2349 /************************************************************************
   2350  * ixgbe_unregister_vlan
   2351  *
   2352  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2353  ************************************************************************/
   2354 static int
   2355 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
   2356 {
   2357 	u16		index, bit;
   2358 	int		error;
   2359 
   2360 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2361 		return EINVAL;
   2362 
   2363 	IXGBE_CORE_LOCK(adapter);
   2364 	index = (vtag >> 5) & 0x7F;
   2365 	bit = vtag & 0x1F;
   2366 	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
   2367 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
   2368 	    true);
   2369 	IXGBE_CORE_UNLOCK(adapter);
   2370 	if (error != 0)
   2371 		error = EACCES;
   2372 
   2373 	return error;
   2374 } /* ixgbe_unregister_vlan */
   2375 
   2376 static void
   2377 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
   2378 {
   2379 	struct ethercom *ec = &adapter->osdep.ec;
   2380 	struct ixgbe_hw *hw = &adapter->hw;
   2381 	struct rx_ring	*rxr;
   2382 	u32		ctrl;
   2383 	int		i;
   2384 	bool		hwtagging;
   2385 
   2386 	/* Enable HW tagging only if any vlan is attached */
   2387 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   2388 	    && VLAN_ATTACHED(ec);
   2389 
   2390 	/* Setup the queues for vlans */
   2391 	for (i = 0; i < adapter->num_queues; i++) {
   2392 		rxr = &adapter->rx_rings[i];
   2393 		/*
   2394 		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
   2395 		 */
   2396 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2397 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2398 			if (hwtagging)
   2399 				ctrl |= IXGBE_RXDCTL_VME;
   2400 			else
   2401 				ctrl &= ~IXGBE_RXDCTL_VME;
   2402 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2403 		}
   2404 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2405 	}
   2406 
   2407 	/* VLAN hw tagging for 82598 */
   2408 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2409 		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2410 		if (hwtagging)
   2411 			ctrl |= IXGBE_VLNCTRL_VME;
   2412 		else
   2413 			ctrl &= ~IXGBE_VLNCTRL_VME;
   2414 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2415 	}
   2416 } /* ixgbe_setup_vlan_hw_tagging */
   2417 
   2418 static void
   2419 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2420 {
   2421 	struct ethercom *ec = &adapter->osdep.ec;
   2422 	struct ixgbe_hw *hw = &adapter->hw;
   2423 	int		i;
   2424 	u32		ctrl;
   2425 	struct vlanid_list *vlanidp;
   2426 
   2427 	/*
   2428 	 *  This function is called from both if_init and ifflags_cb()
   2429 	 * on NetBSD.
   2430 	 */
   2431 
   2432 	/*
   2433 	 * Part 1:
   2434 	 * Setup VLAN HW tagging
   2435 	 */
   2436 	ixgbe_setup_vlan_hw_tagging(adapter);
   2437 
   2438 	/*
   2439 	 * Part 2:
   2440 	 * Setup VLAN HW filter
   2441 	 */
   2442 	/* Cleanup shadow_vfta */
   2443 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2444 		adapter->shadow_vfta[i] = 0;
   2445 	/* Generate shadow_vfta from ec_vids */
   2446 	ETHER_LOCK(ec);
   2447 	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
   2448 		uint32_t idx;
   2449 
   2450 		idx = vlanidp->vid / 32;
   2451 		KASSERT(idx < IXGBE_VFTA_SIZE);
   2452 		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
   2453 	}
   2454 	ETHER_UNLOCK(ec);
   2455 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2456 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
   2457 
   2458 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2459 	/* Enable the Filter Table if enabled */
   2460 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
   2461 		ctrl |= IXGBE_VLNCTRL_VFE;
   2462 	else
   2463 		ctrl &= ~IXGBE_VLNCTRL_VFE;
   2464 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2465 } /* ixgbe_setup_vlan_hw_support */
   2466 
   2467 /************************************************************************
   2468  * ixgbe_get_slot_info
   2469  *
   2470  *   Get the width and transaction speed of
   2471  *   the slot this adapter is plugged into.
   2472  ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct ixgbe_hw		*hw = &adapter->hw;
	u32		      offset;
	u16			link;
	int		      bus_info_valid = TRUE;

	/*
	 * Some devices are behind an internal bridge, so the bus info
	 * of the adapter itself is not what the slot provides; walk up
	 * the PCI tree instead.
	 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Integrated MACs: nothing useful to report; bail out. */
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)	? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000)	? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500)	? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot cannot feed the NIC at line rate. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
			(hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
			(hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
   2574 
   2575 /************************************************************************
   2576  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2577  ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64		queue = 1ULL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);
	/*
	 * Balance nested ixgbe_disable_queue() calls: only unmask the
	 * interrupt in hardware when the nest count drops to zero.  If
	 * disabled_count is already 0 the unmask proceeds unconditionally.
	 */
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single 32-bit EIMS register. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Later MACs: 64-bit queue mask split over EIMS_EX[0,1]. */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
   2604 
   2605 /************************************************************************
   2606  * ixgbe_disable_queue_internal
   2607  ************************************************************************/
   2608 static inline void
   2609 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
   2610 {
   2611 	struct ixgbe_hw *hw = &adapter->hw;
   2612 	struct ix_queue *que = &adapter->queues[vector];
   2613 	u64		queue = 1ULL << vector;
   2614 	u32		mask;
   2615 
   2616 	mutex_enter(&que->dc_mtx);
   2617 
   2618 	if (que->disabled_count > 0) {
   2619 		if (nestok)
   2620 			que->disabled_count++;
   2621 		goto out;
   2622 	}
   2623 	que->disabled_count++;
   2624 
   2625 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2626 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2627 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2628 	} else {
   2629 		mask = (queue & 0xFFFFFFFF);
   2630 		if (mask)
   2631 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2632 		mask = (queue >> 32);
   2633 		if (mask)
   2634 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2635 	}
   2636 out:
   2637 	mutex_exit(&que->dc_mtx);
   2638 } /* ixgbe_disable_queue_internal */
   2639 
   2640 /************************************************************************
   2641  * ixgbe_disable_queue
   2642  ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	/* Nestable variant: repeated disables increase the nest count. */
	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
   2649 
   2650 /************************************************************************
   2651  * ixgbe_sched_handle_que - schedule deferred packet processing
   2652  ************************************************************************/
   2653 static inline void
   2654 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
   2655 {
   2656 
   2657 	if (que->txrx_use_workqueue) {
   2658 		/*
   2659 		 * adapter->que_wq is bound to each CPU instead of
   2660 		 * each NIC queue to reduce workqueue kthread. As we
   2661 		 * should consider about interrupt affinity in this
   2662 		 * function, the workqueue kthread must be WQ_PERCPU.
   2663 		 * If create WQ_PERCPU workqueue kthread for each NIC
   2664 		 * queue, that number of created workqueue kthread is
   2665 		 * (number of used NIC queue) * (number of CPUs) =
   2666 		 * (number of CPUs) ^ 2 most often.
   2667 		 *
   2668 		 * The same NIC queue's interrupts are avoided by
   2669 		 * masking the queue's interrupt. And different
   2670 		 * NIC queue's interrupts use different struct work
   2671 		 * (que->wq_cookie). So, "enqueued flag" to avoid
   2672 		 * twice workqueue_enqueue() is not required .
   2673 		 */
   2674 		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
   2675 	} else {
   2676 		softint_schedule(que->que_si);
   2677 	}
   2678 }
   2679 
   2680 /************************************************************************
   2681  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2682  ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue's interrupt until deferred processing finishes. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Use the larger of the TX/RX average frame sizes as the basis. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * Either defer further packet processing (queue stays masked
	 * until that completes) or unmask the queue interrupt now.
	 */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
   2781 
   2782 /************************************************************************
   2783  * ixgbe_media_status - Media Ioctl callback
   2784  *
   2785  *   Called whenever the user queries the status of
   2786  *   the interface using ifconfig.
   2787  ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting it. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Map (physical layer, link speed) onto an ifmedia subtype. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
   2929 
   2930 /************************************************************************
   2931  * ixgbe_media_change - Media Ioctl callback
   2932  *
   2933  *   Called when the user changes speed/duplex using
   2934  *   media/mediopt option with ifconfig.
   2935  ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter	 *adapter = ifp->if_softc;
	struct ifmedia	 *ifm = &adapter->media;
	struct ixgbe_hw	 *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be reconfigured from here. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Mirror the selection into adapter->advertise, the sysctl-visible
	 * bitmap: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M, 0x10 = 2.5G,
	 * 0x20 = 5G.  Zero means "autonegotiate everything".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
   3035 
   3036 /************************************************************************
   3037  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   3038  ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		eicr, eicr_mask;
	s32		retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/*
		 *  An interrupt might not arrive when a module is inserted.
		 * When an link status change interrupt occurred and the driver
		 * still regard SFP as unplugged, issue the module softint
		 * and then issue LSC interrupt.
		 */
		if ((eicr & eicr_mask)
		    || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
			&& (eicr & IXGBE_EICR_LSC))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599: SDP1 signals a multi-speed fiber (MSF) event. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			/*
			 * NOTE(review): atomic_cas_uint() returns the OLD
			 * value (0 on a successful swap), unlike FreeBSD's
			 * atomic_cmpset_int() which returns non-zero on
			 * success.  As written, the early return appears to
			 * be taken when the CAS *succeeds* — and it also
			 * skips re-enabling EIMS_OTHER below.  Verify the
			 * intended sense against the FDIR reinit path.
			 */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor (TS) bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
   3168 
   3169 static void
   3170 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   3171 {
   3172 
   3173 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3174 		itr |= itr << 16;
   3175 	else
   3176 		itr |= IXGBE_EITR_CNT_WDIS;
   3177 
   3178 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
   3179 }
   3180 
   3181 
/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Sysctl handler reporting and setting the per-queue interrupt rate
 *   (in interrupts/sec) by reprogramming the queue's EITR register.
 *   Writing 0 removes the rate limitation.
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter	*adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* Refuse tuning while the firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Convert the current EITR interval field back into a rate. */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;	/* interval 0 == unthrottled */
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;	/* read-only access, or lookup failure */
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;	/* clamp to the slowest supported rate */
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
   3235 
   3236 const struct sysctlnode *
   3237 ixgbe_sysctl_instance(struct adapter *adapter)
   3238 {
   3239 	const char *dvname;
   3240 	struct sysctllog **log;
   3241 	int rc;
   3242 	const struct sysctlnode *rnode;
   3243 
   3244 	if (adapter->sysctltop != NULL)
   3245 		return adapter->sysctltop;
   3246 
   3247 	log = &adapter->sysctllog;
   3248 	dvname = device_xname(adapter->dev);
   3249 
   3250 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3251 	    0, CTLTYPE_NODE, dvname,
   3252 	    SYSCTL_DESCR("ixgbe information and settings"),
   3253 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3254 		goto err;
   3255 
   3256 	return rnode;
   3257 err:
   3258 	device_printf(adapter->dev,
   3259 	    "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3260 	return NULL;
   3261 }
   3262 
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Attach the per-device sysctl tree: debug knobs, generic tuning
 *   (flow control, AIM, advertised speed, workqueue mode) and
 *   feature-conditional nodes (DMA coalescing, WoL, external PHY
 *   temperature, 10/100 autonego, EEE).
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t	       dev = adapter->dev;
	struct ixgbe_hw	       *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Adaptive interrupt moderation, seeded from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
   3419 
/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 *   Map BAR0 (the device register window) and make sure PCI memory-space
 *   decoding is enabled.  Returns 0 on success, ENXIO on failure.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Register accesses must not be prefetched/combined, so
		 * strip the prefetchable hint before mapping.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
   3469 
   3470 static void
   3471 ixgbe_free_softint(struct adapter *adapter)
   3472 {
   3473 	struct ix_queue *que = adapter->queues;
   3474 	struct tx_ring *txr = adapter->tx_rings;
   3475 	int i;
   3476 
   3477 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3478 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3479 			if (txr->txr_si != NULL)
   3480 				softint_disestablish(txr->txr_si);
   3481 		}
   3482 		if (que->que_si != NULL)
   3483 			softint_disestablish(que->que_si);
   3484 	}
   3485 	if (adapter->txr_wq != NULL)
   3486 		workqueue_destroy(adapter->txr_wq);
   3487 	if (adapter->txr_wq_enqueued != NULL)
   3488 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
   3489 	if (adapter->que_wq != NULL)
   3490 		workqueue_destroy(adapter->que_wq);
   3491 
   3492 	/* Drain the Link queue */
   3493 	if (adapter->link_si != NULL) {
   3494 		softint_disestablish(adapter->link_si);
   3495 		adapter->link_si = NULL;
   3496 	}
   3497 	if (adapter->mod_si != NULL) {
   3498 		softint_disestablish(adapter->mod_si);
   3499 		adapter->mod_si = NULL;
   3500 	}
   3501 	if (adapter->msf_si != NULL) {
   3502 		softint_disestablish(adapter->msf_si);
   3503 		adapter->msf_si = NULL;
   3504 	}
   3505 	if (adapter->phy_si != NULL) {
   3506 		softint_disestablish(adapter->phy_si);
   3507 		adapter->phy_si = NULL;
   3508 	}
   3509 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3510 		if (adapter->fdir_si != NULL) {
   3511 			softint_disestablish(adapter->fdir_si);
   3512 			adapter->fdir_si = NULL;
   3513 		}
   3514 	}
   3515 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3516 		if (adapter->mbx_si != NULL) {
   3517 			softint_disestablish(adapter->mbx_si);
   3518 			adapter->mbx_si = NULL;
   3519 		}
   3520 	}
   3521 } /* ixgbe_free_softint */
   3522 
/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32	ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while virtual functions are active. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
	 * so it's not required to call ixgbe_stop() directly.
	 */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for any outstanding callouts to finish before freeing. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Tear down sysctls and every event counter attached in attach. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-traffic-class counters (only those that were attached). */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters. */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	ixgbe_free_queues(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
   3718 
/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 *   Prepare the adapter/port for LPLU and/or WoL.  Stops the interface,
 *   optionally enters Low Power Link Up on X550EM baseT, and programs
 *   the wakeup registers (WUS/WUFC/WUC) according to WoL support.
 *   Returns 0 on success or a PHY error code.  Caller must hold the
 *   core lock.
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	s32		error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY up across the stop so LPLU can be entered. */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wakeup config. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
   3778 
   3779 /************************************************************************
   3780  * ixgbe_shutdown - Shutdown entry point
   3781  ************************************************************************/
   3782 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* Compiled out (#if 0): would put the adapter into low-power mode at
 * system shutdown.  Kept as a reference for a future pmf(9) hook. */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
   3797 #endif
   3798 
   3799 /************************************************************************
   3800  * ixgbe_suspend
   3801  *
   3802  *   From D0 to D3
   3803  ************************************************************************/
   3804 static bool
   3805 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3806 {
   3807 	struct adapter *adapter = device_private(dev);
   3808 	int	       error = 0;
   3809 
   3810 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3811 
   3812 	IXGBE_CORE_LOCK(adapter);
   3813 
   3814 	error = ixgbe_setup_low_power_mode(adapter);
   3815 
   3816 	IXGBE_CORE_UNLOCK(adapter);
   3817 
   3818 	return (error);
   3819 } /* ixgbe_suspend */
   3820 
   3821 /************************************************************************
   3822  * ixgbe_resume
   3823  *
   3824  *   From D3 to D0
   3825  ************************************************************************/
   3826 static bool
   3827 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3828 {
   3829 	struct adapter	*adapter = device_private(dev);
   3830 	struct ifnet	*ifp = adapter->ifp;
   3831 	struct ixgbe_hw *hw = &adapter->hw;
   3832 	u32		wus;
   3833 
   3834 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3835 
   3836 	IXGBE_CORE_LOCK(adapter);
   3837 
   3838 	/* Read & clear WUS register */
   3839 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3840 	if (wus)
   3841 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3842 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3843 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3844 	/* And clear WUFC until next low-power transition */
   3845 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3846 
   3847 	/*
   3848 	 * Required after D3->D0 transition;
   3849 	 * will re-advertise all previous advertised speeds
   3850 	 */
   3851 	if (ifp->if_flags & IFF_UP)
   3852 		ixgbe_init_locked(adapter);
   3853 
   3854 	IXGBE_CORE_UNLOCK(adapter);
   3855 
   3856 	return true;
   3857 } /* ixgbe_resume */
   3858 
   3859 /*
   3860  * Set the various hardware offload abilities.
   3861  *
   3862  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3863  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3864  * mbuf offload flags the driver will understand.
   3865  */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX Not implemented on NetBSD; placeholder kept from FreeBSD. */
}
   3871 
   3872 /************************************************************************
   3873  * ixgbe_init_locked - Init entry point
   3874  *
   3875  *   Used in two ways: It is used by the stack as an init
   3876  *   entry point in network interface structure. It is also
   3877  *   used by the driver as a hw/sw initialization routine to
   3878  *   get to a consistent state.
   3879  *
   3880  *   return 0 on success, positive on failure
   3881  ************************************************************************/
   3882 static void
   3883 ixgbe_init_locked(struct adapter *adapter)
   3884 {
   3885 	struct ifnet   *ifp = adapter->ifp;
   3886 	device_t	dev = adapter->dev;
   3887 	struct ixgbe_hw *hw = &adapter->hw;
   3888 	struct ix_queue *que;
   3889 	struct tx_ring	*txr;
   3890 	struct rx_ring	*rxr;
   3891 	u32		txdctl, mhadd;
   3892 	u32		rxdctl, rxctrl;
   3893 	u32		ctrl_ext;
   3894 	bool		unsupported_sfp = false;
   3895 	int		i, j, err;
   3896 
   3897 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3898 
   3899 	KASSERT(mutex_owned(&adapter->core_mtx));
   3900 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3901 
   3902 	hw->need_unsupported_sfp_recovery = false;
   3903 	hw->adapter_stopped = FALSE;
   3904 	ixgbe_stop_adapter(hw);
   3905 	callout_stop(&adapter->timer);
   3906 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
   3907 		que->disabled_count = 0;
   3908 
   3909 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3910 	adapter->max_frame_size =
   3911 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3912 
   3913 	/* Queue indices may change with IOV mode */
   3914 	ixgbe_align_all_queue_indices(adapter);
   3915 
   3916 	/* reprogram the RAR[0] in case user changed it. */
   3917 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3918 
   3919 	/* Get the latest mac address, User can use a LAA */
   3920 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3921 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3922 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3923 	hw->addr_ctrl.rar_used_count = 1;
   3924 
   3925 	/* Set hardware offload abilities from ifnet flags */
   3926 	ixgbe_set_if_hwassist(adapter);
   3927 
   3928 	/* Prepare transmit descriptors and buffers */
   3929 	if (ixgbe_setup_transmit_structures(adapter)) {
   3930 		device_printf(dev, "Could not setup transmit structures\n");
   3931 		ixgbe_stop(adapter);
   3932 		return;
   3933 	}
   3934 
   3935 	ixgbe_init_hw(hw);
   3936 
   3937 	ixgbe_initialize_iov(adapter);
   3938 
   3939 	ixgbe_initialize_transmit_units(adapter);
   3940 
   3941 	/* Setup Multicast table */
   3942 	ixgbe_set_rxfilter(adapter);
   3943 
   3944 	/* Determine the correct mbuf pool, based on frame size */
   3945 	if (adapter->max_frame_size <= MCLBYTES)
   3946 		adapter->rx_mbuf_sz = MCLBYTES;
   3947 	else
   3948 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3949 
   3950 	/* Prepare receive descriptors and buffers */
   3951 	if (ixgbe_setup_receive_structures(adapter)) {
   3952 		device_printf(dev, "Could not setup receive structures\n");
   3953 		ixgbe_stop(adapter);
   3954 		return;
   3955 	}
   3956 
   3957 	/* Configure RX settings */
   3958 	ixgbe_initialize_receive_units(adapter);
   3959 
   3960 	/* Enable SDP & MSI-X interrupts based on adapter */
   3961 	ixgbe_config_gpie(adapter);
   3962 
   3963 	/* Set MTU size */
   3964 	if (ifp->if_mtu > ETHERMTU) {
   3965 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3966 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3967 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3968 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3969 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3970 	}
   3971 
   3972 	/* Now enable all the queues */
   3973 	for (i = 0; i < adapter->num_queues; i++) {
   3974 		txr = &adapter->tx_rings[i];
   3975 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3976 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3977 		/* Set WTHRESH to 8, burst writeback */
   3978 		txdctl |= (8 << 16);
   3979 		/*
   3980 		 * When the internal queue falls below PTHRESH (32),
   3981 		 * start prefetching as long as there are at least
   3982 		 * HTHRESH (1) buffers ready. The values are taken
   3983 		 * from the Intel linux driver 3.8.21.
   3984 		 * Prefetching enables tx line rate even with 1 queue.
   3985 		 */
   3986 		txdctl |= (32 << 0) | (1 << 8);
   3987 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3988 	}
   3989 
   3990 	for (i = 0; i < adapter->num_queues; i++) {
   3991 		rxr = &adapter->rx_rings[i];
   3992 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3993 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3994 			/*
   3995 			 * PTHRESH = 21
   3996 			 * HTHRESH = 4
   3997 			 * WTHRESH = 8
   3998 			 */
   3999 			rxdctl &= ~0x3FFFFF;
   4000 			rxdctl |= 0x080420;
   4001 		}
   4002 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   4003 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   4004 		for (j = 0; j < 10; j++) {
   4005 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   4006 			    IXGBE_RXDCTL_ENABLE)
   4007 				break;
   4008 			else
   4009 				msec_delay(1);
   4010 		}
   4011 		IXGBE_WRITE_BARRIER(hw);
   4012 
   4013 		/*
   4014 		 * In netmap mode, we must preserve the buffers made
   4015 		 * available to userspace before the if_init()
   4016 		 * (this is true by default on the TX side, because
   4017 		 * init makes all buffers available to userspace).
   4018 		 *
   4019 		 * netmap_reset() and the device specific routines
   4020 		 * (e.g. ixgbe_setup_receive_rings()) map these
   4021 		 * buffers at the end of the NIC ring, so here we
   4022 		 * must set the RDT (tail) register to make sure
   4023 		 * they are not overwritten.
   4024 		 *
   4025 		 * In this driver the NIC ring starts at RDH = 0,
   4026 		 * RDT points to the last slot available for reception (?),
   4027 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   4028 		 */
   4029 #ifdef DEV_NETMAP
   4030 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   4031 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   4032 			struct netmap_adapter *na = NA(adapter->ifp);
   4033 			struct netmap_kring *kring = na->rx_rings[i];
   4034 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   4035 
   4036 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   4037 		} else
   4038 #endif /* DEV_NETMAP */
   4039 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   4040 			    adapter->num_rx_desc - 1);
   4041 	}
   4042 
   4043 	/* Enable Receive engine */
   4044 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4045 	if (hw->mac.type == ixgbe_mac_82598EB)
   4046 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   4047 	rxctrl |= IXGBE_RXCTRL_RXEN;
   4048 	ixgbe_enable_rx_dma(hw, rxctrl);
   4049 
   4050 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4051 
   4052 	/* Set up MSI/MSI-X routing */
   4053 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4054 		ixgbe_configure_ivars(adapter);
   4055 		/* Set up auto-mask */
   4056 		if (hw->mac.type == ixgbe_mac_82598EB)
   4057 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4058 		else {
   4059 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   4060 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   4061 		}
   4062 	} else {  /* Simple settings for Legacy/MSI */
   4063 		ixgbe_set_ivar(adapter, 0, 0, 0);
   4064 		ixgbe_set_ivar(adapter, 0, 0, 1);
   4065 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4066 	}
   4067 
   4068 	ixgbe_init_fdir(adapter);
   4069 
   4070 	/*
   4071 	 * Check on any SFP devices that
   4072 	 * need to be kick-started
   4073 	 */
   4074 	if (hw->phy.type == ixgbe_phy_none) {
   4075 		err = hw->phy.ops.identify(hw);
   4076 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
   4077 			unsupported_sfp = true;
   4078 	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
   4079 		unsupported_sfp = true;
   4080 
   4081 	if (unsupported_sfp)
   4082 		device_printf(dev,
   4083 		    "Unsupported SFP+ module type was detected.\n");
   4084 
   4085 	/* Set moderation on the Link interrupt */
   4086 	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
   4087 
   4088 	/* Enable EEE power saving */
   4089 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   4090 		hw->mac.ops.setup_eee(hw,
   4091 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   4092 
   4093 	/* Enable power to the phy. */
   4094 	if (!unsupported_sfp) {
   4095 		ixgbe_set_phy_power(hw, TRUE);
   4096 
   4097 		/* Config/Enable Link */
   4098 		ixgbe_config_link(adapter);
   4099 	}
   4100 
   4101 	/* Hardware Packet Buffer & Flow Control setup */
   4102 	ixgbe_config_delay_values(adapter);
   4103 
   4104 	/* Initialize the FC settings */
   4105 	ixgbe_start_hw(hw);
   4106 
   4107 	/* Set up VLAN support and filter */
   4108 	ixgbe_setup_vlan_hw_support(adapter);
   4109 
   4110 	/* Setup DMA Coalescing */
   4111 	ixgbe_config_dmac(adapter);
   4112 
   4113 	/* And now turn on interrupts */
   4114 	ixgbe_enable_intr(adapter);
   4115 
   4116 	/* Enable the use of the MBX by the VF's */
   4117 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   4118 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   4119 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   4120 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   4121 	}
   4122 
   4123 	/* Update saved flags. See ixgbe_ifflags_cb() */
   4124 	adapter->if_flags = ifp->if_flags;
   4125 	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
   4126 
   4127 	/* Now inform the stack we're ready */
   4128 	ifp->if_flags |= IFF_RUNNING;
   4129 
   4130 	return;
   4131 } /* ixgbe_init_locked */
   4132 
   4133 /************************************************************************
   4134  * ixgbe_init
   4135  ************************************************************************/
   4136 static int
   4137 ixgbe_init(struct ifnet *ifp)
   4138 {
   4139 	struct adapter *adapter = ifp->if_softc;
   4140 
   4141 	IXGBE_CORE_LOCK(adapter);
   4142 	ixgbe_init_locked(adapter);
   4143 	IXGBE_CORE_UNLOCK(adapter);
   4144 
   4145 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   4146 } /* ixgbe_init */
   4147 
   4148 /************************************************************************
   4149  * ixgbe_set_ivar
   4150  *
   4151  *   Setup the correct IVAR register for a particular MSI-X interrupt
   4152  *     (yes this is all very magic and confusing :)
   4153  *    - entry is the register array entry
   4154  *    - vector is the MSI-X vector for this queue
   4155  *    - type is RX/TX/MISC
   4156  ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the vector assignment as valid (IVAR "allocation valid" bit). */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: a single flat IVAR table holding four 8-bit
		 * entries per 32-bit register.  TX entries live 64 slots
		 * above RX entries (entry += type * 64); type == -1
		 * selects the "other causes" slot.
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		/* Register index, then byte lane within that register. */
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
		ivar |= ((u32)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			/* IVAR_MISC holds two 8-bit fields, picked by bit 0. */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {	/* RX/TX IVARS */
			/*
			 * Each IVAR register covers two queues: bit 0 of
			 * the entry selects the upper/lower half, and
			 * type (0 = RX, 1 = TX) selects the byte in it.
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */
   4200 
   4201 /************************************************************************
   4202  * ixgbe_configure_ivars
   4203  ************************************************************************/
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	u32		newitr;

	/*
	 * Derive the initial EITR value from the requested maximum
	 * interrupt rate; the mask keeps only the bits valid in the
	 * EITR interval field.
	 */
	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		adapter->dmac = 0;
		newitr = 0;
	}

	/* Route every RX/TX queue pair to its MSI-X vector. */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		struct tx_ring *txr = &adapter->tx_rings[i];
		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
		/* Set an Initial EITR value */
		ixgbe_eitr_write(adapter, que->msix, newitr);
		/*
		 * To eliminate influence of the previous state.
		 * At this point, Tx/Rx interrupt handler
		 * (ixgbe_msix_que()) cannot be called, so  both
		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
		 */
		que->eitr_setting = 0;
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */
   4242 
   4243 /************************************************************************
   4244  * ixgbe_config_gpie
   4245  ************************************************************************/
   4246 static void
   4247 ixgbe_config_gpie(struct adapter *adapter)
   4248 {
   4249 	struct ixgbe_hw *hw = &adapter->hw;
   4250 	u32		gpie;
   4251 
   4252 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   4253 
   4254 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4255 		/* Enable Enhanced MSI-X mode */
   4256 		gpie |= IXGBE_GPIE_MSIX_MODE
   4257 		     |	IXGBE_GPIE_EIAME
   4258 		     |	IXGBE_GPIE_PBA_SUPPORT
   4259 		     |	IXGBE_GPIE_OCD;
   4260 	}
   4261 
   4262 	/* Fan Failure Interrupt */
   4263 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4264 		gpie |= IXGBE_SDP1_GPIEN;
   4265 
   4266 	/* Thermal Sensor Interrupt */
   4267 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   4268 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4269 
   4270 	/* Link detection */
   4271 	switch (hw->mac.type) {
   4272 	case ixgbe_mac_82599EB:
   4273 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   4274 		break;
   4275 	case ixgbe_mac_X550EM_x:
   4276 	case ixgbe_mac_X550EM_a:
   4277 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4278 		break;
   4279 	default:
   4280 		break;
   4281 	}
   4282 
   4283 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   4284 
   4285 } /* ixgbe_config_gpie */
   4286 
   4287 /************************************************************************
   4288  * ixgbe_config_delay_values
   4289  *
   4290  *   Requires adapter->max_frame_size to be set.
   4291  ************************************************************************/
   4292 static void
   4293 ixgbe_config_delay_values(struct adapter *adapter)
   4294 {
   4295 	struct ixgbe_hw *hw = &adapter->hw;
   4296 	u32		rxpb, frame, size, tmp;
   4297 
   4298 	frame = adapter->max_frame_size;
   4299 
   4300 	/* Calculate High Water */
   4301 	switch (hw->mac.type) {
   4302 	case ixgbe_mac_X540:
   4303 	case ixgbe_mac_X550:
   4304 	case ixgbe_mac_X550EM_x:
   4305 	case ixgbe_mac_X550EM_a:
   4306 		tmp = IXGBE_DV_X540(frame, frame);
   4307 		break;
   4308 	default:
   4309 		tmp = IXGBE_DV(frame, frame);
   4310 		break;
   4311 	}
   4312 	size = IXGBE_BT2KB(tmp);
   4313 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4314 	hw->fc.high_water[0] = rxpb - size;
   4315 
   4316 	/* Now calculate Low Water */
   4317 	switch (hw->mac.type) {
   4318 	case ixgbe_mac_X540:
   4319 	case ixgbe_mac_X550:
   4320 	case ixgbe_mac_X550EM_x:
   4321 	case ixgbe_mac_X550EM_a:
   4322 		tmp = IXGBE_LOW_DV_X540(frame);
   4323 		break;
   4324 	default:
   4325 		tmp = IXGBE_LOW_DV(frame);
   4326 		break;
   4327 	}
   4328 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4329 
   4330 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4331 	hw->fc.send_xon = TRUE;
   4332 } /* ixgbe_config_delay_values */
   4333 
   4334 /************************************************************************
   4335  * ixgbe_set_rxfilter - Multicast Update
   4336  *
   4337  *   Called whenever multicast address list is updated.
   4338  ************************************************************************/
static void
ixgbe_set_rxfilter(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");

	/* Rebuild the multicast address table from scratch. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Fall back to ALLMULTI if the table would overflow or if
		 * an address range (addrlo != addrhi) was requested, since
		 * ranges cannot be represented in the filter table.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Translate promiscuous/allmulti state into FCTRL UPE/MPE bits. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		/* Drop the ethercom lock before touching the hardware. */
		ETHER_UNLOCK(ec);
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */
   4394 
   4395 /************************************************************************
   4396  * ixgbe_mc_array_itr
   4397  *
   4398  *   An iterator function needed by the multicast shared code.
   4399  *   It feeds the shared code routine the addresses in the
   4400  *   array of ixgbe_set_rxfilter() one by one.
   4401  ************************************************************************/
   4402 static u8 *
   4403 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4404 {
   4405 	struct ixgbe_mc_addr *mta;
   4406 
   4407 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4408 	*vmdq = mta->vmdq;
   4409 
   4410 	*update_ptr = (u8*)(mta + 1);
   4411 
   4412 	return (mta->addr);
   4413 } /* ixgbe_mc_array_itr */
   4414 
   4415 /************************************************************************
   4416  * ixgbe_local_timer - Timer routine
   4417  *
   4418  *   Checks for link status, updates statistics,
   4419  *   and runs the watchdog check.
   4420  ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Callout entry point: grab the core lock and run the body. */
	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
   4430 
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Update some event counters: fold the per-queue TX DMA setup
	 * error counters into the adapter-wide event counters.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm ourselves to run again in one second. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log it, mark down and reinitialize the device. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
   4535 
   4536 /************************************************************************
   4537  * ixgbe_recovery_mode_timer - Recovery mode timer routine
   4538  ************************************************************************/
   4539 static void
   4540 ixgbe_recovery_mode_timer(void *arg)
   4541 {
   4542 	struct adapter *adapter = arg;
   4543 	struct ixgbe_hw *hw = &adapter->hw;
   4544 
   4545 	IXGBE_CORE_LOCK(adapter);
   4546 	if (ixgbe_fw_recovery_mode(hw)) {
   4547 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
   4548 			/* Firmware error detected, entering recovery mode */
   4549 			device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
   4550 
   4551 			if (hw->adapter_stopped == FALSE)
   4552 				ixgbe_stop(adapter);
   4553 		}
   4554 	} else
   4555 		atomic_cas_uint(&adapter->recovery_mode, 1, 0);
   4556 
   4557 	callout_reset(&adapter->recovery_mode_timer, hz,
   4558 	    ixgbe_recovery_mode_timer, adapter);
   4559 	IXGBE_CORE_UNLOCK(adapter);
   4560 } /* ixgbe_recovery_mode_timer */
   4561 
   4562 /************************************************************************
   4563  * ixgbe_sfp_probe
   4564  *
   4565  *   Determine if a port had optics inserted.
   4566  ************************************************************************/
   4567 static bool
   4568 ixgbe_sfp_probe(struct adapter *adapter)
   4569 {
   4570 	struct ixgbe_hw	*hw = &adapter->hw;
   4571 	device_t	dev = adapter->dev;
   4572 	bool		result = FALSE;
   4573 
   4574 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4575 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4576 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4577 		if (ret)
   4578 			goto out;
   4579 		ret = hw->phy.ops.reset(hw);
   4580 		adapter->sfp_probe = FALSE;
   4581 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4582 			device_printf(dev,"Unsupported SFP+ module detected!");
   4583 			device_printf(dev,
   4584 			    "Reload driver with supported module.\n");
   4585 			goto out;
   4586 		} else
   4587 			device_printf(dev, "SFP+ module detected!\n");
   4588 		/* We now have supported optics */
   4589 		result = TRUE;
   4590 	}
   4591 out:
   4592 
   4593 	return (result);
   4594 } /* ixgbe_sfp_probe */
   4595 
   4596 /************************************************************************
   4597  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4598  ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32		err, cage_full = 0;

	IXGBE_CORE_LOCK(adapter);
	++adapter->mod_sicount.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * Crosstalk-fix hardware: read the SDP pin that reports
		 * cage presence and bail out if no module is inserted.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 *  We could recover the status by calling setup_sfp(),
		 * setup_link() and some others. It's complex and might not
		 * work correctly on some unknown cases. To avoid such type of
		 * problem, call ixgbe_init_locked(). It's simple and safe
		 * approach.
		 */
		ixgbe_init_locked(adapter);
	} else {
		/* 82598 only needs a PHY reset; later MACs set up the SFP. */
		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}
	/* Kick the multispeed-fiber handler to renegotiate the link. */
	softint_schedule(adapter->msf_si);
out:
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mod */
   4662 
   4663 
   4664 /************************************************************************
   4665  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4666  ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg;
	bool		negotiate;

	IXGBE_CORE_LOCK(adapter);
	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * If nothing is advertised, fall back to the hardware's full link
	 * capabilities before (re)configuring the link.
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_msf */
   4694 
   4695 /************************************************************************
   4696  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4697  ************************************************************************/
   4698 static void
   4699 ixgbe_handle_phy(void *context)
   4700 {
   4701 	struct adapter	*adapter = context;
   4702 	struct ixgbe_hw *hw = &adapter->hw;
   4703 	int error;
   4704 
   4705 	++adapter->phy_sicount.ev_count;
   4706 	error = hw->phy.ops.handle_lasi(hw);
   4707 	if (error == IXGBE_ERR_OVERTEMP)
   4708 		device_printf(adapter->dev,
   4709 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4710 		    " PHY will downshift to lower power state!\n");
   4711 	else if (error)
   4712 		device_printf(adapter->dev,
   4713 		    "Error handling LASI interrupt: %d\n", error);
   4714 } /* ixgbe_handle_phy */
   4715 
   4716 static void
   4717 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4718 {
   4719 	struct adapter *adapter = ifp->if_softc;
   4720 
   4721 	IXGBE_CORE_LOCK(adapter);
   4722 	ixgbe_stop(adapter);
   4723 	IXGBE_CORE_UNLOCK(adapter);
   4724 }
   4725 
   4726 /************************************************************************
   4727  * ixgbe_stop - Stop the hardware
   4728  *
   4729  *   Disables all traffic on the adapter by issuing a
   4730  *   global reset on the MAC and deallocates TX/RX buffers.
   4731  ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the watchdog before touching the MAC. */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared before calling
	 * ixgbe_stop_adapter() — presumably so stop_adapter() does not
	 * short-circuit on an already-stopped flag; confirm against the
	 * shared-code implementation.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
   4767 
   4768 /************************************************************************
   4769  * ixgbe_update_link_status - Update OS on link state
   4770  *
   4771  * Note: Only updates the OS on the cached link state.
   4772  *	 The real check of the hardware only happens with
   4773  *	 a link interrupt.
   4774  ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on the DOWN/UNKNOWN -> UP transition. */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 *  Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Drain any pending work now that the link is gone. */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
   4861 
   4862 /************************************************************************
   4863  * ixgbe_config_dmac - Configure DMA Coalescing
   4864  ************************************************************************/
   4865 static void
   4866 ixgbe_config_dmac(struct adapter *adapter)
   4867 {
   4868 	struct ixgbe_hw *hw = &adapter->hw;
   4869 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4870 
   4871 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4872 		return;
   4873 
   4874 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4875 	    dcfg->link_speed ^ adapter->link_speed) {
   4876 		dcfg->watchdog_timer = adapter->dmac;
   4877 		dcfg->fcoe_en = false;
   4878 		dcfg->link_speed = adapter->link_speed;
   4879 		dcfg->num_tcs = 1;
   4880 
   4881 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4882 		    dcfg->watchdog_timer, dcfg->link_speed);
   4883 
   4884 		hw->mac.ops.dmac_config(hw);
   4885 	}
   4886 } /* ixgbe_config_dmac */
   4887 
   4888 /************************************************************************
   4889  * ixgbe_enable_intr
   4890  ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from the full cause set, minus the per-queue RX/TX bits */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the "other"-cause bits that this MAC generation supports */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	/* Unmask the assembled "other" causes */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	/* Post all the register writes before returning */
	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
   4971 
   4972 /************************************************************************
   4973  * ixgbe_disable_intr_internal
   4974  ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue	*que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Clear MSI-X auto-clear so no cause re-arms behind our back */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	/*
	 * Mask every queue vector.  nestok is forwarded unchanged;
	 * its exact semantics live in ixgbe_disable_queue_internal
	 * (presumably whether nested disables are permitted -- confirm there).
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	/* Post the register writes before returning */
	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
   4992 
   4993 /************************************************************************
   4994  * ixgbe_disable_intr
   4995  ************************************************************************/
/* Disable all interrupts; nested disables are allowed (nestok = true). */
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
   5002 
   5003 /************************************************************************
   5004  * ixgbe_ensure_disabled_intr
   5005  ************************************************************************/
/*
 * Disable all interrupts unconditionally (nestok = false) -- used to
 * guarantee the disabled state regardless of prior disable nesting.
 */
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
   5012 
   5013 /************************************************************************
   5014  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   5015  ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct		tx_ring *txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR also acknowledges the pending causes */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Shared line, not our interrupt: count it and re-arm */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		/* Reap completed TX descriptors under the ring lock */
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack cause, defer to softint */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed fiber (82599 only): ack cause, defer to softint */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Re-enable now, or after the deferred queue handler runs */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
   5106 
   5107 /************************************************************************
   5108  * ixgbe_free_pciintr_resources
   5109  ************************************************************************/
   5110 static void
   5111 ixgbe_free_pciintr_resources(struct adapter *adapter)
   5112 {
   5113 	struct ix_queue *que = adapter->queues;
   5114 	int		rid;
   5115 
   5116 	/*
   5117 	 * Release all msix queue resources:
   5118 	 */
   5119 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   5120 		if (que->res != NULL) {
   5121 			pci_intr_disestablish(adapter->osdep.pc,
   5122 			    adapter->osdep.ihs[i]);
   5123 			adapter->osdep.ihs[i] = NULL;
   5124 		}
   5125 	}
   5126 
   5127 	/* Clean the Legacy or Link interrupt last */
   5128 	if (adapter->vector) /* we are doing MSIX */
   5129 		rid = adapter->vector;
   5130 	else
   5131 		rid = 0;
   5132 
   5133 	if (adapter->osdep.ihs[rid] != NULL) {
   5134 		pci_intr_disestablish(adapter->osdep.pc,
   5135 		    adapter->osdep.ihs[rid]);
   5136 		adapter->osdep.ihs[rid] = NULL;
   5137 	}
   5138 
   5139 	if (adapter->osdep.intrs != NULL) {
   5140 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   5141 		    adapter->osdep.nintrs);
   5142 		adapter->osdep.intrs = NULL;
   5143 	}
   5144 } /* ixgbe_free_pciintr_resources */
   5145 
   5146 /************************************************************************
   5147  * ixgbe_free_pci_resources
   5148  ************************************************************************/
   5149 static void
   5150 ixgbe_free_pci_resources(struct adapter *adapter)
   5151 {
   5152 
   5153 	ixgbe_free_pciintr_resources(adapter);
   5154 
   5155 	if (adapter->osdep.mem_size != 0) {
   5156 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   5157 		    adapter->osdep.mem_bus_space_handle,
   5158 		    adapter->osdep.mem_size);
   5159 	}
   5160 
   5161 } /* ixgbe_free_pci_resources */
   5162 
   5163 /************************************************************************
   5164  * ixgbe_set_sysctl_value
   5165  ************************************************************************/
   5166 static void
   5167 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   5168     const char *description, int *limit, int value)
   5169 {
   5170 	device_t dev =	adapter->dev;
   5171 	struct sysctllog **log;
   5172 	const struct sysctlnode *rnode, *cnode;
   5173 
   5174 	/*
   5175 	 * It's not required to check recovery mode because this function never
   5176 	 * touches hardware.
   5177 	 */
   5178 
   5179 	log = &adapter->sysctllog;
   5180 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5181 		aprint_error_dev(dev, "could not create sysctl root\n");
   5182 		return;
   5183 	}
   5184 	if (sysctl_createv(log, 0, &rnode, &cnode,
   5185 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   5186 	    name, SYSCTL_DESCR(description),
   5187 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   5188 		aprint_error_dev(dev, "could not create sysctl\n");
   5189 	*limit = value;
   5190 } /* ixgbe_set_sysctl_value */
   5191 
   5192 /************************************************************************
   5193  * ixgbe_sysctl_flowcntl
   5194  *
   5195  *   SYSCTL wrapper around setting Flow Control
   5196  ************************************************************************/
   5197 static int
   5198 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   5199 {
   5200 	struct sysctlnode node = *rnode;
   5201 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5202 	int error, fc;
   5203 
   5204 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5205 		return (EPERM);
   5206 
   5207 	fc = adapter->hw.fc.current_mode;
   5208 	node.sysctl_data = &fc;
   5209 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5210 	if (error != 0 || newp == NULL)
   5211 		return error;
   5212 
   5213 	/* Don't bother if it's not changed */
   5214 	if (fc == adapter->hw.fc.current_mode)
   5215 		return (0);
   5216 
   5217 	return ixgbe_set_flowcntl(adapter, fc);
   5218 } /* ixgbe_sysctl_flowcntl */
   5219 
   5220 /************************************************************************
   5221  * ixgbe_set_flowcntl - Set flow control
   5222  *
   5223  *   Flow control values:
   5224  *     0 - off
   5225  *     1 - rx pause
   5226  *     2 - tx pause
   5227  *     3 - full
   5228  ************************************************************************/
   5229 static int
   5230 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   5231 {
   5232 	switch (fc) {
   5233 		case ixgbe_fc_rx_pause:
   5234 		case ixgbe_fc_tx_pause:
   5235 		case ixgbe_fc_full:
   5236 			adapter->hw.fc.requested_mode = fc;
   5237 			if (adapter->num_queues > 1)
   5238 				ixgbe_disable_rx_drop(adapter);
   5239 			break;
   5240 		case ixgbe_fc_none:
   5241 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5242 			if (adapter->num_queues > 1)
   5243 				ixgbe_enable_rx_drop(adapter);
   5244 			break;
   5245 		default:
   5246 			return (EINVAL);
   5247 	}
   5248 
   5249 #if 0 /* XXX NetBSD */
   5250 	/* Don't autoneg if forcing a value */
   5251 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   5252 #endif
   5253 	ixgbe_fc_enable(&adapter->hw);
   5254 
   5255 	return (0);
   5256 } /* ixgbe_set_flowcntl */
   5257 
   5258 /************************************************************************
   5259  * ixgbe_enable_rx_drop
   5260  *
   5261  *   Enable the hardware to drop packets when the buffer is
   5262  *   full. This is useful with multiqueue, so that no single
   5263  *   queue being full stalls the entire RX engine. We only
   5264  *   enable this when Multiqueue is enabled AND Flow Control
   5265  *   is disabled.
   5266  ************************************************************************/
   5267 static void
   5268 ixgbe_enable_rx_drop(struct adapter *adapter)
   5269 {
   5270 	struct ixgbe_hw *hw = &adapter->hw;
   5271 	struct rx_ring	*rxr;
   5272 	u32		srrctl;
   5273 
   5274 	for (int i = 0; i < adapter->num_queues; i++) {
   5275 		rxr = &adapter->rx_rings[i];
   5276 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5277 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   5278 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5279 	}
   5280 
   5281 	/* enable drop for each vf */
   5282 	for (int i = 0; i < adapter->num_vfs; i++) {
   5283 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5284 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   5285 		    IXGBE_QDE_ENABLE));
   5286 	}
   5287 } /* ixgbe_enable_rx_drop */
   5288 
   5289 /************************************************************************
   5290  * ixgbe_disable_rx_drop
   5291  ************************************************************************/
   5292 static void
   5293 ixgbe_disable_rx_drop(struct adapter *adapter)
   5294 {
   5295 	struct ixgbe_hw *hw = &adapter->hw;
   5296 	struct rx_ring	*rxr;
   5297 	u32		srrctl;
   5298 
   5299 	for (int i = 0; i < adapter->num_queues; i++) {
   5300 		rxr = &adapter->rx_rings[i];
   5301 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5302 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   5303 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5304 	}
   5305 
   5306 	/* disable drop for each vf */
   5307 	for (int i = 0; i < adapter->num_vfs; i++) {
   5308 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5309 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   5310 	}
   5311 } /* ixgbe_disable_rx_drop */
   5312 
   5313 /************************************************************************
   5314  * ixgbe_sysctl_advertise
   5315  *
   5316  *   SYSCTL wrapper around setting advertised speed
   5317  ************************************************************************/
   5318 static int
   5319 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   5320 {
   5321 	struct sysctlnode node = *rnode;
   5322 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5323 	int	       error = 0, advertise;
   5324 
   5325 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5326 		return (EPERM);
   5327 
   5328 	advertise = adapter->advertise;
   5329 	node.sysctl_data = &advertise;
   5330 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5331 	if (error != 0 || newp == NULL)
   5332 		return error;
   5333 
   5334 	return ixgbe_set_advertise(adapter, advertise);
   5335 } /* ixgbe_sysctl_advertise */
   5336 
   5337 /************************************************************************
   5338  * ixgbe_set_advertise - Control advertised link speed
   5339  *
   5340  *   Flags:
   5341  *     0x00 - Default (all capable link speed)
   5342  *     0x01 - advertise 100 Mb
   5343  *     0x02 - advertise 1G
   5344  *     0x04 - advertise 10G
   5345  *     0x08 - advertise 10 Mb
   5346  *     0x10 - advertise 2.5G
   5347  *     0x20 - advertise 5G
   5348  ************************************************************************/
   5349 static int
   5350 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5351 {
   5352 	device_t	 dev;
   5353 	struct ixgbe_hw	 *hw;
   5354 	ixgbe_link_speed speed = 0;
   5355 	ixgbe_link_speed link_caps = 0;
   5356 	s32		 err = IXGBE_NOT_IMPLEMENTED;
   5357 	bool		 negotiate = FALSE;
   5358 
   5359 	/* Checks to validate new value */
   5360 	if (adapter->advertise == advertise) /* no change */
   5361 		return (0);
   5362 
   5363 	dev = adapter->dev;
   5364 	hw = &adapter->hw;
   5365 
   5366 	/* No speed changes for backplane media */
   5367 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5368 		return (ENODEV);
   5369 
   5370 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5371 	    (hw->phy.multispeed_fiber))) {
   5372 		device_printf(dev,
   5373 		    "Advertised speed can only be set on copper or "
   5374 		    "multispeed fiber media types.\n");
   5375 		return (EINVAL);
   5376 	}
   5377 
   5378 	if (advertise < 0x0 || advertise > 0x2f) {
   5379 		device_printf(dev,
   5380 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5381 		return (EINVAL);
   5382 	}
   5383 
   5384 	if (hw->mac.ops.get_link_capabilities) {
   5385 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5386 		    &negotiate);
   5387 		if (err != IXGBE_SUCCESS) {
   5388 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5389 			return (ENODEV);
   5390 		}
   5391 	}
   5392 
   5393 	/* Set new value and report new advertised mode */
   5394 	if (advertise & 0x1) {
   5395 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5396 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5397 			return (EINVAL);
   5398 		}
   5399 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5400 	}
   5401 	if (advertise & 0x2) {
   5402 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5403 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5404 			return (EINVAL);
   5405 		}
   5406 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5407 	}
   5408 	if (advertise & 0x4) {
   5409 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5410 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5411 			return (EINVAL);
   5412 		}
   5413 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5414 	}
   5415 	if (advertise & 0x8) {
   5416 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5417 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5418 			return (EINVAL);
   5419 		}
   5420 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5421 	}
   5422 	if (advertise & 0x10) {
   5423 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5424 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5425 			return (EINVAL);
   5426 		}
   5427 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5428 	}
   5429 	if (advertise & 0x20) {
   5430 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5431 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5432 			return (EINVAL);
   5433 		}
   5434 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5435 	}
   5436 	if (advertise == 0)
   5437 		speed = link_caps; /* All capable link speed */
   5438 
   5439 	hw->mac.autotry_restart = TRUE;
   5440 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5441 	adapter->advertise = advertise;
   5442 
   5443 	return (0);
   5444 } /* ixgbe_set_advertise */
   5445 
   5446 /************************************************************************
   5447  * ixgbe_get_advertise - Get current advertised speed settings
   5448  *
   5449  *   Formatted for sysctl usage.
   5450  *   Flags:
   5451  *     0x01 - advertise 100 Mb
   5452  *     0x02 - advertise 1G
   5453  *     0x04 - advertise 10G
   5454  *     0x08 - advertise 10 Mb (yes, Mb)
   5455  *     0x10 - advertise 2.5G
   5456  *     0x20 - advertise 5G
   5457  ************************************************************************/
   5458 static int
   5459 ixgbe_get_advertise(struct adapter *adapter)
   5460 {
   5461 	struct ixgbe_hw	 *hw = &adapter->hw;
   5462 	int		 speed;
   5463 	ixgbe_link_speed link_caps = 0;
   5464 	s32		 err;
   5465 	bool		 negotiate = FALSE;
   5466 
   5467 	/*
   5468 	 * Advertised speed means nothing unless it's copper or
   5469 	 * multi-speed fiber
   5470 	 */
   5471 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5472 	    !(hw->phy.multispeed_fiber))
   5473 		return (0);
   5474 
   5475 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5476 	if (err != IXGBE_SUCCESS)
   5477 		return (0);
   5478 
   5479 	speed =
   5480 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5481 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5482 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5483 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5484 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5485 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5486 
   5487 	return speed;
   5488 } /* ixgbe_get_advertise */
   5489 
   5490 /************************************************************************
   5491  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5492  *
   5493  *   Control values:
   5494  *     0/1 - off / on (use default value of 1000)
   5495  *
   5496  *     Legal timer values are:
   5497  *     50,100,250,500,1000,2000,5000,10000
   5498  *
   5499  *     Turning off interrupt moderation will also turn this off.
   5500  ************************************************************************/
   5501 static int
   5502 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5503 {
   5504 	struct sysctlnode node = *rnode;
   5505 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5506 	struct ifnet   *ifp = adapter->ifp;
   5507 	int	       error;
   5508 	int	       newval;
   5509 
   5510 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5511 		return (EPERM);
   5512 
   5513 	newval = adapter->dmac;
   5514 	node.sysctl_data = &newval;
   5515 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5516 	if ((error) || (newp == NULL))
   5517 		return (error);
   5518 
   5519 	switch (newval) {
   5520 	case 0:
   5521 		/* Disabled */
   5522 		adapter->dmac = 0;
   5523 		break;
   5524 	case 1:
   5525 		/* Enable and use default */
   5526 		adapter->dmac = 1000;
   5527 		break;
   5528 	case 50:
   5529 	case 100:
   5530 	case 250:
   5531 	case 500:
   5532 	case 1000:
   5533 	case 2000:
   5534 	case 5000:
   5535 	case 10000:
   5536 		/* Legal values - allow */
   5537 		adapter->dmac = newval;
   5538 		break;
   5539 	default:
   5540 		/* Do nothing, illegal value */
   5541 		return (EINVAL);
   5542 	}
   5543 
   5544 	/* Re-initialize hardware if it's already running */
   5545 	if (ifp->if_flags & IFF_RUNNING)
   5546 		ifp->if_init(ifp);
   5547 
   5548 	return (0);
   5549 }
   5550 
   5551 #ifdef IXGBE_DEBUG
   5552 /************************************************************************
   5553  * ixgbe_sysctl_power_state
   5554  *
   5555  *   Sysctl to test power states
   5556  *   Values:
   5557  *     0      - set device to D0
   5558  *     3      - set device to D3
   5559  *     (none) - get current device power state
   5560  ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
/*
 * NOTE(review): the whole body is compiled out; the handler is a stub
 * that always reports success.  The disabled code still uses FreeBSD
 * idioms (req->newp, DEVICE_SUSPEND/DEVICE_RESUME) and would need
 * porting before "notyet" is removed.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t       dev =  adapter->dev;
	int	       curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are supported */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
   5596 #endif
   5597 
   5598 /************************************************************************
   5599  * ixgbe_sysctl_wol_enable
   5600  *
   5601  *   Sysctl to enable/disable the WoL capability,
   5602  *   if supported by the adapter.
   5603  *
   5604  *   Values:
   5605  *     0 - disabled
   5606  *     1 - enabled
   5607  ************************************************************************/
   5608 static int
   5609 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5610 {
   5611 	struct sysctlnode node = *rnode;
   5612 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5613 	struct ixgbe_hw *hw = &adapter->hw;
   5614 	bool		new_wol_enabled;
   5615 	int		error = 0;
   5616 
   5617 	/*
   5618 	 * It's not required to check recovery mode because this function never
   5619 	 * touches hardware.
   5620 	 */
   5621 	new_wol_enabled = hw->wol_enabled;
   5622 	node.sysctl_data = &new_wol_enabled;
   5623 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5624 	if ((error) || (newp == NULL))
   5625 		return (error);
   5626 	if (new_wol_enabled == hw->wol_enabled)
   5627 		return (0);
   5628 
   5629 	if (new_wol_enabled && !adapter->wol_support)
   5630 		return (ENODEV);
   5631 	else
   5632 		hw->wol_enabled = new_wol_enabled;
   5633 
   5634 	return (0);
   5635 } /* ixgbe_sysctl_wol_enable */
   5636 
   5637 /************************************************************************
   5638  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5639  *
   5640  *   Sysctl to enable/disable the types of packets that the
   5641  *   adapter will wake up on upon receipt.
   5642  *   Flags:
   5643  *     0x1  - Link Status Change
   5644  *     0x2  - Magic Packet
   5645  *     0x4  - Direct Exact
   5646  *     0x8  - Directed Multicast
   5647  *     0x10 - Broadcast
   5648  *     0x20 - ARP/IPv4 Request Packet
   5649  *     0x40 - Direct IPv4 Packet
   5650  *     0x80 - Direct IPv6 Packet
   5651  *
   5652  *   Settings not listed above will cause the sysctl to return an error.
   5653  ************************************************************************/
   5654 static int
   5655 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5656 {
   5657 	struct sysctlnode node = *rnode;
   5658 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5659 	int error = 0;
   5660 	u32 new_wufc;
   5661 
   5662 	/*
   5663 	 * It's not required to check recovery mode because this function never
   5664 	 * touches hardware.
   5665 	 */
   5666 	new_wufc = adapter->wufc;
   5667 	node.sysctl_data = &new_wufc;
   5668 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5669 	if ((error) || (newp == NULL))
   5670 		return (error);
   5671 	if (new_wufc == adapter->wufc)
   5672 		return (0);
   5673 
   5674 	if (new_wufc & 0xffffff00)
   5675 		return (EINVAL);
   5676 
   5677 	new_wufc &= 0xff;
   5678 	new_wufc |= (0xffffff & adapter->wufc);
   5679 	adapter->wufc = new_wufc;
   5680 
   5681 	return (0);
   5682 } /* ixgbe_sysctl_wufc */
   5683 
   5684 #ifdef IXGBE_DEBUG
   5685 /************************************************************************
   5686  * ixgbe_sysctl_print_rss_config
   5687  ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
/*
 * NOTE(review): body is compiled out ("notyet"); the handler currently
 * returns success without producing output.  The disabled code relies
 * on FreeBSD's sbuf API and the FreeBSD sysctl "req" object.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	struct sbuf	*buf;
	int		error = 0, reta_size;
	u32		reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550-class parts have the extended 128-entry table */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		/* Entries 0-31 live in RETA; 32+ in the extended ERETA */
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
   5744 #endif /* IXGBE_DEBUG */
   5745 
   5746 /************************************************************************
   5747  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5748  *
   5749  *   For X552/X557-AT devices using an external PHY
   5750  ************************************************************************/
   5751 static int
   5752 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5753 {
   5754 	struct sysctlnode node = *rnode;
   5755 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5756 	struct ixgbe_hw *hw = &adapter->hw;
   5757 	int val;
   5758 	u16 reg;
   5759 	int		error;
   5760 
   5761 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5762 		return (EPERM);
   5763 
   5764 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5765 		device_printf(adapter->dev,
   5766 		    "Device has no supported external thermal sensor.\n");
   5767 		return (ENODEV);
   5768 	}
   5769 
   5770 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5771 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5772 		device_printf(adapter->dev,
   5773 		    "Error reading from PHY's current temperature register\n");
   5774 		return (EAGAIN);
   5775 	}
   5776 
   5777 	node.sysctl_data = &val;
   5778 
   5779 	/* Shift temp for output */
   5780 	val = reg >> 8;
   5781 
   5782 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5783 	if ((error) || (newp == NULL))
   5784 		return (error);
   5785 
   5786 	return (0);
   5787 } /* ixgbe_sysctl_phy_temp */
   5788 
   5789 /************************************************************************
   5790  * ixgbe_sysctl_phy_overtemp_occurred
   5791  *
   5792  *   Reports (directly from the PHY) whether the current PHY
   5793  *   temperature is over the overtemp threshold.
   5794  ************************************************************************/
   5795 static int
   5796 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5797 {
   5798 	struct sysctlnode node = *rnode;
   5799 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5800 	struct ixgbe_hw *hw = &adapter->hw;
   5801 	int val, error;
   5802 	u16 reg;
   5803 
   5804 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5805 		return (EPERM);
   5806 
   5807 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5808 		device_printf(adapter->dev,
   5809 		    "Device has no supported external thermal sensor.\n");
   5810 		return (ENODEV);
   5811 	}
   5812 
   5813 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5814 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5815 		device_printf(adapter->dev,
   5816 		    "Error reading from PHY's temperature status register\n");
   5817 		return (EAGAIN);
   5818 	}
   5819 
   5820 	node.sysctl_data = &val;
   5821 
   5822 	/* Get occurrence bit */
   5823 	val = !!(reg & 0x4000);
   5824 
   5825 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5826 	if ((error) || (newp == NULL))
   5827 		return (error);
   5828 
   5829 	return (0);
   5830 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5831 
   5832 /************************************************************************
   5833  * ixgbe_sysctl_eee_state
   5834  *
   5835  *   Sysctl to set EEE power saving feature
   5836  *   Values:
   5837  *     0      - disable EEE
   5838  *     1      - enable EEE
   5839  *     (none) - get current device EEE state
   5840  ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet   *ifp = adapter->ifp;
	device_t       dev = adapter->dev;
	int	       curr_eee, new_eee, error = 0;
	s32	       retval;

	/* Refuse configuration changes while firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * Seed the sysctl value with the current enable state; on a write,
	 * sysctl_lookup() overwrites new_eee with the user-supplied value.
	 */
	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
	node.sysctl_data = &new_eee;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	/* Program the hardware before updating our cached state. */
	retval = ixgbe_setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
   5891 
/*
 * PRINTQS(adapter, regname): emit one console line containing the device
 * name, the register name, and the value of IXGBE_<regname>(i) for every
 * queue (tab before the first value, a single space between the rest).
 * Relies on token pasting to form the per-queue register offset macro.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
   5905 
   5906 /************************************************************************
   5907  * ixgbe_print_debug_info
   5908  *
   5909  *   Called only when em_display_debug_stats is enabled.
   5910  *   Provides a way to take a look at important statistics
   5911  *   maintained by the driver and hardware.
   5912  ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/* X550-family MACs have a 128-entry redirection table; others 32. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Entries 0-31 are read from RETA, the remainder from ERETA. */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
				IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
				IXGBE_ERETA(i - 32)));
	}

	/* Column header (queue indices), then one row per Rx ring register. */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* Each RQSMR register covers four queues, hence num_queues / 4. */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Per-queue software counter of interrupt-disable events. */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask state; EIMS_EX is only read on non-82598 MACs. */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
} /* ixgbe_print_debug_info */
   5978 
   5979 /************************************************************************
   5980  * ixgbe_sysctl_debug
   5981  ************************************************************************/
   5982 static int
   5983 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
   5984 {
   5985 	struct sysctlnode node = *rnode;
   5986 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5987 	int	       error, result = 0;
   5988 
   5989 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5990 		return (EPERM);
   5991 
   5992 	node.sysctl_data = &result;
   5993 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5994 
   5995 	if (error || newp == NULL)
   5996 		return error;
   5997 
   5998 	if (result == 1)
   5999 		ixgbe_print_debug_info(adapter);
   6000 
   6001 	return 0;
   6002 } /* ixgbe_sysctl_debug */
   6003 
   6004 /************************************************************************
   6005  * ixgbe_init_device_features
   6006  ************************************************************************/
   6007 static void
   6008 ixgbe_init_device_features(struct adapter *adapter)
   6009 {
   6010 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   6011 			  | IXGBE_FEATURE_RSS
   6012 			  | IXGBE_FEATURE_MSI
   6013 			  | IXGBE_FEATURE_MSIX
   6014 			  | IXGBE_FEATURE_LEGACY_IRQ
   6015 			  | IXGBE_FEATURE_LEGACY_TX;
   6016 
   6017 	/* Set capabilities first... */
   6018 	switch (adapter->hw.mac.type) {
   6019 	case ixgbe_mac_82598EB:
   6020 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   6021 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   6022 		break;
   6023 	case ixgbe_mac_X540:
   6024 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6025 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6026 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   6027 		    (adapter->hw.bus.func == 0))
   6028 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6029 		break;
   6030 	case ixgbe_mac_X550:
   6031 		/*
   6032 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6033 		 * NVM Image version.
   6034 		 */
   6035 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6036 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6037 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6038 		break;
   6039 	case ixgbe_mac_X550EM_x:
   6040 		/*
   6041 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6042 		 * NVM Image version.
   6043 		 */
   6044 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6045 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6046 		break;
   6047 	case ixgbe_mac_X550EM_a:
   6048 		/*
   6049 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6050 		 * NVM Image version.
   6051 		 */
   6052 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6053 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6054 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6055 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   6056 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   6057 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6058 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6059 		}
   6060 		break;
   6061 	case ixgbe_mac_82599EB:
   6062 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6063 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6064 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   6065 		    (adapter->hw.bus.func == 0))
   6066 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6067 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   6068 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6069 		break;
   6070 	default:
   6071 		break;
   6072 	}
   6073 
   6074 	/* Enabled by default... */
   6075 	/* Fan failure detection */
   6076 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   6077 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   6078 	/* Netmap */
   6079 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   6080 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   6081 	/* EEE */
   6082 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   6083 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   6084 	/* Thermal Sensor */
   6085 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   6086 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   6087 	/*
   6088 	 * Recovery mode:
   6089 	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
   6090 	 * NVM Image version.
   6091 	 */
   6092 
   6093 	/* Enabled via global sysctl... */
   6094 	/* Flow Director */
   6095 	if (ixgbe_enable_fdir) {
   6096 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   6097 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   6098 		else
   6099 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   6100 	}
   6101 	/* Legacy (single queue) transmit */
   6102 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   6103 	    ixgbe_enable_legacy_tx)
   6104 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   6105 	/*
   6106 	 * Message Signal Interrupts - Extended (MSI-X)
   6107 	 * Normal MSI is only enabled if MSI-X calls fail.
   6108 	 */
   6109 	if (!ixgbe_enable_msix)
   6110 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   6111 	/* Receive-Side Scaling (RSS) */
   6112 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   6113 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   6114 
   6115 	/* Disable features with unmet dependencies... */
   6116 	/* No MSI-X */
   6117 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   6118 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6119 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6120 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   6121 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   6122 	}
   6123 } /* ixgbe_init_device_features */
   6124 
   6125 /************************************************************************
   6126  * ixgbe_probe - Device identification routine
   6127  *
   6128  *   Determines if the driver should be loaded on
   6129  *   adapter based on its PCI vendor/device ID.
   6130  *
   6131  *   return BUS_PROBE_DEFAULT on success, positive on failure
   6132  ************************************************************************/
   6133 static int
   6134 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   6135 {
   6136 	const struct pci_attach_args *pa = aux;
   6137 
   6138 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   6139 }
   6140 
   6141 static const ixgbe_vendor_info_t *
   6142 ixgbe_lookup(const struct pci_attach_args *pa)
   6143 {
   6144 	const ixgbe_vendor_info_t *ent;
   6145 	pcireg_t subid;
   6146 
   6147 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   6148 
   6149 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   6150 		return NULL;
   6151 
   6152 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   6153 
   6154 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   6155 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   6156 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   6157 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   6158 			(ent->subvendor_id == 0)) &&
   6159 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   6160 			(ent->subdevice_id == 0))) {
   6161 			return ent;
   6162 		}
   6163 	}
   6164 	return NULL;
   6165 }
   6166 
/*
 * ixgbe_ifflags_cb - ethercom callback invoked on if_flags/ec_capenable
 * changes.  Returns 0 when the change was applied in place, or ENETRESET
 * when the caller must fully reinitialize the interface.
 */
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	u_short change;
	int rv = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Compute the delta against the flags we cached last time. */
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	/*
	 * Flag changes outside IFF_CANTCHANGE|IFF_DEBUG need a full reset;
	 * a bare promiscuity toggle only needs the Rx filter reprogrammed.
	 */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		rv = ENETRESET;
		goto out;
	} else if ((change & IFF_PROMISC) != 0)
		ixgbe_set_rxfilter(adapter);

	/* Check for ec_capenable. */
	change = ec->ec_capenable ^ adapter->ec_capenable;
	adapter->ec_capenable = ec->ec_capenable;
	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
		rv = ENETRESET;
		goto out;
	}

	/*
	 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
	 */

	/* Set up VLAN support and filter */
	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
		ixgbe_setup_vlan_hw_support(adapter);

out:
	IXGBE_CORE_UNLOCK(adapter);

	return rv;
}
   6210 
   6211 /************************************************************************
   6212  * ixgbe_ioctl - Ioctl entry point
   6213  *
   6214  *   Called when the user wants to configure the interface.
   6215  *
   6216  *   return 0 on success, positive on failure
   6217  ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int		error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capabilities that must be toggled as a set. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* All ioctls are refused while firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First switch: debug logging of the request only — except
	 * SIOCZIFDATA, which also clears hardware and event counters here.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: actual command handling. */
	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the standard SFP EEPROM/diag addresses are allowed. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Bound the requested length before reading into i2c.data. */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Delegate to the common handler; only act on ENETRESET. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* Interface down: nothing to reprogram (deliberate no-op). */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_rxfilter(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
   6352 
   6353 /************************************************************************
   6354  * ixgbe_check_fan_failure
   6355  ************************************************************************/
   6356 static void
   6357 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   6358 {
   6359 	u32 mask;
   6360 
   6361 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   6362 	    IXGBE_ESDP_SDP1;
   6363 
   6364 	if (reg & mask)
   6365 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   6366 } /* ixgbe_check_fan_failure */
   6367 
   6368 /************************************************************************
   6369  * ixgbe_handle_que
   6370  ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Clean the Rx ring, then the Tx ring under the TX lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue mode: drain any packets backlogged on this ring. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule ourselves before re-enabling. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* que->res == NULL: re-enable via the global interrupt mask. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
   6408 
   6409 /************************************************************************
   6410  * ixgbe_handle_que_work
   6411  ************************************************************************/
   6412 static void
   6413 ixgbe_handle_que_work(struct work *wk, void *context)
   6414 {
   6415 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   6416 
   6417 	/*
   6418 	 * "enqueued flag" is not required here.
   6419 	 * See ixgbe_msix_que().
   6420 	 */
   6421 	ixgbe_handle_que(que);
   6422 }
   6423 
   6424 /************************************************************************
   6425  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   6426  ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring	*txr = adapter->tx_rings;
	int		counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char		intrbuf[PCI_INTRSTR_LEN];
	char		wqname[MAXCOMLEN];
	const char	*intrstr = NULL;
	int defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		/* Release the failed allocation before retrying. */
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Record which interrupt type we actually ended up with. */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	/* Still NULL here means both MSI and the INTx fallback failed. */
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	/* Fail if any of the softints/workqueues above could not be made. */
	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
		&& ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
   6532 
   6533 /************************************************************************
   6534  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6535  ************************************************************************/
   6536 static int
   6537 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6538 {
   6539 	device_t	dev = adapter->dev;
   6540 	struct		ix_queue *que = adapter->queues;
   6541 	struct		tx_ring *txr = adapter->tx_rings;
   6542 	pci_chipset_tag_t pc;
   6543 	char		intrbuf[PCI_INTRSTR_LEN];
   6544 	char		intr_xname[32];
   6545 	char		wqname[MAXCOMLEN];
   6546 	const char	*intrstr = NULL;
   6547 	int		error, vector = 0;
   6548 	int		cpu_id = 0;
   6549 	kcpuset_t	*affinity;
   6550 #ifdef RSS
   6551 	unsigned int	rss_buckets = 0;
   6552 	kcpuset_t	cpu_mask;
   6553 #endif
   6554 
   6555 	pc = adapter->osdep.pc;
   6556 #ifdef	RSS
   6557 	/*
   6558 	 * If we're doing RSS, the number of queues needs to
   6559 	 * match the number of RSS buckets that are configured.
   6560 	 *
   6561 	 * + If there's more queues than RSS buckets, we'll end
   6562 	 *   up with queues that get no traffic.
   6563 	 *
   6564 	 * + If there's more RSS buckets than queues, we'll end
   6565 	 *   up having multiple RSS buckets map to the same queue,
   6566 	 *   so there'll be some contention.
   6567 	 */
   6568 	rss_buckets = rss_getnumbuckets();
   6569 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6570 	    (adapter->num_queues != rss_buckets)) {
   6571 		device_printf(dev,
   6572 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6573 		    "; performance will be impacted.\n",
   6574 		    __func__, adapter->num_queues, rss_buckets);
   6575 	}
   6576 #endif
   6577 
   6578 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6579 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6580 	    adapter->osdep.nintrs) != 0) {
   6581 		aprint_error_dev(dev,
   6582 		    "failed to allocate MSI-X interrupt\n");
   6583 		return (ENXIO);
   6584 	}
   6585 
   6586 	kcpuset_create(&affinity, false);
   6587 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6588 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6589 		    device_xname(dev), i);
   6590 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6591 		    sizeof(intrbuf));
   6592 #ifdef IXGBE_MPSAFE
   6593 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6594 		    true);
   6595 #endif
   6596 		/* Set the handler function */
   6597 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6598 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6599 		    intr_xname);
   6600 		if (que->res == NULL) {
   6601 			aprint_error_dev(dev,
   6602 			    "Failed to register QUE handler\n");
   6603 			error = ENXIO;
   6604 			goto err_out;
   6605 		}
   6606 		que->msix = vector;
   6607 		adapter->active_queues |= 1ULL << que->msix;
   6608 
   6609 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6610 #ifdef	RSS
   6611 			/*
   6612 			 * The queue ID is used as the RSS layer bucket ID.
   6613 			 * We look up the queue ID -> RSS CPU ID and select
   6614 			 * that.
   6615 			 */
   6616 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6617 			CPU_SETOF(cpu_id, &cpu_mask);
   6618 #endif
   6619 		} else {
   6620 			/*
   6621 			 * Bind the MSI-X vector, and thus the
   6622 			 * rings to the corresponding CPU.
   6623 			 *
   6624 			 * This just happens to match the default RSS
   6625 			 * round-robin bucket -> queue -> CPU allocation.
   6626 			 */
   6627 			if (adapter->num_queues > 1)
   6628 				cpu_id = i;
   6629 		}
   6630 		/* Round-robin affinity */
   6631 		kcpuset_zero(affinity);
   6632 		kcpuset_set(affinity, cpu_id % ncpu);
   6633 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6634 		    NULL);
   6635 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6636 		    intrstr);
   6637 		if (error == 0) {
   6638 #if 1 /* def IXGBE_DEBUG */
   6639 #ifdef	RSS
   6640 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6641 			    cpu_id % ncpu);
   6642 #else
   6643 			aprint_normal(", bound queue %d to cpu %d", i,
   6644 			    cpu_id % ncpu);
   6645 #endif
   6646 #endif /* IXGBE_DEBUG */
   6647 		}
   6648 		aprint_normal("\n");
   6649 
   6650 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6651 			txr->txr_si = softint_establish(
   6652 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6653 				ixgbe_deferred_mq_start, txr);
   6654 			if (txr->txr_si == NULL) {
   6655 				aprint_error_dev(dev,
   6656 				    "couldn't establish software interrupt\n");
   6657 				error = ENXIO;
   6658 				goto err_out;
   6659 			}
   6660 		}
   6661 		que->que_si
   6662 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6663 			ixgbe_handle_que, que);
   6664 		if (que->que_si == NULL) {
   6665 			aprint_error_dev(dev,
   6666 			    "couldn't establish software interrupt\n");
   6667 			error = ENXIO;
   6668 			goto err_out;
   6669 		}
   6670 	}
   6671 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6672 	error = workqueue_create(&adapter->txr_wq, wqname,
   6673 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6674 	    IXGBE_WORKQUEUE_FLAGS);
   6675 	if (error) {
   6676 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6677 		goto err_out;
   6678 	}
   6679 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6680 
   6681 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6682 	error = workqueue_create(&adapter->que_wq, wqname,
   6683 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6684 	    IXGBE_WORKQUEUE_FLAGS);
   6685 	if (error) {
   6686 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6687 		goto err_out;
   6688 	}
   6689 
   6690 	/* and Link */
   6691 	cpu_id++;
   6692 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6693 	adapter->vector = vector;
   6694 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6695 	    sizeof(intrbuf));
   6696 #ifdef IXGBE_MPSAFE
   6697 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6698 	    true);
   6699 #endif
   6700 	/* Set the link handler function */
   6701 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6702 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6703 	    intr_xname);
   6704 	if (adapter->osdep.ihs[vector] == NULL) {
   6705 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6706 		error = ENXIO;
   6707 		goto err_out;
   6708 	}
   6709 	/* Round-robin affinity */
   6710 	kcpuset_zero(affinity);
   6711 	kcpuset_set(affinity, cpu_id % ncpu);
   6712 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6713 	    NULL);
   6714 
   6715 	aprint_normal_dev(dev,
   6716 	    "for link, interrupting at %s", intrstr);
   6717 	if (error == 0)
   6718 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6719 	else
   6720 		aprint_normal("\n");
   6721 
   6722 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6723 		adapter->mbx_si =
   6724 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6725 			ixgbe_handle_mbx, adapter);
   6726 		if (adapter->mbx_si == NULL) {
   6727 			aprint_error_dev(dev,
   6728 			    "could not establish software interrupts\n");
   6729 
   6730 			error = ENXIO;
   6731 			goto err_out;
   6732 		}
   6733 	}
   6734 
   6735 	kcpuset_destroy(affinity);
   6736 	aprint_normal_dev(dev,
   6737 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6738 
   6739 	return (0);
   6740 
   6741 err_out:
   6742 	kcpuset_destroy(affinity);
   6743 	ixgbe_free_softint(adapter);
   6744 	ixgbe_free_pciintr_resources(adapter);
   6745 	return (error);
   6746 } /* ixgbe_allocate_msix */
   6747 
   6748 /************************************************************************
   6749  * ixgbe_configure_interrupts
   6750  *
   6751  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6752  *   This will also depend on user settings.
   6753  ************************************************************************/
   6754 static int
   6755 ixgbe_configure_interrupts(struct adapter *adapter)
   6756 {
   6757 	device_t dev = adapter->dev;
   6758 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6759 	int want, queues, msgs;
   6760 
   6761 	/* Default to 1 queue if MSI-X setup fails */
   6762 	adapter->num_queues = 1;
   6763 
   6764 	/* Override by tuneable */
   6765 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6766 		goto msi;
   6767 
   6768 	/*
   6769 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6770 	 * interrupt slot.
   6771 	 */
   6772 	if (ncpu == 1)
   6773 		goto msi;
   6774 
   6775 	/* First try MSI-X */
   6776 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6777 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6778 	if (msgs < 2)
   6779 		goto msi;
   6780 
   6781 	adapter->msix_mem = (void *)1; /* XXX */
   6782 
   6783 	/* Figure out a reasonable auto config value */
   6784 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6785 
   6786 #ifdef	RSS
   6787 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6788 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6789 		queues = uimin(queues, rss_getnumbuckets());
   6790 #endif
   6791 	if (ixgbe_num_queues > queues) {
   6792 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6793 		ixgbe_num_queues = queues;
   6794 	}
   6795 
   6796 	if (ixgbe_num_queues != 0)
   6797 		queues = ixgbe_num_queues;
   6798 	else
   6799 		queues = uimin(queues,
   6800 		    uimin(mac->max_tx_queues, mac->max_rx_queues));
   6801 
   6802 	/* reflect correct sysctl value */
   6803 	ixgbe_num_queues = queues;
   6804 
   6805 	/*
   6806 	 * Want one vector (RX/TX pair) per queue
   6807 	 * plus an additional for Link.
   6808 	 */
   6809 	want = queues + 1;
   6810 	if (msgs >= want)
   6811 		msgs = want;
   6812 	else {
   6813 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6814 		    "%d vectors but %d queues wanted!\n",
   6815 		    msgs, want);
   6816 		goto msi;
   6817 	}
   6818 	adapter->num_queues = queues;
   6819 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6820 	return (0);
   6821 
   6822 	/*
   6823 	 * MSI-X allocation failed or provided us with
   6824 	 * less vectors than needed. Free MSI-X resources
   6825 	 * and we'll try enabling MSI.
   6826 	 */
   6827 msi:
   6828 	/* Without MSI-X, some features are no longer supported */
   6829 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6830 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6831 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6832 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6833 
   6834 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6835 	adapter->msix_mem = NULL; /* XXX */
   6836 	if (msgs > 1)
   6837 		msgs = 1;
   6838 	if (msgs != 0) {
   6839 		msgs = 1;
   6840 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6841 		return (0);
   6842 	}
   6843 
   6844 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6845 		aprint_error_dev(dev,
   6846 		    "Device does not support legacy interrupts.\n");
   6847 		return 1;
   6848 	}
   6849 
   6850 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6851 
   6852 	return (0);
   6853 } /* ixgbe_configure_interrupts */
   6854 
   6855 
   6856 /************************************************************************
   6857  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6858  *
   6859  *   Done outside of interrupt context since the driver might sleep
   6860  ************************************************************************/
   6861 static void
   6862 ixgbe_handle_link(void *context)
   6863 {
   6864 	struct adapter	*adapter = context;
   6865 	struct ixgbe_hw *hw = &adapter->hw;
   6866 
   6867 	IXGBE_CORE_LOCK(adapter);
   6868 	++adapter->link_sicount.ev_count;
   6869 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6870 	ixgbe_update_link_status(adapter);
   6871 
   6872 	/* Re-enable link interrupts */
   6873 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6874 
   6875 	IXGBE_CORE_UNLOCK(adapter);
   6876 } /* ixgbe_handle_link */
   6877 
   6878 #if 0
   6879 /************************************************************************
   6880  * ixgbe_rearm_queues
   6881  ************************************************************************/
/*
 * Software-trigger the interrupt cause bits (EICS, "interrupt cause
 * set") for the queues named in the 64-bit `queues' bitmap, so their
 * MSI-X handlers run again.  Currently unreferenced and compiled out
 * via the surrounding #if 0.
 */
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598: single 32-bit EICS; mask to the RTX queue bits. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/*
		 * Newer MACs: 64 queue bits split across two extended
		 * EICS registers (low 32 bits, then high 32 bits).
		 */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		/* Unknown MAC type: nothing to rearm. */
		break;
	}
} /* ixgbe_rearm_queues */
   6906 #endif
   6907