Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe.c revision 1.212
      1 /* $NetBSD: ixgbe.c,v 1.212 2019/09/18 06:06:59 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
     80 /************************************************************************
     81  * Driver version
     82  ************************************************************************/
     83 static const char ixgbe_driver_version[] = "4.0.1-k";
     84 /* XXX NetBSD: + 3.3.10 */
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
     95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     96 {
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    140 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    141 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    142 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    143 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    144 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    145 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    146 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    147 	/* required last entry */
    148 	{0, 0, 0, 0, 0}
    149 };
    150 
    151 /************************************************************************
    152  * Table of branding strings
    153  ************************************************************************/
    154 static const char    *ixgbe_strings[] = {
    155 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    156 };
    157 
    158 /************************************************************************
    159  * Function prototypes
    160  ************************************************************************/
    161 static int	ixgbe_probe(device_t, cfdata_t, void *);
    162 static void	ixgbe_attach(device_t, device_t, void *);
    163 static int	ixgbe_detach(device_t, int);
    164 #if 0
    165 static int	ixgbe_shutdown(device_t);
    166 #endif
    167 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    168 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    169 static int	ixgbe_ifflags_cb(struct ethercom *);
    170 static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
    171 static void	ixgbe_ifstop(struct ifnet *, int);
    172 static int	ixgbe_init(struct ifnet *);
    173 static void	ixgbe_init_locked(struct adapter *);
    174 static void	ixgbe_stop(void *);
    175 static void	ixgbe_init_device_features(struct adapter *);
    176 static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
    177 static void	ixgbe_add_media_types(struct adapter *);
    178 static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    179 static int	ixgbe_media_change(struct ifnet *);
    180 static int	ixgbe_allocate_pci_resources(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static void	ixgbe_free_softint(struct adapter *);
    183 static void	ixgbe_get_slot_info(struct adapter *);
    184 static int	ixgbe_allocate_msix(struct adapter *,
    185 		    const struct pci_attach_args *);
    186 static int	ixgbe_allocate_legacy(struct adapter *,
    187 		    const struct pci_attach_args *);
    188 static int	ixgbe_configure_interrupts(struct adapter *);
    189 static void	ixgbe_free_pciintr_resources(struct adapter *);
    190 static void	ixgbe_free_pci_resources(struct adapter *);
    191 static void	ixgbe_local_timer(void *);
    192 static void	ixgbe_local_timer1(void *);
    193 static void	ixgbe_recovery_mode_timer(void *);
    194 static int	ixgbe_setup_interface(device_t, struct adapter *);
    195 static void	ixgbe_config_gpie(struct adapter *);
    196 static void	ixgbe_config_dmac(struct adapter *);
    197 static void	ixgbe_config_delay_values(struct adapter *);
    198 static void	ixgbe_config_link(struct adapter *);
    199 static void	ixgbe_check_wol_support(struct adapter *);
    200 static int	ixgbe_setup_low_power_mode(struct adapter *);
    201 #if 0
    202 static void	ixgbe_rearm_queues(struct adapter *, u64);
    203 #endif
    204 
    205 static void	ixgbe_initialize_transmit_units(struct adapter *);
    206 static void	ixgbe_initialize_receive_units(struct adapter *);
    207 static void	ixgbe_enable_rx_drop(struct adapter *);
    208 static void	ixgbe_disable_rx_drop(struct adapter *);
    209 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    210 
    211 static void	ixgbe_enable_intr(struct adapter *);
    212 static void	ixgbe_disable_intr(struct adapter *);
    213 static void	ixgbe_update_stats_counters(struct adapter *);
    214 static void	ixgbe_set_multi(struct adapter *);
    215 static void	ixgbe_update_link_status(struct adapter *);
    216 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    217 static void	ixgbe_configure_ivars(struct adapter *);
    218 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    219 static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
    220 
    221 static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
    222 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    223 static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
    224 static int	ixgbe_register_vlan(struct adapter *, u16);
    225 static int	ixgbe_unregister_vlan(struct adapter *, u16);
    226 
    227 static void	ixgbe_add_device_sysctls(struct adapter *);
    228 static void	ixgbe_add_hw_stats(struct adapter *);
    229 static void	ixgbe_clear_evcnt(struct adapter *);
    230 static int	ixgbe_set_flowcntl(struct adapter *, int);
    231 static int	ixgbe_set_advertise(struct adapter *, int);
    232 static int	ixgbe_get_advertise(struct adapter *);
    233 
    234 /* Sysctl handlers */
    235 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    236 		     const char *, int *, int);
    237 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    238 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    239 static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    242 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    243 #ifdef IXGBE_DEBUG
    244 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    245 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    246 #endif
    247 static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    248 static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    249 static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    250 static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    251 static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    252 static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    253 static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
    254 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    255 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    256 
    257 /* Support for pluggable optic modules */
    258 static bool	ixgbe_sfp_probe(struct adapter *);
    259 
    260 /* Legacy (single vector) interrupt handler */
    261 static int	ixgbe_legacy_irq(void *);
    262 
    263 /* The MSI/MSI-X Interrupt handlers */
    264 static int	ixgbe_msix_que(void *);
    265 static int	ixgbe_msix_link(void *);
    266 
    267 /* Software interrupts for deferred work */
    268 static void	ixgbe_handle_que(void *);
    269 static void	ixgbe_handle_link(void *);
    270 static void	ixgbe_handle_msf(void *);
    271 static void	ixgbe_handle_mod(void *);
    272 static void	ixgbe_handle_phy(void *);
    273 
    274 /* Workqueue handler for deferred work */
    275 static void	ixgbe_handle_que_work(struct work *, void *);
    276 
    277 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    278 
    279 /************************************************************************
    280  *  NetBSD Device Interface Entry Points
    281  ************************************************************************/
    282 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    283     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    284     DVF_DETACH_SHUTDOWN);
    285 
    286 #if 0
    287 devclass_t ix_devclass;
    288 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    289 
    290 MODULE_DEPEND(ix, pci, 1, 1, 1);
    291 MODULE_DEPEND(ix, ether, 1, 1, 1);
    292 #ifdef DEV_NETMAP
    293 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    294 #endif
    295 #endif
    296 
    297 /*
    298  * TUNEABLE PARAMETERS:
    299  */
    300 
    301 /*
    302  * AIM: Adaptive Interrupt Moderation
    303  * which means that the interrupt rate
    304  * is varied over time based on the
    305  * traffic for that interrupt vector
    306  */
    307 static bool ixgbe_enable_aim = true;
    308 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    309 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    310     "Enable adaptive interrupt moderation");
    311 
    312 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    313 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    314     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    315 
    316 /* How many packets rxeof tries to clean at a time */
    317 static int ixgbe_rx_process_limit = 256;
    318 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    319     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    320 
    321 /* How many packets txeof tries to clean at a time */
    322 static int ixgbe_tx_process_limit = 256;
    323 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    324     &ixgbe_tx_process_limit, 0,
    325     "Maximum number of sent packets to process at a time, -1 means unlimited");
    326 
    327 /* Flow control setting, default to full */
    328 static int ixgbe_flow_control = ixgbe_fc_full;
    329 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    330     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    331 
    332 /* Which packet processing uses workqueue or softint */
    333 static bool ixgbe_txrx_workqueue = false;
    334 
    335 /*
    336  * Smart speed setting, default to on
    337  * this only works as a compile option
    338  * right now as its during attach, set
    339  * this to 'ixgbe_smart_speed_off' to
    340  * disable.
    341  */
    342 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    343 
    344 /*
    345  * MSI-X should be the default for best performance,
    346  * but this allows it to be forced off for testing.
    347  */
    348 static int ixgbe_enable_msix = 1;
    349 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    350     "Enable MSI-X interrupts");
    351 
    352 /*
    353  * Number of Queues, can be set to 0,
    354  * it then autoconfigures based on the
    355  * number of cpus with a max of 8. This
     356  * can be overridden manually here.
    357  */
    358 static int ixgbe_num_queues = 0;
    359 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    360     "Number of queues to configure, 0 indicates autoconfigure");
    361 
    362 /*
    363  * Number of TX descriptors per ring,
    364  * setting higher than RX as this seems
    365  * the better performing choice.
    366  */
    367 static int ixgbe_txd = PERFORM_TXD;
    368 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    369     "Number of transmit descriptors per queue");
    370 
    371 /* Number of RX descriptors per ring */
    372 static int ixgbe_rxd = PERFORM_RXD;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    374     "Number of receive descriptors per queue");
    375 
    376 /*
    377  * Defining this on will allow the use
    378  * of unsupported SFP+ modules, note that
    379  * doing so you are on your own :)
    380  */
    381 static int allow_unsupported_sfp = false;
    382 #define TUNABLE_INT(__x, __y)
    383 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    384 
    385 /*
    386  * Not sure if Flow Director is fully baked,
    387  * so we'll default to turning it off.
    388  */
    389 static int ixgbe_enable_fdir = 0;
    390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    391     "Enable Flow Director");
    392 
    393 /* Legacy Transmit (single queue) */
    394 static int ixgbe_enable_legacy_tx = 0;
    395 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    396     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    397 
    398 /* Receive-Side Scaling */
    399 static int ixgbe_enable_rss = 1;
    400 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    401     "Enable Receive-Side Scaling (RSS)");
    402 
    403 #if 0
    404 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    405 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    406 #endif
    407 
    408 #ifdef NET_MPSAFE
    409 #define IXGBE_MPSAFE		1
    410 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    411 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    412 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    413 #else
    414 #define IXGBE_CALLOUT_FLAGS	0
    415 #define IXGBE_SOFTINFT_FLAGS	0
    416 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    417 #endif
    418 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    419 
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the hardware for Receive-Side Scaling (RSS):
 *     - fill the redirection table (RETA, plus ERETA on X550-class
 *       parts with a 512-entry table) so hash results map onto the
 *       configured RX queues,
 *     - load the 40-byte hash key into the ten RSSRK registers,
 *     - select which packet types are hashed via MRQC.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/*
	 * force use default RSS key.
	 * (On NetBSD the stack-provided key is always used; the
	 * feat_en/cprng fallback below is the upstream FreeBSD path.)
	 */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the active queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/*
			 * Four 8-bit entries accumulated into one 32-bit
			 * word; entries 0-127 go to RETA, 128-511 to ERETA.
			 */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6
				| RSS_HASHTYPE_RSS_IPV6_EX
				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate each enabled hash type into its MRQC field-enable bit */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
    536 
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   Programs global receive filtering (FCTRL), jumbo-frame and CRC
 *   stripping options (HLREG0), then for each RX queue the descriptor
 *   ring base/length (RDBAL/RDBAH/RDLEN), buffer sizing (SRRCTL),
 *   statistics mapping (RQSMR) and head/tail pointers.  Finishes with
 *   RSS setup and the RXCSUM checksum-offload configuration.  Receives
 *   are left disabled here.
 ************************************************************************/
/* Add-then-shift mask used to round rx_mbuf_sz up to SRRCTL granularity */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	/* 82598-only FCTRL bits (other MACs keep these elsewhere) */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Buffer size in SRRCTL units, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	/* Packet-split receive types; register not present on 82598 */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
    666 
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   For each TX queue, programs the descriptor ring base/length
 *   (TDBAL/TDBAH/TDLEN), statistics mapping (TQSMR on 82598, TQSM
 *   elsewhere), head/tail pointers, and disables head writeback.
 *   On non-82598 MACs it then enables the DMA TX engine (DMATXCTL)
 *   and programs MTQC with the TX arbiter temporarily disabled.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware queue index */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Ring starts out with descriptor space available */
		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
    758 
    759 /************************************************************************
    760  * ixgbe_attach - Device initialization routine
    761  *
    762  *   Called when the driver is being loaded.
    763  *   Identifies the type of hardware, allocates all resources
    764  *   and initializes the hardware.
    765  *
    766  *   return 0 on success, positive on failure
    767  ************************************************************************/
    768 static void
    769 ixgbe_attach(device_t parent, device_t dev, void *aux)
    770 {
    771 	struct adapter	*adapter;
    772 	struct ixgbe_hw *hw;
    773 	int		error = -1;
    774 	u32		ctrl_ext;
    775 	u16		high, low, nvmreg;
    776 	pcireg_t	id, subid;
    777 	const ixgbe_vendor_info_t *ent;
    778 	struct pci_attach_args *pa = aux;
    779 	const char *str;
    780 	char buf[256];
    781 
    782 	INIT_DEBUGOUT("ixgbe_attach: begin");
    783 
    784 	/* Allocate, clear, and link in our adapter structure */
    785 	adapter = device_private(dev);
    786 	adapter->hw.back = adapter;
    787 	adapter->dev = dev;
    788 	hw = &adapter->hw;
    789 	adapter->osdep.pc = pa->pa_pc;
    790 	adapter->osdep.tag = pa->pa_tag;
    791 	if (pci_dma64_available(pa))
    792 		adapter->osdep.dmat = pa->pa_dmat64;
    793 	else
    794 		adapter->osdep.dmat = pa->pa_dmat;
    795 	adapter->osdep.attached = false;
    796 
    797 	ent = ixgbe_lookup(pa);
    798 
    799 	KASSERT(ent != NULL);
    800 
    801 	aprint_normal(": %s, Version - %s\n",
    802 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    803 
    804 	/* Core Lock Init*/
    805 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    806 
    807 	/* Set up the timer callout */
    808 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    809 
    810 	/* Determine hardware revision */
    811 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    812 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    813 
    814 	hw->vendor_id = PCI_VENDOR(id);
    815 	hw->device_id = PCI_PRODUCT(id);
    816 	hw->revision_id =
    817 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    818 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    819 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    820 
    821 	/*
    822 	 * Make sure BUSMASTER is set
    823 	 */
    824 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    825 
    826 	/* Do base PCI setup - map BAR0 */
    827 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    828 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    829 		error = ENXIO;
    830 		goto err_out;
    831 	}
    832 
    833 	/* let hardware know driver is loaded */
    834 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    835 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    836 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    837 
    838 	/*
    839 	 * Initialize the shared code
    840 	 */
    841 	if (ixgbe_init_shared_code(hw) != 0) {
    842 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    843 		error = ENXIO;
    844 		goto err_out;
    845 	}
    846 
    847 	switch (hw->mac.type) {
    848 	case ixgbe_mac_82598EB:
    849 		str = "82598EB";
    850 		break;
    851 	case ixgbe_mac_82599EB:
    852 		str = "82599EB";
    853 		break;
    854 	case ixgbe_mac_X540:
    855 		str = "X540";
    856 		break;
    857 	case ixgbe_mac_X550:
    858 		str = "X550";
    859 		break;
    860 	case ixgbe_mac_X550EM_x:
    861 		str = "X550EM";
    862 		break;
    863 	case ixgbe_mac_X550EM_a:
    864 		str = "X550EM A";
    865 		break;
    866 	default:
    867 		str = "Unknown";
    868 		break;
    869 	}
    870 	aprint_normal_dev(dev, "device %s\n", str);
    871 
    872 	if (hw->mbx.ops.init_params)
    873 		hw->mbx.ops.init_params(hw);
    874 
    875 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    876 
    877 	/* Pick up the 82599 settings */
    878 	if (hw->mac.type != ixgbe_mac_82598EB) {
    879 		hw->phy.smart_speed = ixgbe_smart_speed;
    880 		adapter->num_segs = IXGBE_82599_SCATTER;
    881 	} else
    882 		adapter->num_segs = IXGBE_82598_SCATTER;
    883 
    884 	/* Ensure SW/FW semaphore is free */
    885 	ixgbe_init_swfw_semaphore(hw);
    886 
    887 	hw->mac.ops.set_lan_id(hw);
    888 	ixgbe_init_device_features(adapter);
    889 
    890 	if (ixgbe_configure_interrupts(adapter)) {
    891 		error = ENXIO;
    892 		goto err_out;
    893 	}
    894 
    895 	/* Allocate multicast array memory. */
    896 	adapter->mta = malloc(sizeof(*adapter->mta) *
    897 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    898 	if (adapter->mta == NULL) {
    899 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    900 		error = ENOMEM;
    901 		goto err_out;
    902 	}
    903 
    904 	/* Enable WoL (if supported) */
    905 	ixgbe_check_wol_support(adapter);
    906 
    907 	/* Register for VLAN events */
    908 	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
    909 
    910 	/* Verify adapter fan is still functional (if applicable) */
    911 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    912 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    913 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    914 	}
    915 
    916 	/* Set an initial default flow control value */
    917 	hw->fc.requested_mode = ixgbe_flow_control;
    918 
    919 	/* Sysctls for limiting the amount of work done in the taskqueues */
    920 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    921 	    "max number of rx packets to process",
    922 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    923 
    924 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    925 	    "max number of tx packets to process",
    926 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    927 
    928 	/* Do descriptor calc and sanity checks */
    929 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    930 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    931 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    932 		adapter->num_tx_desc = DEFAULT_TXD;
    933 	} else
    934 		adapter->num_tx_desc = ixgbe_txd;
    935 
    936 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    937 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    938 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    939 		adapter->num_rx_desc = DEFAULT_RXD;
    940 	} else
    941 		adapter->num_rx_desc = ixgbe_rxd;
    942 
    943 	/* Allocate our TX/RX Queues */
    944 	if (ixgbe_allocate_queues(adapter)) {
    945 		error = ENOMEM;
    946 		goto err_out;
    947 	}
    948 
    949 	hw->phy.reset_if_overtemp = TRUE;
    950 	error = ixgbe_reset_hw(hw);
    951 	hw->phy.reset_if_overtemp = FALSE;
    952 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    953 		/*
    954 		 * No optics in this port, set up
    955 		 * so the timer routine will probe
    956 		 * for later insertion.
    957 		 */
    958 		adapter->sfp_probe = TRUE;
    959 		error = IXGBE_SUCCESS;
    960 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    961 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    962 		error = EIO;
    963 		goto err_late;
    964 	} else if (error) {
    965 		aprint_error_dev(dev, "Hardware initialization failed\n");
    966 		error = EIO;
    967 		goto err_late;
    968 	}
    969 
    970 	/* Make sure we have a good EEPROM before we read from it */
    971 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    972 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    973 		error = EIO;
    974 		goto err_late;
    975 	}
    976 
    977 	aprint_normal("%s:", device_xname(dev));
    978 	/* NVM Image Version */
    979 	high = low = 0;
    980 	switch (hw->mac.type) {
    981 	case ixgbe_mac_X540:
    982 	case ixgbe_mac_X550EM_a:
    983 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    984 		if (nvmreg == 0xffff)
    985 			break;
    986 		high = (nvmreg >> 12) & 0x0f;
    987 		low = (nvmreg >> 4) & 0xff;
    988 		id = nvmreg & 0x0f;
    989 		aprint_normal(" NVM Image Version %u.", high);
    990 		if (hw->mac.type == ixgbe_mac_X540)
    991 			str = "%x";
    992 		else
    993 			str = "%02x";
    994 		aprint_normal(str, low);
    995 		aprint_normal(" ID 0x%x,", id);
    996 		break;
    997 	case ixgbe_mac_X550EM_x:
    998 	case ixgbe_mac_X550:
    999 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
   1000 		if (nvmreg == 0xffff)
   1001 			break;
   1002 		high = (nvmreg >> 12) & 0x0f;
   1003 		low = nvmreg & 0xff;
   1004 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
   1005 		break;
   1006 	default:
   1007 		break;
   1008 	}
   1009 	hw->eeprom.nvm_image_ver_high = high;
   1010 	hw->eeprom.nvm_image_ver_low = low;
   1011 
   1012 	/* PHY firmware revision */
   1013 	switch (hw->mac.type) {
   1014 	case ixgbe_mac_X540:
   1015 	case ixgbe_mac_X550:
   1016 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1017 		if (nvmreg == 0xffff)
   1018 			break;
   1019 		high = (nvmreg >> 12) & 0x0f;
   1020 		low = (nvmreg >> 4) & 0xff;
   1021 		id = nvmreg & 0x000f;
   1022 		aprint_normal(" PHY FW Revision %u.", high);
   1023 		if (hw->mac.type == ixgbe_mac_X540)
   1024 			str = "%x";
   1025 		else
   1026 			str = "%02x";
   1027 		aprint_normal(str, low);
   1028 		aprint_normal(" ID 0x%x,", id);
   1029 		break;
   1030 	default:
   1031 		break;
   1032 	}
   1033 
   1034 	/* NVM Map version & OEM NVM Image version */
   1035 	switch (hw->mac.type) {
   1036 	case ixgbe_mac_X550:
   1037 	case ixgbe_mac_X550EM_x:
   1038 	case ixgbe_mac_X550EM_a:
   1039 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1040 		if (nvmreg != 0xffff) {
   1041 			high = (nvmreg >> 12) & 0x0f;
   1042 			low = nvmreg & 0x00ff;
   1043 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1044 		}
   1045 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1046 		if (nvmreg != 0xffff) {
   1047 			high = (nvmreg >> 12) & 0x0f;
   1048 			low = nvmreg & 0x00ff;
   1049 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1050 			    low);
   1051 		}
   1052 		break;
   1053 	default:
   1054 		break;
   1055 	}
   1056 
   1057 	/* Print the ETrackID */
   1058 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1059 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1060 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1061 
   1062 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1063 		error = ixgbe_allocate_msix(adapter, pa);
   1064 		if (error) {
   1065 			/* Free allocated queue structures first */
   1066 			ixgbe_free_transmit_structures(adapter);
   1067 			ixgbe_free_receive_structures(adapter);
   1068 			free(adapter->queues, M_DEVBUF);
   1069 
   1070 			/* Fallback to legacy interrupt */
   1071 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1072 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1073 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1074 			adapter->num_queues = 1;
   1075 
   1076 			/* Allocate our TX/RX Queues again */
   1077 			if (ixgbe_allocate_queues(adapter)) {
   1078 				error = ENOMEM;
   1079 				goto err_out;
   1080 			}
   1081 		}
   1082 	}
   1083 	/* Recovery mode */
   1084 	switch (adapter->hw.mac.type) {
   1085 	case ixgbe_mac_X550:
   1086 	case ixgbe_mac_X550EM_x:
   1087 	case ixgbe_mac_X550EM_a:
   1088 		/* >= 2.00 */
   1089 		if (hw->eeprom.nvm_image_ver_high >= 2) {
   1090 			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
   1091 			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
   1092 		}
   1093 		break;
   1094 	default:
   1095 		break;
   1096 	}
   1097 
   1098 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1099 		error = ixgbe_allocate_legacy(adapter, pa);
   1100 	if (error)
   1101 		goto err_late;
   1102 
   1103 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1104 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1105 	    ixgbe_handle_link, adapter);
   1106 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1107 	    ixgbe_handle_mod, adapter);
   1108 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1109 	    ixgbe_handle_msf, adapter);
   1110 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1111 	    ixgbe_handle_phy, adapter);
   1112 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1113 		adapter->fdir_si =
   1114 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1115 			ixgbe_reinit_fdir, adapter);
   1116 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1117 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1118 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1119 		&& (adapter->fdir_si == NULL))) {
   1120 		aprint_error_dev(dev,
   1121 		    "could not establish software interrupts ()\n");
   1122 		goto err_out;
   1123 	}
   1124 
   1125 	error = ixgbe_start_hw(hw);
   1126 	switch (error) {
   1127 	case IXGBE_ERR_EEPROM_VERSION:
   1128 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1129 		    "LOM.  Please be aware there may be issues associated "
   1130 		    "with your hardware.\nIf you are experiencing problems "
   1131 		    "please contact your Intel or hardware representative "
   1132 		    "who provided you with this hardware.\n");
   1133 		break;
   1134 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1135 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1136 		error = EIO;
   1137 		goto err_late;
   1138 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1139 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1140 		/* falls thru */
   1141 	default:
   1142 		break;
   1143 	}
   1144 
   1145 	/* Setup OS specific network interface */
   1146 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1147 		goto err_late;
   1148 
   1149 	/*
   1150 	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
   1151 	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
   1152 	 */
   1153 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1154 		uint16_t id1, id2;
   1155 		int oui, model, rev;
   1156 		const char *descr;
   1157 
   1158 		id1 = hw->phy.id >> 16;
   1159 		id2 = hw->phy.id & 0xffff;
   1160 		oui = MII_OUI(id1, id2);
   1161 		model = MII_MODEL(id2);
   1162 		rev = MII_REV(id2);
   1163 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1164 			aprint_normal_dev(dev,
   1165 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1166 			    descr, oui, model, rev);
   1167 		else
   1168 			aprint_normal_dev(dev,
   1169 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1170 			    oui, model, rev);
   1171 	}
   1172 
   1173 	/* Enable the optics for 82599 SFP+ fiber */
   1174 	ixgbe_enable_tx_laser(hw);
   1175 
   1176 	/* Enable EEE power saving */
   1177 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   1178 		hw->mac.ops.setup_eee(hw,
   1179 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   1180 
   1181 	/* Enable power to the phy. */
   1182 	ixgbe_set_phy_power(hw, TRUE);
   1183 
   1184 	/* Initialize statistics */
   1185 	ixgbe_update_stats_counters(adapter);
   1186 
   1187 	/* Check PCIE slot type/speed/width */
   1188 	ixgbe_get_slot_info(adapter);
   1189 
   1190 	/*
   1191 	 * Do time init and sysctl init here, but
   1192 	 * only on the first port of a bypass adapter.
   1193 	 */
   1194 	ixgbe_bypass_init(adapter);
   1195 
   1196 	/* Set an initial dmac value */
   1197 	adapter->dmac = 0;
   1198 	/* Set initial advertised speeds (if applicable) */
   1199 	adapter->advertise = ixgbe_get_advertise(adapter);
   1200 
   1201 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1202 		ixgbe_define_iov_schemas(dev, &error);
   1203 
   1204 	/* Add sysctls */
   1205 	ixgbe_add_device_sysctls(adapter);
   1206 	ixgbe_add_hw_stats(adapter);
   1207 
   1208 	/* For Netmap */
   1209 	adapter->init_locked = ixgbe_init_locked;
   1210 	adapter->stop_locked = ixgbe_stop;
   1211 
   1212 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1213 		ixgbe_netmap_attach(adapter);
   1214 
   1215 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1216 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1217 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1218 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1219 
   1220 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1221 		pmf_class_network_register(dev, adapter->ifp);
   1222 	else
   1223 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1224 
   1225 	/* Init recovery mode timer and state variable */
   1226 	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
   1227 		adapter->recovery_mode = 0;
   1228 
   1229 		/* Set up the timer callout */
   1230 		callout_init(&adapter->recovery_mode_timer,
   1231 		    IXGBE_CALLOUT_FLAGS);
   1232 
   1233 		/* Start the task */
   1234 		callout_reset(&adapter->recovery_mode_timer, hz,
   1235 		    ixgbe_recovery_mode_timer, adapter);
   1236 	}
   1237 
   1238 	INIT_DEBUGOUT("ixgbe_attach: end");
   1239 	adapter->osdep.attached = true;
   1240 
   1241 	return;
   1242 
   1243 err_late:
   1244 	ixgbe_free_transmit_structures(adapter);
   1245 	ixgbe_free_receive_structures(adapter);
   1246 	free(adapter->queues, M_DEVBUF);
   1247 err_out:
   1248 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1249 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1250 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1251 	ixgbe_free_softint(adapter);
   1252 	ixgbe_free_pci_resources(adapter);
   1253 	if (adapter->mta != NULL)
   1254 		free(adapter->mta, M_DEVBUF);
   1255 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1256 
   1257 	return;
   1258 } /* ixgbe_attach */
   1259 
   1260 /************************************************************************
   1261  * ixgbe_check_wol_support
   1262  *
   1263  *   Checks whether the adapter's ports are capable of
   1264  *   Wake On LAN by reading the adapter's NVM.
   1265  *
   1266  *   Sets each port's hw->wol_enabled value depending
   1267  *   on the value read here.
   1268  ************************************************************************/
   1269 static void
   1270 ixgbe_check_wol_support(struct adapter *adapter)
   1271 {
   1272 	struct ixgbe_hw *hw = &adapter->hw;
   1273 	u16		dev_caps = 0;
   1274 
   1275 	/* Find out WoL support for port */
   1276 	adapter->wol_support = hw->wol_enabled = 0;
   1277 	ixgbe_get_device_caps(hw, &dev_caps);
   1278 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1279 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1280 	     hw->bus.func == 0))
   1281 		adapter->wol_support = hw->wol_enabled = 1;
   1282 
   1283 	/* Save initial wake up filter configuration */
   1284 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1285 
   1286 	return;
   1287 } /* ixgbe_check_wol_support */
   1288 
   1289 /************************************************************************
   1290  * ixgbe_setup_interface
   1291  *
   1292  *   Setup networking device structure and register an interface.
   1293  ************************************************************************/
   1294 static int
   1295 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1296 {
   1297 	struct ethercom *ec = &adapter->osdep.ec;
   1298 	struct ifnet   *ifp;
   1299 	int rv;
   1300 
   1301 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1302 
   1303 	ifp = adapter->ifp = &ec->ec_if;
   1304 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1305 	ifp->if_baudrate = IF_Gbps(10);
   1306 	ifp->if_init = ixgbe_init;
   1307 	ifp->if_stop = ixgbe_ifstop;
   1308 	ifp->if_softc = adapter;
   1309 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1310 #ifdef IXGBE_MPSAFE
   1311 	ifp->if_extflags = IFEF_MPSAFE;
   1312 #endif
   1313 	ifp->if_ioctl = ixgbe_ioctl;
   1314 #if __FreeBSD_version >= 1100045
   1315 	/* TSO parameters */
   1316 	ifp->if_hw_tsomax = 65518;
   1317 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1318 	ifp->if_hw_tsomaxsegsize = 2048;
   1319 #endif
   1320 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1321 #if 0
   1322 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1323 #endif
   1324 	} else {
   1325 		ifp->if_transmit = ixgbe_mq_start;
   1326 #if 0
   1327 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1328 #endif
   1329 	}
   1330 	ifp->if_start = ixgbe_legacy_start;
   1331 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1332 	IFQ_SET_READY(&ifp->if_snd);
   1333 
   1334 	rv = if_initialize(ifp);
   1335 	if (rv != 0) {
   1336 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1337 		return rv;
   1338 	}
   1339 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1340 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1341 	/*
   1342 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1343 	 * used.
   1344 	 */
   1345 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1346 
   1347 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1348 
   1349 	/*
   1350 	 * Tell the upper layer(s) we support long frames.
   1351 	 */
   1352 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1353 
   1354 	/* Set capability flags */
   1355 	ifp->if_capabilities |= IFCAP_RXCSUM
   1356 			     |	IFCAP_TXCSUM
   1357 			     |	IFCAP_TSOv4
   1358 			     |	IFCAP_TSOv6;
   1359 	ifp->if_capenable = 0;
   1360 
   1361 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1362 			    |  ETHERCAP_VLAN_HWCSUM
   1363 			    |  ETHERCAP_JUMBO_MTU
   1364 			    |  ETHERCAP_VLAN_MTU;
   1365 
   1366 	/* Enable the above capabilities by default */
   1367 	ec->ec_capenable = ec->ec_capabilities;
   1368 
   1369 	/*
   1370 	 * Don't turn this on by default, if vlans are
   1371 	 * created on another pseudo device (eg. lagg)
   1372 	 * then vlan events are not passed thru, breaking
   1373 	 * operation, but with HW FILTER off it works. If
   1374 	 * using vlans directly on the ixgbe driver you can
   1375 	 * enable this and get full hardware tag filtering.
   1376 	 */
   1377 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1378 
   1379 	/*
   1380 	 * Specify the media types supported by this adapter and register
   1381 	 * callbacks to update media and link information
   1382 	 */
   1383 	ec->ec_ifmedia = &adapter->media;
   1384 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1385 	    ixgbe_media_status);
   1386 
   1387 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1388 	ixgbe_add_media_types(adapter);
   1389 
   1390 	/* Set autoselect media by default */
   1391 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1392 
   1393 	if_register(ifp);
   1394 
   1395 	return (0);
   1396 } /* ixgbe_setup_interface */
   1397 
   1398 /************************************************************************
   1399  * ixgbe_add_media_types
   1400  ************************************************************************/
   1401 static void
   1402 ixgbe_add_media_types(struct adapter *adapter)
   1403 {
   1404 	struct ixgbe_hw *hw = &adapter->hw;
   1405 	u64		layer;
   1406 
   1407 	layer = adapter->phy_layer;
   1408 
   1409 #define	ADD(mm, dd)							\
   1410 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1411 
   1412 	ADD(IFM_NONE, 0);
   1413 
   1414 	/* Media types with matching NetBSD media defines */
   1415 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1416 		ADD(IFM_10G_T | IFM_FDX, 0);
   1417 	}
   1418 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1419 		ADD(IFM_1000_T | IFM_FDX, 0);
   1420 	}
   1421 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1422 		ADD(IFM_100_TX | IFM_FDX, 0);
   1423 	}
   1424 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1425 		ADD(IFM_10_T | IFM_FDX, 0);
   1426 	}
   1427 
   1428 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1429 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1430 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1431 	}
   1432 
   1433 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1434 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1435 		if (hw->phy.multispeed_fiber) {
   1436 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1437 		}
   1438 	}
   1439 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1440 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1441 		if (hw->phy.multispeed_fiber) {
   1442 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1443 		}
   1444 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1445 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1446 	}
   1447 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1448 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1449 	}
   1450 
   1451 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1452 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1453 	}
   1454 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1455 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1456 	}
   1457 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1458 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1459 	}
   1460 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1461 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1462 	}
   1463 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1464 		ADD(IFM_2500_T | IFM_FDX, 0);
   1465 	}
   1466 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1467 		ADD(IFM_5000_T | IFM_FDX, 0);
   1468 	}
   1469 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1470 		ADD(IFM_1000_BX10 | IFM_FDX, 0);
   1471 	/* XXX no ifmedia_set? */
   1472 
   1473 	ADD(IFM_AUTO, 0);
   1474 
   1475 #undef ADD
   1476 } /* ixgbe_add_media_types */
   1477 
   1478 /************************************************************************
   1479  * ixgbe_is_sfp
   1480  ************************************************************************/
   1481 static inline bool
   1482 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1483 {
   1484 	switch (hw->mac.type) {
   1485 	case ixgbe_mac_82598EB:
   1486 		if (hw->phy.type == ixgbe_phy_nl)
   1487 			return (TRUE);
   1488 		return (FALSE);
   1489 	case ixgbe_mac_82599EB:
   1490 	case ixgbe_mac_X550EM_x:
   1491 	case ixgbe_mac_X550EM_a:
   1492 		switch (hw->mac.ops.get_media_type(hw)) {
   1493 		case ixgbe_media_type_fiber:
   1494 		case ixgbe_media_type_fiber_qsfp:
   1495 			return (TRUE);
   1496 		default:
   1497 			return (FALSE);
   1498 		}
   1499 	default:
   1500 		return (FALSE);
   1501 	}
   1502 } /* ixgbe_is_sfp */
   1503 
   1504 /************************************************************************
   1505  * ixgbe_config_link
   1506  ************************************************************************/
   1507 static void
   1508 ixgbe_config_link(struct adapter *adapter)
   1509 {
   1510 	struct ixgbe_hw *hw = &adapter->hw;
   1511 	u32		autoneg, err = 0;
   1512 	bool		sfp, negotiate = false;
   1513 
   1514 	sfp = ixgbe_is_sfp(hw);
   1515 
   1516 	if (sfp) {
   1517 		if (hw->phy.multispeed_fiber) {
   1518 			ixgbe_enable_tx_laser(hw);
   1519 			kpreempt_disable();
   1520 			softint_schedule(adapter->msf_si);
   1521 			kpreempt_enable();
   1522 		}
   1523 		kpreempt_disable();
   1524 		softint_schedule(adapter->mod_si);
   1525 		kpreempt_enable();
   1526 	} else {
   1527 		struct ifmedia	*ifm = &adapter->media;
   1528 
   1529 		if (hw->mac.ops.check_link)
   1530 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1531 			    &adapter->link_up, FALSE);
   1532 		if (err)
   1533 			return;
   1534 
   1535 		/*
   1536 		 * Check if it's the first call. If it's the first call,
   1537 		 * get value for auto negotiation.
   1538 		 */
   1539 		autoneg = hw->phy.autoneg_advertised;
   1540 		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
   1541 		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
   1542 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1543 			    &negotiate);
   1544 		if (err)
   1545 			return;
   1546 		if (hw->mac.ops.setup_link)
   1547 			err = hw->mac.ops.setup_link(hw, autoneg,
   1548 			    adapter->link_up);
   1549 	}
   1550 
   1551 } /* ixgbe_config_link */
   1552 
   1553 /************************************************************************
   1554  * ixgbe_update_stats_counters - Update board statistics counters.
   1555  ************************************************************************/
   1556 static void
   1557 ixgbe_update_stats_counters(struct adapter *adapter)
   1558 {
   1559 	struct ifnet	      *ifp = adapter->ifp;
   1560 	struct ixgbe_hw	      *hw = &adapter->hw;
   1561 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1562 	u32		      missed_rx = 0, bprc, lxon, lxoff, total;
   1563 	u64		      total_missed_rx = 0;
   1564 	uint64_t	      crcerrs, rlec;
   1565 	unsigned int	      queue_counters;
   1566 	int		      i;
   1567 
   1568 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1569 	stats->crcerrs.ev_count += crcerrs;
   1570 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1571 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1572 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1573 	if (hw->mac.type >= ixgbe_mac_X550)
   1574 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1575 
   1576 	/* 16 registers exist */
   1577 	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
   1578 	for (i = 0; i < queue_counters; i++) {
   1579 		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1580 		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1581 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1582 			stats->qprdc[i].ev_count
   1583 			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1584 		}
   1585 	}
   1586 
   1587 	/* 8 registers exist */
   1588 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1589 		uint32_t mp;
   1590 
   1591 		/* MPC */
   1592 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1593 		/* global total per queue */
   1594 		stats->mpc[i].ev_count += mp;
   1595 		/* running comprehensive total for stats display */
   1596 		total_missed_rx += mp;
   1597 
   1598 		if (hw->mac.type == ixgbe_mac_82598EB)
   1599 			stats->rnbc[i].ev_count
   1600 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1601 
   1602 		stats->pxontxc[i].ev_count
   1603 		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   1604 		stats->pxofftxc[i].ev_count
   1605 		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   1606 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1607 			stats->pxonrxc[i].ev_count
   1608 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
   1609 			stats->pxoffrxc[i].ev_count
   1610 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
   1611 			stats->pxon2offc[i].ev_count
   1612 			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   1613 		} else {
   1614 			stats->pxonrxc[i].ev_count
   1615 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   1616 			stats->pxoffrxc[i].ev_count
   1617 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   1618 		}
   1619 	}
   1620 	stats->mpctotal.ev_count += total_missed_rx;
   1621 
   1622 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1623 	if ((adapter->link_active == LINK_STATE_UP)
   1624 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1625 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1626 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1627 	}
   1628 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1629 	stats->rlec.ev_count += rlec;
   1630 
   1631 	/* Hardware workaround, gprc counts missed packets */
   1632 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1633 
   1634 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1635 	stats->lxontxc.ev_count += lxon;
   1636 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1637 	stats->lxofftxc.ev_count += lxoff;
   1638 	total = lxon + lxoff;
   1639 
   1640 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1641 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1642 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1643 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1644 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1645 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1646 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1647 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1648 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1649 	} else {
   1650 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1651 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1652 		/* 82598 only has a counter in the high register */
   1653 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1654 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1655 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1656 	}
   1657 
   1658 	/*
   1659 	 * Workaround: mprc hardware is incorrectly counting
   1660 	 * broadcasts, so for now we subtract those.
   1661 	 */
   1662 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1663 	stats->bprc.ev_count += bprc;
   1664 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1665 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1666 
   1667 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1668 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1669 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1670 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1671 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1672 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1673 
   1674 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1675 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1676 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1677 
   1678 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1679 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1680 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1681 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1682 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1683 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1684 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1685 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1686 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1687 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1688 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1689 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1690 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1691 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1692 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1693 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1694 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1695 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1696 	/* Only read FCOE on 82599 */
   1697 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1698 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1699 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1700 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1701 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1702 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1703 	}
   1704 
   1705 	/* Fill out the OS statistics structure */
   1706 	/*
   1707 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1708 	 * adapter->stats counters. It's required to make ifconfig -z
   1709 	 * (SOICZIFDATA) work.
   1710 	 */
   1711 	ifp->if_collisions = 0;
   1712 
   1713 	/* Rx Errors */
   1714 	ifp->if_iqdrops += total_missed_rx;
   1715 	ifp->if_ierrors += crcerrs + rlec;
   1716 } /* ixgbe_update_stats_counters */
   1717 
   1718 /************************************************************************
   1719  * ixgbe_add_hw_stats
   1720  *
   1721  *   Add sysctl variables, one per statistic, to the system.
   1722  ************************************************************************/
   1723 static void
   1724 ixgbe_add_hw_stats(struct adapter *adapter)
   1725 {
   1726 	device_t dev = adapter->dev;
   1727 	const struct sysctlnode *rnode, *cnode;
   1728 	struct sysctllog **log = &adapter->sysctllog;
   1729 	struct tx_ring *txr = adapter->tx_rings;
   1730 	struct rx_ring *rxr = adapter->rx_rings;
   1731 	struct ixgbe_hw *hw = &adapter->hw;
   1732 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1733 	const char *xname = device_xname(dev);
   1734 	int i;
   1735 
   1736 	/* Driver Statistics */
   1737 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1738 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1739 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1740 	    NULL, xname, "m_defrag() failed");
   1741 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1742 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1743 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1744 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1745 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1746 	    NULL, xname, "Driver tx dma hard fail other");
   1747 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1748 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1749 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1750 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1751 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1752 	    NULL, xname, "Watchdog timeouts");
   1753 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1754 	    NULL, xname, "TSO errors");
   1755 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1756 	    NULL, xname, "Link MSI-X IRQ Handled");
   1757 	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
   1758 	    NULL, xname, "Link softint");
   1759 	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
   1760 	    NULL, xname, "module softint");
   1761 	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
   1762 	    NULL, xname, "multimode softint");
   1763 	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
   1764 	    NULL, xname, "external PHY softint");
   1765 
   1766 	/* Max number of traffic class is 8 */
   1767 	KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
   1768 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1769 		snprintf(adapter->tcs[i].evnamebuf,
   1770 		    sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
   1771 		    xname, i);
   1772 		if (i < __arraycount(stats->mpc)) {
   1773 			evcnt_attach_dynamic(&stats->mpc[i],
   1774 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1775 			    "RX Missed Packet Count");
   1776 			if (hw->mac.type == ixgbe_mac_82598EB)
   1777 				evcnt_attach_dynamic(&stats->rnbc[i],
   1778 				    EVCNT_TYPE_MISC, NULL,
   1779 				    adapter->tcs[i].evnamebuf,
   1780 				    "Receive No Buffers");
   1781 		}
   1782 		if (i < __arraycount(stats->pxontxc)) {
   1783 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1784 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1785 			    "pxontxc");
   1786 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1787 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1788 			    "pxonrxc");
   1789 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1790 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1791 			    "pxofftxc");
   1792 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1793 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1794 			    "pxoffrxc");
   1795 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1796 				evcnt_attach_dynamic(&stats->pxon2offc[i],
   1797 				    EVCNT_TYPE_MISC, NULL,
   1798 				    adapter->tcs[i].evnamebuf,
   1799 			    "pxon2offc");
   1800 		}
   1801 	}
   1802 
   1803 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1804 #ifdef LRO
   1805 		struct lro_ctrl *lro = &rxr->lro;
   1806 #endif /* LRO */
   1807 
   1808 		snprintf(adapter->queues[i].evnamebuf,
   1809 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1810 		    xname, i);
   1811 		snprintf(adapter->queues[i].namebuf,
   1812 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1813 
   1814 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1815 			aprint_error_dev(dev, "could not create sysctl root\n");
   1816 			break;
   1817 		}
   1818 
   1819 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1820 		    0, CTLTYPE_NODE,
   1821 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1822 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1823 			break;
   1824 
   1825 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1826 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1827 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1828 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1829 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1830 			break;
   1831 
   1832 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1833 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1834 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1835 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1836 		    0, CTL_CREATE, CTL_EOL) != 0)
   1837 			break;
   1838 
   1839 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1840 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1841 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1842 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1843 		    0, CTL_CREATE, CTL_EOL) != 0)
   1844 			break;
   1845 
   1846 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1847 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1848 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   1849 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1850 		    "Handled queue in softint");
   1851 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   1852 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   1853 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1854 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1855 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1856 		    NULL, adapter->queues[i].evnamebuf,
   1857 		    "Queue No Descriptor Available");
   1858 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1859 		    NULL, adapter->queues[i].evnamebuf,
   1860 		    "Queue Packets Transmitted");
   1861 #ifndef IXGBE_LEGACY_TX
   1862 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1863 		    NULL, adapter->queues[i].evnamebuf,
   1864 		    "Packets dropped in pcq");
   1865 #endif
   1866 
   1867 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1868 		    CTLFLAG_READONLY,
   1869 		    CTLTYPE_INT,
   1870 		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
   1871 			ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   1872 		    CTL_CREATE, CTL_EOL) != 0)
   1873 			break;
   1874 
   1875 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1876 		    CTLFLAG_READONLY,
   1877 		    CTLTYPE_INT,
   1878 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1879 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1880 		    CTL_CREATE, CTL_EOL) != 0)
   1881 			break;
   1882 
   1883 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1884 		    CTLFLAG_READONLY,
   1885 		    CTLTYPE_INT,
   1886 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1887 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1888 		    CTL_CREATE, CTL_EOL) != 0)
   1889 			break;
   1890 
   1891 		if (i < __arraycount(stats->qprc)) {
   1892 			evcnt_attach_dynamic(&stats->qprc[i],
   1893 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1894 			    "qprc");
   1895 			evcnt_attach_dynamic(&stats->qptc[i],
   1896 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1897 			    "qptc");
   1898 			evcnt_attach_dynamic(&stats->qbrc[i],
   1899 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1900 			    "qbrc");
   1901 			evcnt_attach_dynamic(&stats->qbtc[i],
   1902 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1903 			    "qbtc");
   1904 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1905 				evcnt_attach_dynamic(&stats->qprdc[i],
   1906 				    EVCNT_TYPE_MISC, NULL,
   1907 				    adapter->queues[i].evnamebuf, "qprdc");
   1908 		}
   1909 
   1910 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1911 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1912 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1913 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1914 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1915 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1916 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1917 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1918 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1919 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1920 #ifdef LRO
   1921 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1922 				CTLFLAG_RD, &lro->lro_queued, 0,
   1923 				"LRO Queued");
   1924 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1925 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1926 				"LRO Flushed");
   1927 #endif /* LRO */
   1928 	}
   1929 
   1930 	/* MAC stats get their own sub node */
   1931 
   1932 	snprintf(stats->namebuf,
   1933 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1934 
   1935 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1936 	    stats->namebuf, "rx csum offload - IP");
   1937 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1938 	    stats->namebuf, "rx csum offload - L4");
   1939 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1940 	    stats->namebuf, "rx csum offload - IP bad");
   1941 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "rx csum offload - L4 bad");
   1943 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "Interrupt conditions zero");
   1945 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "Legacy interrupts");
   1947 
   1948 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1949 	    stats->namebuf, "CRC Errors");
   1950 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1951 	    stats->namebuf, "Illegal Byte Errors");
   1952 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1953 	    stats->namebuf, "Byte Errors");
   1954 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1955 	    stats->namebuf, "MAC Short Packets Discarded");
   1956 	if (hw->mac.type >= ixgbe_mac_X550)
   1957 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1958 		    stats->namebuf, "Bad SFD");
   1959 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1960 	    stats->namebuf, "Total Packets Missed");
   1961 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1962 	    stats->namebuf, "MAC Local Faults");
   1963 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1964 	    stats->namebuf, "MAC Remote Faults");
   1965 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1966 	    stats->namebuf, "Receive Length Errors");
   1967 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1968 	    stats->namebuf, "Link XON Transmitted");
   1969 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1970 	    stats->namebuf, "Link XON Received");
   1971 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1972 	    stats->namebuf, "Link XOFF Transmitted");
   1973 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1974 	    stats->namebuf, "Link XOFF Received");
   1975 
   1976 	/* Packet Reception Stats */
   1977 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1978 	    stats->namebuf, "Total Octets Received");
   1979 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1980 	    stats->namebuf, "Good Octets Received");
   1981 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1982 	    stats->namebuf, "Total Packets Received");
   1983 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1984 	    stats->namebuf, "Good Packets Received");
   1985 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1986 	    stats->namebuf, "Multicast Packets Received");
   1987 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1988 	    stats->namebuf, "Broadcast Packets Received");
   1989 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1990 	    stats->namebuf, "64 byte frames received ");
   1991 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1992 	    stats->namebuf, "65-127 byte frames received");
   1993 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1994 	    stats->namebuf, "128-255 byte frames received");
   1995 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1996 	    stats->namebuf, "256-511 byte frames received");
   1997 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1998 	    stats->namebuf, "512-1023 byte frames received");
   1999 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   2000 	    stats->namebuf, "1023-1522 byte frames received");
   2001 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   2002 	    stats->namebuf, "Receive Undersized");
   2003 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   2004 	    stats->namebuf, "Fragmented Packets Received ");
   2005 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   2006 	    stats->namebuf, "Oversized Packets Received");
   2007 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   2008 	    stats->namebuf, "Received Jabber");
   2009 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   2010 	    stats->namebuf, "Management Packets Received");
   2011 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   2012 	    stats->namebuf, "Management Packets Dropped");
   2013 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   2014 	    stats->namebuf, "Checksum Errors");
   2015 
   2016 	/* Packet Transmission Stats */
   2017 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   2018 	    stats->namebuf, "Good Octets Transmitted");
   2019 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   2020 	    stats->namebuf, "Total Packets Transmitted");
   2021 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   2022 	    stats->namebuf, "Good Packets Transmitted");
   2023 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   2024 	    stats->namebuf, "Broadcast Packets Transmitted");
   2025 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   2026 	    stats->namebuf, "Multicast Packets Transmitted");
   2027 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   2028 	    stats->namebuf, "Management Packets Transmitted");
   2029 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   2030 	    stats->namebuf, "64 byte frames transmitted ");
   2031 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   2032 	    stats->namebuf, "65-127 byte frames transmitted");
   2033 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   2034 	    stats->namebuf, "128-255 byte frames transmitted");
   2035 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   2036 	    stats->namebuf, "256-511 byte frames transmitted");
   2037 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   2038 	    stats->namebuf, "512-1023 byte frames transmitted");
   2039 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   2040 	    stats->namebuf, "1024-1522 byte frames transmitted");
   2041 } /* ixgbe_add_hw_stats */
   2042 
   2043 static void
   2044 ixgbe_clear_evcnt(struct adapter *adapter)
   2045 {
   2046 	struct tx_ring *txr = adapter->tx_rings;
   2047 	struct rx_ring *rxr = adapter->rx_rings;
   2048 	struct ixgbe_hw *hw = &adapter->hw;
   2049 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   2050 	int i;
   2051 
   2052 	adapter->efbig_tx_dma_setup.ev_count = 0;
   2053 	adapter->mbuf_defrag_failed.ev_count = 0;
   2054 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   2055 	adapter->einval_tx_dma_setup.ev_count = 0;
   2056 	adapter->other_tx_dma_setup.ev_count = 0;
   2057 	adapter->eagain_tx_dma_setup.ev_count = 0;
   2058 	adapter->enomem_tx_dma_setup.ev_count = 0;
   2059 	adapter->tso_err.ev_count = 0;
   2060 	adapter->watchdog_events.ev_count = 0;
   2061 	adapter->link_irq.ev_count = 0;
   2062 	adapter->link_sicount.ev_count = 0;
   2063 	adapter->mod_sicount.ev_count = 0;
   2064 	adapter->msf_sicount.ev_count = 0;
   2065 	adapter->phy_sicount.ev_count = 0;
   2066 
   2067 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   2068 		if (i < __arraycount(stats->mpc)) {
   2069 			stats->mpc[i].ev_count = 0;
   2070 			if (hw->mac.type == ixgbe_mac_82598EB)
   2071 				stats->rnbc[i].ev_count = 0;
   2072 		}
   2073 		if (i < __arraycount(stats->pxontxc)) {
   2074 			stats->pxontxc[i].ev_count = 0;
   2075 			stats->pxonrxc[i].ev_count = 0;
   2076 			stats->pxofftxc[i].ev_count = 0;
   2077 			stats->pxoffrxc[i].ev_count = 0;
   2078 			if (hw->mac.type >= ixgbe_mac_82599EB)
   2079 				stats->pxon2offc[i].ev_count = 0;
   2080 		}
   2081 	}
   2082 
   2083 	txr = adapter->tx_rings;
   2084 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2085 		adapter->queues[i].irqs.ev_count = 0;
   2086 		adapter->queues[i].handleq.ev_count = 0;
   2087 		adapter->queues[i].req.ev_count = 0;
   2088 		txr->no_desc_avail.ev_count = 0;
   2089 		txr->total_packets.ev_count = 0;
   2090 		txr->tso_tx.ev_count = 0;
   2091 #ifndef IXGBE_LEGACY_TX
   2092 		txr->pcq_drops.ev_count = 0;
   2093 #endif
   2094 		txr->q_efbig_tx_dma_setup = 0;
   2095 		txr->q_mbuf_defrag_failed = 0;
   2096 		txr->q_efbig2_tx_dma_setup = 0;
   2097 		txr->q_einval_tx_dma_setup = 0;
   2098 		txr->q_other_tx_dma_setup = 0;
   2099 		txr->q_eagain_tx_dma_setup = 0;
   2100 		txr->q_enomem_tx_dma_setup = 0;
   2101 		txr->q_tso_err = 0;
   2102 
   2103 		if (i < __arraycount(stats->qprc)) {
   2104 			stats->qprc[i].ev_count = 0;
   2105 			stats->qptc[i].ev_count = 0;
   2106 			stats->qbrc[i].ev_count = 0;
   2107 			stats->qbtc[i].ev_count = 0;
   2108 			if (hw->mac.type >= ixgbe_mac_82599EB)
   2109 				stats->qprdc[i].ev_count = 0;
   2110 		}
   2111 
   2112 		rxr->rx_packets.ev_count = 0;
   2113 		rxr->rx_bytes.ev_count = 0;
   2114 		rxr->rx_copies.ev_count = 0;
   2115 		rxr->no_jmbuf.ev_count = 0;
   2116 		rxr->rx_discarded.ev_count = 0;
   2117 	}
   2118 	stats->ipcs.ev_count = 0;
   2119 	stats->l4cs.ev_count = 0;
   2120 	stats->ipcs_bad.ev_count = 0;
   2121 	stats->l4cs_bad.ev_count = 0;
   2122 	stats->intzero.ev_count = 0;
   2123 	stats->legint.ev_count = 0;
   2124 	stats->crcerrs.ev_count = 0;
   2125 	stats->illerrc.ev_count = 0;
   2126 	stats->errbc.ev_count = 0;
   2127 	stats->mspdc.ev_count = 0;
   2128 	if (hw->mac.type >= ixgbe_mac_X550)
   2129 		stats->mbsdc.ev_count = 0;
   2130 	stats->mpctotal.ev_count = 0;
   2131 	stats->mlfc.ev_count = 0;
   2132 	stats->mrfc.ev_count = 0;
   2133 	stats->rlec.ev_count = 0;
   2134 	stats->lxontxc.ev_count = 0;
   2135 	stats->lxonrxc.ev_count = 0;
   2136 	stats->lxofftxc.ev_count = 0;
   2137 	stats->lxoffrxc.ev_count = 0;
   2138 
   2139 	/* Packet Reception Stats */
   2140 	stats->tor.ev_count = 0;
   2141 	stats->gorc.ev_count = 0;
   2142 	stats->tpr.ev_count = 0;
   2143 	stats->gprc.ev_count = 0;
   2144 	stats->mprc.ev_count = 0;
   2145 	stats->bprc.ev_count = 0;
   2146 	stats->prc64.ev_count = 0;
   2147 	stats->prc127.ev_count = 0;
   2148 	stats->prc255.ev_count = 0;
   2149 	stats->prc511.ev_count = 0;
   2150 	stats->prc1023.ev_count = 0;
   2151 	stats->prc1522.ev_count = 0;
   2152 	stats->ruc.ev_count = 0;
   2153 	stats->rfc.ev_count = 0;
   2154 	stats->roc.ev_count = 0;
   2155 	stats->rjc.ev_count = 0;
   2156 	stats->mngprc.ev_count = 0;
   2157 	stats->mngpdc.ev_count = 0;
   2158 	stats->xec.ev_count = 0;
   2159 
   2160 	/* Packet Transmission Stats */
   2161 	stats->gotc.ev_count = 0;
   2162 	stats->tpt.ev_count = 0;
   2163 	stats->gptc.ev_count = 0;
   2164 	stats->bptc.ev_count = 0;
   2165 	stats->mptc.ev_count = 0;
   2166 	stats->mngptc.ev_count = 0;
   2167 	stats->ptc64.ev_count = 0;
   2168 	stats->ptc127.ev_count = 0;
   2169 	stats->ptc255.ev_count = 0;
   2170 	stats->ptc511.ev_count = 0;
   2171 	stats->ptc1023.ev_count = 0;
   2172 	stats->ptc1522.ev_count = 0;
   2173 }
   2174 
   2175 /************************************************************************
   2176  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2177  *
   2178  *   Retrieves the TDH value from the hardware
   2179  ************************************************************************/
   2180 static int
   2181 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2182 {
   2183 	struct sysctlnode node = *rnode;
   2184 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2185 	struct adapter *adapter;
   2186 	uint32_t val;
   2187 
   2188 	if (!txr)
   2189 		return (0);
   2190 
   2191 	adapter = txr->adapter;
   2192 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2193 		return (EPERM);
   2194 
   2195 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
   2196 	node.sysctl_data = &val;
   2197 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2198 } /* ixgbe_sysctl_tdh_handler */
   2199 
   2200 /************************************************************************
   2201  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2202  *
   2203  *   Retrieves the TDT value from the hardware
   2204  ************************************************************************/
   2205 static int
   2206 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2207 {
   2208 	struct sysctlnode node = *rnode;
   2209 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2210 	struct adapter *adapter;
   2211 	uint32_t val;
   2212 
   2213 	if (!txr)
   2214 		return (0);
   2215 
   2216 	adapter = txr->adapter;
   2217 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2218 		return (EPERM);
   2219 
   2220 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
   2221 	node.sysctl_data = &val;
   2222 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2223 } /* ixgbe_sysctl_tdt_handler */
   2224 
   2225 /************************************************************************
   2226  * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
   2227  * handler function
   2228  *
   2229  *   Retrieves the next_to_check value
   2230  ************************************************************************/
   2231 static int
   2232 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2233 {
   2234 	struct sysctlnode node = *rnode;
   2235 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2236 	struct adapter *adapter;
   2237 	uint32_t val;
   2238 
   2239 	if (!rxr)
   2240 		return (0);
   2241 
   2242 	adapter = rxr->adapter;
   2243 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2244 		return (EPERM);
   2245 
   2246 	val = rxr->next_to_check;
   2247 	node.sysctl_data = &val;
   2248 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2249 } /* ixgbe_sysctl_next_to_check_handler */
   2250 
   2251 /************************************************************************
   2252  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2253  *
   2254  *   Retrieves the RDH value from the hardware
   2255  ************************************************************************/
   2256 static int
   2257 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2258 {
   2259 	struct sysctlnode node = *rnode;
   2260 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2261 	struct adapter *adapter;
   2262 	uint32_t val;
   2263 
   2264 	if (!rxr)
   2265 		return (0);
   2266 
   2267 	adapter = rxr->adapter;
   2268 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2269 		return (EPERM);
   2270 
   2271 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
   2272 	node.sysctl_data = &val;
   2273 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2274 } /* ixgbe_sysctl_rdh_handler */
   2275 
   2276 /************************************************************************
   2277  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2278  *
   2279  *   Retrieves the RDT value from the hardware
   2280  ************************************************************************/
   2281 static int
   2282 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2283 {
   2284 	struct sysctlnode node = *rnode;
   2285 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2286 	struct adapter *adapter;
   2287 	uint32_t val;
   2288 
   2289 	if (!rxr)
   2290 		return (0);
   2291 
   2292 	adapter = rxr->adapter;
   2293 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2294 		return (EPERM);
   2295 
   2296 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
   2297 	node.sysctl_data = &val;
   2298 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2299 } /* ixgbe_sysctl_rdt_handler */
   2300 
   2301 static int
   2302 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
   2303 {
   2304 	struct ifnet *ifp = &ec->ec_if;
   2305 	struct adapter *adapter = ifp->if_softc;
   2306 	int rv;
   2307 
   2308 	if (set)
   2309 		rv = ixgbe_register_vlan(adapter, vid);
   2310 	else
   2311 		rv = ixgbe_unregister_vlan(adapter, vid);
   2312 
   2313 	if (rv != 0)
   2314 		return rv;
   2315 
   2316 	/*
   2317 	 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
   2318 	 * or 0 to 1.
   2319 	 */
   2320 	if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
   2321 		ixgbe_setup_vlan_hw_tagging(adapter);
   2322 
   2323 	return rv;
   2324 }
   2325 
   2326 /************************************************************************
   2327  * ixgbe_register_vlan
   2328  *
   2329  *   Run via vlan config EVENT, it enables us to use the
   2330  *   HW Filter table since we can get the vlan id. This
   2331  *   just creates the entry in the soft version of the
   2332  *   VFTA, init will repopulate the real table.
   2333  ************************************************************************/
   2334 static int
   2335 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
   2336 {
   2337 	u16		index, bit;
   2338 	int		error;
   2339 
   2340 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2341 		return EINVAL;
   2342 
   2343 	IXGBE_CORE_LOCK(adapter);
   2344 	index = (vtag >> 5) & 0x7F;
   2345 	bit = vtag & 0x1F;
   2346 	adapter->shadow_vfta[index] |= ((u32)1 << bit);
   2347 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
   2348 	    true);
   2349 	IXGBE_CORE_UNLOCK(adapter);
   2350 	if (error != 0)
   2351 		error = EACCES;
   2352 
   2353 	return error;
   2354 } /* ixgbe_register_vlan */
   2355 
   2356 /************************************************************************
   2357  * ixgbe_unregister_vlan
   2358  *
   2359  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2360  ************************************************************************/
   2361 static int
   2362 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
   2363 {
   2364 	u16		index, bit;
   2365 	int		error;
   2366 
   2367 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2368 		return EINVAL;
   2369 
   2370 	IXGBE_CORE_LOCK(adapter);
   2371 	index = (vtag >> 5) & 0x7F;
   2372 	bit = vtag & 0x1F;
   2373 	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
   2374 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
   2375 	    true);
   2376 	IXGBE_CORE_UNLOCK(adapter);
   2377 	if (error != 0)
   2378 		error = EACCES;
   2379 
   2380 	return error;
   2381 } /* ixgbe_unregister_vlan */
   2382 
   2383 static void
   2384 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
   2385 {
   2386 	struct ethercom *ec = &adapter->osdep.ec;
   2387 	struct ixgbe_hw *hw = &adapter->hw;
   2388 	struct rx_ring	*rxr;
   2389 	u32		ctrl;
   2390 	int		i;
   2391 	bool		hwtagging;
   2392 
   2393 	/* Enable HW tagging only if any vlan is attached */
   2394 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   2395 	    && VLAN_ATTACHED(ec);
   2396 
   2397 	/* Setup the queues for vlans */
   2398 	for (i = 0; i < adapter->num_queues; i++) {
   2399 		rxr = &adapter->rx_rings[i];
   2400 		/*
   2401 		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
   2402 		 */
   2403 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2404 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2405 			if (hwtagging)
   2406 				ctrl |= IXGBE_RXDCTL_VME;
   2407 			else
   2408 				ctrl &= ~IXGBE_RXDCTL_VME;
   2409 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2410 		}
   2411 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2412 	}
   2413 
   2414 	/* VLAN hw tagging for 82598 */
   2415 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2416 		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2417 		if (hwtagging)
   2418 			ctrl |= IXGBE_VLNCTRL_VME;
   2419 		else
   2420 			ctrl &= ~IXGBE_VLNCTRL_VME;
   2421 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2422 	}
   2423 } /* ixgbe_setup_vlan_hw_tagging */
   2424 
/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 *   Program VLAN HW tag stripping and the HW VLAN filter table from
 *   the current ethercom state (ec_capenable and the ec_vids list).
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	int		i;
	u32		ctrl;
	struct vlanid_list *vlanidp;

	/*
	 *  This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/*
	 * Part 1:
	 * Setup VLAN HW tagging
	 */
	ixgbe_setup_vlan_hw_tagging(adapter);

	/*
	 * Part 2:
	 * Setup VLAN HW filter
	 */
	/* Cleanup shadow_vfta */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0;
	/* Generate shadow_vfta from ec_vids (one bit per configured VID) */
	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		uint32_t idx;

		idx = vlanidp->vid / 32;
		KASSERT(idx < IXGBE_VFTA_SIZE);
		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
	}
	ETHER_UNLOCK(ec);
	/* Push the regenerated shadow table to the HW VFTA registers */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
		ctrl |= IXGBE_VLNCTRL_VFE;
	else
		ctrl &= ~IXGBE_VLNCTRL_VFE;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
   2473 
   2474 /************************************************************************
   2475  * ixgbe_get_slot_info
   2476  *
   2477  *   Get the width and transaction speed of
   2478  *   the slot this adapter is plugged into.
   2479  ************************************************************************/
   2480 static void
   2481 ixgbe_get_slot_info(struct adapter *adapter)
   2482 {
   2483 	device_t		dev = adapter->dev;
   2484 	struct ixgbe_hw		*hw = &adapter->hw;
   2485 	u32		      offset;
   2486 	u16			link;
   2487 	int		      bus_info_valid = TRUE;
   2488 
   2489 	/* Some devices are behind an internal bridge */
   2490 	switch (hw->device_id) {
   2491 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2492 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2493 		goto get_parent_info;
   2494 	default:
   2495 		break;
   2496 	}
   2497 
   2498 	ixgbe_get_bus_info(hw);
   2499 
   2500 	/*
   2501 	 * Some devices don't use PCI-E, but there is no need
   2502 	 * to display "Unknown" for bus speed and width.
   2503 	 */
   2504 	switch (hw->mac.type) {
   2505 	case ixgbe_mac_X550EM_x:
   2506 	case ixgbe_mac_X550EM_a:
   2507 		return;
   2508 	default:
   2509 		goto display;
   2510 	}
   2511 
   2512 get_parent_info:
   2513 	/*
   2514 	 * For the Quad port adapter we need to parse back
   2515 	 * up the PCI tree to find the speed of the expansion
   2516 	 * slot into which this adapter is plugged. A bit more work.
   2517 	 */
   2518 	dev = device_parent(device_parent(dev));
   2519 #if 0
   2520 #ifdef IXGBE_DEBUG
   2521 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2522 	    pci_get_slot(dev), pci_get_function(dev));
   2523 #endif
   2524 	dev = device_parent(device_parent(dev));
   2525 #ifdef IXGBE_DEBUG
   2526 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2527 	    pci_get_slot(dev), pci_get_function(dev));
   2528 #endif
   2529 #endif
   2530 	/* Now get the PCI Express Capabilities offset */
   2531 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2532 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2533 		/*
   2534 		 * Hmm...can't get PCI-Express capabilities.
   2535 		 * Falling back to default method.
   2536 		 */
   2537 		bus_info_valid = FALSE;
   2538 		ixgbe_get_bus_info(hw);
   2539 		goto display;
   2540 	}
   2541 	/* ...and read the Link Status Register */
   2542 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2543 	    offset + PCIE_LCSR) >> 16;
   2544 	ixgbe_set_pci_config_data_generic(hw, link);
   2545 
   2546 display:
   2547 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2548 	    ((hw->bus.speed == ixgbe_bus_speed_8000)	? "8.0GT/s" :
   2549 	     (hw->bus.speed == ixgbe_bus_speed_5000)	? "5.0GT/s" :
   2550 	     (hw->bus.speed == ixgbe_bus_speed_2500)	? "2.5GT/s" :
   2551 	     "Unknown"),
   2552 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2553 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2554 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2555 	     "Unknown"));
   2556 
   2557 	if (bus_info_valid) {
   2558 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2559 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2560 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2561 			device_printf(dev, "PCI-Express bandwidth available"
   2562 			    " for this card\n     is not sufficient for"
   2563 			    " optimal performance.\n");
   2564 			device_printf(dev, "For optimal performance a x8 "
   2565 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2566 		}
   2567 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2568 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2569 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2570 			device_printf(dev, "PCI-Express bandwidth available"
   2571 			    " for this card\n     is not sufficient for"
   2572 			    " optimal performance.\n");
   2573 			device_printf(dev, "For optimal performance a x8 "
   2574 			    "PCIE Gen3 slot is required.\n");
   2575 		}
   2576 	} else
   2577 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2578 
   2579 	return;
   2580 } /* ixgbe_get_slot_info */
   2581 
   2582 /************************************************************************
   2583  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2584  ************************************************************************/
   2585 static inline void
   2586 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2587 {
   2588 	struct ixgbe_hw *hw = &adapter->hw;
   2589 	struct ix_queue *que = &adapter->queues[vector];
   2590 	u64		queue = 1ULL << vector;
   2591 	u32		mask;
   2592 
   2593 	mutex_enter(&que->dc_mtx);
   2594 	if (que->disabled_count > 0 && --que->disabled_count > 0)
   2595 		goto out;
   2596 
   2597 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2598 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2599 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2600 	} else {
   2601 		mask = (queue & 0xFFFFFFFF);
   2602 		if (mask)
   2603 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2604 		mask = (queue >> 32);
   2605 		if (mask)
   2606 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2607 	}
   2608 out:
   2609 	mutex_exit(&que->dc_mtx);
   2610 } /* ixgbe_enable_queue */
   2611 
   2612 /************************************************************************
   2613  * ixgbe_disable_queue_internal
   2614  ************************************************************************/
   2615 static inline void
   2616 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
   2617 {
   2618 	struct ixgbe_hw *hw = &adapter->hw;
   2619 	struct ix_queue *que = &adapter->queues[vector];
   2620 	u64		queue = 1ULL << vector;
   2621 	u32		mask;
   2622 
   2623 	mutex_enter(&que->dc_mtx);
   2624 
   2625 	if (que->disabled_count > 0) {
   2626 		if (nestok)
   2627 			que->disabled_count++;
   2628 		goto out;
   2629 	}
   2630 	que->disabled_count++;
   2631 
   2632 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2633 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2634 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2635 	} else {
   2636 		mask = (queue & 0xFFFFFFFF);
   2637 		if (mask)
   2638 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2639 		mask = (queue >> 32);
   2640 		if (mask)
   2641 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2642 	}
   2643 out:
   2644 	mutex_exit(&que->dc_mtx);
   2645 } /* ixgbe_disable_queue_internal */
   2646 
   2647 /************************************************************************
   2648  * ixgbe_disable_queue
   2649  ************************************************************************/
   2650 static inline void
   2651 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2652 {
   2653 
   2654 	ixgbe_disable_queue_internal(adapter, vector, true);
   2655 } /* ixgbe_disable_queue */
   2656 
   2657 /************************************************************************
   2658  * ixgbe_sched_handle_que - schedule deferred packet processing
   2659  ************************************************************************/
   2660 static inline void
   2661 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
   2662 {
   2663 
   2664 	if (que->txrx_use_workqueue) {
   2665 		/*
   2666 		 * adapter->que_wq is bound to each CPU instead of
   2667 		 * each NIC queue to reduce workqueue kthread. As we
   2668 		 * should consider about interrupt affinity in this
   2669 		 * function, the workqueue kthread must be WQ_PERCPU.
   2670 		 * If create WQ_PERCPU workqueue kthread for each NIC
   2671 		 * queue, that number of created workqueue kthread is
   2672 		 * (number of used NIC queue) * (number of CPUs) =
   2673 		 * (number of CPUs) ^ 2 most often.
   2674 		 *
   2675 		 * The same NIC queue's interrupts are avoided by
   2676 		 * masking the queue's interrupt. And different
   2677 		 * NIC queue's interrupts use different struct work
   2678 		 * (que->wq_cookie). So, "enqueued flag" to avoid
   2679 		 * twice workqueue_enqueue() is not required .
   2680 		 */
   2681 		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
   2682 	} else {
   2683 		softint_schedule(que->que_si);
   2684 	}
   2685 }
   2686 
   2687 /************************************************************************
   2688  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2689  ************************************************************************/
   2690 static int
   2691 ixgbe_msix_que(void *arg)
   2692 {
   2693 	struct ix_queue	*que = arg;
   2694 	struct adapter	*adapter = que->adapter;
   2695 	struct ifnet	*ifp = adapter->ifp;
   2696 	struct tx_ring	*txr = que->txr;
   2697 	struct rx_ring	*rxr = que->rxr;
   2698 	bool		more;
   2699 	u32		newitr = 0;
   2700 
   2701 	/* Protect against spurious interrupts */
   2702 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2703 		return 0;
   2704 
   2705 	ixgbe_disable_queue(adapter, que->msix);
   2706 	++que->irqs.ev_count;
   2707 
   2708 	/*
   2709 	 * Don't change "que->txrx_use_workqueue" from this point to avoid
   2710 	 * flip-flopping softint/workqueue mode in one deferred processing.
   2711 	 */
   2712 	que->txrx_use_workqueue = adapter->txrx_use_workqueue;
   2713 
   2714 #ifdef __NetBSD__
   2715 	/* Don't run ixgbe_rxeof in interrupt context */
   2716 	more = true;
   2717 #else
   2718 	more = ixgbe_rxeof(que);
   2719 #endif
   2720 
   2721 	IXGBE_TX_LOCK(txr);
   2722 	ixgbe_txeof(txr);
   2723 	IXGBE_TX_UNLOCK(txr);
   2724 
   2725 	/* Do AIM now? */
   2726 
   2727 	if (adapter->enable_aim == false)
   2728 		goto no_calc;
   2729 	/*
   2730 	 * Do Adaptive Interrupt Moderation:
   2731 	 *  - Write out last calculated setting
   2732 	 *  - Calculate based on average size over
   2733 	 *    the last interval.
   2734 	 */
   2735 	if (que->eitr_setting)
   2736 		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
   2737 
   2738 	que->eitr_setting = 0;
   2739 
   2740 	/* Idle, do nothing */
   2741 	if ((txr->bytes == 0) && (rxr->bytes == 0))
   2742 		goto no_calc;
   2743 
   2744 	if ((txr->bytes) && (txr->packets))
   2745 		newitr = txr->bytes/txr->packets;
   2746 	if ((rxr->bytes) && (rxr->packets))
   2747 		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
   2748 	newitr += 24; /* account for hardware frame, crc */
   2749 
   2750 	/* set an upper boundary */
   2751 	newitr = uimin(newitr, 3000);
   2752 
   2753 	/* Be nice to the mid range */
   2754 	if ((newitr > 300) && (newitr < 1200))
   2755 		newitr = (newitr / 3);
   2756 	else
   2757 		newitr = (newitr / 2);
   2758 
   2759 	/*
   2760 	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
   2761 	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
   2762 	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
   2763 	 * on 1G and higher.
   2764 	 */
   2765 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2766 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2767 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
   2768 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
   2769 	}
   2770 
   2771 	/* save for next interrupt */
   2772 	que->eitr_setting = newitr;
   2773 
   2774 	/* Reset state */
   2775 	txr->bytes = 0;
   2776 	txr->packets = 0;
   2777 	rxr->bytes = 0;
   2778 	rxr->packets = 0;
   2779 
   2780 no_calc:
   2781 	if (more)
   2782 		ixgbe_sched_handle_que(adapter, que);
   2783 	else
   2784 		ixgbe_enable_queue(adapter, que->msix);
   2785 
   2786 	return 1;
   2787 } /* ixgbe_msix_que */
   2788 
   2789 /************************************************************************
   2790  * ixgbe_media_status - Media Ioctl callback
   2791  *
   2792  *   Called whenever the user queries the status of
   2793  *   the interface using ifconfig.
   2794  ************************************************************************/
   2795 static void
   2796 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2797 {
   2798 	struct adapter *adapter = ifp->if_softc;
   2799 	struct ixgbe_hw *hw = &adapter->hw;
   2800 	int layer;
   2801 
   2802 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2803 	IXGBE_CORE_LOCK(adapter);
   2804 	ixgbe_update_link_status(adapter);
   2805 
   2806 	ifmr->ifm_status = IFM_AVALID;
   2807 	ifmr->ifm_active = IFM_ETHER;
   2808 
   2809 	if (adapter->link_active != LINK_STATE_UP) {
   2810 		ifmr->ifm_active |= IFM_NONE;
   2811 		IXGBE_CORE_UNLOCK(adapter);
   2812 		return;
   2813 	}
   2814 
   2815 	ifmr->ifm_status |= IFM_ACTIVE;
   2816 	layer = adapter->phy_layer;
   2817 
   2818 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2819 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2820 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2821 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2822 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2823 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2824 		switch (adapter->link_speed) {
   2825 		case IXGBE_LINK_SPEED_10GB_FULL:
   2826 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2827 			break;
   2828 		case IXGBE_LINK_SPEED_5GB_FULL:
   2829 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2830 			break;
   2831 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2832 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2833 			break;
   2834 		case IXGBE_LINK_SPEED_1GB_FULL:
   2835 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2836 			break;
   2837 		case IXGBE_LINK_SPEED_100_FULL:
   2838 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2839 			break;
   2840 		case IXGBE_LINK_SPEED_10_FULL:
   2841 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2842 			break;
   2843 		}
   2844 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2845 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2846 		switch (adapter->link_speed) {
   2847 		case IXGBE_LINK_SPEED_10GB_FULL:
   2848 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2849 			break;
   2850 		}
   2851 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2852 		switch (adapter->link_speed) {
   2853 		case IXGBE_LINK_SPEED_10GB_FULL:
   2854 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2855 			break;
   2856 		case IXGBE_LINK_SPEED_1GB_FULL:
   2857 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2858 			break;
   2859 		}
   2860 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2861 		switch (adapter->link_speed) {
   2862 		case IXGBE_LINK_SPEED_10GB_FULL:
   2863 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2864 			break;
   2865 		case IXGBE_LINK_SPEED_1GB_FULL:
   2866 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2867 			break;
   2868 		}
   2869 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2870 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2871 		switch (adapter->link_speed) {
   2872 		case IXGBE_LINK_SPEED_10GB_FULL:
   2873 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2874 			break;
   2875 		case IXGBE_LINK_SPEED_1GB_FULL:
   2876 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2877 			break;
   2878 		}
   2879 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2880 		switch (adapter->link_speed) {
   2881 		case IXGBE_LINK_SPEED_10GB_FULL:
   2882 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2883 			break;
   2884 		}
   2885 	/*
   2886 	 * XXX: These need to use the proper media types once
   2887 	 * they're added.
   2888 	 */
   2889 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2890 		switch (adapter->link_speed) {
   2891 		case IXGBE_LINK_SPEED_10GB_FULL:
   2892 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2893 			break;
   2894 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2895 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2896 			break;
   2897 		case IXGBE_LINK_SPEED_1GB_FULL:
   2898 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2899 			break;
   2900 		}
   2901 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2902 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2903 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2904 		switch (adapter->link_speed) {
   2905 		case IXGBE_LINK_SPEED_10GB_FULL:
   2906 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2907 			break;
   2908 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2909 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2910 			break;
   2911 		case IXGBE_LINK_SPEED_1GB_FULL:
   2912 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2913 			break;
   2914 		}
   2915 
   2916 	/* If nothing is recognized... */
   2917 #if 0
   2918 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2919 		ifmr->ifm_active |= IFM_UNKNOWN;
   2920 #endif
   2921 
   2922 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2923 
   2924 	/* Display current flow control setting used on link */
   2925 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2926 	    hw->fc.current_mode == ixgbe_fc_full)
   2927 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2928 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2929 	    hw->fc.current_mode == ixgbe_fc_full)
   2930 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2931 
   2932 	IXGBE_CORE_UNLOCK(adapter);
   2933 
   2934 	return;
   2935 } /* ixgbe_media_status */
   2936 
   2937 /************************************************************************
   2938  * ixgbe_media_change - Media Ioctl callback
   2939  *
   2940  *   Called when the user changes speed/duplex using
   2941  *   media/mediopt option with ifconfig.
   2942  ************************************************************************/
   2943 static int
   2944 ixgbe_media_change(struct ifnet *ifp)
   2945 {
   2946 	struct adapter	 *adapter = ifp->if_softc;
   2947 	struct ifmedia	 *ifm = &adapter->media;
   2948 	struct ixgbe_hw	 *hw = &adapter->hw;
   2949 	ixgbe_link_speed speed = 0;
   2950 	ixgbe_link_speed link_caps = 0;
   2951 	bool negotiate = false;
   2952 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2953 
   2954 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2955 
   2956 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2957 		return (EINVAL);
   2958 
   2959 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2960 		return (EPERM);
   2961 
   2962 	IXGBE_CORE_LOCK(adapter);
   2963 	/*
   2964 	 * We don't actually need to check against the supported
   2965 	 * media types of the adapter; ifmedia will take care of
   2966 	 * that for us.
   2967 	 */
   2968 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2969 	case IFM_AUTO:
   2970 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2971 		    &negotiate);
   2972 		if (err != IXGBE_SUCCESS) {
   2973 			device_printf(adapter->dev, "Unable to determine "
   2974 			    "supported advertise speeds\n");
   2975 			IXGBE_CORE_UNLOCK(adapter);
   2976 			return (ENODEV);
   2977 		}
   2978 		speed |= link_caps;
   2979 		break;
   2980 	case IFM_10G_T:
   2981 	case IFM_10G_LRM:
   2982 	case IFM_10G_LR:
   2983 	case IFM_10G_TWINAX:
   2984 	case IFM_10G_SR:
   2985 	case IFM_10G_CX4:
   2986 	case IFM_10G_KR:
   2987 	case IFM_10G_KX4:
   2988 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2989 		break;
   2990 	case IFM_5000_T:
   2991 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2992 		break;
   2993 	case IFM_2500_T:
   2994 	case IFM_2500_KX:
   2995 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2996 		break;
   2997 	case IFM_1000_T:
   2998 	case IFM_1000_LX:
   2999 	case IFM_1000_SX:
   3000 	case IFM_1000_KX:
   3001 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   3002 		break;
   3003 	case IFM_100_TX:
   3004 		speed |= IXGBE_LINK_SPEED_100_FULL;
   3005 		break;
   3006 	case IFM_10_T:
   3007 		speed |= IXGBE_LINK_SPEED_10_FULL;
   3008 		break;
   3009 	case IFM_NONE:
   3010 		break;
   3011 	default:
   3012 		goto invalid;
   3013 	}
   3014 
   3015 	hw->mac.autotry_restart = TRUE;
   3016 	hw->mac.ops.setup_link(hw, speed, TRUE);
   3017 	adapter->advertise = 0;
   3018 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   3019 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   3020 			adapter->advertise |= 1 << 2;
   3021 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   3022 			adapter->advertise |= 1 << 1;
   3023 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   3024 			adapter->advertise |= 1 << 0;
   3025 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   3026 			adapter->advertise |= 1 << 3;
   3027 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   3028 			adapter->advertise |= 1 << 4;
   3029 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   3030 			adapter->advertise |= 1 << 5;
   3031 	}
   3032 
   3033 	IXGBE_CORE_UNLOCK(adapter);
   3034 	return (0);
   3035 
   3036 invalid:
   3037 	device_printf(adapter->dev, "Invalid media type!\n");
   3038 	IXGBE_CORE_UNLOCK(adapter);
   3039 
   3040 	return (EINVAL);
   3041 } /* ixgbe_media_change */
   3042 
   3043 /************************************************************************
   3044  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   3045  ************************************************************************/
   3046 static int
   3047 ixgbe_msix_link(void *arg)
   3048 {
   3049 	struct adapter	*adapter = arg;
   3050 	struct ixgbe_hw *hw = &adapter->hw;
   3051 	u32		eicr, eicr_mask;
   3052 	s32		retval;
   3053 
   3054 	++adapter->link_irq.ev_count;
   3055 
   3056 	/* Pause other interrupts */
   3057 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   3058 
   3059 	/* First get the cause */
   3060 	/*
   3061 	 * The specifications of 82598, 82599, X540 and X550 say EICS register
   3062 	 * is write only. However, Linux says it is a workaround for silicon
   3063 	 * errata to read EICS instead of EICR to get interrupt cause. It seems
   3064 	 * there is a problem about read clear mechanism for EICR register.
   3065 	 */
   3066 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   3067 	/* Be sure the queue bits are not cleared */
   3068 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   3069 	/* Clear interrupt with write */
   3070 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   3071 
   3072 	if (ixgbe_is_sfp(hw)) {
   3073 		/* Pluggable optics-related interrupt */
   3074 		if (hw->mac.type >= ixgbe_mac_X540)
   3075 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   3076 		else
   3077 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   3078 
   3079 		/*
   3080 		 *  An interrupt might not arrive when a module is inserted.
   3081 		 * When an link status change interrupt occurred and the driver
   3082 		 * still regard SFP as unplugged, issue the module softint
   3083 		 * and then issue LSC interrupt.
   3084 		 */
   3085 		if ((eicr & eicr_mask)
   3086 		    || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
   3087 			&& (eicr & IXGBE_EICR_LSC))) {
   3088 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   3089 			softint_schedule(adapter->mod_si);
   3090 		}
   3091 
   3092 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   3093 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   3094 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   3095 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   3096 			softint_schedule(adapter->msf_si);
   3097 		}
   3098 	}
   3099 
   3100 	/* Link status change */
   3101 	if (eicr & IXGBE_EICR_LSC) {
   3102 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   3103 		softint_schedule(adapter->link_si);
   3104 	}
   3105 
   3106 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   3107 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   3108 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   3109 			/* This is probably overkill :) */
   3110 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   3111 				return 1;
   3112 			/* Disable the interrupt */
   3113 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   3114 			softint_schedule(adapter->fdir_si);
   3115 		}
   3116 
   3117 		if (eicr & IXGBE_EICR_ECC) {
   3118 			device_printf(adapter->dev,
   3119 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   3120 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   3121 		}
   3122 
   3123 		/* Check for over temp condition */
   3124 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   3125 			switch (adapter->hw.mac.type) {
   3126 			case ixgbe_mac_X550EM_a:
   3127 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   3128 					break;
   3129 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   3130 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   3131 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   3132 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   3133 				retval = hw->phy.ops.check_overtemp(hw);
   3134 				if (retval != IXGBE_ERR_OVERTEMP)
   3135 					break;
   3136 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   3137 				device_printf(adapter->dev, "System shutdown required!\n");
   3138 				break;
   3139 			default:
   3140 				if (!(eicr & IXGBE_EICR_TS))
   3141 					break;
   3142 				retval = hw->phy.ops.check_overtemp(hw);
   3143 				if (retval != IXGBE_ERR_OVERTEMP)
   3144 					break;
   3145 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   3146 				device_printf(adapter->dev, "System shutdown required!\n");
   3147 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   3148 				break;
   3149 			}
   3150 		}
   3151 
   3152 		/* Check for VF message */
   3153 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   3154 		    (eicr & IXGBE_EICR_MAILBOX))
   3155 			softint_schedule(adapter->mbx_si);
   3156 	}
   3157 
   3158 	/* Check for fan failure */
   3159 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   3160 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   3161 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   3162 	}
   3163 
   3164 	/* External PHY interrupt */
   3165 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   3166 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   3167 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   3168 		softint_schedule(adapter->phy_si);
   3169 	}
   3170 
   3171 	/* Re-enable other interrupts */
   3172 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   3173 	return 1;
   3174 } /* ixgbe_msix_link */
   3175 
   3176 static void
   3177 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   3178 {
   3179 
   3180 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3181 		itr |= itr << 16;
   3182 	else
   3183 		itr |= IXGBE_EITR_CNT_WDIS;
   3184 
   3185 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
   3186 }
   3187 
   3188 
   3189 /************************************************************************
   3190  * ixgbe_sysctl_interrupt_rate_handler
   3191  ************************************************************************/
   3192 static int
   3193 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   3194 {
   3195 	struct sysctlnode node = *rnode;
   3196 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   3197 	struct adapter	*adapter;
   3198 	uint32_t reg, usec, rate;
   3199 	int error;
   3200 
   3201 	if (que == NULL)
   3202 		return 0;
   3203 
   3204 	adapter = que->adapter;
   3205 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   3206 		return (EPERM);
   3207 
   3208 	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
   3209 	usec = ((reg & 0x0FF8) >> 3);
   3210 	if (usec > 0)
   3211 		rate = 500000 / usec;
   3212 	else
   3213 		rate = 0;
   3214 	node.sysctl_data = &rate;
   3215 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3216 	if (error || newp == NULL)
   3217 		return error;
   3218 	reg &= ~0xfff; /* default, no limitation */
   3219 	if (rate > 0 && rate < 500000) {
   3220 		if (rate < 1000)
   3221 			rate = 1000;
   3222 		reg |= ((4000000/rate) & 0xff8);
   3223 		/*
   3224 		 * When RSC is used, ITR interval must be larger than
   3225 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   3226 		 * The minimum value is always greater than 2us on 100M
   3227 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   3228 		 */
   3229 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3230 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3231 			if ((adapter->num_queues > 1)
   3232 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3233 				return EINVAL;
   3234 		}
   3235 		ixgbe_max_interrupt_rate = rate;
   3236 	} else
   3237 		ixgbe_max_interrupt_rate = 0;
   3238 	ixgbe_eitr_write(adapter, que->msix, reg);
   3239 
   3240 	return (0);
   3241 } /* ixgbe_sysctl_interrupt_rate_handler */
   3242 
   3243 const struct sysctlnode *
   3244 ixgbe_sysctl_instance(struct adapter *adapter)
   3245 {
   3246 	const char *dvname;
   3247 	struct sysctllog **log;
   3248 	int rc;
   3249 	const struct sysctlnode *rnode;
   3250 
   3251 	if (adapter->sysctltop != NULL)
   3252 		return adapter->sysctltop;
   3253 
   3254 	log = &adapter->sysctllog;
   3255 	dvname = device_xname(adapter->dev);
   3256 
   3257 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3258 	    0, CTLTYPE_NODE, dvname,
   3259 	    SYSCTL_DESCR("ixgbe information and settings"),
   3260 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3261 		goto err;
   3262 
   3263 	return rnode;
   3264 err:
   3265 	device_printf(adapter->dev,
   3266 	    "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3267 	return NULL;
   3268 }
   3269 
   3270 /************************************************************************
   3271  * ixgbe_add_device_sysctls
   3272  ************************************************************************/
   3273 static void
   3274 ixgbe_add_device_sysctls(struct adapter *adapter)
   3275 {
   3276 	device_t	       dev = adapter->dev;
   3277 	struct ixgbe_hw	       *hw = &adapter->hw;
   3278 	struct sysctllog **log;
   3279 	const struct sysctlnode *rnode, *cnode;
   3280 
   3281 	log = &adapter->sysctllog;
   3282 
   3283 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3284 		aprint_error_dev(dev, "could not create sysctl root\n");
   3285 		return;
   3286 	}
   3287 
   3288 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3289 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3290 	    "debug", SYSCTL_DESCR("Debug Info"),
   3291 	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   3292 		aprint_error_dev(dev, "could not create sysctl\n");
   3293 
   3294 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3295 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3296 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3297 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3298 		aprint_error_dev(dev, "could not create sysctl\n");
   3299 
   3300 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3301 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3302 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3303 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3304 		aprint_error_dev(dev, "could not create sysctl\n");
   3305 
   3306 	/* Sysctls for all devices */
   3307 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3308 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3309 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3310 	    CTL_EOL) != 0)
   3311 		aprint_error_dev(dev, "could not create sysctl\n");
   3312 
   3313 	adapter->enable_aim = ixgbe_enable_aim;
   3314 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3315 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3316 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3317 		aprint_error_dev(dev, "could not create sysctl\n");
   3318 
   3319 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3320 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3321 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3322 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3323 	    CTL_EOL) != 0)
   3324 		aprint_error_dev(dev, "could not create sysctl\n");
   3325 
   3326 	/*
   3327 	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
   3328 	 * it causesflip-flopping softint/workqueue mode in one deferred
   3329 	 * processing. Therefore, preempt_disable()/preempt_enable() are
   3330 	 * required in ixgbe_sched_handle_que() to avoid
   3331 	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
   3332 	 * I think changing "que->txrx_use_workqueue" in interrupt handler
   3333 	 * is lighter than doing preempt_disable()/preempt_enable() in every
   3334 	 * ixgbe_sched_handle_que().
   3335 	 */
   3336 	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
   3337 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3338 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   3339 	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   3340 		aprint_error_dev(dev, "could not create sysctl\n");
   3341 
   3342 #ifdef IXGBE_DEBUG
   3343 	/* testing sysctls (for all devices) */
   3344 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3345 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3346 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3347 	    CTL_EOL) != 0)
   3348 		aprint_error_dev(dev, "could not create sysctl\n");
   3349 
   3350 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3351 	    CTLTYPE_STRING, "print_rss_config",
   3352 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3353 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3354 	    CTL_EOL) != 0)
   3355 		aprint_error_dev(dev, "could not create sysctl\n");
   3356 #endif
   3357 	/* for X550 series devices */
   3358 	if (hw->mac.type >= ixgbe_mac_X550)
   3359 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3360 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3361 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3362 		    CTL_EOL) != 0)
   3363 			aprint_error_dev(dev, "could not create sysctl\n");
   3364 
   3365 	/* for WoL-capable devices */
   3366 	if (adapter->wol_support) {
   3367 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3368 		    CTLTYPE_BOOL, "wol_enable",
   3369 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3370 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3371 		    CTL_EOL) != 0)
   3372 			aprint_error_dev(dev, "could not create sysctl\n");
   3373 
   3374 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3375 		    CTLTYPE_INT, "wufc",
   3376 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3377 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3378 		    CTL_EOL) != 0)
   3379 			aprint_error_dev(dev, "could not create sysctl\n");
   3380 	}
   3381 
   3382 	/* for X552/X557-AT devices */
   3383 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3384 		const struct sysctlnode *phy_node;
   3385 
   3386 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3387 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3388 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3389 			aprint_error_dev(dev, "could not create sysctl\n");
   3390 			return;
   3391 		}
   3392 
   3393 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3394 		    CTLTYPE_INT, "temp",
   3395 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3396 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3397 		    CTL_EOL) != 0)
   3398 			aprint_error_dev(dev, "could not create sysctl\n");
   3399 
   3400 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3401 		    CTLTYPE_INT, "overtemp_occurred",
   3402 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3403 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3404 		    CTL_CREATE, CTL_EOL) != 0)
   3405 			aprint_error_dev(dev, "could not create sysctl\n");
   3406 	}
   3407 
   3408 	if ((hw->mac.type == ixgbe_mac_X550EM_a)
   3409 	    && (hw->phy.type == ixgbe_phy_fw))
   3410 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3411 		    CTLTYPE_BOOL, "force_10_100_autonego",
   3412 		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
   3413 		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
   3414 		    CTL_CREATE, CTL_EOL) != 0)
   3415 			aprint_error_dev(dev, "could not create sysctl\n");
   3416 
   3417 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3418 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3419 		    CTLTYPE_INT, "eee_state",
   3420 		    SYSCTL_DESCR("EEE Power Save State"),
   3421 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3422 		    CTL_EOL) != 0)
   3423 			aprint_error_dev(dev, "could not create sysctl\n");
   3424 	}
   3425 } /* ixgbe_add_device_sysctls */
   3426 
   3427 /************************************************************************
   3428  * ixgbe_allocate_pci_resources
   3429  ************************************************************************/
   3430 static int
   3431 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3432     const struct pci_attach_args *pa)
   3433 {
   3434 	pcireg_t	memtype, csr;
   3435 	device_t dev = adapter->dev;
   3436 	bus_addr_t addr;
   3437 	int flags;
   3438 
   3439 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3440 	switch (memtype) {
   3441 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3442 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3443 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3444 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3445 		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3446 			goto map_err;
   3447 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3448 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3449 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3450 		}
   3451 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3452 		     adapter->osdep.mem_size, flags,
   3453 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3454 map_err:
   3455 			adapter->osdep.mem_size = 0;
   3456 			aprint_error_dev(dev, "unable to map BAR0\n");
   3457 			return ENXIO;
   3458 		}
   3459 		/*
   3460 		 * Enable address decoding for memory range in case BIOS or
   3461 		 * UEFI don't set it.
   3462 		 */
   3463 		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3464 		    PCI_COMMAND_STATUS_REG);
   3465 		csr |= PCI_COMMAND_MEM_ENABLE;
   3466 		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
   3467 		    csr);
   3468 		break;
   3469 	default:
   3470 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3471 		return ENXIO;
   3472 	}
   3473 
   3474 	return (0);
   3475 } /* ixgbe_allocate_pci_resources */
   3476 
/*
 * ixgbe_free_softint - Tear down softints and deferred-work queues.
 *
 * Disestablishes the per-queue TX/RX softints, destroys the TX/RX
 * workqueues and their per-CPU enqueue markers, and disestablishes the
 * link, module, MSF, PHY, flow-director and mailbox softints.  Called
 * from ixgbe_detach().
 */
static void
ixgbe_free_softint(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	/* Per-queue softints (TX softint only when legacy TX is disabled). */
	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	/* Workqueue-mode resources (only allocated when enabled). */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Link queue */
	if (adapter->link_si != NULL) {
		softint_disestablish(adapter->link_si);
		adapter->link_si = NULL;
	}
	if (adapter->mod_si != NULL) {
		softint_disestablish(adapter->mod_si);
		adapter->mod_si = NULL;
	}
	if (adapter->msf_si != NULL) {
		softint_disestablish(adapter->msf_si);
		adapter->msf_si = NULL;
	}
	if (adapter->phy_si != NULL) {
		softint_disestablish(adapter->phy_si);
		adapter->phy_si = NULL;
	}
	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
		if (adapter->fdir_si != NULL) {
			softint_disestablish(adapter->fdir_si);
			adapter->fdir_si = NULL;
		}
	}
	/*
	 * NOTE(review): FDIR above checks feat_en but SRIOV here checks
	 * feat_cap -- confirm this asymmetry is intentional.
	 */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		if (adapter->mbx_si != NULL) {
			softint_disestablish(adapter->mbx_si);
			adapter->mbx_si = NULL;
		}
	}
} /* ixgbe_free_softint */
   3529 
   3530 /************************************************************************
   3531  * ixgbe_detach - Device removal routine
   3532  *
   3533  *   Called when the driver is being removed.
   3534  *   Stops the adapter and deallocates all the resources
   3535  *   that were allocated for driver operation.
   3536  *
   3537  *   return 0 on success, positive on failure
   3538  ************************************************************************/
   3539 static int
   3540 ixgbe_detach(device_t dev, int flags)
   3541 {
   3542 	struct adapter *adapter = device_private(dev);
   3543 	struct rx_ring *rxr = adapter->rx_rings;
   3544 	struct tx_ring *txr = adapter->tx_rings;
   3545 	struct ixgbe_hw *hw = &adapter->hw;
   3546 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3547 	u32	ctrl_ext;
   3548 	int i;
   3549 
   3550 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3551 	if (adapter->osdep.attached == false)
   3552 		return 0;
   3553 
   3554 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3555 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3556 		return (EBUSY);
   3557 	}
   3558 
   3559 	/*
   3560 	 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
   3561 	 * so it's not required to call ixgbe_stop() directly.
   3562 	 */
   3563 	IXGBE_CORE_LOCK(adapter);
   3564 	ixgbe_setup_low_power_mode(adapter);
   3565 	IXGBE_CORE_UNLOCK(adapter);
   3566 #if NVLAN > 0
   3567 	/* Make sure VLANs are not using driver */
   3568 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3569 		;	/* nothing to do: no VLANs */
   3570 	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
   3571 		vlan_ifdetach(adapter->ifp);
   3572 	else {
   3573 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3574 		return (EBUSY);
   3575 	}
   3576 #endif
   3577 
   3578 	pmf_device_deregister(dev);
   3579 
   3580 	ether_ifdetach(adapter->ifp);
   3581 
   3582 	ixgbe_free_softint(adapter);
   3583 
   3584 	/* let hardware know driver is unloading */
   3585 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3586 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3587 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3588 
   3589 	callout_halt(&adapter->timer, NULL);
   3590 	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
   3591 		callout_halt(&adapter->recovery_mode_timer, NULL);
   3592 
   3593 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3594 		netmap_detach(adapter->ifp);
   3595 
   3596 	ixgbe_free_pci_resources(adapter);
   3597 #if 0	/* XXX the NetBSD port is probably missing something here */
   3598 	bus_generic_detach(dev);
   3599 #endif
   3600 	if_detach(adapter->ifp);
   3601 	if_percpuq_destroy(adapter->ipq);
   3602 
   3603 	sysctl_teardown(&adapter->sysctllog);
   3604 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3605 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3606 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3607 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3608 	evcnt_detach(&adapter->other_tx_dma_setup);
   3609 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3610 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3611 	evcnt_detach(&adapter->watchdog_events);
   3612 	evcnt_detach(&adapter->tso_err);
   3613 	evcnt_detach(&adapter->link_irq);
   3614 	evcnt_detach(&adapter->link_sicount);
   3615 	evcnt_detach(&adapter->mod_sicount);
   3616 	evcnt_detach(&adapter->msf_sicount);
   3617 	evcnt_detach(&adapter->phy_sicount);
   3618 
   3619 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   3620 		if (i < __arraycount(stats->mpc)) {
   3621 			evcnt_detach(&stats->mpc[i]);
   3622 			if (hw->mac.type == ixgbe_mac_82598EB)
   3623 				evcnt_detach(&stats->rnbc[i]);
   3624 		}
   3625 		if (i < __arraycount(stats->pxontxc)) {
   3626 			evcnt_detach(&stats->pxontxc[i]);
   3627 			evcnt_detach(&stats->pxonrxc[i]);
   3628 			evcnt_detach(&stats->pxofftxc[i]);
   3629 			evcnt_detach(&stats->pxoffrxc[i]);
   3630 			if (hw->mac.type >= ixgbe_mac_82599EB)
   3631 				evcnt_detach(&stats->pxon2offc[i]);
   3632 		}
   3633 	}
   3634 
   3635 	txr = adapter->tx_rings;
   3636 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3637 		evcnt_detach(&adapter->queues[i].irqs);
   3638 		evcnt_detach(&adapter->queues[i].handleq);
   3639 		evcnt_detach(&adapter->queues[i].req);
   3640 		evcnt_detach(&txr->no_desc_avail);
   3641 		evcnt_detach(&txr->total_packets);
   3642 		evcnt_detach(&txr->tso_tx);
   3643 #ifndef IXGBE_LEGACY_TX
   3644 		evcnt_detach(&txr->pcq_drops);
   3645 #endif
   3646 
   3647 		if (i < __arraycount(stats->qprc)) {
   3648 			evcnt_detach(&stats->qprc[i]);
   3649 			evcnt_detach(&stats->qptc[i]);
   3650 			evcnt_detach(&stats->qbrc[i]);
   3651 			evcnt_detach(&stats->qbtc[i]);
   3652 			if (hw->mac.type >= ixgbe_mac_82599EB)
   3653 				evcnt_detach(&stats->qprdc[i]);
   3654 		}
   3655 
   3656 		evcnt_detach(&rxr->rx_packets);
   3657 		evcnt_detach(&rxr->rx_bytes);
   3658 		evcnt_detach(&rxr->rx_copies);
   3659 		evcnt_detach(&rxr->no_jmbuf);
   3660 		evcnt_detach(&rxr->rx_discarded);
   3661 	}
   3662 	evcnt_detach(&stats->ipcs);
   3663 	evcnt_detach(&stats->l4cs);
   3664 	evcnt_detach(&stats->ipcs_bad);
   3665 	evcnt_detach(&stats->l4cs_bad);
   3666 	evcnt_detach(&stats->intzero);
   3667 	evcnt_detach(&stats->legint);
   3668 	evcnt_detach(&stats->crcerrs);
   3669 	evcnt_detach(&stats->illerrc);
   3670 	evcnt_detach(&stats->errbc);
   3671 	evcnt_detach(&stats->mspdc);
   3672 	if (hw->mac.type >= ixgbe_mac_X550)
   3673 		evcnt_detach(&stats->mbsdc);
   3674 	evcnt_detach(&stats->mpctotal);
   3675 	evcnt_detach(&stats->mlfc);
   3676 	evcnt_detach(&stats->mrfc);
   3677 	evcnt_detach(&stats->rlec);
   3678 	evcnt_detach(&stats->lxontxc);
   3679 	evcnt_detach(&stats->lxonrxc);
   3680 	evcnt_detach(&stats->lxofftxc);
   3681 	evcnt_detach(&stats->lxoffrxc);
   3682 
   3683 	/* Packet Reception Stats */
   3684 	evcnt_detach(&stats->tor);
   3685 	evcnt_detach(&stats->gorc);
   3686 	evcnt_detach(&stats->tpr);
   3687 	evcnt_detach(&stats->gprc);
   3688 	evcnt_detach(&stats->mprc);
   3689 	evcnt_detach(&stats->bprc);
   3690 	evcnt_detach(&stats->prc64);
   3691 	evcnt_detach(&stats->prc127);
   3692 	evcnt_detach(&stats->prc255);
   3693 	evcnt_detach(&stats->prc511);
   3694 	evcnt_detach(&stats->prc1023);
   3695 	evcnt_detach(&stats->prc1522);
   3696 	evcnt_detach(&stats->ruc);
   3697 	evcnt_detach(&stats->rfc);
   3698 	evcnt_detach(&stats->roc);
   3699 	evcnt_detach(&stats->rjc);
   3700 	evcnt_detach(&stats->mngprc);
   3701 	evcnt_detach(&stats->mngpdc);
   3702 	evcnt_detach(&stats->xec);
   3703 
   3704 	/* Packet Transmission Stats */
   3705 	evcnt_detach(&stats->gotc);
   3706 	evcnt_detach(&stats->tpt);
   3707 	evcnt_detach(&stats->gptc);
   3708 	evcnt_detach(&stats->bptc);
   3709 	evcnt_detach(&stats->mptc);
   3710 	evcnt_detach(&stats->mngptc);
   3711 	evcnt_detach(&stats->ptc64);
   3712 	evcnt_detach(&stats->ptc127);
   3713 	evcnt_detach(&stats->ptc255);
   3714 	evcnt_detach(&stats->ptc511);
   3715 	evcnt_detach(&stats->ptc1023);
   3716 	evcnt_detach(&stats->ptc1522);
   3717 
   3718 	ixgbe_free_transmit_structures(adapter);
   3719 	ixgbe_free_receive_structures(adapter);
   3720 	for (i = 0; i < adapter->num_queues; i++) {
   3721 		struct ix_queue * que = &adapter->queues[i];
   3722 		mutex_destroy(&que->dc_mtx);
   3723 	}
   3724 	free(adapter->queues, M_DEVBUF);
   3725 	free(adapter->mta, M_DEVBUF);
   3726 
   3727 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3728 
   3729 	return (0);
   3730 } /* ixgbe_detach */
   3731 
   3732 /************************************************************************
   3733  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3734  *
   3735  *   Prepare the adapter/port for LPLU and/or WoL
   3736  ************************************************************************/
   3737 static int
   3738 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3739 {
   3740 	struct ixgbe_hw *hw = &adapter->hw;
   3741 	device_t	dev = adapter->dev;
   3742 	s32		error = 0;
   3743 
   3744 	KASSERT(mutex_owned(&adapter->core_mtx));
   3745 
   3746 	/* Limit power management flow to X550EM baseT */
   3747 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3748 	    hw->phy.ops.enter_lplu) {
   3749 		/* X550EM baseT adapters need a special LPLU flow */
   3750 		hw->phy.reset_disable = true;
   3751 		ixgbe_stop(adapter);
   3752 		error = hw->phy.ops.enter_lplu(hw);
   3753 		if (error)
   3754 			device_printf(dev,
   3755 			    "Error entering LPLU: %d\n", error);
   3756 		hw->phy.reset_disable = false;
   3757 	} else {
   3758 		/* Just stop for other adapters */
   3759 		ixgbe_stop(adapter);
   3760 	}
   3761 
   3762 	if (!hw->wol_enabled) {
   3763 		ixgbe_set_phy_power(hw, FALSE);
   3764 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3765 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3766 	} else {
   3767 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3768 		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
   3769 		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
   3770 
   3771 		/*
   3772 		 * Clear Wake Up Status register to prevent any previous wakeup
   3773 		 * events from waking us up immediately after we suspend.
   3774 		 */
   3775 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3776 
   3777 		/*
   3778 		 * Program the Wakeup Filter Control register with user filter
   3779 		 * settings
   3780 		 */
   3781 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3782 
   3783 		/* Enable wakeups and power management in Wakeup Control */
   3784 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3785 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3786 
   3787 	}
   3788 
   3789 	return error;
   3790 } /* ixgbe_setup_low_power_mode */
   3791 
   3792 /************************************************************************
   3793  * ixgbe_shutdown - Shutdown entry point
   3794  ************************************************************************/
   3795 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3796 static int
   3797 ixgbe_shutdown(device_t dev)
   3798 {
   3799 	struct adapter *adapter = device_private(dev);
   3800 	int error = 0;
   3801 
   3802 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3803 
   3804 	IXGBE_CORE_LOCK(adapter);
   3805 	error = ixgbe_setup_low_power_mode(adapter);
   3806 	IXGBE_CORE_UNLOCK(adapter);
   3807 
   3808 	return (error);
   3809 } /* ixgbe_shutdown */
   3810 #endif
   3811 
   3812 /************************************************************************
   3813  * ixgbe_suspend
   3814  *
   3815  *   From D0 to D3
   3816  ************************************************************************/
   3817 static bool
   3818 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3819 {
   3820 	struct adapter *adapter = device_private(dev);
   3821 	int	       error = 0;
   3822 
   3823 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3824 
   3825 	IXGBE_CORE_LOCK(adapter);
   3826 
   3827 	error = ixgbe_setup_low_power_mode(adapter);
   3828 
   3829 	IXGBE_CORE_UNLOCK(adapter);
   3830 
   3831 	return (error);
   3832 } /* ixgbe_suspend */
   3833 
   3834 /************************************************************************
   3835  * ixgbe_resume
   3836  *
   3837  *   From D3 to D0
   3838  ************************************************************************/
   3839 static bool
   3840 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3841 {
   3842 	struct adapter	*adapter = device_private(dev);
   3843 	struct ifnet	*ifp = adapter->ifp;
   3844 	struct ixgbe_hw *hw = &adapter->hw;
   3845 	u32		wus;
   3846 
   3847 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3848 
   3849 	IXGBE_CORE_LOCK(adapter);
   3850 
   3851 	/* Read & clear WUS register */
   3852 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3853 	if (wus)
   3854 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3855 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3856 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3857 	/* And clear WUFC until next low-power transition */
   3858 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3859 
   3860 	/*
   3861 	 * Required after D3->D0 transition;
   3862 	 * will re-advertise all previous advertised speeds
   3863 	 */
   3864 	if (ifp->if_flags & IFF_UP)
   3865 		ixgbe_init_locked(adapter);
   3866 
   3867 	IXGBE_CORE_UNLOCK(adapter);
   3868 
   3869 	return true;
   3870 } /* ixgbe_resume */
   3871 
   3872 /*
   3873  * Set the various hardware offload abilities.
   3874  *
   3875  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3876  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3877  * mbuf offload flags the driver will understand.
   3878  */
   3879 static void
   3880 ixgbe_set_if_hwassist(struct adapter *adapter)
   3881 {
   3882 	/* XXX */
   3883 }
   3884 
   3885 /************************************************************************
   3886  * ixgbe_init_locked - Init entry point
   3887  *
   3888  *   Used in two ways: It is used by the stack as an init
   3889  *   entry point in network interface structure. It is also
   3890  *   used by the driver as a hw/sw initialization routine to
   3891  *   get to a consistent state.
   3892  *
   3893  *   return 0 on success, positive on failure
   3894  ************************************************************************/
   3895 static void
   3896 ixgbe_init_locked(struct adapter *adapter)
   3897 {
   3898 	struct ifnet   *ifp = adapter->ifp;
   3899 	device_t	dev = adapter->dev;
   3900 	struct ixgbe_hw *hw = &adapter->hw;
   3901 	struct ix_queue *que;
   3902 	struct tx_ring	*txr;
   3903 	struct rx_ring	*rxr;
   3904 	u32		txdctl, mhadd;
   3905 	u32		rxdctl, rxctrl;
   3906 	u32		ctrl_ext;
   3907 	int		i, j, err;
   3908 
   3909 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3910 
   3911 	KASSERT(mutex_owned(&adapter->core_mtx));
   3912 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3913 
   3914 	hw->adapter_stopped = FALSE;
   3915 	ixgbe_stop_adapter(hw);
   3916 	callout_stop(&adapter->timer);
   3917 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
   3918 		que->disabled_count = 0;
   3919 
   3920 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3921 	adapter->max_frame_size =
   3922 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3923 
   3924 	/* Queue indices may change with IOV mode */
   3925 	ixgbe_align_all_queue_indices(adapter);
   3926 
   3927 	/* reprogram the RAR[0] in case user changed it. */
   3928 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3929 
   3930 	/* Get the latest mac address, User can use a LAA */
   3931 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3932 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3933 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3934 	hw->addr_ctrl.rar_used_count = 1;
   3935 
   3936 	/* Set hardware offload abilities from ifnet flags */
   3937 	ixgbe_set_if_hwassist(adapter);
   3938 
   3939 	/* Prepare transmit descriptors and buffers */
   3940 	if (ixgbe_setup_transmit_structures(adapter)) {
   3941 		device_printf(dev, "Could not setup transmit structures\n");
   3942 		ixgbe_stop(adapter);
   3943 		return;
   3944 	}
   3945 
   3946 	ixgbe_init_hw(hw);
   3947 
   3948 	ixgbe_initialize_iov(adapter);
   3949 
   3950 	ixgbe_initialize_transmit_units(adapter);
   3951 
   3952 	/* Setup Multicast table */
   3953 	ixgbe_set_multi(adapter);
   3954 
   3955 	/* Determine the correct mbuf pool, based on frame size */
   3956 	if (adapter->max_frame_size <= MCLBYTES)
   3957 		adapter->rx_mbuf_sz = MCLBYTES;
   3958 	else
   3959 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3960 
   3961 	/* Prepare receive descriptors and buffers */
   3962 	if (ixgbe_setup_receive_structures(adapter)) {
   3963 		device_printf(dev, "Could not setup receive structures\n");
   3964 		ixgbe_stop(adapter);
   3965 		return;
   3966 	}
   3967 
   3968 	/* Configure RX settings */
   3969 	ixgbe_initialize_receive_units(adapter);
   3970 
   3971 	/* Enable SDP & MSI-X interrupts based on adapter */
   3972 	ixgbe_config_gpie(adapter);
   3973 
   3974 	/* Set MTU size */
   3975 	if (ifp->if_mtu > ETHERMTU) {
   3976 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3977 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3978 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3979 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3980 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3981 	}
   3982 
   3983 	/* Now enable all the queues */
   3984 	for (i = 0; i < adapter->num_queues; i++) {
   3985 		txr = &adapter->tx_rings[i];
   3986 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3987 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3988 		/* Set WTHRESH to 8, burst writeback */
   3989 		txdctl |= (8 << 16);
   3990 		/*
   3991 		 * When the internal queue falls below PTHRESH (32),
   3992 		 * start prefetching as long as there are at least
   3993 		 * HTHRESH (1) buffers ready. The values are taken
   3994 		 * from the Intel linux driver 3.8.21.
   3995 		 * Prefetching enables tx line rate even with 1 queue.
   3996 		 */
   3997 		txdctl |= (32 << 0) | (1 << 8);
   3998 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3999 	}
   4000 
   4001 	for (i = 0; i < adapter->num_queues; i++) {
   4002 		rxr = &adapter->rx_rings[i];
   4003 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   4004 		if (hw->mac.type == ixgbe_mac_82598EB) {
   4005 			/*
   4006 			 * PTHRESH = 21
   4007 			 * HTHRESH = 4
   4008 			 * WTHRESH = 8
   4009 			 */
   4010 			rxdctl &= ~0x3FFFFF;
   4011 			rxdctl |= 0x080420;
   4012 		}
   4013 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   4014 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   4015 		for (j = 0; j < 10; j++) {
   4016 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   4017 			    IXGBE_RXDCTL_ENABLE)
   4018 				break;
   4019 			else
   4020 				msec_delay(1);
   4021 		}
   4022 		wmb();
   4023 
   4024 		/*
   4025 		 * In netmap mode, we must preserve the buffers made
   4026 		 * available to userspace before the if_init()
   4027 		 * (this is true by default on the TX side, because
   4028 		 * init makes all buffers available to userspace).
   4029 		 *
   4030 		 * netmap_reset() and the device specific routines
   4031 		 * (e.g. ixgbe_setup_receive_rings()) map these
   4032 		 * buffers at the end of the NIC ring, so here we
   4033 		 * must set the RDT (tail) register to make sure
   4034 		 * they are not overwritten.
   4035 		 *
   4036 		 * In this driver the NIC ring starts at RDH = 0,
   4037 		 * RDT points to the last slot available for reception (?),
   4038 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   4039 		 */
   4040 #ifdef DEV_NETMAP
   4041 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   4042 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   4043 			struct netmap_adapter *na = NA(adapter->ifp);
   4044 			struct netmap_kring *kring = na->rx_rings[i];
   4045 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   4046 
   4047 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   4048 		} else
   4049 #endif /* DEV_NETMAP */
   4050 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   4051 			    adapter->num_rx_desc - 1);
   4052 	}
   4053 
   4054 	/* Enable Receive engine */
   4055 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4056 	if (hw->mac.type == ixgbe_mac_82598EB)
   4057 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   4058 	rxctrl |= IXGBE_RXCTRL_RXEN;
   4059 	ixgbe_enable_rx_dma(hw, rxctrl);
   4060 
   4061 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4062 
   4063 	/* Set up MSI/MSI-X routing */
   4064 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4065 		ixgbe_configure_ivars(adapter);
   4066 		/* Set up auto-mask */
   4067 		if (hw->mac.type == ixgbe_mac_82598EB)
   4068 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4069 		else {
   4070 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   4071 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   4072 		}
   4073 	} else {  /* Simple settings for Legacy/MSI */
   4074 		ixgbe_set_ivar(adapter, 0, 0, 0);
   4075 		ixgbe_set_ivar(adapter, 0, 0, 1);
   4076 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4077 	}
   4078 
   4079 	ixgbe_init_fdir(adapter);
   4080 
   4081 	/*
   4082 	 * Check on any SFP devices that
   4083 	 * need to be kick-started
   4084 	 */
   4085 	if (hw->phy.type == ixgbe_phy_none) {
   4086 		err = hw->phy.ops.identify(hw);
   4087 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4088 			device_printf(dev,
   4089 			    "Unsupported SFP+ module type was detected.\n");
   4090 			return;
   4091 		}
   4092 	}
   4093 
   4094 	/* Set moderation on the Link interrupt */
   4095 	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
   4096 
   4097 	/* Enable EEE power saving */
   4098 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   4099 		hw->mac.ops.setup_eee(hw,
   4100 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   4101 
   4102 	/* Enable power to the phy. */
   4103 	ixgbe_set_phy_power(hw, TRUE);
   4104 
   4105 	/* Config/Enable Link */
   4106 	ixgbe_config_link(adapter);
   4107 
   4108 	/* Hardware Packet Buffer & Flow Control setup */
   4109 	ixgbe_config_delay_values(adapter);
   4110 
   4111 	/* Initialize the FC settings */
   4112 	ixgbe_start_hw(hw);
   4113 
   4114 	/* Set up VLAN support and filter */
   4115 	ixgbe_setup_vlan_hw_support(adapter);
   4116 
   4117 	/* Setup DMA Coalescing */
   4118 	ixgbe_config_dmac(adapter);
   4119 
   4120 	/* And now turn on interrupts */
   4121 	ixgbe_enable_intr(adapter);
   4122 
   4123 	/* Enable the use of the MBX by the VF's */
   4124 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   4125 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   4126 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   4127 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   4128 	}
   4129 
   4130 	/* Update saved flags. See ixgbe_ifflags_cb() */
   4131 	adapter->if_flags = ifp->if_flags;
   4132 	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
   4133 
   4134 	/* Now inform the stack we're ready */
   4135 	ifp->if_flags |= IFF_RUNNING;
   4136 
   4137 	return;
   4138 } /* ixgbe_init_locked */
   4139 
   4140 /************************************************************************
   4141  * ixgbe_init
   4142  ************************************************************************/
   4143 static int
   4144 ixgbe_init(struct ifnet *ifp)
   4145 {
   4146 	struct adapter *adapter = ifp->if_softc;
   4147 
   4148 	IXGBE_CORE_LOCK(adapter);
   4149 	ixgbe_init_locked(adapter);
   4150 	IXGBE_CORE_UNLOCK(adapter);
   4151 
   4152 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   4153 } /* ixgbe_init */
   4154 
   4155 /************************************************************************
   4156  * ixgbe_set_ivar
   4157  *
   4158  *   Setup the correct IVAR register for a particular MSI-X interrupt
   4159  *     (yes this is all very magic and confusing :)
   4160  *    - entry is the register array entry
   4161  *    - vector is the MSI-X vector for this queue
   4162  *    - type is RX/TX/MISC
   4163  ************************************************************************/
   4164 static void
   4165 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4166 {
   4167 	struct ixgbe_hw *hw = &adapter->hw;
   4168 	u32 ivar, index;
   4169 
   4170 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4171 
   4172 	switch (hw->mac.type) {
   4173 	case ixgbe_mac_82598EB:
   4174 		if (type == -1)
   4175 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4176 		else
   4177 			entry += (type * 64);
   4178 		index = (entry >> 2) & 0x1F;
   4179 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4180 		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
   4181 		ivar |= ((u32)vector << (8 * (entry & 0x3)));
   4182 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4183 		break;
   4184 	case ixgbe_mac_82599EB:
   4185 	case ixgbe_mac_X540:
   4186 	case ixgbe_mac_X550:
   4187 	case ixgbe_mac_X550EM_x:
   4188 	case ixgbe_mac_X550EM_a:
   4189 		if (type == -1) { /* MISC IVAR */
   4190 			index = (entry & 1) * 8;
   4191 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4192 			ivar &= ~(0xffUL << index);
   4193 			ivar |= ((u32)vector << index);
   4194 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4195 		} else {	/* RX/TX IVARS */
   4196 			index = (16 * (entry & 1)) + (8 * type);
   4197 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4198 			ivar &= ~(0xffUL << index);
   4199 			ivar |= ((u32)vector << index);
   4200 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   4201 		}
   4202 		break;
   4203 	default:
   4204 		break;
   4205 	}
   4206 } /* ixgbe_set_ivar */
   4207 
   4208 /************************************************************************
   4209  * ixgbe_configure_ivars
   4210  ************************************************************************/
   4211 static void
   4212 ixgbe_configure_ivars(struct adapter *adapter)
   4213 {
   4214 	struct ix_queue *que = adapter->queues;
   4215 	u32		newitr;
   4216 
   4217 	if (ixgbe_max_interrupt_rate > 0)
   4218 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4219 	else {
   4220 		/*
   4221 		 * Disable DMA coalescing if interrupt moderation is
   4222 		 * disabled.
   4223 		 */
   4224 		adapter->dmac = 0;
   4225 		newitr = 0;
   4226 	}
   4227 
   4228 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4229 		struct rx_ring *rxr = &adapter->rx_rings[i];
   4230 		struct tx_ring *txr = &adapter->tx_rings[i];
   4231 		/* First the RX queue entry */
   4232 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   4233 		/* ... and the TX */
   4234 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   4235 		/* Set an Initial EITR value */
   4236 		ixgbe_eitr_write(adapter, que->msix, newitr);
   4237 		/*
   4238 		 * To eliminate influence of the previous state.
   4239 		 * At this point, Tx/Rx interrupt handler
   4240 		 * (ixgbe_msix_que()) cannot be called, so  both
   4241 		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
   4242 		 */
   4243 		que->eitr_setting = 0;
   4244 	}
   4245 
   4246 	/* For the Link interrupt */
   4247 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   4248 } /* ixgbe_configure_ivars */
   4249 
   4250 /************************************************************************
   4251  * ixgbe_config_gpie
   4252  ************************************************************************/
   4253 static void
   4254 ixgbe_config_gpie(struct adapter *adapter)
   4255 {
   4256 	struct ixgbe_hw *hw = &adapter->hw;
   4257 	u32		gpie;
   4258 
   4259 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   4260 
   4261 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4262 		/* Enable Enhanced MSI-X mode */
   4263 		gpie |= IXGBE_GPIE_MSIX_MODE
   4264 		     |	IXGBE_GPIE_EIAME
   4265 		     |	IXGBE_GPIE_PBA_SUPPORT
   4266 		     |	IXGBE_GPIE_OCD;
   4267 	}
   4268 
   4269 	/* Fan Failure Interrupt */
   4270 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4271 		gpie |= IXGBE_SDP1_GPIEN;
   4272 
   4273 	/* Thermal Sensor Interrupt */
   4274 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   4275 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4276 
   4277 	/* Link detection */
   4278 	switch (hw->mac.type) {
   4279 	case ixgbe_mac_82599EB:
   4280 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   4281 		break;
   4282 	case ixgbe_mac_X550EM_x:
   4283 	case ixgbe_mac_X550EM_a:
   4284 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4285 		break;
   4286 	default:
   4287 		break;
   4288 	}
   4289 
   4290 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   4291 
   4292 } /* ixgbe_config_gpie */
   4293 
   4294 /************************************************************************
   4295  * ixgbe_config_delay_values
   4296  *
   4297  *   Requires adapter->max_frame_size to be set.
   4298  ************************************************************************/
   4299 static void
   4300 ixgbe_config_delay_values(struct adapter *adapter)
   4301 {
   4302 	struct ixgbe_hw *hw = &adapter->hw;
   4303 	u32		rxpb, frame, size, tmp;
   4304 
   4305 	frame = adapter->max_frame_size;
   4306 
   4307 	/* Calculate High Water */
   4308 	switch (hw->mac.type) {
   4309 	case ixgbe_mac_X540:
   4310 	case ixgbe_mac_X550:
   4311 	case ixgbe_mac_X550EM_x:
   4312 	case ixgbe_mac_X550EM_a:
   4313 		tmp = IXGBE_DV_X540(frame, frame);
   4314 		break;
   4315 	default:
   4316 		tmp = IXGBE_DV(frame, frame);
   4317 		break;
   4318 	}
   4319 	size = IXGBE_BT2KB(tmp);
   4320 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4321 	hw->fc.high_water[0] = rxpb - size;
   4322 
   4323 	/* Now calculate Low Water */
   4324 	switch (hw->mac.type) {
   4325 	case ixgbe_mac_X540:
   4326 	case ixgbe_mac_X550:
   4327 	case ixgbe_mac_X550EM_x:
   4328 	case ixgbe_mac_X550EM_a:
   4329 		tmp = IXGBE_LOW_DV_X540(frame);
   4330 		break;
   4331 	default:
   4332 		tmp = IXGBE_LOW_DV(frame);
   4333 		break;
   4334 	}
   4335 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4336 
   4337 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4338 	hw->fc.send_xon = TRUE;
   4339 } /* ixgbe_config_delay_values */
   4340 
   4341 /************************************************************************
   4342  * ixgbe_set_multi - Multicast Update
   4343  *
   4344  *   Called whenever multicast address list is updated.
   4345  ************************************************************************/
   4346 static void
   4347 ixgbe_set_multi(struct adapter *adapter)
   4348 {
   4349 	struct ixgbe_mc_addr	*mta;
   4350 	struct ifnet		*ifp = adapter->ifp;
   4351 	u8			*update_ptr;
   4352 	int			mcnt = 0;
   4353 	u32			fctrl;
   4354 	struct ethercom		*ec = &adapter->osdep.ec;
   4355 	struct ether_multi	*enm;
   4356 	struct ether_multistep	step;
   4357 
   4358 	KASSERT(mutex_owned(&adapter->core_mtx));
   4359 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4360 
   4361 	mta = adapter->mta;
   4362 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4363 
   4364 	ETHER_LOCK(ec);
   4365 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4366 	ETHER_FIRST_MULTI(step, ec, enm);
   4367 	while (enm != NULL) {
   4368 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4369 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4370 			ETHER_ADDR_LEN) != 0)) {
   4371 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4372 			break;
   4373 		}
   4374 		bcopy(enm->enm_addrlo,
   4375 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4376 		mta[mcnt].vmdq = adapter->pool;
   4377 		mcnt++;
   4378 		ETHER_NEXT_MULTI(step, enm);
   4379 	}
   4380 
   4381 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4382 	if (ifp->if_flags & IFF_PROMISC)
   4383 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4384 	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
   4385 		fctrl |= IXGBE_FCTRL_MPE;
   4386 		fctrl &= ~IXGBE_FCTRL_UPE;
   4387 	} else
   4388 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4389 
   4390 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4391 
   4392 	/* Update multicast filter entries only when it's not ALLMULTI */
   4393 	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
   4394 		ETHER_UNLOCK(ec);
   4395 		update_ptr = (u8 *)mta;
   4396 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4397 		    ixgbe_mc_array_itr, TRUE);
   4398 	} else
   4399 		ETHER_UNLOCK(ec);
   4400 } /* ixgbe_set_multi */
   4401 
   4402 /************************************************************************
   4403  * ixgbe_mc_array_itr
   4404  *
   4405  *   An iterator function needed by the multicast shared code.
   4406  *   It feeds the shared code routine the addresses in the
   4407  *   array of ixgbe_set_multi() one by one.
   4408  ************************************************************************/
   4409 static u8 *
   4410 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4411 {
   4412 	struct ixgbe_mc_addr *mta;
   4413 
   4414 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4415 	*vmdq = mta->vmdq;
   4416 
   4417 	*update_ptr = (u8*)(mta + 1);
   4418 
   4419 	return (mta->addr);
   4420 } /* ixgbe_mc_array_itr */
   4421 
   4422 /************************************************************************
   4423  * ixgbe_local_timer - Timer routine
   4424  *
   4425  *   Checks for link status, updates statistics,
   4426  *   and runs the watchdog check.
   4427  ************************************************************************/
/* Callout wrapper: acquire the core lock and run the timer body. */
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_local_timer1(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
   4437 
/*
 * Periodic (1 Hz) housekeeping: SFP probe, link/statistics update,
 * per-queue event-counter aggregation and TX hang detection.
 * Must be called with the core lock held (asserted below).
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues with pending work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;	/* number of queues declared hung */
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters: sum the per-ring counts into totals */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm for the next tick */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
   4542 
   4543 /************************************************************************
   4544  * ixgbe_recovery_mode_timer - Recovery mode timer routine
   4545  ************************************************************************/
   4546 static void
   4547 ixgbe_recovery_mode_timer(void *arg)
   4548 {
   4549 	struct adapter *adapter = arg;
   4550 	struct ixgbe_hw *hw = &adapter->hw;
   4551 
   4552 	IXGBE_CORE_LOCK(adapter);
   4553 	if (ixgbe_fw_recovery_mode(hw)) {
   4554 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
   4555 			/* Firmware error detected, entering recovery mode */
   4556 			device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
   4557 
   4558 			if (hw->adapter_stopped == FALSE)
   4559 				ixgbe_stop(adapter);
   4560 		}
   4561 	} else
   4562 		atomic_cas_uint(&adapter->recovery_mode, 1, 0);
   4563 
   4564 	callout_reset(&adapter->recovery_mode_timer, hz,
   4565 	    ixgbe_recovery_mode_timer, adapter);
   4566 	IXGBE_CORE_UNLOCK(adapter);
   4567 } /* ixgbe_recovery_mode_timer */
   4568 
   4569 /************************************************************************
   4570  * ixgbe_sfp_probe
   4571  *
   4572  *   Determine if a port had optics inserted.
   4573  ************************************************************************/
   4574 static bool
   4575 ixgbe_sfp_probe(struct adapter *adapter)
   4576 {
   4577 	struct ixgbe_hw	*hw = &adapter->hw;
   4578 	device_t	dev = adapter->dev;
   4579 	bool		result = FALSE;
   4580 
   4581 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4582 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4583 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4584 		if (ret)
   4585 			goto out;
   4586 		ret = hw->phy.ops.reset(hw);
   4587 		adapter->sfp_probe = FALSE;
   4588 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4589 			device_printf(dev,"Unsupported SFP+ module detected!");
   4590 			device_printf(dev,
   4591 			    "Reload driver with supported module.\n");
   4592 			goto out;
   4593 		} else
   4594 			device_printf(dev, "SFP+ module detected!\n");
   4595 		/* We now have supported optics */
   4596 		result = TRUE;
   4597 	}
   4598 out:
   4599 
   4600 	return (result);
   4601 } /* ixgbe_sfp_probe */
   4602 
   4603 /************************************************************************
   4604  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4605  ************************************************************************/
   4606 static void
   4607 ixgbe_handle_mod(void *context)
   4608 {
   4609 	struct adapter	*adapter = context;
   4610 	struct ixgbe_hw *hw = &adapter->hw;
   4611 	device_t	dev = adapter->dev;
   4612 	u32		err, cage_full = 0;
   4613 
   4614 	++adapter->mod_sicount.ev_count;
   4615 	if (adapter->hw.need_crosstalk_fix) {
   4616 		switch (hw->mac.type) {
   4617 		case ixgbe_mac_82599EB:
   4618 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4619 			    IXGBE_ESDP_SDP2;
   4620 			break;
   4621 		case ixgbe_mac_X550EM_x:
   4622 		case ixgbe_mac_X550EM_a:
   4623 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4624 			    IXGBE_ESDP_SDP0;
   4625 			break;
   4626 		default:
   4627 			break;
   4628 		}
   4629 
   4630 		if (!cage_full)
   4631 			return;
   4632 	}
   4633 
   4634 	err = hw->phy.ops.identify_sfp(hw);
   4635 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4636 		device_printf(dev,
   4637 		    "Unsupported SFP+ module type was detected.\n");
   4638 		return;
   4639 	}
   4640 
   4641 	if (hw->mac.type == ixgbe_mac_82598EB)
   4642 		err = hw->phy.ops.reset(hw);
   4643 	else
   4644 		err = hw->mac.ops.setup_sfp(hw);
   4645 
   4646 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4647 		device_printf(dev,
   4648 		    "Setup failure - unsupported SFP+ module type.\n");
   4649 		return;
   4650 	}
   4651 	softint_schedule(adapter->msf_si);
   4652 } /* ixgbe_handle_mod */
   4653 
   4654 
   4655 /************************************************************************
   4656  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4657  ************************************************************************/
   4658 static void
   4659 ixgbe_handle_msf(void *context)
   4660 {
   4661 	struct adapter	*adapter = context;
   4662 	struct ixgbe_hw *hw = &adapter->hw;
   4663 	u32		autoneg;
   4664 	bool		negotiate;
   4665 
   4666 	IXGBE_CORE_LOCK(adapter);
   4667 	++adapter->msf_sicount.ev_count;
   4668 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4669 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4670 
   4671 	autoneg = hw->phy.autoneg_advertised;
   4672 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4673 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4674 	else
   4675 		negotiate = 0;
   4676 	if (hw->mac.ops.setup_link)
   4677 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4678 
   4679 	/* Adjust media types shown in ifconfig */
   4680 	ifmedia_removeall(&adapter->media);
   4681 	ixgbe_add_media_types(adapter);
   4682 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4683 	IXGBE_CORE_UNLOCK(adapter);
   4684 } /* ixgbe_handle_msf */
   4685 
   4686 /************************************************************************
   4687  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4688  ************************************************************************/
   4689 static void
   4690 ixgbe_handle_phy(void *context)
   4691 {
   4692 	struct adapter	*adapter = context;
   4693 	struct ixgbe_hw *hw = &adapter->hw;
   4694 	int error;
   4695 
   4696 	++adapter->phy_sicount.ev_count;
   4697 	error = hw->phy.ops.handle_lasi(hw);
   4698 	if (error == IXGBE_ERR_OVERTEMP)
   4699 		device_printf(adapter->dev,
   4700 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4701 		    " PHY will downshift to lower power state!\n");
   4702 	else if (error)
   4703 		device_printf(adapter->dev,
   4704 		    "Error handling LASI interrupt: %d\n", error);
   4705 } /* ixgbe_handle_phy */
   4706 
/*
 * if_stop entry point: stop the interface under the core lock.
 * The 'disable' argument is ignored here; ixgbe_stop() always
 * performs a full stop.
 */
static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
   4716 
   4717 /************************************************************************
   4718  * ixgbe_stop - Stop the hardware
   4719  *
   4720  *   Disables all traffic on the adapter by issuing a
   4721  *   global reset on the MAC and deallocates TX/RX buffers.
   4722  ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the periodic timer before touching HW */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so that ixgbe_stop_adapter() below
	 * performs the full stop sequence even right after the reset
	 * above — presumably intentional; TODO confirm against shared code.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
   4758 
   4759 /************************************************************************
   4760  * ixgbe_update_link_status - Update OS on link state
   4761  *
   4762  * Note: Only updates the OS on the cached link state.
   4763  *	 The real check of the hardware only happens with
   4764  *	 a link interrupt.
   4765  ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Act only on the DOWN/UNKNOWN -> UP transition */
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 *  Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			/* Report the negotiated speed when booted verbose */
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any queued work now that the link is gone */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
   4852 
   4853 /************************************************************************
   4854  * ixgbe_config_dmac - Configure DMA Coalescing
   4855  ************************************************************************/
   4856 static void
   4857 ixgbe_config_dmac(struct adapter *adapter)
   4858 {
   4859 	struct ixgbe_hw *hw = &adapter->hw;
   4860 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4861 
   4862 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4863 		return;
   4864 
   4865 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4866 	    dcfg->link_speed ^ adapter->link_speed) {
   4867 		dcfg->watchdog_timer = adapter->dmac;
   4868 		dcfg->fcoe_en = false;
   4869 		dcfg->link_speed = adapter->link_speed;
   4870 		dcfg->num_tcs = 1;
   4871 
   4872 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4873 		    dcfg->watchdog_timer, dcfg->link_speed);
   4874 
   4875 		hw->mac.ops.dmac_config(hw);
   4876 	}
   4877 } /* ixgbe_config_dmac */
   4878 
/************************************************************************
 * ixgbe_enable_intr
 *
 *   Enables all interrupt causes: builds the "other cause" mask for
 *   the current MAC type, adds feature-dependent causes, writes EIMS,
 *   configures MSI-X auto-clear (EIAC), then enables each queue vector
 *   individually.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all enable bits except the per-queue RX/TX causes */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add MAC-type-specific causes (thermal sensor, ECC, SFP GPIs) */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
   4962 
/************************************************************************
 * ixgbe_disable_intr_internal
 *
 *   Masks all interrupt causes: non-queue causes via EIMC, MSI-X
 *   auto-clear via EIAC, then each queue vector.  "nestok" is passed
 *   through to ixgbe_disable_queue_internal() to control whether a
 *   nested (already-disabled) queue disable is acceptable.
 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue	*que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Turn off MSI-X auto-clear so nothing re-arms behind our back */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
   4983 
/************************************************************************
 * ixgbe_disable_intr
 *
 *   Disables all interrupts; nested disabling of an already-disabled
 *   queue is permitted (nestok = true).
 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
   4993 
/************************************************************************
 * ixgbe_ensure_disabled_intr
 *
 *   Same as ixgbe_disable_intr() but with nestok = false; used on
 *   paths that require interrupts to end up disabled regardless of
 *   their current state.
 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
   5003 
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   Single shared vector: handles queue RX/TX work plus the "other
 *   cause" events (fan failure, link state change, SFP module insert/
 *   remove, multi-speed fiber, external PHY).  Returns 0 when the
 *   interrupt was not ours (EICR read back 0), 1 otherwise.
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct		tx_ring *txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Shared interrupt that wasn't for this device */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack the cause, defer to softint */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multi-speed fiber: ack the cause, defer to softint */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Deferred queue work pending: reschedule instead of re-enabling */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
   5097 
/************************************************************************
 * ixgbe_free_pciintr_resources
 *
 *   Disestablishes all established interrupt handlers (per-queue MSI-X
 *   vectors first, then the link/legacy vector) and releases the
 *   allocated PCI interrupt resources.
 ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int		rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Finally hand the interrupt resources back to the bus layer */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
   5136 
/************************************************************************
 * ixgbe_free_pci_resources
 *
 *   Releases interrupt resources and unmaps the device register space.
 ************************************************************************/
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{

	ixgbe_free_pciintr_resources(adapter);

	/* mem_size != 0 means the register BAR was successfully mapped */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

} /* ixgbe_free_pci_resources */
   5153 
/************************************************************************
 * ixgbe_set_sysctl_value
 *
 *   Creates a read-write integer sysctl node under this device's
 *   sysctl root, backed directly by *limit, and initializes *limit
 *   to "value" (even if node creation fails).
 ************************************************************************/
static void
ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	device_t dev =	adapter->dev;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */

	log = &adapter->sysctllog;
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    name, SYSCTL_DESCR(description),
		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
	*limit = value;
} /* ixgbe_set_sysctl_value */
   5182 
/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 *   SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error, fc;

	/* Writing goes on to touch hardware, so honor FW recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Export the current mode; sysctl_lookup() copies in a new value */
	fc = adapter->hw.fc.current_mode;
	node.sysctl_data = &fc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	/* Don't bother if it's not changed */
	if (fc == adapter->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(adapter, fc);
} /* ixgbe_sysctl_flowcntl */
   5210 
   5211 /************************************************************************
   5212  * ixgbe_set_flowcntl - Set flow control
   5213  *
   5214  *   Flow control values:
   5215  *     0 - off
   5216  *     1 - rx pause
   5217  *     2 - tx pause
   5218  *     3 - full
   5219  ************************************************************************/
   5220 static int
   5221 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   5222 {
   5223 	switch (fc) {
   5224 		case ixgbe_fc_rx_pause:
   5225 		case ixgbe_fc_tx_pause:
   5226 		case ixgbe_fc_full:
   5227 			adapter->hw.fc.requested_mode = fc;
   5228 			if (adapter->num_queues > 1)
   5229 				ixgbe_disable_rx_drop(adapter);
   5230 			break;
   5231 		case ixgbe_fc_none:
   5232 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5233 			if (adapter->num_queues > 1)
   5234 				ixgbe_enable_rx_drop(adapter);
   5235 			break;
   5236 		default:
   5237 			return (EINVAL);
   5238 	}
   5239 
   5240 #if 0 /* XXX NetBSD */
   5241 	/* Don't autoneg if forcing a value */
   5242 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   5243 #endif
   5244 	ixgbe_fc_enable(&adapter->hw);
   5245 
   5246 	return (0);
   5247 } /* ixgbe_set_flowcntl */
   5248 
/************************************************************************
 * ixgbe_enable_rx_drop
 *
 *   Enable the hardware to drop packets when the buffer is
 *   full. This is useful with multiqueue, so that no single
 *   queue being full stalls the entire RX engine. We only
 *   enable this when Multiqueue is enabled AND Flow Control
 *   is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		srrctl;

	/* Set the DROP_EN bit in each queue's SRRCTL register */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */
   5279 
/************************************************************************
 * ixgbe_disable_rx_drop
 *
 *   Inverse of ixgbe_enable_rx_drop(): clears the DROP_EN bit on every
 *   RX queue and disables queue-drop for each VF.  Used when flow
 *   control is enabled, so congestion is signalled via pause frames.
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		srrctl;

	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */
   5303 
/************************************************************************
 * ixgbe_sysctl_advertise
 *
 *   SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int	       error = 0, advertise;

	/* Writing goes on to touch hardware, so honor FW recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	advertise = adapter->advertise;
	node.sysctl_data = &advertise;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	/* Validation and hardware programming happen in the helper */
	return ixgbe_set_advertise(adapter, advertise);
} /* ixgbe_sysctl_advertise */
   5327 
   5328 /************************************************************************
   5329  * ixgbe_set_advertise - Control advertised link speed
   5330  *
   5331  *   Flags:
   5332  *     0x00 - Default (all capable link speed)
   5333  *     0x01 - advertise 100 Mb
   5334  *     0x02 - advertise 1G
   5335  *     0x04 - advertise 10G
   5336  *     0x08 - advertise 10 Mb
   5337  *     0x10 - advertise 2.5G
   5338  *     0x20 - advertise 5G
   5339  ************************************************************************/
   5340 static int
   5341 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5342 {
   5343 	device_t	 dev;
   5344 	struct ixgbe_hw	 *hw;
   5345 	ixgbe_link_speed speed = 0;
   5346 	ixgbe_link_speed link_caps = 0;
   5347 	s32		 err = IXGBE_NOT_IMPLEMENTED;
   5348 	bool		 negotiate = FALSE;
   5349 
   5350 	/* Checks to validate new value */
   5351 	if (adapter->advertise == advertise) /* no change */
   5352 		return (0);
   5353 
   5354 	dev = adapter->dev;
   5355 	hw = &adapter->hw;
   5356 
   5357 	/* No speed changes for backplane media */
   5358 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5359 		return (ENODEV);
   5360 
   5361 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5362 	    (hw->phy.multispeed_fiber))) {
   5363 		device_printf(dev,
   5364 		    "Advertised speed can only be set on copper or "
   5365 		    "multispeed fiber media types.\n");
   5366 		return (EINVAL);
   5367 	}
   5368 
   5369 	if (advertise < 0x0 || advertise > 0x2f) {
   5370 		device_printf(dev,
   5371 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5372 		return (EINVAL);
   5373 	}
   5374 
   5375 	if (hw->mac.ops.get_link_capabilities) {
   5376 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5377 		    &negotiate);
   5378 		if (err != IXGBE_SUCCESS) {
   5379 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5380 			return (ENODEV);
   5381 		}
   5382 	}
   5383 
   5384 	/* Set new value and report new advertised mode */
   5385 	if (advertise & 0x1) {
   5386 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5387 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5388 			return (EINVAL);
   5389 		}
   5390 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5391 	}
   5392 	if (advertise & 0x2) {
   5393 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5394 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5395 			return (EINVAL);
   5396 		}
   5397 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5398 	}
   5399 	if (advertise & 0x4) {
   5400 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5401 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5402 			return (EINVAL);
   5403 		}
   5404 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5405 	}
   5406 	if (advertise & 0x8) {
   5407 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5408 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5409 			return (EINVAL);
   5410 		}
   5411 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5412 	}
   5413 	if (advertise & 0x10) {
   5414 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5415 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5416 			return (EINVAL);
   5417 		}
   5418 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5419 	}
   5420 	if (advertise & 0x20) {
   5421 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5422 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5423 			return (EINVAL);
   5424 		}
   5425 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5426 	}
   5427 	if (advertise == 0)
   5428 		speed = link_caps; /* All capable link speed */
   5429 
   5430 	hw->mac.autotry_restart = TRUE;
   5431 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5432 	adapter->advertise = advertise;
   5433 
   5434 	return (0);
   5435 } /* ixgbe_set_advertise */
   5436 
   5437 /************************************************************************
   5438  * ixgbe_get_advertise - Get current advertised speed settings
   5439  *
   5440  *   Formatted for sysctl usage.
   5441  *   Flags:
   5442  *     0x01 - advertise 100 Mb
   5443  *     0x02 - advertise 1G
   5444  *     0x04 - advertise 10G
   5445  *     0x08 - advertise 10 Mb (yes, Mb)
   5446  *     0x10 - advertise 2.5G
   5447  *     0x20 - advertise 5G
   5448  ************************************************************************/
   5449 static int
   5450 ixgbe_get_advertise(struct adapter *adapter)
   5451 {
   5452 	struct ixgbe_hw	 *hw = &adapter->hw;
   5453 	int		 speed;
   5454 	ixgbe_link_speed link_caps = 0;
   5455 	s32		 err;
   5456 	bool		 negotiate = FALSE;
   5457 
   5458 	/*
   5459 	 * Advertised speed means nothing unless it's copper or
   5460 	 * multi-speed fiber
   5461 	 */
   5462 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5463 	    !(hw->phy.multispeed_fiber))
   5464 		return (0);
   5465 
   5466 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5467 	if (err != IXGBE_SUCCESS)
   5468 		return (0);
   5469 
   5470 	speed =
   5471 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5472 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5473 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5474 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5475 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5476 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5477 
   5478 	return speed;
   5479 } /* ixgbe_get_advertise */
   5480 
/************************************************************************
 * ixgbe_sysctl_dmac - Manage DMA Coalescing
 *
 *   Control values:
 *     0/1 - off / on (use default value of 1000)
 *
 *     Legal timer values are:
 *     50,100,250,500,1000,2000,5000,10000
 *
 *     Turning off interrupt moderation will also turn this off.
 ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet   *ifp = adapter->ifp;
	int	       error;
	int	       newval;

	/* Re-init below touches hardware, so honor FW recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	newval = adapter->dmac;
	node.sysctl_data = &newval;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	switch (newval) {
	case 0:
		/* Disabled */
		adapter->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		adapter->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		adapter->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */
   5541 
   5542 #ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_power_state
 *
 *   Sysctl to test power states
 *   Values:
 *     0      - set device to D0
 *     3      - set device to D3
 *     (none) - get current device power state
 *
 *   NOTE: the FreeBSD implementation is compiled out ("notyet"); on
 *   NetBSD this sysctl currently always returns 0 and does nothing.
 ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t       dev =  adapter->dev;
	int	       curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
   5587 #endif
   5588 
/************************************************************************
 * ixgbe_sysctl_wol_enable
 *
 *   Sysctl to enable/disable the WoL capability,
 *   if supported by the adapter.
 *
 *   Values:
 *     0 - disabled
 *     1 - enabled
 ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool		new_wol_enabled;
	int		error = 0;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Refuse to enable WoL on hardware that doesn't support it */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
   5627 
   5628 /************************************************************************
   5629  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5630  *
   5631  *   Sysctl to enable/disable the types of packets that the
   5632  *   adapter will wake up on upon receipt.
   5633  *   Flags:
   5634  *     0x1  - Link Status Change
   5635  *     0x2  - Magic Packet
   5636  *     0x4  - Direct Exact
   5637  *     0x8  - Directed Multicast
   5638  *     0x10 - Broadcast
   5639  *     0x20 - ARP/IPv4 Request Packet
   5640  *     0x40 - Direct IPv4 Packet
   5641  *     0x80 - Direct IPv6 Packet
   5642  *
   5643  *   Settings not listed above will cause the sysctl to return an error.
   5644  ************************************************************************/
   5645 static int
   5646 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5647 {
   5648 	struct sysctlnode node = *rnode;
   5649 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5650 	int error = 0;
   5651 	u32 new_wufc;
   5652 
   5653 	/*
   5654 	 * It's not required to check recovery mode because this function never
   5655 	 * touches hardware.
   5656 	 */
   5657 	new_wufc = adapter->wufc;
   5658 	node.sysctl_data = &new_wufc;
   5659 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5660 	if ((error) || (newp == NULL))
   5661 		return (error);
   5662 	if (new_wufc == adapter->wufc)
   5663 		return (0);
   5664 
   5665 	if (new_wufc & 0xffffff00)
   5666 		return (EINVAL);
   5667 
   5668 	new_wufc &= 0xff;
   5669 	new_wufc |= (0xffffff & adapter->wufc);
   5670 	adapter->wufc = new_wufc;
   5671 
   5672 	return (0);
   5673 } /* ixgbe_sysctl_wufc */
   5674 
   5675 #ifdef IXGBE_DEBUG
/************************************************************************
 * ixgbe_sysctl_print_rss_config
 *
 *   Debug sysctl that dumps the RSS redirection table (RETA/ERETA).
 *   NOTE: the FreeBSD sbuf-based implementation is compiled out
 *   ("notyet"); on NetBSD this currently always returns 0.
 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	struct sbuf	*buf;
	int		error = 0, reta_size;
	u32		reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
   5735 #endif /* IXGBE_DEBUG */
   5736 
/************************************************************************
 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
 *
 *   For X552/X557-AT devices using an external PHY
 ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	int val;
	u16 reg;
	int		error;

	/* Reads the PHY over MDIO, so honor FW recovery mode */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(adapter->dev,
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(adapter->dev,
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	node.sysctl_data = &val;

	/* Shift temp for output */
	val = reg >> 8;

	/* Read-only node: report val; writes fall through and are ignored */
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	return (0);
} /* ixgbe_sysctl_phy_temp */
   5779 
   5780 /************************************************************************
   5781  * ixgbe_sysctl_phy_overtemp_occurred
   5782  *
   5783  *   Reports (directly from the PHY) whether the current PHY
   5784  *   temperature is over the overtemp threshold.
   5785  ************************************************************************/
   5786 static int
   5787 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5788 {
   5789 	struct sysctlnode node = *rnode;
   5790 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5791 	struct ixgbe_hw *hw = &adapter->hw;
   5792 	int val, error;
   5793 	u16 reg;
   5794 
   5795 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5796 		return (EPERM);
   5797 
   5798 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5799 		device_printf(adapter->dev,
   5800 		    "Device has no supported external thermal sensor.\n");
   5801 		return (ENODEV);
   5802 	}
   5803 
   5804 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5805 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5806 		device_printf(adapter->dev,
   5807 		    "Error reading from PHY's temperature status register\n");
   5808 		return (EAGAIN);
   5809 	}
   5810 
   5811 	node.sysctl_data = &val;
   5812 
   5813 	/* Get occurrence bit */
   5814 	val = !!(reg & 0x4000);
   5815 
   5816 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5817 	if ((error) || (newp == NULL))
   5818 		return (error);
   5819 
   5820 	return (0);
   5821 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5822 
   5823 /************************************************************************
   5824  * ixgbe_sysctl_eee_state
   5825  *
   5826  *   Sysctl to set EEE power saving feature
   5827  *   Values:
   5828  *     0      - disable EEE
   5829  *     1      - enable EEE
   5830  *     (none) - get current device EEE state
   5831  ************************************************************************/
   5832 static int
   5833 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5834 {
   5835 	struct sysctlnode node = *rnode;
   5836 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5837 	struct ifnet   *ifp = adapter->ifp;
   5838 	device_t       dev = adapter->dev;
   5839 	int	       curr_eee, new_eee, error = 0;
   5840 	s32	       retval;
   5841 
   5842 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5843 		return (EPERM);
   5844 
   5845 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5846 	node.sysctl_data = &new_eee;
   5847 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5848 	if ((error) || (newp == NULL))
   5849 		return (error);
   5850 
   5851 	/* Nothing to do */
   5852 	if (new_eee == curr_eee)
   5853 		return (0);
   5854 
   5855 	/* Not supported */
   5856 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5857 		return (EINVAL);
   5858 
   5859 	/* Bounds checking */
   5860 	if ((new_eee < 0) || (new_eee > 1))
   5861 		return (EINVAL);
   5862 
   5863 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5864 	if (retval) {
   5865 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5866 		return (EINVAL);
   5867 	}
   5868 
   5869 	/* Restart auto-neg */
   5870 	ifp->if_init(ifp);
   5871 
   5872 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5873 
   5874 	/* Cache new value */
   5875 	if (new_eee)
   5876 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5877 	else
   5878 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5879 
   5880 	return (error);
   5881 } /* ixgbe_sysctl_eee_state */
   5882 
/*
 * PRINTQS - dump the per-queue register "regname" for every queue of
 * "adapter" on a single line: "<devname>: <regname>\t<v0> <v1> ...".
 * Each value is read with IXGBE_READ_REG() and printed as 8-digit hex.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
   5896 
   5897 /************************************************************************
   5898  * ixgbe_print_debug_info
   5899  *
   5900  *   Called only when em_display_debug_stats is enabled.
   5901  *   Provides a way to take a look at important statistics
   5902  *   maintained by the driver and hardware.
   5903  ************************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int table_size;
	int i;

	/* X550-family MACs have a 128-entry redirection table; others 32. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 128;
		break;
	default:
		table_size = 32;
		break;
	}

	/* Entries 0-31 live in RETA; 32 and up in the extended ERETA. */
	device_printf(dev, "[E]RETA:\n");
	for (i = 0; i < table_size; i++) {
		if (i < 32)
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
				IXGBE_RETA(i)));
		else
			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
				IXGBE_ERETA(i - 32)));
	}

	/* Header row of queue indices, followed by per-queue registers. */
	device_printf(dev, "queue:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", i);
	}
	printf("\n");
	PRINTQS(adapter, RDBAL);
	PRINTQS(adapter, RDBAH);
	PRINTQS(adapter, RDLEN);
	PRINTQS(adapter, SRRCTL);
	PRINTQS(adapter, RDH);
	PRINTQS(adapter, RDT);
	PRINTQS(adapter, RXDCTL);

	/* One RQSMR register maps four queues, hence num_queues / 4. */
	device_printf(dev, "RQSMR:");
	for (i = 0; i < adapter->num_queues / 4; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
	}
	printf("\n");

	/* Software counter: how often each queue was found disabled. */
	device_printf(dev, "disabled_count:");
	for (i = 0; i < adapter->num_queues; i++) {
		printf((i == 0) ? "\t" : " ");
		printf("%8d", adapter->queues[i].disabled_count);
	}
	printf("\n");

	/* Interrupt mask state; 82598 lacks the extended EIMS_EX pair. */
	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
	if (hw->mac.type != ixgbe_mac_82598EB) {
		device_printf(dev, "EIMS_EX(0):\t%08x\n",
			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
		device_printf(dev, "EIMS_EX(1):\t%08x\n",
			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
	}
} /* ixgbe_print_debug_info */
   5969 
   5970 /************************************************************************
   5971  * ixgbe_sysctl_debug
   5972  ************************************************************************/
   5973 static int
   5974 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
   5975 {
   5976 	struct sysctlnode node = *rnode;
   5977 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5978 	int	       error, result = 0;
   5979 
   5980 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5981 		return (EPERM);
   5982 
   5983 	node.sysctl_data = &result;
   5984 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5985 
   5986 	if (error || newp == NULL)
   5987 		return error;
   5988 
   5989 	if (result == 1)
   5990 		ixgbe_print_debug_info(adapter);
   5991 
   5992 	return 0;
   5993 } /* ixgbe_sysctl_debug */
   5994 
   5995 /************************************************************************
   5996  * ixgbe_init_device_features
   5997  ************************************************************************/
   5998 static void
   5999 ixgbe_init_device_features(struct adapter *adapter)
   6000 {
   6001 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   6002 			  | IXGBE_FEATURE_RSS
   6003 			  | IXGBE_FEATURE_MSI
   6004 			  | IXGBE_FEATURE_MSIX
   6005 			  | IXGBE_FEATURE_LEGACY_IRQ
   6006 			  | IXGBE_FEATURE_LEGACY_TX;
   6007 
   6008 	/* Set capabilities first... */
   6009 	switch (adapter->hw.mac.type) {
   6010 	case ixgbe_mac_82598EB:
   6011 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   6012 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   6013 		break;
   6014 	case ixgbe_mac_X540:
   6015 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6016 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6017 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   6018 		    (adapter->hw.bus.func == 0))
   6019 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6020 		break;
   6021 	case ixgbe_mac_X550:
   6022 		/*
   6023 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6024 		 * NVM Image version.
   6025 		 */
   6026 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6027 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6028 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6029 		break;
   6030 	case ixgbe_mac_X550EM_x:
   6031 		/*
   6032 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6033 		 * NVM Image version.
   6034 		 */
   6035 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6036 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6037 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   6038 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6039 		break;
   6040 	case ixgbe_mac_X550EM_a:
   6041 		/*
   6042 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6043 		 * NVM Image version.
   6044 		 */
   6045 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6046 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6047 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6048 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   6049 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   6050 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6051 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6052 		}
   6053 		break;
   6054 	case ixgbe_mac_82599EB:
   6055 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6056 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6057 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   6058 		    (adapter->hw.bus.func == 0))
   6059 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6060 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   6061 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6062 		break;
   6063 	default:
   6064 		break;
   6065 	}
   6066 
   6067 	/* Enabled by default... */
   6068 	/* Fan failure detection */
   6069 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   6070 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   6071 	/* Netmap */
   6072 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   6073 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   6074 	/* EEE */
   6075 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   6076 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   6077 	/* Thermal Sensor */
   6078 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   6079 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   6080 	/*
   6081 	 * Recovery mode:
   6082 	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
   6083 	 * NVM Image version.
   6084 	 */
   6085 
   6086 	/* Enabled via global sysctl... */
   6087 	/* Flow Director */
   6088 	if (ixgbe_enable_fdir) {
   6089 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   6090 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   6091 		else
   6092 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   6093 	}
   6094 	/* Legacy (single queue) transmit */
   6095 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   6096 	    ixgbe_enable_legacy_tx)
   6097 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   6098 	/*
   6099 	 * Message Signal Interrupts - Extended (MSI-X)
   6100 	 * Normal MSI is only enabled if MSI-X calls fail.
   6101 	 */
   6102 	if (!ixgbe_enable_msix)
   6103 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   6104 	/* Receive-Side Scaling (RSS) */
   6105 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   6106 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   6107 
   6108 	/* Disable features with unmet dependencies... */
   6109 	/* No MSI-X */
   6110 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   6111 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6112 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6113 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   6114 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   6115 	}
   6116 } /* ixgbe_init_device_features */
   6117 
   6118 /************************************************************************
   6119  * ixgbe_probe - Device identification routine
   6120  *
   6121  *   Determines if the driver should be loaded on
   6122  *   adapter based on its PCI vendor/device ID.
   6123  *
   6124  *   return BUS_PROBE_DEFAULT on success, positive on failure
   6125  ************************************************************************/
   6126 static int
   6127 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   6128 {
   6129 	const struct pci_attach_args *pa = aux;
   6130 
   6131 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   6132 }
   6133 
   6134 static const ixgbe_vendor_info_t *
   6135 ixgbe_lookup(const struct pci_attach_args *pa)
   6136 {
   6137 	const ixgbe_vendor_info_t *ent;
   6138 	pcireg_t subid;
   6139 
   6140 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   6141 
   6142 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   6143 		return NULL;
   6144 
   6145 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   6146 
   6147 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   6148 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   6149 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   6150 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   6151 			(ent->subvendor_id == 0)) &&
   6152 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   6153 			(ent->subdevice_id == 0))) {
   6154 			return ent;
   6155 		}
   6156 	}
   6157 	return NULL;
   6158 }
   6159 
/*
 * Ethernet-layer callback, run when if_flags or ec_capenable change.
 * Returns 0 when the change was handled in place, or ENETRESET to
 * request a full reinitialization from the caller.
 */
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	u_short change;
	int rv = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Cache the latest if_flags and compute which bits changed. */
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	/*
	 * Changes beyond IFF_CANTCHANGE/IFF_DEBUG need a full reset;
	 * a bare IFF_PROMISC toggle only needs the RX filter redone.
	 */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		rv = ENETRESET;
		goto out;
	} else if ((change & IFF_PROMISC) != 0)
		ixgbe_set_multi(adapter);

	/* Check for ec_capenable. */
	change = ec->ec_capenable ^ adapter->ec_capenable;
	adapter->ec_capenable = ec->ec_capenable;
	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
		rv = ENETRESET;
		goto out;
	}

	/*
	 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
	 */

	/* Set up VLAN support and filter */
	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
		ixgbe_setup_vlan_hw_support(adapter);

out:
	IXGBE_CORE_UNLOCK(adapter);

	return rv;
}
   6203 
   6204 /************************************************************************
   6205  * ixgbe_ioctl - Ioctl entry point
   6206  *
   6207  *   Called when the user wants to configure the interface.
   6208  *
   6209  *   return 0 on success, positive on failure
   6210  ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int		error = 0;
	int l4csum_en;
	/* All four layer-4 Rx checksum capabilities, toggled as one unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* No configuration is accepted while firmware recovery is active. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First pass: debug tracing per command.  SIOCZIFDATA is the one
	 * command that also has a side effect here (counter reset).
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both hardware and software statistics counters. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second pass: actual command handling. */
	switch (command) {
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2) pages. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		/* NOTE(review): read_i2c_byte's status is ignored; a failed
		 * read copies out stale i2c.data without reporting an error. */
		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the generic handler run first; ENETRESET means the
		 * change requires driver-level reprogramming below. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Re-init to apply new capabilities / frame size. */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
   6345 
   6346 /************************************************************************
   6347  * ixgbe_check_fan_failure
   6348  ************************************************************************/
   6349 static void
   6350 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   6351 {
   6352 	u32 mask;
   6353 
   6354 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   6355 	    IXGBE_ESDP_SDP1;
   6356 
   6357 	if (reg & mask)
   6358 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   6359 } /* ixgbe_check_fan_failure */
   6360 
   6361 /************************************************************************
   6362  * ixgbe_handle_que
   6363  ************************************************************************/
/*
 * Deferred (softint/workqueue) per-queue service routine: drain RX and
 * TX completions, restart any pending transmit, and either reschedule
 * itself (work remaining) or re-enable the queue's interrupt.
 */
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more = false;	/* true if RX or TX work remains */

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue TX path: push out anything queued on interq. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Still work to do: schedule another pass. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		/* que->res is set when this queue has its own MSI-X vector. */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
   6401 
   6402 /************************************************************************
   6403  * ixgbe_handle_que_work
   6404  ************************************************************************/
/* Workqueue wrapper: recover the queue from its embedded work item and
 * run the common deferred handler. */
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
   6416 
   6417 /************************************************************************
   6418  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   6419  ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring	*txr = adapter->tx_rings;
	int		counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t intr_type, max_type;
	char		intrbuf[PCI_INTRSTR_LEN];
	char		wqname[MAXCOMLEN];
	const char	*intrstr = NULL;
	int defertx_error = 0, error;

	/* We allocate a single interrupt resource */
	max_type = PCI_INTR_TYPE_MSI;
	counts[PCI_INTR_TYPE_MSIX] = 0;
	counts[PCI_INTR_TYPE_MSI] =
	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
	/* Check not feat_en but feat_cap to fallback to INTx */
	counts[PCI_INTR_TYPE_INTX] =
	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;

alloc_retry:
	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
		aprint_error_dev(dev, "couldn't alloc interrupt\n");
		return ENXIO;
	}
	adapter->osdep.nintrs = 1;
	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
	    intrbuf, sizeof(intrbuf));
	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
	    device_xname(dev));
	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,"unable to establish %s\n",
		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		switch (intr_type) {
		case PCI_INTR_TYPE_MSI:
			/* The next try is for INTx: Disable MSI */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
				goto alloc_retry;
			} else
				break;
		case PCI_INTR_TYPE_INTX:
		default:
			/* See below */
			break;
		}
	}
	/* Record which interrupt type we actually ended up with. */
	if (intr_type == PCI_INTR_TYPE_INTX) {
		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	}
	/* Still NULL here means both MSI and INTx establishment failed. */
	if (adapter->osdep.ihs[0] == NULL) {
		aprint_error_dev(dev,
		    "couldn't establish interrupt%s%s\n",
		    intrstr ? " at " : "", intrstr ? intrstr : "");
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
		adapter->osdep.intrs = NULL;
		return ENXIO;
	}
	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
		/* Softint and workqueue for deferred multiqueue TX starts. */
		txr->txr_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_deferred_mq_start, txr);

		snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
	}
	/* Softint and workqueue for the deferred per-queue RX/TX handler. */
	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_que, que);
	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
	error = workqueue_create(&adapter->que_wq, wqname,
	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
	    IXGBE_WORKQUEUE_FLAGS);

	/* Fail if any of the software interrupt contexts could not be made. */
	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
		&& ((txr->txr_si == NULL) || defertx_error != 0))
	    || (que->que_si == NULL) || error != 0) {
		aprint_error_dev(dev,
		    "could not establish software interrupts\n");

		return ENXIO;
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
   6525 
   6526 /************************************************************************
   6527  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6528  ************************************************************************/
   6529 static int
   6530 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6531 {
   6532 	device_t	dev = adapter->dev;
   6533 	struct		ix_queue *que = adapter->queues;
   6534 	struct		tx_ring *txr = adapter->tx_rings;
   6535 	pci_chipset_tag_t pc;
   6536 	char		intrbuf[PCI_INTRSTR_LEN];
   6537 	char		intr_xname[32];
   6538 	char		wqname[MAXCOMLEN];
   6539 	const char	*intrstr = NULL;
   6540 	int		error, vector = 0;
   6541 	int		cpu_id = 0;
   6542 	kcpuset_t	*affinity;
   6543 #ifdef RSS
   6544 	unsigned int	rss_buckets = 0;
   6545 	kcpuset_t	cpu_mask;
   6546 #endif
   6547 
   6548 	pc = adapter->osdep.pc;
   6549 #ifdef	RSS
   6550 	/*
   6551 	 * If we're doing RSS, the number of queues needs to
   6552 	 * match the number of RSS buckets that are configured.
   6553 	 *
   6554 	 * + If there's more queues than RSS buckets, we'll end
   6555 	 *   up with queues that get no traffic.
   6556 	 *
   6557 	 * + If there's more RSS buckets than queues, we'll end
   6558 	 *   up having multiple RSS buckets map to the same queue,
   6559 	 *   so there'll be some contention.
   6560 	 */
   6561 	rss_buckets = rss_getnumbuckets();
   6562 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6563 	    (adapter->num_queues != rss_buckets)) {
   6564 		device_printf(dev,
   6565 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6566 		    "; performance will be impacted.\n",
   6567 		    __func__, adapter->num_queues, rss_buckets);
   6568 	}
   6569 #endif
   6570 
   6571 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6572 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6573 	    adapter->osdep.nintrs) != 0) {
   6574 		aprint_error_dev(dev,
   6575 		    "failed to allocate MSI-X interrupt\n");
   6576 		return (ENXIO);
   6577 	}
   6578 
   6579 	kcpuset_create(&affinity, false);
   6580 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6581 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6582 		    device_xname(dev), i);
   6583 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6584 		    sizeof(intrbuf));
   6585 #ifdef IXGBE_MPSAFE
   6586 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6587 		    true);
   6588 #endif
   6589 		/* Set the handler function */
   6590 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6591 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6592 		    intr_xname);
   6593 		if (que->res == NULL) {
   6594 			aprint_error_dev(dev,
   6595 			    "Failed to register QUE handler\n");
   6596 			error = ENXIO;
   6597 			goto err_out;
   6598 		}
   6599 		que->msix = vector;
   6600 		adapter->active_queues |= 1ULL << que->msix;
   6601 
   6602 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6603 #ifdef	RSS
   6604 			/*
   6605 			 * The queue ID is used as the RSS layer bucket ID.
   6606 			 * We look up the queue ID -> RSS CPU ID and select
   6607 			 * that.
   6608 			 */
   6609 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6610 			CPU_SETOF(cpu_id, &cpu_mask);
   6611 #endif
   6612 		} else {
   6613 			/*
   6614 			 * Bind the MSI-X vector, and thus the
   6615 			 * rings to the corresponding CPU.
   6616 			 *
   6617 			 * This just happens to match the default RSS
   6618 			 * round-robin bucket -> queue -> CPU allocation.
   6619 			 */
   6620 			if (adapter->num_queues > 1)
   6621 				cpu_id = i;
   6622 		}
   6623 		/* Round-robin affinity */
   6624 		kcpuset_zero(affinity);
   6625 		kcpuset_set(affinity, cpu_id % ncpu);
   6626 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6627 		    NULL);
   6628 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6629 		    intrstr);
   6630 		if (error == 0) {
   6631 #if 1 /* def IXGBE_DEBUG */
   6632 #ifdef	RSS
   6633 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6634 			    cpu_id % ncpu);
   6635 #else
   6636 			aprint_normal(", bound queue %d to cpu %d", i,
   6637 			    cpu_id % ncpu);
   6638 #endif
   6639 #endif /* IXGBE_DEBUG */
   6640 		}
   6641 		aprint_normal("\n");
   6642 
   6643 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6644 			txr->txr_si = softint_establish(
   6645 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6646 				ixgbe_deferred_mq_start, txr);
   6647 			if (txr->txr_si == NULL) {
   6648 				aprint_error_dev(dev,
   6649 				    "couldn't establish software interrupt\n");
   6650 				error = ENXIO;
   6651 				goto err_out;
   6652 			}
   6653 		}
   6654 		que->que_si
   6655 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6656 			ixgbe_handle_que, que);
   6657 		if (que->que_si == NULL) {
   6658 			aprint_error_dev(dev,
   6659 			    "couldn't establish software interrupt\n");
   6660 			error = ENXIO;
   6661 			goto err_out;
   6662 		}
   6663 	}
   6664 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6665 	error = workqueue_create(&adapter->txr_wq, wqname,
   6666 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6667 	    IXGBE_WORKQUEUE_FLAGS);
   6668 	if (error) {
   6669 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6670 		goto err_out;
   6671 	}
   6672 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6673 
   6674 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6675 	error = workqueue_create(&adapter->que_wq, wqname,
   6676 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6677 	    IXGBE_WORKQUEUE_FLAGS);
   6678 	if (error) {
   6679 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6680 		goto err_out;
   6681 	}
   6682 
   6683 	/* and Link */
   6684 	cpu_id++;
   6685 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6686 	adapter->vector = vector;
   6687 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6688 	    sizeof(intrbuf));
   6689 #ifdef IXGBE_MPSAFE
   6690 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6691 	    true);
   6692 #endif
   6693 	/* Set the link handler function */
   6694 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6695 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6696 	    intr_xname);
   6697 	if (adapter->osdep.ihs[vector] == NULL) {
   6698 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6699 		error = ENXIO;
   6700 		goto err_out;
   6701 	}
   6702 	/* Round-robin affinity */
   6703 	kcpuset_zero(affinity);
   6704 	kcpuset_set(affinity, cpu_id % ncpu);
   6705 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6706 	    NULL);
   6707 
   6708 	aprint_normal_dev(dev,
   6709 	    "for link, interrupting at %s", intrstr);
   6710 	if (error == 0)
   6711 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6712 	else
   6713 		aprint_normal("\n");
   6714 
   6715 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6716 		adapter->mbx_si =
   6717 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6718 			ixgbe_handle_mbx, adapter);
   6719 		if (adapter->mbx_si == NULL) {
   6720 			aprint_error_dev(dev,
   6721 			    "could not establish software interrupts\n");
   6722 
   6723 			error = ENXIO;
   6724 			goto err_out;
   6725 		}
   6726 	}
   6727 
   6728 	kcpuset_destroy(affinity);
   6729 	aprint_normal_dev(dev,
   6730 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6731 
   6732 	return (0);
   6733 
   6734 err_out:
   6735 	kcpuset_destroy(affinity);
   6736 	ixgbe_free_softint(adapter);
   6737 	ixgbe_free_pciintr_resources(adapter);
   6738 	return (error);
   6739 } /* ixgbe_allocate_msix */
   6740 
   6741 /************************************************************************
   6742  * ixgbe_configure_interrupts
   6743  *
   6744  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6745  *   This will also depend on user settings.
   6746  ************************************************************************/
   6747 static int
   6748 ixgbe_configure_interrupts(struct adapter *adapter)
   6749 {
   6750 	device_t dev = adapter->dev;
   6751 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6752 	int want, queues, msgs;
   6753 
   6754 	/* Default to 1 queue if MSI-X setup fails */
   6755 	adapter->num_queues = 1;
   6756 
   6757 	/* Override by tuneable */
   6758 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6759 		goto msi;
   6760 
   6761 	/*
   6762 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6763 	 * interrupt slot.
   6764 	 */
   6765 	if (ncpu == 1)
   6766 		goto msi;
   6767 
   6768 	/* First try MSI-X */
   6769 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6770 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6771 	if (msgs < 2)
   6772 		goto msi;
   6773 
   6774 	adapter->msix_mem = (void *)1; /* XXX */
   6775 
   6776 	/* Figure out a reasonable auto config value */
   6777 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6778 
   6779 #ifdef	RSS
   6780 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6781 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6782 		queues = uimin(queues, rss_getnumbuckets());
   6783 #endif
   6784 	if (ixgbe_num_queues > queues) {
   6785 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6786 		ixgbe_num_queues = queues;
   6787 	}
   6788 
   6789 	if (ixgbe_num_queues != 0)
   6790 		queues = ixgbe_num_queues;
   6791 	else
   6792 		queues = uimin(queues,
   6793 		    uimin(mac->max_tx_queues, mac->max_rx_queues));
   6794 
   6795 	/* reflect correct sysctl value */
   6796 	ixgbe_num_queues = queues;
   6797 
   6798 	/*
   6799 	 * Want one vector (RX/TX pair) per queue
   6800 	 * plus an additional for Link.
   6801 	 */
   6802 	want = queues + 1;
   6803 	if (msgs >= want)
   6804 		msgs = want;
   6805 	else {
   6806 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6807 		    "%d vectors but %d queues wanted!\n",
   6808 		    msgs, want);
   6809 		goto msi;
   6810 	}
   6811 	adapter->num_queues = queues;
   6812 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6813 	return (0);
   6814 
   6815 	/*
   6816 	 * MSI-X allocation failed or provided us with
   6817 	 * less vectors than needed. Free MSI-X resources
   6818 	 * and we'll try enabling MSI.
   6819 	 */
   6820 msi:
   6821 	/* Without MSI-X, some features are no longer supported */
   6822 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6823 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6824 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6825 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6826 
   6827 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6828 	adapter->msix_mem = NULL; /* XXX */
   6829 	if (msgs > 1)
   6830 		msgs = 1;
   6831 	if (msgs != 0) {
   6832 		msgs = 1;
   6833 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6834 		return (0);
   6835 	}
   6836 
   6837 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6838 		aprint_error_dev(dev,
   6839 		    "Device does not support legacy interrupts.\n");
   6840 		return 1;
   6841 	}
   6842 
   6843 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6844 
   6845 	return (0);
   6846 } /* ixgbe_configure_interrupts */
   6847 
   6848 
   6849 /************************************************************************
   6850  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6851  *
   6852  *   Done outside of interrupt context since the driver might sleep
   6853  ************************************************************************/
   6854 static void
   6855 ixgbe_handle_link(void *context)
   6856 {
   6857 	struct adapter	*adapter = context;
   6858 	struct ixgbe_hw *hw = &adapter->hw;
   6859 
   6860 	IXGBE_CORE_LOCK(adapter);
   6861 	++adapter->link_sicount.ev_count;
   6862 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6863 	ixgbe_update_link_status(adapter);
   6864 
   6865 	/* Re-enable link interrupts */
   6866 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6867 
   6868 	IXGBE_CORE_UNLOCK(adapter);
   6869 } /* ixgbe_handle_link */
   6870 
   6871 #if 0
   6872 /************************************************************************
   6873  * ixgbe_rearm_queues
   6874  ************************************************************************/
   6875 static __inline void
   6876 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6877 {
   6878 	u32 mask;
   6879 
   6880 	switch (adapter->hw.mac.type) {
   6881 	case ixgbe_mac_82598EB:
   6882 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6883 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6884 		break;
   6885 	case ixgbe_mac_82599EB:
   6886 	case ixgbe_mac_X540:
   6887 	case ixgbe_mac_X550:
   6888 	case ixgbe_mac_X550EM_x:
   6889 	case ixgbe_mac_X550EM_a:
   6890 		mask = (queues & 0xFFFFFFFF);
   6891 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6892 		mask = (queues >> 32);
   6893 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6894 		break;
   6895 	default:
   6896 		break;
   6897 	}
   6898 } /* ixgbe_rearm_queues */
   6899 #endif
   6900