Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe.c revision 1.88.2.31
      1 /* $NetBSD: ixgbe.c,v 1.88.2.31 2019/08/01 14:14:30 martin Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
     80 /************************************************************************
     81  * Driver version
     82  ************************************************************************/
     83 static const char ixgbe_driver_version[] = "4.0.1-k";
     84 /* XXX NetBSD: + 3.3.10 */
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
     95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     96 {
        	/*
        	 * All supported parts share the Intel vendor ID; only the
        	 * device ID differs.  The subvendor/subdevice fields are
        	 * wildcards (0) and the trailing string index is 0 for every
        	 * entry, i.e. all devices map to ixgbe_strings[0].
        	 */
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    140 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    141 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    142 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    143 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    144 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    145 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    146 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    147 	/* required last entry: all-zero sentinel terminates probe scan */
    148 	{0, 0, 0, 0, 0}
    149 };
    150 
    151 /************************************************************************
    152  * Table of branding strings
    153  ************************************************************************/
    154 static const char    *ixgbe_strings[] = {
        	/*
        	 * Index 0: the only branding string; every entry in
        	 * ixgbe_vendor_info_array uses string index 0.
        	 */
    155 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    156 };
    157 
    158 /************************************************************************
    159  * Function prototypes
    160  ************************************************************************/
    161 static int	ixgbe_probe(device_t, cfdata_t, void *);
    162 static void	ixgbe_attach(device_t, device_t, void *);
    163 static int	ixgbe_detach(device_t, int);
    164 #if 0
    165 static int	ixgbe_shutdown(device_t);
    166 #endif
    167 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    168 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    169 static int	ixgbe_ifflags_cb(struct ethercom *);
    170 static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
    171 static void	ixgbe_ifstop(struct ifnet *, int);
    172 static int	ixgbe_init(struct ifnet *);
    173 static void	ixgbe_init_locked(struct adapter *);
    174 static void	ixgbe_stop(void *);
    175 static void	ixgbe_init_device_features(struct adapter *);
    176 static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
    177 static void	ixgbe_add_media_types(struct adapter *);
    178 static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    179 static int	ixgbe_media_change(struct ifnet *);
    180 static int	ixgbe_allocate_pci_resources(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static void	ixgbe_free_softint(struct adapter *);
    183 static void	ixgbe_get_slot_info(struct adapter *);
    184 static int	ixgbe_allocate_msix(struct adapter *,
    185 		    const struct pci_attach_args *);
    186 static int	ixgbe_allocate_legacy(struct adapter *,
    187 		    const struct pci_attach_args *);
    188 static int	ixgbe_configure_interrupts(struct adapter *);
    189 static void	ixgbe_free_pciintr_resources(struct adapter *);
    190 static void	ixgbe_free_pci_resources(struct adapter *);
    191 static void	ixgbe_local_timer(void *);
    192 static void	ixgbe_local_timer1(void *);
    193 static void	ixgbe_recovery_mode_timer(void *);
    194 static int	ixgbe_setup_interface(device_t, struct adapter *);
    195 static void	ixgbe_config_gpie(struct adapter *);
    196 static void	ixgbe_config_dmac(struct adapter *);
    197 static void	ixgbe_config_delay_values(struct adapter *);
    198 static void	ixgbe_config_link(struct adapter *);
    199 static void	ixgbe_check_wol_support(struct adapter *);
    200 static int	ixgbe_setup_low_power_mode(struct adapter *);
    201 static void	ixgbe_rearm_queues(struct adapter *, u64);
    202 
    203 static void	ixgbe_initialize_transmit_units(struct adapter *);
    204 static void	ixgbe_initialize_receive_units(struct adapter *);
    205 static void	ixgbe_enable_rx_drop(struct adapter *);
    206 static void	ixgbe_disable_rx_drop(struct adapter *);
    207 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    208 
    209 static void	ixgbe_enable_intr(struct adapter *);
    210 static void	ixgbe_disable_intr(struct adapter *);
    211 static void	ixgbe_update_stats_counters(struct adapter *);
    212 static void	ixgbe_set_promisc(struct adapter *);
    213 static void	ixgbe_set_multi(struct adapter *);
    214 static void	ixgbe_update_link_status(struct adapter *);
    215 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    216 static void	ixgbe_configure_ivars(struct adapter *);
    217 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    218 static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
    219 
    220 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    221 #if 0
    222 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    223 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    224 #endif
    225 
    226 static void	ixgbe_add_device_sysctls(struct adapter *);
    227 static void	ixgbe_add_hw_stats(struct adapter *);
    228 static void	ixgbe_clear_evcnt(struct adapter *);
    229 static int	ixgbe_set_flowcntl(struct adapter *, int);
    230 static int	ixgbe_set_advertise(struct adapter *, int);
    231 static int	ixgbe_get_advertise(struct adapter *);
    232 
    233 /* Sysctl handlers */
    234 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    235 		     const char *, int *, int);
    236 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    237 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    238 static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    239 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    242 #ifdef IXGBE_DEBUG
    243 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    244 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    245 #endif
    246 static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    247 static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    248 static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    249 static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    250 static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    251 static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    252 static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
    253 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    254 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    255 
    256 /* Support for pluggable optic modules */
    257 static bool	ixgbe_sfp_probe(struct adapter *);
    258 
    259 /* Legacy (single vector) interrupt handler */
    260 static int	ixgbe_legacy_irq(void *);
    261 
    262 /* The MSI/MSI-X Interrupt handlers */
    263 static int	ixgbe_msix_que(void *);
    264 static int	ixgbe_msix_link(void *);
    265 
    266 /* Software interrupts for deferred work */
    267 static void	ixgbe_handle_que(void *);
    268 static void	ixgbe_handle_link(void *);
    269 static void	ixgbe_handle_msf(void *);
    270 static void	ixgbe_handle_mod(void *);
    271 static void	ixgbe_handle_phy(void *);
    272 
    273 /* Workqueue handler for deferred work */
    274 static void	ixgbe_handle_que_work(struct work *, void *);
    275 
    276 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    277 
    278 /************************************************************************
    279  *  NetBSD Device Interface Entry Points
    280  ************************************************************************/
        /*
         * Autoconfiguration glue for the "ixg" driver; the per-device softc
         * is a struct adapter.  NOTE(review): the three NULL arguments are
         * the optional extra hooks of CFATTACH_DECL3_NEW (e.g. rescan /
         * childdetached) - confirm against autoconf(9) before using them.
         * DVF_DETACH_SHUTDOWN permits detaching this device at shutdown.
         */
    281 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    282     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    283     DVF_DETACH_SHUTDOWN);
    284 
    285 #if 0
    286 devclass_t ix_devclass;
    287 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    288 
    289 MODULE_DEPEND(ix, pci, 1, 1, 1);
    290 MODULE_DEPEND(ix, ether, 1, 1, 1);
    291 #ifdef DEV_NETMAP
    292 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    293 #endif
    294 #endif
    295 
    296 /*
    297  * TUNEABLE PARAMETERS:
    298  */
    299 
    300 /*
    301  * AIM: Adaptive Interrupt Moderation
    302  * which means that the interrupt rate
    303  * is varied over time based on the
    304  * traffic for that interrupt vector
    305  */
    306 static bool ixgbe_enable_aim = true;
        /*
         * This is NetBSD, not FreeBSD: the empty macro below makes every
         * FreeBSD-style SYSCTL_INT() declaration in this file expand to
         * nothing, so the tunables remain plain file-scope variables
         * (presumably exported via per-device sysctl nodes elsewhere -
         * see ixgbe_add_device_sysctls).
         */
    307 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    308 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    309     "Enable adaptive interrupt moderation");
    310 
    311 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    312 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    313     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    314 
    315 /* How many packets rxeof tries to clean at a time */
    316 static int ixgbe_rx_process_limit = 256;
    317 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    318     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    319 
    320 /* How many packets txeof tries to clean at a time */
    321 static int ixgbe_tx_process_limit = 256;
    322 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    323     &ixgbe_tx_process_limit, 0,
    324     "Maximum number of sent packets to process at a time, -1 means unlimited");
    325 
    326 /* Flow control setting, default to full */
    327 static int ixgbe_flow_control = ixgbe_fc_full;
    328 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    329     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    330 
    331 /* Which packet processing uses workqueue or softint */
    332 static bool ixgbe_txrx_workqueue = false;
    333 
    334 /*
    335  * Smart speed setting, default to on
    336  * this only works as a compile option
    338  * right now as it's during attach, set
    338  * this to 'ixgbe_smart_speed_off' to
    339  * disable.
    340  */
    341 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    342 
    343 /*
    344  * MSI-X should be the default for best performance,
    345  * but this allows it to be forced off for testing.
    346  */
    347 static int ixgbe_enable_msix = 1;
    348 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    349     "Enable MSI-X interrupts");
    350 
    351 /*
    352  * Number of Queues, can be set to 0,
    353  * it then autoconfigures based on the
    354  * number of cpus with a max of 8. This
    355  * can be overridden manually here.
    356  */
    357 static int ixgbe_num_queues = 0;
    358 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    359     "Number of queues to configure, 0 indicates autoconfigure");
    360 
    361 /*
    362  * Number of TX descriptors per ring,
    363  * setting higher than RX as this seems
    364  * the better performing choice.
    365  */
    366 static int ixgbe_txd = PERFORM_TXD;
    367 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    368     "Number of transmit descriptors per queue");
    369 
    370 /* Number of RX descriptors per ring */
    371 static int ixgbe_rxd = PERFORM_RXD;
    372 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    373     "Number of receive descriptors per queue");
    374 
    375 /*
    376  * Defining this on will allow the use
    377  * of unsupported SFP+ modules, note that
    378  * doing so you are on your own :)
    379  */
    380 static int allow_unsupported_sfp = false;
    381 #define TUNABLE_INT(__x, __y)
    382 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    383 
    384 /*
    385  * Not sure if Flow Director is fully baked,
    386  * so we'll default to turning it off.
    387  */
    388 static int ixgbe_enable_fdir = 0;
    389 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    390     "Enable Flow Director");
    391 
    392 /* Legacy Transmit (single queue) */
    393 static int ixgbe_enable_legacy_tx = 0;
    394 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    395     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    396 
    397 /* Receive-Side Scaling */
    398 static int ixgbe_enable_rss = 1;
    399 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    400     "Enable Receive-Side Scaling (RSS)");
    401 
    402 #if 0
    403 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    404 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    405 #endif
    406 
    407 #ifdef NET_MPSAFE
    408 #define IXGBE_MPSAFE		1
    409 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    410 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    411 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    412 #else
    413 #define IXGBE_CALLOUT_FLAGS	0
    414 #define IXGBE_SOFTINFT_FLAGS	0
    415 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    416 #endif
    417 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    418 
    419 /************************************************************************
    420  * ixgbe_initialize_rss_mapping
    421  ************************************************************************/
    422 static void
    423 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    424 {
        	/*
        	 * Program the adapter's RSS state: the RETA/ERETA redirection
        	 * table, the ten 32-bit RSSRK hash-key registers (40-byte key),
        	 * and the MRQC register that selects which packet fields
        	 * participate in the hash.
        	 */
    425 	struct ixgbe_hw	*hw = &adapter->hw;
    426 	u32		reta = 0, mrqc, rss_key[10];
    427 	int		queue_id, table_size, index_mult;
    428 	int		i, j;
    429 	u32		rss_hash_config;
    430 
    431 	/* force use default RSS key. */
    432 #ifdef __NetBSD__
    433 	rss_getkey((uint8_t *) &rss_key);
    434 #else
    435 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    436 		/* Fetch the configured RSS key */
    437 		rss_getkey((uint8_t *) &rss_key);
    438 	} else {
    439 		/* set up random bits */
    440 		cprng_fast(&rss_key, sizeof(rss_key));
    441 	}
    442 #endif
    443 
    444 	/* Set multiplier for RETA setup and table size based on MAC */
        	/* 128 indirection entries by default; X550 family has 512
        	 * (the upper 384 live in the extended ERETA registers). */
    445 	index_mult = 0x1;
    446 	table_size = 128;
    447 	switch (adapter->hw.mac.type) {
    448 	case ixgbe_mac_82598EB:
    449 		index_mult = 0x11;	/* NOTE(review): 82598 spaces queue ids 0x11 apart - confirm vs datasheet */
    450 		break;
    451 	case ixgbe_mac_X550:
    452 	case ixgbe_mac_X550EM_x:
    453 	case ixgbe_mac_X550EM_a:
    454 		table_size = 512;
    455 		break;
    456 	default:
    457 		break;
    458 	}
    459 
    460 	/* Set up the redirection table */
    461 	for (i = 0, j = 0; i < table_size; i++, j++) {
        		/* j cycles round-robin over the configured queues */
    462 		if (j == adapter->num_queues)
    463 			j = 0;
    464 
    465 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    466 			/*
    467 			 * Fetch the RSS bucket id for the given indirection
    468 			 * entry. Cap it at the number of configured buckets
    469 			 * (which is num_queues.)
    470 			 */
    471 			queue_id = rss_get_indirection_to_bucket(i);
    472 			queue_id = queue_id % adapter->num_queues;
    473 		} else
    474 			queue_id = (j * index_mult);
    475 
    476 		/*
    477 		 * The low 8 bits are for hash value (n+0);
    478 		 * The next 8 bits are for hash value (n+1), etc.
    479 		 */
    480 		reta = reta >> 8;
    481 		reta = reta | (((uint32_t) queue_id) << 24);
        		/* Four 8-bit entries are packed per 32-bit register, so
        		 * flush every fourth iteration; entries 128 and up go to
        		 * the extended (ERETA) register bank. */
    482 		if ((i & 3) == 3) {
    483 			if (i < 128)
    484 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    485 			else
    486 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    487 				    reta);
    488 			reta = 0;
    489 		}
    490 	}
    491 
    492 	/* Now fill our hash function seeds */
    493 	for (i = 0; i < 10; i++)
    494 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    495 
    496 	/* Perform hash on these packet types */
    497 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    498 		rss_hash_config = rss_gethashconfig();
    499 	else {
    500 		/*
    501 		 * Disable UDP - IP fragments aren't currently being handled
    502 		 * and so we end up with a mix of 2-tuple and 4-tuple
    503 		 * traffic.
    504 		 */
    505 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    506 				| RSS_HASHTYPE_RSS_TCP_IPV4
    507 				| RSS_HASHTYPE_RSS_IPV6
    508 				| RSS_HASHTYPE_RSS_TCP_IPV6
    509 				| RSS_HASHTYPE_RSS_IPV6_EX
    510 				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    511 	}
    512 
        	/* Translate the hash-type bitmap into MRQC field-enable bits */
    513 	mrqc = IXGBE_MRQC_RSSEN;
    514 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    515 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    516 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    517 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    518 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    519 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    520 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    521 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    522 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    523 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    524 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    525 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    526 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    527 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    528 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    529 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    530 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    531 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
        	/* Merge in the multiqueue mode bits for the current IOV mode */
    532 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    533 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    534 } /* ixgbe_initialize_rss_mapping */
    535 
    536 /************************************************************************
    537  * ixgbe_initialize_receive_units - Setup receive registers and features.
    538  ************************************************************************/
        /* Rounding addend: added before the BSIZEPKT shift below so a byte
         * count is converted to a whole number of SRRCTL packet-buffer
         * units, rounding up. */
    539 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    540 
    541 static void
    542 ixgbe_initialize_receive_units(struct adapter *adapter)
    543 {
        	/*
        	 * Program the RX side of the chip: frame-control/jumbo/CRC-strip
        	 * options, then per-ring descriptor base/length, SRRCTL buffer
        	 * sizing, statistics mapping and head/tail pointers, and finally
        	 * the RSS redirection and RX checksum configuration.
        	 */
    544 	struct	rx_ring	*rxr = adapter->rx_rings;
    545 	struct ixgbe_hw	*hw = &adapter->hw;
    546 	struct ifnet	*ifp = adapter->ifp;
    547 	int		i, j;
    548 	u32		bufsz, fctrl, srrctl, rxcsum;
    549 	u32		hlreg;
    550 
    551 	/*
    552 	 * Make sure receives are disabled while
    553 	 * setting up the descriptor ring
    554 	 */
    555 	ixgbe_disable_rx(hw);
    556 
    557 	/* Enable broadcasts */
    558 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    559 	fctrl |= IXGBE_FCTRL_BAM;
        	/* 82598 only: also set DPF/PMCF in FCTRL */
    560 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    561 		fctrl |= IXGBE_FCTRL_DPF;
    562 		fctrl |= IXGBE_FCTRL_PMCF;
    563 	}
    564 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    565 
    566 	/* Set for Jumbo Frames? */
    567 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    568 	if (ifp->if_mtu > ETHERMTU)
    569 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    570 	else
    571 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    572 
    573 #ifdef DEV_NETMAP
    574 	/* CRC stripping is conditional in Netmap */
    575 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    576 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    577 	    !ix_crcstrip)
    578 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    579 	else
    580 #endif /* DEV_NETMAP */
    581 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    582 
    583 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    584 
        	/* RX mbuf size rounded up and expressed in SRRCTL BSIZEPKT units */
    585 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    586 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    587 
    588 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    589 		u64 rdba = rxr->rxdma.dma_paddr;
    590 		u32 reg;
    591 		int regnum = i / 4;	/* 1 register per 4 queues */
    592 		int regshift = i % 4;	/* 4 bits per 1 queue */
        		/* j is the hardware ring index used for register
        		 * addressing; it may differ from the software index i */
    593 		j = rxr->me;
    594 
    595 		/* Setup the Base and Length of the Rx Descriptor Ring */
    596 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    597 		    (rdba & 0x00000000ffffffffULL));
    598 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    599 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    600 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    601 
    602 		/* Set up the SRRCTL register */
    603 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    604 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    605 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    606 		srrctl |= bufsz;
    607 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    608 
    609 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    610 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    611 		reg &= ~(0x000000ffUL << (regshift * 8));
    612 		reg |= i << (regshift * 8);
    613 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    614 
    615 		/*
    616 		 * Set DROP_EN iff we have no flow control and >1 queue.
    617 		 * Note that srrctl was cleared shortly before during reset,
    618 		 * so we do not need to clear the bit, but do it just in case
    619 		 * this code is moved elsewhere.
    620 		 */
    621 		if (adapter->num_queues > 1 &&
    622 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    623 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    624 		} else {
    625 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    626 		}
    627 
    628 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    629 
    630 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    631 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    632 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    633 
    634 		/* Set the driver rx tail address */
    635 		rxr->tail =  IXGBE_RDT(rxr->me);
    636 	}
    637 
        	/* Packet-split receive type selection does not exist on 82598 */
    638 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    639 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    640 			    | IXGBE_PSRTYPE_UDPHDR
    641 			    | IXGBE_PSRTYPE_IPV4HDR
    642 			    | IXGBE_PSRTYPE_IPV6HDR;
    643 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    644 	}
    645 
    646 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    647 
    648 	ixgbe_initialize_rss_mapping(adapter);
    649 
    650 	if (adapter->num_queues > 1) {
    651 		/* RSS and RX IPP Checksum are mutually exclusive */
    652 		rxcsum |= IXGBE_RXCSUM_PCSD;
    653 	}
    654 
    655 	if (ifp->if_capenable & IFCAP_RXCSUM)
    656 		rxcsum |= IXGBE_RXCSUM_PCSD;
    657 
    658 	/* This is useful for calculating UDP/IP fragment checksums */
    659 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    660 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    661 
    662 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    663 
    664 } /* ixgbe_initialize_receive_units */
    665 
    666 /************************************************************************
    667  * ixgbe_initialize_transmit_units - Enable transmit units.
    668  ************************************************************************/
    669 static void
    670 ixgbe_initialize_transmit_units(struct adapter *adapter)
    671 {
        	/*
        	 * Program the TX side of the chip: per-ring descriptor
        	 * base/length, TQSMR/TQSM statistics mapping, head/tail
        	 * pointers, and the DCA_TXCTRL write-back setting; then, on
        	 * everything newer than 82598, enable DMA TX and set MTQC
        	 * (with the descriptor arbiter briefly disabled, as required).
        	 */
    672 	struct tx_ring	*txr = adapter->tx_rings;
    673 	struct ixgbe_hw	*hw = &adapter->hw;
    674 	int i;
    675 
    676 	/* Setup the Base and Length of the Tx Descriptor Ring */
    677 	for (i = 0; i < adapter->num_queues; i++, txr++) {
    678 		u64 tdba = txr->txdma.dma_paddr;
    679 		u32 txctrl = 0;
    680 		u32 tqsmreg, reg;
    681 		int regnum = i / 4;	/* 1 register per 4 queues */
    682 		int regshift = i % 4;	/* 4 bits per 1 queue */
    683 		int j = txr->me;	/* hardware ring index for registers */
    684 
    685 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    686 		    (tdba & 0x00000000ffffffffULL));
    687 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    688 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    689 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    690 
    691 		/*
    692 		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
    693 		 * Register location is different between 82598 and others.
    694 		 */
    695 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    696 			tqsmreg = IXGBE_TQSMR(regnum);
    697 		else
    698 			tqsmreg = IXGBE_TQSM(regnum);
    699 		reg = IXGBE_READ_REG(hw, tqsmreg);
    700 		reg &= ~(0x000000ffUL << (regshift * 8));
    701 		reg |= i << (regshift * 8);
    702 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    703 
    704 		/* Setup the HW Tx Head and Tail descriptor pointers */
    705 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    706 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    707 
    708 		/* Cache the tail address */
    709 		txr->tail = IXGBE_TDT(j);
    710 
        		/* Ring starts out with descriptor space available */
    711 		txr->txr_no_space = false;
    712 
    713 		/* Disable Head Writeback */
    714 		/*
    715 		 * Note: for X550 series devices, these registers are actually
    716 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    717 		 * fields remain the same.
    718 		 */
        		/* The TXCTRL register moved after 82598, hence the switch */
    719 		switch (hw->mac.type) {
    720 		case ixgbe_mac_82598EB:
    721 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    722 			break;
    723 		default:
    724 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    725 			break;
    726 		}
    727 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    728 		switch (hw->mac.type) {
    729 		case ixgbe_mac_82598EB:
    730 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    731 			break;
    732 		default:
    733 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    734 			break;
    735 		}
    736 
    737 	}
    738 
    739 	if (hw->mac.type != ixgbe_mac_82598EB) {
    740 		u32 dmatxctl, rttdcs;
    741 
        		/* Globally enable DMA transmit */
    742 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    743 		dmatxctl |= IXGBE_DMATXCTL_TE;
    744 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    745 		/* Disable arbiter to set MTQC */
    746 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    747 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    748 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    749 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    750 		    ixgbe_get_mtqc(adapter->iov_mode));
        		/* ... then re-enable the arbiter */
    751 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    752 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    753 	}
    754 
    755 	return;
    756 } /* ixgbe_initialize_transmit_units */
    757 
    758 /************************************************************************
    759  * ixgbe_attach - Device initialization routine
    760  *
    761  *   Called when the driver is being loaded.
    762  *   Identifies the type of hardware, allocates all resources
    763  *   and initializes the hardware.
    764  *
    765  *   return 0 on success, positive on failure
    766  ************************************************************************/
    767 static void
    768 ixgbe_attach(device_t parent, device_t dev, void *aux)
    769 {
    770 	struct adapter	*adapter;
    771 	struct ixgbe_hw *hw;
    772 	int		error = -1;
    773 	u32		ctrl_ext;
    774 	u16		high, low, nvmreg;
    775 	pcireg_t	id, subid;
    776 	const ixgbe_vendor_info_t *ent;
    777 	struct pci_attach_args *pa = aux;
    778 	const char *str;
    779 	char buf[256];
    780 
    781 	INIT_DEBUGOUT("ixgbe_attach: begin");
    782 
    783 	/* Allocate, clear, and link in our adapter structure */
    784 	adapter = device_private(dev);
    785 	adapter->hw.back = adapter;
    786 	adapter->dev = dev;
    787 	hw = &adapter->hw;
    788 	adapter->osdep.pc = pa->pa_pc;
    789 	adapter->osdep.tag = pa->pa_tag;
    790 	if (pci_dma64_available(pa))
    791 		adapter->osdep.dmat = pa->pa_dmat64;
    792 	else
    793 		adapter->osdep.dmat = pa->pa_dmat;
    794 	adapter->osdep.attached = false;
    795 
    796 	ent = ixgbe_lookup(pa);
    797 
    798 	KASSERT(ent != NULL);
    799 
    800 	aprint_normal(": %s, Version - %s\n",
    801 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    802 
    803 	/* Core Lock Init*/
    804 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    805 
    806 	/* Set up the timer callout */
    807 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    808 
    809 	/* Determine hardware revision */
    810 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    811 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    812 
    813 	hw->vendor_id = PCI_VENDOR(id);
    814 	hw->device_id = PCI_PRODUCT(id);
    815 	hw->revision_id =
    816 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    817 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    818 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    819 
    820 	/*
    821 	 * Make sure BUSMASTER is set
    822 	 */
    823 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    824 
    825 	/* Do base PCI setup - map BAR0 */
    826 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    827 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    828 		error = ENXIO;
    829 		goto err_out;
    830 	}
    831 
    832 	/* let hardware know driver is loaded */
    833 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    834 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    835 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    836 
    837 	/*
    838 	 * Initialize the shared code
    839 	 */
    840 	if (ixgbe_init_shared_code(hw) != 0) {
    841 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    842 		error = ENXIO;
    843 		goto err_out;
    844 	}
    845 
    846 	switch (hw->mac.type) {
    847 	case ixgbe_mac_82598EB:
    848 		str = "82598EB";
    849 		break;
    850 	case ixgbe_mac_82599EB:
    851 		str = "82599EB";
    852 		break;
    853 	case ixgbe_mac_X540:
    854 		str = "X540";
    855 		break;
    856 	case ixgbe_mac_X550:
    857 		str = "X550";
    858 		break;
    859 	case ixgbe_mac_X550EM_x:
    860 		str = "X550EM";
    861 		break;
    862 	case ixgbe_mac_X550EM_a:
    863 		str = "X550EM A";
    864 		break;
    865 	default:
    866 		str = "Unknown";
    867 		break;
    868 	}
    869 	aprint_normal_dev(dev, "device %s\n", str);
    870 
    871 	if (hw->mbx.ops.init_params)
    872 		hw->mbx.ops.init_params(hw);
    873 
    874 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    875 
    876 	/* Pick up the 82599 settings */
    877 	if (hw->mac.type != ixgbe_mac_82598EB) {
    878 		hw->phy.smart_speed = ixgbe_smart_speed;
    879 		adapter->num_segs = IXGBE_82599_SCATTER;
    880 	} else
    881 		adapter->num_segs = IXGBE_82598_SCATTER;
    882 
    883 	/* Ensure SW/FW semaphore is free */
    884 	ixgbe_init_swfw_semaphore(hw);
    885 
    886 	hw->mac.ops.set_lan_id(hw);
    887 	ixgbe_init_device_features(adapter);
    888 
    889 	if (ixgbe_configure_interrupts(adapter)) {
    890 		error = ENXIO;
    891 		goto err_out;
    892 	}
    893 
    894 	/* Allocate multicast array memory. */
    895 	adapter->mta = malloc(sizeof(*adapter->mta) *
    896 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    897 	if (adapter->mta == NULL) {
    898 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    899 		error = ENOMEM;
    900 		goto err_out;
    901 	}
    902 
    903 	/* Enable WoL (if supported) */
    904 	ixgbe_check_wol_support(adapter);
    905 
    906 	/* Verify adapter fan is still functional (if applicable) */
    907 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    908 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    909 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    910 	}
    911 
    912 	/* Set an initial default flow control value */
    913 	hw->fc.requested_mode = ixgbe_flow_control;
    914 
    915 	/* Sysctls for limiting the amount of work done in the taskqueues */
    916 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    917 	    "max number of rx packets to process",
    918 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    919 
    920 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    921 	    "max number of tx packets to process",
    922 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    923 
    924 	/* Do descriptor calc and sanity checks */
    925 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    926 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    927 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    928 		adapter->num_tx_desc = DEFAULT_TXD;
    929 	} else
    930 		adapter->num_tx_desc = ixgbe_txd;
    931 
    932 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    933 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    934 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    935 		adapter->num_rx_desc = DEFAULT_RXD;
    936 	} else
    937 		adapter->num_rx_desc = ixgbe_rxd;
    938 
    939 	/* Allocate our TX/RX Queues */
    940 	if (ixgbe_allocate_queues(adapter)) {
    941 		error = ENOMEM;
    942 		goto err_out;
    943 	}
    944 
    945 	hw->phy.reset_if_overtemp = TRUE;
    946 	error = ixgbe_reset_hw(hw);
    947 	hw->phy.reset_if_overtemp = FALSE;
    948 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    949 		/*
    950 		 * No optics in this port, set up
    951 		 * so the timer routine will probe
    952 		 * for later insertion.
    953 		 */
    954 		adapter->sfp_probe = TRUE;
    955 		error = IXGBE_SUCCESS;
    956 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    957 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    958 		error = EIO;
    959 		goto err_late;
    960 	} else if (error) {
    961 		aprint_error_dev(dev, "Hardware initialization failed\n");
    962 		error = EIO;
    963 		goto err_late;
    964 	}
    965 
    966 	/* Make sure we have a good EEPROM before we read from it */
    967 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    968 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    969 		error = EIO;
    970 		goto err_late;
    971 	}
    972 
    973 	aprint_normal("%s:", device_xname(dev));
    974 	/* NVM Image Version */
    975 	high = low = 0;
    976 	switch (hw->mac.type) {
    977 	case ixgbe_mac_X540:
    978 	case ixgbe_mac_X550EM_a:
    979 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    980 		if (nvmreg == 0xffff)
    981 			break;
    982 		high = (nvmreg >> 12) & 0x0f;
    983 		low = (nvmreg >> 4) & 0xff;
    984 		id = nvmreg & 0x0f;
    985 		aprint_normal(" NVM Image Version %u.", high);
    986 		if (hw->mac.type == ixgbe_mac_X540)
    987 			str = "%x";
    988 		else
    989 			str = "%02x";
    990 		aprint_normal(str, low);
    991 		aprint_normal(" ID 0x%x,", id);
    992 		break;
    993 	case ixgbe_mac_X550EM_x:
    994 	case ixgbe_mac_X550:
    995 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    996 		if (nvmreg == 0xffff)
    997 			break;
    998 		high = (nvmreg >> 12) & 0x0f;
    999 		low = nvmreg & 0xff;
   1000 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
   1001 		break;
   1002 	default:
   1003 		break;
   1004 	}
   1005 	hw->eeprom.nvm_image_ver_high = high;
   1006 	hw->eeprom.nvm_image_ver_low = low;
   1007 
   1008 	/* PHY firmware revision */
   1009 	switch (hw->mac.type) {
   1010 	case ixgbe_mac_X540:
   1011 	case ixgbe_mac_X550:
   1012 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1013 		if (nvmreg == 0xffff)
   1014 			break;
   1015 		high = (nvmreg >> 12) & 0x0f;
   1016 		low = (nvmreg >> 4) & 0xff;
   1017 		id = nvmreg & 0x000f;
   1018 		aprint_normal(" PHY FW Revision %u.", high);
   1019 		if (hw->mac.type == ixgbe_mac_X540)
   1020 			str = "%x";
   1021 		else
   1022 			str = "%02x";
   1023 		aprint_normal(str, low);
   1024 		aprint_normal(" ID 0x%x,", id);
   1025 		break;
   1026 	default:
   1027 		break;
   1028 	}
   1029 
   1030 	/* NVM Map version & OEM NVM Image version */
   1031 	switch (hw->mac.type) {
   1032 	case ixgbe_mac_X550:
   1033 	case ixgbe_mac_X550EM_x:
   1034 	case ixgbe_mac_X550EM_a:
   1035 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1036 		if (nvmreg != 0xffff) {
   1037 			high = (nvmreg >> 12) & 0x0f;
   1038 			low = nvmreg & 0x00ff;
   1039 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1040 		}
   1041 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1042 		if (nvmreg != 0xffff) {
   1043 			high = (nvmreg >> 12) & 0x0f;
   1044 			low = nvmreg & 0x00ff;
   1045 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1046 			    low);
   1047 		}
   1048 		break;
   1049 	default:
   1050 		break;
   1051 	}
   1052 
   1053 	/* Print the ETrackID */
   1054 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1055 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1056 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1057 
   1058 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1059 		error = ixgbe_allocate_msix(adapter, pa);
   1060 		if (error) {
   1061 			/* Free allocated queue structures first */
   1062 			ixgbe_free_transmit_structures(adapter);
   1063 			ixgbe_free_receive_structures(adapter);
   1064 			free(adapter->queues, M_DEVBUF);
   1065 
   1066 			/* Fallback to legacy interrupt */
   1067 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1068 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1069 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1070 			adapter->num_queues = 1;
   1071 
   1072 			/* Allocate our TX/RX Queues again */
   1073 			if (ixgbe_allocate_queues(adapter)) {
   1074 				error = ENOMEM;
   1075 				goto err_out;
   1076 			}
   1077 		}
   1078 	}
   1079 	/* Recovery mode */
   1080 	switch (adapter->hw.mac.type) {
   1081 	case ixgbe_mac_X550:
   1082 	case ixgbe_mac_X550EM_x:
   1083 	case ixgbe_mac_X550EM_a:
   1084 		/* >= 2.00 */
   1085 		if (hw->eeprom.nvm_image_ver_high >= 2) {
   1086 			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
   1087 			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
   1088 		}
   1089 		break;
   1090 	default:
   1091 		break;
   1092 	}
   1093 
   1094 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1095 		error = ixgbe_allocate_legacy(adapter, pa);
   1096 	if (error)
   1097 		goto err_late;
   1098 
   1099 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1100 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1101 	    ixgbe_handle_link, adapter);
   1102 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1103 	    ixgbe_handle_mod, adapter);
   1104 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1105 	    ixgbe_handle_msf, adapter);
   1106 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1107 	    ixgbe_handle_phy, adapter);
   1108 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1109 		adapter->fdir_si =
   1110 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1111 			ixgbe_reinit_fdir, adapter);
   1112 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1113 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1114 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1115 		&& (adapter->fdir_si == NULL))) {
   1116 		aprint_error_dev(dev,
   1117 		    "could not establish software interrupts ()\n");
   1118 		goto err_out;
   1119 	}
   1120 
   1121 	error = ixgbe_start_hw(hw);
   1122 	switch (error) {
   1123 	case IXGBE_ERR_EEPROM_VERSION:
   1124 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1125 		    "LOM.  Please be aware there may be issues associated "
   1126 		    "with your hardware.\nIf you are experiencing problems "
   1127 		    "please contact your Intel or hardware representative "
   1128 		    "who provided you with this hardware.\n");
   1129 		break;
   1130 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1131 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1132 		error = EIO;
   1133 		goto err_late;
   1134 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1135 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1136 		/* falls thru */
   1137 	default:
   1138 		break;
   1139 	}
   1140 
   1141 	/* Setup OS specific network interface */
   1142 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1143 		goto err_late;
   1144 
   1145 	/*
   1146 	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
   1147 	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
   1148 	 */
   1149 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1150 		uint16_t id1, id2;
   1151 		int oui, model, rev;
   1152 		const char *descr;
   1153 
   1154 		id1 = hw->phy.id >> 16;
   1155 		id2 = hw->phy.id & 0xffff;
   1156 		oui = MII_OUI(id1, id2);
   1157 		model = MII_MODEL(id2);
   1158 		rev = MII_REV(id2);
   1159 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1160 			aprint_normal_dev(dev,
   1161 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1162 			    descr, oui, model, rev);
   1163 		else
   1164 			aprint_normal_dev(dev,
   1165 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1166 			    oui, model, rev);
   1167 	}
   1168 
   1169 	/* Enable the optics for 82599 SFP+ fiber */
   1170 	ixgbe_enable_tx_laser(hw);
   1171 
   1172 	/* Enable EEE power saving */
   1173 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   1174 		hw->mac.ops.setup_eee(hw,
   1175 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   1176 
   1177 	/* Enable power to the phy. */
   1178 	ixgbe_set_phy_power(hw, TRUE);
   1179 
   1180 	/* Initialize statistics */
   1181 	ixgbe_update_stats_counters(adapter);
   1182 
   1183 	/* Check PCIE slot type/speed/width */
   1184 	ixgbe_get_slot_info(adapter);
   1185 
   1186 	/*
   1187 	 * Do time init and sysctl init here, but
   1188 	 * only on the first port of a bypass adapter.
   1189 	 */
   1190 	ixgbe_bypass_init(adapter);
   1191 
   1192 	/* Set an initial dmac value */
   1193 	adapter->dmac = 0;
   1194 	/* Set initial advertised speeds (if applicable) */
   1195 	adapter->advertise = ixgbe_get_advertise(adapter);
   1196 
   1197 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1198 		ixgbe_define_iov_schemas(dev, &error);
   1199 
   1200 	/* Add sysctls */
   1201 	ixgbe_add_device_sysctls(adapter);
   1202 	ixgbe_add_hw_stats(adapter);
   1203 
   1204 	/* For Netmap */
   1205 	adapter->init_locked = ixgbe_init_locked;
   1206 	adapter->stop_locked = ixgbe_stop;
   1207 
   1208 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1209 		ixgbe_netmap_attach(adapter);
   1210 
   1211 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1212 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1213 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1214 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1215 
   1216 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1217 		pmf_class_network_register(dev, adapter->ifp);
   1218 	else
   1219 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1220 
   1221 	/* Init recovery mode timer and state variable */
   1222 	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
   1223 		adapter->recovery_mode = 0;
   1224 
   1225 		/* Set up the timer callout */
   1226 		callout_init(&adapter->recovery_mode_timer,
   1227 		    IXGBE_CALLOUT_FLAGS);
   1228 
   1229 		/* Start the task */
   1230 		callout_reset(&adapter->recovery_mode_timer, hz,
   1231 		    ixgbe_recovery_mode_timer, adapter);
   1232 	}
   1233 
   1234 	INIT_DEBUGOUT("ixgbe_attach: end");
   1235 	adapter->osdep.attached = true;
   1236 
   1237 	return;
   1238 
   1239 err_late:
   1240 	ixgbe_free_transmit_structures(adapter);
   1241 	ixgbe_free_receive_structures(adapter);
   1242 	free(adapter->queues, M_DEVBUF);
   1243 err_out:
   1244 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1245 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1246 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1247 	ixgbe_free_softint(adapter);
   1248 	ixgbe_free_pci_resources(adapter);
   1249 	if (adapter->mta != NULL)
   1250 		free(adapter->mta, M_DEVBUF);
   1251 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1252 
   1253 	return;
   1254 } /* ixgbe_attach */
   1255 
   1256 /************************************************************************
   1257  * ixgbe_check_wol_support
   1258  *
   1259  *   Checks whether the adapter's ports are capable of
   1260  *   Wake On LAN by reading the adapter's NVM.
   1261  *
   1262  *   Sets each port's hw->wol_enabled value depending
   1263  *   on the value read here.
   1264  ************************************************************************/
   1265 static void
   1266 ixgbe_check_wol_support(struct adapter *adapter)
   1267 {
   1268 	struct ixgbe_hw *hw = &adapter->hw;
   1269 	u16		dev_caps = 0;
   1270 
   1271 	/* Find out WoL support for port */
   1272 	adapter->wol_support = hw->wol_enabled = 0;
   1273 	ixgbe_get_device_caps(hw, &dev_caps);
   1274 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1275 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1276 	     hw->bus.func == 0))
   1277 		adapter->wol_support = hw->wol_enabled = 1;
   1278 
   1279 	/* Save initial wake up filter configuration */
   1280 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1281 
   1282 	return;
   1283 } /* ixgbe_check_wol_support */
   1284 
   1285 /************************************************************************
   1286  * ixgbe_setup_interface
   1287  *
   1288  *   Setup networking device structure and register an interface.
   1289  ************************************************************************/
   1290 static int
   1291 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1292 {
   1293 	struct ethercom *ec = &adapter->osdep.ec;
   1294 	struct ifnet   *ifp;
   1295 	int rv;
   1296 
   1297 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1298 
   1299 	ifp = adapter->ifp = &ec->ec_if;
   1300 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1301 	ifp->if_baudrate = IF_Gbps(10);
   1302 	ifp->if_init = ixgbe_init;
   1303 	ifp->if_stop = ixgbe_ifstop;
   1304 	ifp->if_softc = adapter;
   1305 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1306 #ifdef IXGBE_MPSAFE
   1307 	ifp->if_extflags = IFEF_MPSAFE;
   1308 #endif
   1309 	ifp->if_ioctl = ixgbe_ioctl;
   1310 #if __FreeBSD_version >= 1100045
   1311 	/* TSO parameters */
   1312 	ifp->if_hw_tsomax = 65518;
   1313 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1314 	ifp->if_hw_tsomaxsegsize = 2048;
   1315 #endif
   1316 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1317 #if 0
   1318 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1319 #endif
   1320 	} else {
   1321 		ifp->if_transmit = ixgbe_mq_start;
   1322 #if 0
   1323 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1324 #endif
   1325 	}
   1326 	ifp->if_start = ixgbe_legacy_start;
   1327 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1328 	IFQ_SET_READY(&ifp->if_snd);
   1329 
   1330 	rv = if_initialize(ifp);
   1331 	if (rv != 0) {
   1332 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1333 		return rv;
   1334 	}
   1335 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1336 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1337 	/*
   1338 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1339 	 * used.
   1340 	 */
   1341 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1342 
   1343 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1344 
   1345 	/*
   1346 	 * Tell the upper layer(s) we support long frames.
   1347 	 */
   1348 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1349 
   1350 	/* Set capability flags */
   1351 	ifp->if_capabilities |= IFCAP_RXCSUM
   1352 			     |	IFCAP_TXCSUM
   1353 			     |	IFCAP_TSOv4
   1354 			     |	IFCAP_TSOv6;
   1355 	ifp->if_capenable = 0;
   1356 
   1357 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1358 			    |  ETHERCAP_VLAN_HWCSUM
   1359 			    |  ETHERCAP_JUMBO_MTU
   1360 			    |  ETHERCAP_VLAN_MTU;
   1361 
   1362 	/* Enable the above capabilities by default */
   1363 	ec->ec_capenable = ec->ec_capabilities;
   1364 
   1365 	/*
   1366 	 * Don't turn this on by default, if vlans are
   1367 	 * created on another pseudo device (eg. lagg)
   1368 	 * then vlan events are not passed thru, breaking
   1369 	 * operation, but with HW FILTER off it works. If
   1370 	 * using vlans directly on the ixgbe driver you can
   1371 	 * enable this and get full hardware tag filtering.
   1372 	 */
   1373 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1374 
   1375 	/*
   1376 	 * Specify the media types supported by this adapter and register
   1377 	 * callbacks to update media and link information
   1378 	 */
   1379 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1380 	    ixgbe_media_status);
   1381 
   1382 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1383 	ixgbe_add_media_types(adapter);
   1384 
   1385 	/* Set autoselect media by default */
   1386 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1387 
   1388 	if_register(ifp);
   1389 
   1390 	return (0);
   1391 } /* ixgbe_setup_interface */
   1392 
   1393 /************************************************************************
   1394  * ixgbe_add_media_types
   1395  ************************************************************************/
   1396 static void
   1397 ixgbe_add_media_types(struct adapter *adapter)
   1398 {
   1399 	struct ixgbe_hw *hw = &adapter->hw;
   1400 	device_t	dev = adapter->dev;
   1401 	u64		layer;
   1402 
   1403 	layer = adapter->phy_layer;
   1404 
   1405 #define	ADD(mm, dd)							\
   1406 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1407 
   1408 	ADD(IFM_NONE, 0);
   1409 
   1410 	/* Media types with matching NetBSD media defines */
   1411 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1412 		ADD(IFM_10G_T | IFM_FDX, 0);
   1413 	}
   1414 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1415 		ADD(IFM_1000_T | IFM_FDX, 0);
   1416 	}
   1417 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1418 		ADD(IFM_100_TX | IFM_FDX, 0);
   1419 	}
   1420 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1421 		ADD(IFM_10_T | IFM_FDX, 0);
   1422 	}
   1423 
   1424 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1425 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1426 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1427 	}
   1428 
   1429 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1430 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1431 		if (hw->phy.multispeed_fiber) {
   1432 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1433 		}
   1434 	}
   1435 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1436 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1437 		if (hw->phy.multispeed_fiber) {
   1438 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1439 		}
   1440 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1441 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1442 	}
   1443 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1444 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1445 	}
   1446 
   1447 #ifdef IFM_ETH_XTYPE
   1448 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1449 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1450 	}
   1451 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1452 		ADD(AIFM_10G_KX4 | IFM_FDX, 0);
   1453 	}
   1454 #else
   1455 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1456 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1457 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1458 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1459 	}
   1460 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1461 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1462 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1463 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1464 	}
   1465 #endif
   1466 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1467 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1468 	}
   1469 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1470 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1471 	}
   1472 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1473 		ADD(IFM_2500_T | IFM_FDX, 0);
   1474 	}
   1475 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1476 		ADD(IFM_5000_T | IFM_FDX, 0);
   1477 	}
   1478 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1479 		device_printf(dev, "Media supported: 1000baseBX\n");
   1480 	/* XXX no ifmedia_set? */
   1481 
   1482 	ADD(IFM_AUTO, 0);
   1483 
   1484 #undef ADD
   1485 } /* ixgbe_add_media_types */
   1486 
   1487 /************************************************************************
   1488  * ixgbe_is_sfp
   1489  ************************************************************************/
   1490 static inline bool
   1491 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1492 {
   1493 	switch (hw->mac.type) {
   1494 	case ixgbe_mac_82598EB:
   1495 		if (hw->phy.type == ixgbe_phy_nl)
   1496 			return (TRUE);
   1497 		return (FALSE);
   1498 	case ixgbe_mac_82599EB:
   1499 		switch (hw->mac.ops.get_media_type(hw)) {
   1500 		case ixgbe_media_type_fiber:
   1501 		case ixgbe_media_type_fiber_qsfp:
   1502 			return (TRUE);
   1503 		default:
   1504 			return (FALSE);
   1505 		}
   1506 	case ixgbe_mac_X550EM_x:
   1507 	case ixgbe_mac_X550EM_a:
   1508 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1509 			return (TRUE);
   1510 		return (FALSE);
   1511 	default:
   1512 		return (FALSE);
   1513 	}
   1514 } /* ixgbe_is_sfp */
   1515 
   1516 /************************************************************************
   1517  * ixgbe_config_link
   1518  ************************************************************************/
   1519 static void
   1520 ixgbe_config_link(struct adapter *adapter)
   1521 {
   1522 	struct ixgbe_hw *hw = &adapter->hw;
   1523 	u32		autoneg, err = 0;
   1524 	bool		sfp, negotiate = false;
   1525 
   1526 	sfp = ixgbe_is_sfp(hw);
   1527 
   1528 	if (sfp) {
   1529 		if (hw->phy.multispeed_fiber) {
   1530 			ixgbe_enable_tx_laser(hw);
   1531 			kpreempt_disable();
   1532 			softint_schedule(adapter->msf_si);
   1533 			kpreempt_enable();
   1534 		}
   1535 		kpreempt_disable();
   1536 		softint_schedule(adapter->mod_si);
   1537 		kpreempt_enable();
   1538 	} else {
   1539 		struct ifmedia	*ifm = &adapter->media;
   1540 
   1541 		if (hw->mac.ops.check_link)
   1542 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1543 			    &adapter->link_up, FALSE);
   1544 		if (err)
   1545 			return;
   1546 
   1547 		/*
   1548 		 * Check if it's the first call. If it's the first call,
   1549 		 * get value for auto negotiation.
   1550 		 */
   1551 		autoneg = hw->phy.autoneg_advertised;
   1552 		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
   1553 		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
   1554 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1555 			    &negotiate);
   1556 		if (err)
   1557 			return;
   1558 		if (hw->mac.ops.setup_link)
   1559 			err = hw->mac.ops.setup_link(hw, autoneg,
   1560 			    adapter->link_up);
   1561 	}
   1562 
   1563 } /* ixgbe_config_link */
   1564 
   1565 /************************************************************************
   1566  * ixgbe_update_stats_counters - Update board statistics counters.
   1567  ************************************************************************/
   1568 static void
   1569 ixgbe_update_stats_counters(struct adapter *adapter)
   1570 {
   1571 	struct ifnet	      *ifp = adapter->ifp;
   1572 	struct ixgbe_hw	      *hw = &adapter->hw;
   1573 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1574 	u32		      missed_rx = 0, bprc, lxon, lxoff, total;
   1575 	u64		      total_missed_rx = 0;
   1576 	uint64_t	      crcerrs, rlec;
   1577 	unsigned int	      queue_counters;
   1578 	int		      i;
   1579 
   1580 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1581 	stats->crcerrs.ev_count += crcerrs;
   1582 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1583 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1584 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1585 	if (hw->mac.type == ixgbe_mac_X550)
   1586 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1587 
   1588 	/* 16 registers exist */
   1589 	queue_counters = min(__arraycount(stats->qprc), adapter->num_queues);
   1590 	for (i = 0; i < queue_counters; i++) {
   1591 		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1592 		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1593 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1594 			stats->qprdc[i].ev_count
   1595 			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1596 		}
   1597 	}
   1598 
   1599 	/* 8 registers exist */
   1600 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1601 		uint32_t mp;
   1602 
   1603 		/* MPC */
   1604 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1605 		/* global total per queue */
   1606 		stats->mpc[i].ev_count += mp;
   1607 		/* running comprehensive total for stats display */
   1608 		total_missed_rx += mp;
   1609 
   1610 		if (hw->mac.type == ixgbe_mac_82598EB)
   1611 			stats->rnbc[i].ev_count
   1612 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1613 
   1614 		stats->pxontxc[i].ev_count
   1615 		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   1616 		stats->pxofftxc[i].ev_count
   1617 		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   1618 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1619 			stats->pxonrxc[i].ev_count
   1620 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
   1621 			stats->pxoffrxc[i].ev_count
   1622 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
   1623 			stats->pxon2offc[i].ev_count
   1624 			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   1625 		} else {
   1626 			stats->pxonrxc[i].ev_count
   1627 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   1628 			stats->pxoffrxc[i].ev_count
   1629 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   1630 		}
   1631 	}
   1632 	stats->mpctotal.ev_count += total_missed_rx;
   1633 
   1634 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1635 	if ((adapter->link_active == LINK_STATE_UP)
   1636 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1637 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1638 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1639 	}
   1640 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1641 	stats->rlec.ev_count += rlec;
   1642 
   1643 	/* Hardware workaround, gprc counts missed packets */
   1644 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1645 
   1646 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1647 	stats->lxontxc.ev_count += lxon;
   1648 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1649 	stats->lxofftxc.ev_count += lxoff;
   1650 	total = lxon + lxoff;
   1651 
   1652 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1653 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1654 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1655 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1656 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1657 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1658 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1659 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1660 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1661 	} else {
   1662 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1663 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1664 		/* 82598 only has a counter in the high register */
   1665 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1666 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1667 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1668 	}
   1669 
   1670 	/*
   1671 	 * Workaround: mprc hardware is incorrectly counting
   1672 	 * broadcasts, so for now we subtract those.
   1673 	 */
   1674 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1675 	stats->bprc.ev_count += bprc;
   1676 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1677 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1678 
   1679 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1680 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1681 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1682 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1683 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1684 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1685 
   1686 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1687 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1688 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1689 
   1690 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1691 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1692 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1693 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1694 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1695 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1696 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1697 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1698 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1699 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1700 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1701 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1702 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1703 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1704 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1705 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1706 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1707 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1708 	/* Only read FCOE on 82599 */
   1709 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1710 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1711 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1712 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1713 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1714 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1715 	}
   1716 
   1717 	/* Fill out the OS statistics structure */
   1718 	/*
   1719 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1720 	 * adapter->stats counters. It's required to make ifconfig -z
   1721 	 * (SOICZIFDATA) work.
   1722 	 */
   1723 	ifp->if_collisions = 0;
   1724 
   1725 	/* Rx Errors */
   1726 	ifp->if_iqdrops += total_missed_rx;
   1727 	ifp->if_ierrors += crcerrs + rlec;
   1728 } /* ixgbe_update_stats_counters */
   1729 
   1730 /************************************************************************
   1731  * ixgbe_add_hw_stats
   1732  *
   1733  *   Add sysctl variables, one per statistic, to the system.
   1734  ************************************************************************/
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	const char *xname = device_xname(dev);
	int i;

	/*
	 * Driver Statistics: software counters maintained by the driver
	 * itself (DMA mapping failures, watchdog timeouts, softint
	 * activity).  NOTE(review): evcnt_attach_dynamic() appears to keep
	 * a reference to the name strings passed in, hence the use of
	 * per-adapter buffers (evnamebuf/namebuf) below -- confirm against
	 * evcnt(9).
	 */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");
	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
	    NULL, xname, "Link softint");
	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
	    NULL, xname, "module softint");
	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
	    NULL, xname, "multimode softint");
	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
	    NULL, xname, "external PHY softint");

	/* Max number of traffic class is 8 */
	KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
	/*
	 * Per-traffic-class counters.  The __arraycount() guards protect
	 * against IXGBE_TC_COUNTER_NUM exceeding the stat array sizes.
	 */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		snprintf(adapter->tcs[i].evnamebuf,
		    sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
		    xname, i);
		if (i < __arraycount(stats->mpc)) {
			evcnt_attach_dynamic(&stats->mpc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "RX Missed Packet Count");
			/* RNBC only exists on 82598 */
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_attach_dynamic(&stats->rnbc[i],
				    EVCNT_TYPE_MISC, NULL,
				    adapter->tcs[i].evnamebuf,
				    "Receive No Buffers");
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_attach_dynamic(&stats->pxontxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "pxontxc");
			evcnt_attach_dynamic(&stats->pxonrxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "pxonrxc");
			evcnt_attach_dynamic(&stats->pxofftxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "pxofftxc");
			evcnt_attach_dynamic(&stats->pxoffrxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "pxoffrxc");
			/* XON-to-XOFF counter only on 82599 and newer */
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_attach_dynamic(&stats->pxon2offc[i],
				    EVCNT_TYPE_MISC, NULL,
				    adapter->tcs[i].evnamebuf,
			    "pxon2offc");
		}
	}

	/*
	 * Per-queue sysctl nodes and event counters.  On any sysctl
	 * creation failure the loop stops; counters attached before the
	 * failure stay attached.
	 */
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
			ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue HW counters; only 16 register sets exist */
		if (i < __arraycount(stats->qprc)) {
			evcnt_attach_dynamic(&stats->qprc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qprc");
			evcnt_attach_dynamic(&stats->qptc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qptc");
			evcnt_attach_dynamic(&stats->qbrc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qbrc");
			evcnt_attach_dynamic(&stats->qbtc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qbtc");
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_attach_dynamic(&stats->qprdc[i],
				    EVCNT_TYPE_MISC, NULL,
				    adapter->queues[i].evnamebuf, "qprdc");
		}

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
				CTLFLAG_RD, &lro->lro_queued, 0,
				"LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
				CTLFLAG_RD, &lro->lro_flushed, 0,
				"LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");
	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Interrupt conditions zero");
	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Legacy interrupts");

	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "CRC Errors");
	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Illegal Byte Errors");
	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Byte Errors");
	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Short Packets Discarded");
	/* Bad SFD counter exists on X550 and newer only */
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
		    stats->namebuf, "Bad SFD");
	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Packets Missed");
	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Local Faults");
	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Remote Faults");
	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Receive Length Errors");
	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XON Transmitted");
	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XON Received");
	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XOFF Transmitted");
	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XOFF Received");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Octets Received");
	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Octets Received");
	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Packets Received");
	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Packets Received");
	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Broadcast Packets Received");
	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "64 byte frames received ");
	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "65-127 byte frames received");
	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "128-255 byte frames received");
	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "256-511 byte frames received");
	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "512-1023 byte frames received");
	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "1023-1522 byte frames received");
	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Receive Undersized");
	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Fragmented Packets Received ");
	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Oversized Packets Received");
	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Received Jabber");
	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Management Packets Received");
	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Management Packets Dropped");
	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Checksum Errors");

	/* Packet Transmission Stats */
	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Octets Transmitted");
	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Packets Transmitted");
	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Broadcast Packets Transmitted");
	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Multicast Packets Transmitted");
	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Management Packets Transmitted");
	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "64 byte frames transmitted ");
	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "65-127 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "128-255 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "256-511 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "512-1023 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */
   2054 
/************************************************************************
 * ixgbe_clear_evcnt
 *
 *   Reset all driver and MAC event counters attached by
 *   ixgbe_add_hw_stats() back to zero, so interface statistics can be
 *   cleared (cf. the ifconfig -z / SOICZIFDATA note in
 *   ixgbe_update_stats_counters()).
 ************************************************************************/
static void
ixgbe_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	int i;

	/* Driver software counters */
	adapter->efbig_tx_dma_setup.ev_count = 0;
	adapter->mbuf_defrag_failed.ev_count = 0;
	adapter->efbig2_tx_dma_setup.ev_count = 0;
	adapter->einval_tx_dma_setup.ev_count = 0;
	adapter->other_tx_dma_setup.ev_count = 0;
	adapter->eagain_tx_dma_setup.ev_count = 0;
	adapter->enomem_tx_dma_setup.ev_count = 0;
	adapter->tso_err.ev_count = 0;
	adapter->watchdog_events.ev_count = 0;
	adapter->link_irq.ev_count = 0;
	adapter->link_sicount.ev_count = 0;
	adapter->mod_sicount.ev_count = 0;
	adapter->msf_sicount.ev_count = 0;
	adapter->phy_sicount.ev_count = 0;

	/* Per-traffic-class counters; same bounds logic as the attach side */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			stats->mpc[i].ev_count = 0;
			if (hw->mac.type == ixgbe_mac_82598EB)
				stats->rnbc[i].ev_count = 0;
		}
		if (i < __arraycount(stats->pxontxc)) {
			stats->pxontxc[i].ev_count = 0;
			stats->pxonrxc[i].ev_count = 0;
			stats->pxofftxc[i].ev_count = 0;
			stats->pxoffrxc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->pxon2offc[i].ev_count = 0;
		}
	}

	/* Per-queue counters */
	/* NOTE(review): txr already points at tx_rings; this re-init is redundant. */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		adapter->queues[i].irqs.ev_count = 0;
		adapter->queues[i].handleq.ev_count = 0;
		adapter->queues[i].req.ev_count = 0;
		txr->no_desc_avail.ev_count = 0;
		txr->total_packets.ev_count = 0;
		txr->tso_tx.ev_count = 0;
#ifndef IXGBE_LEGACY_TX
		txr->pcq_drops.ev_count = 0;
#endif
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			stats->qprc[i].ev_count = 0;
			stats->qptc[i].ev_count = 0;
			stats->qbrc[i].ev_count = 0;
			stats->qbtc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->qprdc[i].ev_count = 0;
		}

		rxr->rx_packets.ev_count = 0;
		rxr->rx_bytes.ev_count = 0;
		rxr->rx_copies.ev_count = 0;
		rxr->no_jmbuf.ev_count = 0;
		rxr->rx_discarded.ev_count = 0;
	}
	/* MAC statistics counters */
	stats->ipcs.ev_count = 0;
	stats->l4cs.ev_count = 0;
	stats->ipcs_bad.ev_count = 0;
	stats->l4cs_bad.ev_count = 0;
	stats->intzero.ev_count = 0;
	stats->legint.ev_count = 0;
	stats->crcerrs.ev_count = 0;
	stats->illerrc.ev_count = 0;
	stats->errbc.ev_count = 0;
	stats->mspdc.ev_count = 0;
	/* mbsdc is cleared unconditionally; it is only attached on X550+ */
	stats->mbsdc.ev_count = 0;
	stats->mpctotal.ev_count = 0;
	stats->mlfc.ev_count = 0;
	stats->mrfc.ev_count = 0;
	stats->rlec.ev_count = 0;
	stats->lxontxc.ev_count = 0;
	stats->lxonrxc.ev_count = 0;
	stats->lxofftxc.ev_count = 0;
	stats->lxoffrxc.ev_count = 0;

	/* Packet Reception Stats */
	stats->tor.ev_count = 0;
	stats->gorc.ev_count = 0;
	stats->tpr.ev_count = 0;
	stats->gprc.ev_count = 0;
	stats->mprc.ev_count = 0;
	stats->bprc.ev_count = 0;
	stats->prc64.ev_count = 0;
	stats->prc127.ev_count = 0;
	stats->prc255.ev_count = 0;
	stats->prc511.ev_count = 0;
	stats->prc1023.ev_count = 0;
	stats->prc1522.ev_count = 0;
	stats->ruc.ev_count = 0;
	stats->rfc.ev_count = 0;
	stats->roc.ev_count = 0;
	stats->rjc.ev_count = 0;
	stats->mngprc.ev_count = 0;
	stats->mngpdc.ev_count = 0;
	stats->xec.ev_count = 0;

	/* Packet Transmission Stats */
	stats->gotc.ev_count = 0;
	stats->tpt.ev_count = 0;
	stats->gptc.ev_count = 0;
	stats->bptc.ev_count = 0;
	stats->mptc.ev_count = 0;
	stats->mngptc.ev_count = 0;
	stats->ptc64.ev_count = 0;
	stats->ptc127.ev_count = 0;
	stats->ptc255.ev_count = 0;
	stats->ptc511.ev_count = 0;
	stats->ptc1023.ev_count = 0;
	stats->ptc1522.ev_count = 0;
}
   2185 
   2186 /************************************************************************
   2187  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2188  *
   2189  *   Retrieves the TDH value from the hardware
   2190  ************************************************************************/
   2191 static int
   2192 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2193 {
   2194 	struct sysctlnode node = *rnode;
   2195 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2196 	struct adapter *adapter;
   2197 	uint32_t val;
   2198 
   2199 	if (!txr)
   2200 		return (0);
   2201 
   2202 	adapter = txr->adapter;
   2203 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2204 		return (EPERM);
   2205 
   2206 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
   2207 	node.sysctl_data = &val;
   2208 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2209 } /* ixgbe_sysctl_tdh_handler */
   2210 
   2211 /************************************************************************
   2212  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2213  *
   2214  *   Retrieves the TDT value from the hardware
   2215  ************************************************************************/
   2216 static int
   2217 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2218 {
   2219 	struct sysctlnode node = *rnode;
   2220 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2221 	struct adapter *adapter;
   2222 	uint32_t val;
   2223 
   2224 	if (!txr)
   2225 		return (0);
   2226 
   2227 	adapter = txr->adapter;
   2228 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2229 		return (EPERM);
   2230 
   2231 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
   2232 	node.sysctl_data = &val;
   2233 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2234 } /* ixgbe_sysctl_tdt_handler */
   2235 
   2236 /************************************************************************
   2237  * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
   2238  * handler function
   2239  *
   2240  *   Retrieves the next_to_check value
   2241  ************************************************************************/
   2242 static int
   2243 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2244 {
   2245 	struct sysctlnode node = *rnode;
   2246 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2247 	struct adapter *adapter;
   2248 	uint32_t val;
   2249 
   2250 	if (!rxr)
   2251 		return (0);
   2252 
   2253 	adapter = rxr->adapter;
   2254 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2255 		return (EPERM);
   2256 
   2257 	val = rxr->next_to_check;
   2258 	node.sysctl_data = &val;
   2259 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2260 } /* ixgbe_sysctl_next_to_check_handler */
   2261 
   2262 /************************************************************************
   2263  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2264  *
   2265  *   Retrieves the RDH value from the hardware
   2266  ************************************************************************/
   2267 static int
   2268 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2269 {
   2270 	struct sysctlnode node = *rnode;
   2271 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2272 	struct adapter *adapter;
   2273 	uint32_t val;
   2274 
   2275 	if (!rxr)
   2276 		return (0);
   2277 
   2278 	adapter = rxr->adapter;
   2279 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2280 		return (EPERM);
   2281 
   2282 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
   2283 	node.sysctl_data = &val;
   2284 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2285 } /* ixgbe_sysctl_rdh_handler */
   2286 
   2287 /************************************************************************
   2288  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2289  *
   2290  *   Retrieves the RDT value from the hardware
   2291  ************************************************************************/
   2292 static int
   2293 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2294 {
   2295 	struct sysctlnode node = *rnode;
   2296 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2297 	struct adapter *adapter;
   2298 	uint32_t val;
   2299 
   2300 	if (!rxr)
   2301 		return (0);
   2302 
   2303 	adapter = rxr->adapter;
   2304 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2305 		return (EPERM);
   2306 
   2307 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
   2308 	node.sysctl_data = &val;
   2309 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2310 } /* ixgbe_sysctl_rdt_handler */
   2311 
   2312 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2313 /************************************************************************
   2314  * ixgbe_register_vlan
   2315  *
   2316  *   Run via vlan config EVENT, it enables us to use the
   2317  *   HW Filter table since we can get the vlan id. This
   2318  *   just creates the entry in the soft version of the
   2319  *   VFTA, init will repopulate the real table.
   2320  ************************************************************************/
   2321 static void
   2322 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2323 {
   2324 	struct adapter	*adapter = ifp->if_softc;
   2325 	u16		index, bit;
   2326 
   2327 	if (ifp->if_softc != arg)   /* Not our event */
   2328 		return;
   2329 
   2330 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2331 		return;
   2332 
   2333 	IXGBE_CORE_LOCK(adapter);
   2334 	index = (vtag >> 5) & 0x7F;
   2335 	bit = vtag & 0x1F;
   2336 	adapter->shadow_vfta[index] |= (1 << bit);
   2337 	ixgbe_setup_vlan_hw_support(adapter);
   2338 	IXGBE_CORE_UNLOCK(adapter);
   2339 } /* ixgbe_register_vlan */
   2340 
   2341 /************************************************************************
   2342  * ixgbe_unregister_vlan
   2343  *
   2344  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2345  ************************************************************************/
   2346 static void
   2347 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2348 {
   2349 	struct adapter	*adapter = ifp->if_softc;
   2350 	u16		index, bit;
   2351 
   2352 	if (ifp->if_softc != arg)
   2353 		return;
   2354 
   2355 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2356 		return;
   2357 
   2358 	IXGBE_CORE_LOCK(adapter);
   2359 	index = (vtag >> 5) & 0x7F;
   2360 	bit = vtag & 0x1F;
   2361 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2362 	/* Re-init to load the changes */
   2363 	ixgbe_setup_vlan_hw_support(adapter);
   2364 	IXGBE_CORE_UNLOCK(adapter);
   2365 } /* ixgbe_unregister_vlan */
   2366 #endif
   2367 
   2368 static void
   2369 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2370 {
   2371 	struct ethercom *ec = &adapter->osdep.ec;
   2372 	struct ixgbe_hw *hw = &adapter->hw;
   2373 	struct rx_ring	*rxr;
   2374 	int		i;
   2375 	u32		ctrl;
   2376 	bool		hwtagging;
   2377 
   2378 	/*
   2379 	 *  This function is called from both if_init and ifflags_cb()
   2380 	 * on NetBSD.
   2381 	 */
   2382 
   2383 	/* Enable HW tagging only if any vlan is attached */
   2384 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   2385 	    && VLAN_ATTACHED(ec);
   2386 
   2387 	/* Setup the queues for vlans */
   2388 	for (i = 0; i < adapter->num_queues; i++) {
   2389 		rxr = &adapter->rx_rings[i];
   2390 		/*
   2391 		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
   2392 		 */
   2393 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2394 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2395 			if (hwtagging)
   2396 				ctrl |= IXGBE_RXDCTL_VME;
   2397 			else
   2398 				ctrl &= ~IXGBE_RXDCTL_VME;
   2399 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2400 		}
   2401 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2402 	}
   2403 
   2404 	/*
   2405 	 * A soft reset zero's out the VFTA, so
   2406 	 * we need to repopulate it now.
   2407 	 */
   2408 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2409 		if (adapter->shadow_vfta[i] != 0)
   2410 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2411 			    adapter->shadow_vfta[i]);
   2412 
   2413 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2414 	/* Enable the Filter Table if enabled */
   2415 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
   2416 		ctrl |= IXGBE_VLNCTRL_VFE;
   2417 	else
   2418 		ctrl &= ~IXGBE_VLNCTRL_VFE;
   2419 	/* VLAN hw tagging for 82598 */
   2420 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2421 		if (hwtagging)
   2422 			ctrl |= IXGBE_VLNCTRL_VME;
   2423 		else
   2424 			ctrl &= ~IXGBE_VLNCTRL_VME;
   2425 	}
   2426 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2427 } /* ixgbe_setup_vlan_hw_support */
   2428 
   2429 /************************************************************************
   2430  * ixgbe_get_slot_info
   2431  *
   2432  *   Get the width and transaction speed of
   2433  *   the slot this adapter is plugged into.
   2434  ************************************************************************/
   2435 static void
   2436 ixgbe_get_slot_info(struct adapter *adapter)
   2437 {
   2438 	device_t		dev = adapter->dev;
   2439 	struct ixgbe_hw		*hw = &adapter->hw;
   2440 	u32		      offset;
   2441 	u16			link;
   2442 	int		      bus_info_valid = TRUE;
   2443 
   2444 	/* Some devices are behind an internal bridge */
   2445 	switch (hw->device_id) {
   2446 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2447 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2448 		goto get_parent_info;
   2449 	default:
   2450 		break;
   2451 	}
   2452 
   2453 	ixgbe_get_bus_info(hw);
   2454 
   2455 	/*
   2456 	 * Some devices don't use PCI-E, but there is no need
   2457 	 * to display "Unknown" for bus speed and width.
   2458 	 */
   2459 	switch (hw->mac.type) {
   2460 	case ixgbe_mac_X550EM_x:
   2461 	case ixgbe_mac_X550EM_a:
   2462 		return;
   2463 	default:
   2464 		goto display;
   2465 	}
   2466 
   2467 get_parent_info:
   2468 	/*
   2469 	 * For the Quad port adapter we need to parse back
   2470 	 * up the PCI tree to find the speed of the expansion
   2471 	 * slot into which this adapter is plugged. A bit more work.
   2472 	 */
   2473 	dev = device_parent(device_parent(dev));
   2474 #if 0
   2475 #ifdef IXGBE_DEBUG
   2476 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2477 	    pci_get_slot(dev), pci_get_function(dev));
   2478 #endif
   2479 	dev = device_parent(device_parent(dev));
   2480 #ifdef IXGBE_DEBUG
   2481 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2482 	    pci_get_slot(dev), pci_get_function(dev));
   2483 #endif
   2484 #endif
   2485 	/* Now get the PCI Express Capabilities offset */
   2486 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2487 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2488 		/*
   2489 		 * Hmm...can't get PCI-Express capabilities.
   2490 		 * Falling back to default method.
   2491 		 */
   2492 		bus_info_valid = FALSE;
   2493 		ixgbe_get_bus_info(hw);
   2494 		goto display;
   2495 	}
   2496 	/* ...and read the Link Status Register */
   2497 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2498 	    offset + PCIE_LCSR) >> 16;
   2499 	ixgbe_set_pci_config_data_generic(hw, link);
   2500 
   2501 display:
   2502 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2503 	    ((hw->bus.speed == ixgbe_bus_speed_8000)	? "8.0GT/s" :
   2504 	     (hw->bus.speed == ixgbe_bus_speed_5000)	? "5.0GT/s" :
   2505 	     (hw->bus.speed == ixgbe_bus_speed_2500)	? "2.5GT/s" :
   2506 	     "Unknown"),
   2507 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2508 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2509 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2510 	     "Unknown"));
   2511 
   2512 	if (bus_info_valid) {
   2513 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2514 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2515 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2516 			device_printf(dev, "PCI-Express bandwidth available"
   2517 			    " for this card\n     is not sufficient for"
   2518 			    " optimal performance.\n");
   2519 			device_printf(dev, "For optimal performance a x8 "
   2520 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2521 		}
   2522 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2523 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2524 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2525 			device_printf(dev, "PCI-Express bandwidth available"
   2526 			    " for this card\n     is not sufficient for"
   2527 			    " optimal performance.\n");
   2528 			device_printf(dev, "For optimal performance a x8 "
   2529 			    "PCIE Gen3 slot is required.\n");
   2530 		}
   2531 	} else
   2532 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2533 
   2534 	return;
   2535 } /* ixgbe_get_slot_info */
   2536 
   2537 /************************************************************************
   2538  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2539  ************************************************************************/
   2540 static inline void
   2541 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2542 {
   2543 	struct ixgbe_hw *hw = &adapter->hw;
   2544 	struct ix_queue *que = &adapter->queues[vector];
   2545 	u64		queue = (u64)(1ULL << vector);
   2546 	u32		mask;
   2547 
   2548 	mutex_enter(&que->dc_mtx);
   2549 	if (que->disabled_count > 0 && --que->disabled_count > 0)
   2550 		goto out;
   2551 
   2552 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2553 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2554 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2555 	} else {
   2556 		mask = (queue & 0xFFFFFFFF);
   2557 		if (mask)
   2558 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2559 		mask = (queue >> 32);
   2560 		if (mask)
   2561 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2562 	}
   2563 out:
   2564 	mutex_exit(&que->dc_mtx);
   2565 } /* ixgbe_enable_queue */
   2566 
   2567 /************************************************************************
   2568  * ixgbe_disable_queue_internal
   2569  ************************************************************************/
   2570 static inline void
   2571 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
   2572 {
   2573 	struct ixgbe_hw *hw = &adapter->hw;
   2574 	struct ix_queue *que = &adapter->queues[vector];
   2575 	u64		queue = 1ULL << vector;
   2576 	u32		mask;
   2577 
   2578 	mutex_enter(&que->dc_mtx);
   2579 
   2580 	if (que->disabled_count > 0) {
   2581 		if (nestok)
   2582 			que->disabled_count++;
   2583 		goto out;
   2584 	}
   2585 	que->disabled_count++;
   2586 
   2587 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2588 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2589 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2590 	} else {
   2591 		mask = (queue & 0xFFFFFFFF);
   2592 		if (mask)
   2593 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2594 		mask = (queue >> 32);
   2595 		if (mask)
   2596 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2597 	}
   2598 out:
   2599 	mutex_exit(&que->dc_mtx);
   2600 } /* ixgbe_disable_queue_internal */
   2601 
   2602 /************************************************************************
   2603  * ixgbe_disable_queue
   2604  ************************************************************************/
   2605 static inline void
   2606 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2607 {
   2608 
   2609 	ixgbe_disable_queue_internal(adapter, vector, true);
   2610 } /* ixgbe_disable_queue */
   2611 
   2612 /************************************************************************
   2613  * ixgbe_sched_handle_que - schedule deferred packet processing
   2614  ************************************************************************/
   2615 static inline void
   2616 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
   2617 {
   2618 
   2619 	if (que->txrx_use_workqueue) {
   2620 		/*
   2621 		 * adapter->que_wq is bound to each CPU instead of
   2622 		 * each NIC queue to reduce workqueue kthread. As we
   2623 		 * should consider about interrupt affinity in this
   2624 		 * function, the workqueue kthread must be WQ_PERCPU.
   2625 		 * If create WQ_PERCPU workqueue kthread for each NIC
   2626 		 * queue, that number of created workqueue kthread is
   2627 		 * (number of used NIC queue) * (number of CPUs) =
   2628 		 * (number of CPUs) ^ 2 most often.
   2629 		 *
   2630 		 * The same NIC queue's interrupts are avoided by
   2631 		 * masking the queue's interrupt. And different
   2632 		 * NIC queue's interrupts use different struct work
   2633 		 * (que->wq_cookie). So, "enqueued flag" to avoid
   2634 		 * twice workqueue_enqueue() is not required .
   2635 		 */
   2636 		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
   2637 	} else {
   2638 		softint_schedule(que->que_si);
   2639 	}
   2640 }
   2641 
   2642 /************************************************************************
   2643  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2644  ************************************************************************/
   2645 static int
   2646 ixgbe_msix_que(void *arg)
   2647 {
   2648 	struct ix_queue	*que = arg;
   2649 	struct adapter	*adapter = que->adapter;
   2650 	struct ifnet	*ifp = adapter->ifp;
   2651 	struct tx_ring	*txr = que->txr;
   2652 	struct rx_ring	*rxr = que->rxr;
   2653 	bool		more;
   2654 	u32		newitr = 0;
   2655 
   2656 	/* Protect against spurious interrupts */
   2657 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2658 		return 0;
   2659 
   2660 	ixgbe_disable_queue(adapter, que->msix);
   2661 	++que->irqs.ev_count;
   2662 
   2663 	/*
   2664 	 * Don't change "que->txrx_use_workqueue" from this point to avoid
   2665 	 * flip-flopping softint/workqueue mode in one deferred processing.
   2666 	 */
   2667 	que->txrx_use_workqueue = adapter->txrx_use_workqueue;
   2668 
   2669 #ifdef __NetBSD__
   2670 	/* Don't run ixgbe_rxeof in interrupt context */
   2671 	more = true;
   2672 #else
   2673 	more = ixgbe_rxeof(que);
   2674 #endif
   2675 
   2676 	IXGBE_TX_LOCK(txr);
   2677 	ixgbe_txeof(txr);
   2678 	IXGBE_TX_UNLOCK(txr);
   2679 
   2680 	/* Do AIM now? */
   2681 
   2682 	if (adapter->enable_aim == false)
   2683 		goto no_calc;
   2684 	/*
   2685 	 * Do Adaptive Interrupt Moderation:
   2686 	 *  - Write out last calculated setting
   2687 	 *  - Calculate based on average size over
   2688 	 *    the last interval.
   2689 	 */
   2690 	if (que->eitr_setting)
   2691 		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
   2692 
   2693 	que->eitr_setting = 0;
   2694 
   2695 	/* Idle, do nothing */
   2696 	if ((txr->bytes == 0) && (rxr->bytes == 0))
   2697 		goto no_calc;
   2698 
   2699 	if ((txr->bytes) && (txr->packets))
   2700 		newitr = txr->bytes/txr->packets;
   2701 	if ((rxr->bytes) && (rxr->packets))
   2702 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2703 	newitr += 24; /* account for hardware frame, crc */
   2704 
   2705 	/* set an upper boundary */
   2706 	newitr = min(newitr, 3000);
   2707 
   2708 	/* Be nice to the mid range */
   2709 	if ((newitr > 300) && (newitr < 1200))
   2710 		newitr = (newitr / 3);
   2711 	else
   2712 		newitr = (newitr / 2);
   2713 
   2714 	/*
   2715 	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
   2716 	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
   2717 	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
   2718 	 * on 1G and higher.
   2719 	 */
   2720 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2721 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2722 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
   2723 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
   2724 	}
   2725 
   2726 	/* save for next interrupt */
   2727 	que->eitr_setting = newitr;
   2728 
   2729 	/* Reset state */
   2730 	txr->bytes = 0;
   2731 	txr->packets = 0;
   2732 	rxr->bytes = 0;
   2733 	rxr->packets = 0;
   2734 
   2735 no_calc:
   2736 	if (more)
   2737 		ixgbe_sched_handle_que(adapter, que);
   2738 	else
   2739 		ixgbe_enable_queue(adapter, que->msix);
   2740 
   2741 	return 1;
   2742 } /* ixgbe_msix_que */
   2743 
   2744 /************************************************************************
   2745  * ixgbe_media_status - Media Ioctl callback
   2746  *
   2747  *   Called whenever the user queries the status of
   2748  *   the interface using ifconfig.
   2749  ************************************************************************/
   2750 static void
   2751 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2752 {
   2753 	struct adapter *adapter = ifp->if_softc;
   2754 	struct ixgbe_hw *hw = &adapter->hw;
   2755 	int layer;
   2756 
   2757 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2758 	IXGBE_CORE_LOCK(adapter);
   2759 	ixgbe_update_link_status(adapter);
   2760 
   2761 	ifmr->ifm_status = IFM_AVALID;
   2762 	ifmr->ifm_active = IFM_ETHER;
   2763 
   2764 	if (adapter->link_active != LINK_STATE_UP) {
   2765 		ifmr->ifm_active |= IFM_NONE;
   2766 		IXGBE_CORE_UNLOCK(adapter);
   2767 		return;
   2768 	}
   2769 
   2770 	ifmr->ifm_status |= IFM_ACTIVE;
   2771 	layer = adapter->phy_layer;
   2772 
   2773 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2774 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2775 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2776 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2777 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2778 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2779 		switch (adapter->link_speed) {
   2780 		case IXGBE_LINK_SPEED_10GB_FULL:
   2781 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2782 			break;
   2783 		case IXGBE_LINK_SPEED_5GB_FULL:
   2784 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2785 			break;
   2786 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2787 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2788 			break;
   2789 		case IXGBE_LINK_SPEED_1GB_FULL:
   2790 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2791 			break;
   2792 		case IXGBE_LINK_SPEED_100_FULL:
   2793 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2794 			break;
   2795 		case IXGBE_LINK_SPEED_10_FULL:
   2796 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2797 			break;
   2798 		}
   2799 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2800 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2801 		switch (adapter->link_speed) {
   2802 		case IXGBE_LINK_SPEED_10GB_FULL:
   2803 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2804 			break;
   2805 		}
   2806 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2807 		switch (adapter->link_speed) {
   2808 		case IXGBE_LINK_SPEED_10GB_FULL:
   2809 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2810 			break;
   2811 		case IXGBE_LINK_SPEED_1GB_FULL:
   2812 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2813 			break;
   2814 		}
   2815 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2816 		switch (adapter->link_speed) {
   2817 		case IXGBE_LINK_SPEED_10GB_FULL:
   2818 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2819 			break;
   2820 		case IXGBE_LINK_SPEED_1GB_FULL:
   2821 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2822 			break;
   2823 		}
   2824 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2825 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2826 		switch (adapter->link_speed) {
   2827 		case IXGBE_LINK_SPEED_10GB_FULL:
   2828 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2829 			break;
   2830 		case IXGBE_LINK_SPEED_1GB_FULL:
   2831 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2832 			break;
   2833 		}
   2834 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2835 		switch (adapter->link_speed) {
   2836 		case IXGBE_LINK_SPEED_10GB_FULL:
   2837 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2838 			break;
   2839 		}
   2840 	/*
   2841 	 * XXX: These need to use the proper media types once
   2842 	 * they're added.
   2843 	 */
   2844 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2845 		switch (adapter->link_speed) {
   2846 		case IXGBE_LINK_SPEED_10GB_FULL:
   2847 #ifndef IFM_ETH_XTYPE
   2848 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2849 #else
   2850 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2851 #endif
   2852 			break;
   2853 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2854 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2855 			break;
   2856 		case IXGBE_LINK_SPEED_1GB_FULL:
   2857 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2858 			break;
   2859 		}
   2860 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2861 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2862 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2863 		switch (adapter->link_speed) {
   2864 		case IXGBE_LINK_SPEED_10GB_FULL:
   2865 #ifndef IFM_ETH_XTYPE
   2866 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2867 #else
   2868 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2869 #endif
   2870 			break;
   2871 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2872 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2873 			break;
   2874 		case IXGBE_LINK_SPEED_1GB_FULL:
   2875 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2876 			break;
   2877 		}
   2878 
   2879 	/* If nothing is recognized... */
   2880 #if 0
   2881 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2882 		ifmr->ifm_active |= IFM_UNKNOWN;
   2883 #endif
   2884 
   2885 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2886 
   2887 	/* Display current flow control setting used on link */
   2888 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2889 	    hw->fc.current_mode == ixgbe_fc_full)
   2890 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2891 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2892 	    hw->fc.current_mode == ixgbe_fc_full)
   2893 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2894 
   2895 	IXGBE_CORE_UNLOCK(adapter);
   2896 
   2897 	return;
   2898 } /* ixgbe_media_status */
   2899 
   2900 /************************************************************************
   2901  * ixgbe_media_change - Media Ioctl callback
   2902  *
   2903  *   Called when the user changes speed/duplex using
   2904  *   media/mediopt option with ifconfig.
   2905  ************************************************************************/
   2906 static int
   2907 ixgbe_media_change(struct ifnet *ifp)
   2908 {
   2909 	struct adapter	 *adapter = ifp->if_softc;
   2910 	struct ifmedia	 *ifm = &adapter->media;
   2911 	struct ixgbe_hw	 *hw = &adapter->hw;
   2912 	ixgbe_link_speed speed = 0;
   2913 	ixgbe_link_speed link_caps = 0;
   2914 	bool negotiate = false;
   2915 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2916 
   2917 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2918 
   2919 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2920 		return (EINVAL);
   2921 
   2922 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2923 		return (EPERM);
   2924 
   2925 	IXGBE_CORE_LOCK(adapter);
   2926 	/*
   2927 	 * We don't actually need to check against the supported
   2928 	 * media types of the adapter; ifmedia will take care of
   2929 	 * that for us.
   2930 	 */
   2931 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2932 	case IFM_AUTO:
   2933 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2934 		    &negotiate);
   2935 		if (err != IXGBE_SUCCESS) {
   2936 			device_printf(adapter->dev, "Unable to determine "
   2937 			    "supported advertise speeds\n");
   2938 			IXGBE_CORE_UNLOCK(adapter);
   2939 			return (ENODEV);
   2940 		}
   2941 		speed |= link_caps;
   2942 		break;
   2943 	case IFM_10G_T:
   2944 	case IFM_10G_LRM:
   2945 	case IFM_10G_LR:
   2946 	case IFM_10G_TWINAX:
   2947 	case IFM_10G_SR:
   2948 	case IFM_10G_CX4:
   2949 #ifdef IFM_ETH_XTYPE
   2950 	case IFM_10G_KR:
   2951 	case IFM_10G_KX4:
   2952 #endif
   2953 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2954 		break;
   2955 	case IFM_5000_T:
   2956 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2957 		break;
   2958 	case IFM_2500_T:
   2959 	case IFM_2500_KX:
   2960 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2961 		break;
   2962 	case IFM_1000_T:
   2963 	case IFM_1000_LX:
   2964 	case IFM_1000_SX:
   2965 	case IFM_1000_KX:
   2966 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2967 		break;
   2968 	case IFM_100_TX:
   2969 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2970 		break;
   2971 	case IFM_10_T:
   2972 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2973 		break;
   2974 	case IFM_NONE:
   2975 		break;
   2976 	default:
   2977 		goto invalid;
   2978 	}
   2979 
   2980 	hw->mac.autotry_restart = TRUE;
   2981 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2982 	adapter->advertise = 0;
   2983 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2984 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2985 			adapter->advertise |= 1 << 2;
   2986 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2987 			adapter->advertise |= 1 << 1;
   2988 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2989 			adapter->advertise |= 1 << 0;
   2990 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2991 			adapter->advertise |= 1 << 3;
   2992 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2993 			adapter->advertise |= 1 << 4;
   2994 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2995 			adapter->advertise |= 1 << 5;
   2996 	}
   2997 
   2998 	IXGBE_CORE_UNLOCK(adapter);
   2999 	return (0);
   3000 
   3001 invalid:
   3002 	device_printf(adapter->dev, "Invalid media type!\n");
   3003 	IXGBE_CORE_UNLOCK(adapter);
   3004 
   3005 	return (EINVAL);
   3006 } /* ixgbe_media_change */
   3007 
   3008 /************************************************************************
   3009  * ixgbe_set_promisc
   3010  ************************************************************************/
   3011 static void
   3012 ixgbe_set_promisc(struct adapter *adapter)
   3013 {
   3014 	struct ifnet *ifp = adapter->ifp;
   3015 	int	     mcnt = 0;
   3016 	u32	     rctl;
   3017 	struct ether_multi *enm;
   3018 	struct ether_multistep step;
   3019 	struct ethercom *ec = &adapter->osdep.ec;
   3020 
   3021 	KASSERT(mutex_owned(&adapter->core_mtx));
   3022 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3023 	rctl &= (~IXGBE_FCTRL_UPE);
   3024 	if (ifp->if_flags & IFF_ALLMULTI)
   3025 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   3026 	else {
   3027 		ETHER_LOCK(ec);
   3028 		ETHER_FIRST_MULTI(step, ec, enm);
   3029 		while (enm != NULL) {
   3030 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   3031 				break;
   3032 			mcnt++;
   3033 			ETHER_NEXT_MULTI(step, enm);
   3034 		}
   3035 		ETHER_UNLOCK(ec);
   3036 	}
   3037 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   3038 		rctl &= (~IXGBE_FCTRL_MPE);
   3039 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3040 
   3041 	if (ifp->if_flags & IFF_PROMISC) {
   3042 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3043 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3044 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   3045 		rctl |= IXGBE_FCTRL_MPE;
   3046 		rctl &= ~IXGBE_FCTRL_UPE;
   3047 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3048 	}
   3049 } /* ixgbe_set_promisc */
   3050 
   3051 /************************************************************************
   3052  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   3053  ************************************************************************/
   3054 static int
   3055 ixgbe_msix_link(void *arg)
   3056 {
   3057 	struct adapter	*adapter = arg;
   3058 	struct ixgbe_hw *hw = &adapter->hw;
   3059 	u32		eicr, eicr_mask;
   3060 	s32		retval;
   3061 
   3062 	++adapter->link_irq.ev_count;
   3063 
   3064 	/* Pause other interrupts */
   3065 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   3066 
   3067 	/* First get the cause */
   3068 	/*
   3069 	 * The specifications of 82598, 82599, X540 and X550 say EICS register
   3070 	 * is write only. However, Linux says it is a workaround for silicon
   3071 	 * errata to read EICS instead of EICR to get interrupt cause. It seems
   3072 	 * there is a problem about read clear mechanism for EICR register.
   3073 	 */
   3074 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   3075 	/* Be sure the queue bits are not cleared */
   3076 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   3077 	/* Clear interrupt with write */
   3078 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   3079 
   3080 	/* Link status change */
   3081 	if (eicr & IXGBE_EICR_LSC) {
   3082 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   3083 		softint_schedule(adapter->link_si);
   3084 	}
   3085 
   3086 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   3087 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   3088 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   3089 			/* This is probably overkill :) */
   3090 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   3091 				return 1;
   3092 			/* Disable the interrupt */
   3093 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   3094 			softint_schedule(adapter->fdir_si);
   3095 		}
   3096 
   3097 		if (eicr & IXGBE_EICR_ECC) {
   3098 			device_printf(adapter->dev,
   3099 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   3100 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   3101 		}
   3102 
   3103 		/* Check for over temp condition */
   3104 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   3105 			switch (adapter->hw.mac.type) {
   3106 			case ixgbe_mac_X550EM_a:
   3107 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   3108 					break;
   3109 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   3110 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   3111 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   3112 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   3113 				retval = hw->phy.ops.check_overtemp(hw);
   3114 				if (retval != IXGBE_ERR_OVERTEMP)
   3115 					break;
   3116 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   3117 				device_printf(adapter->dev, "System shutdown required!\n");
   3118 				break;
   3119 			default:
   3120 				if (!(eicr & IXGBE_EICR_TS))
   3121 					break;
   3122 				retval = hw->phy.ops.check_overtemp(hw);
   3123 				if (retval != IXGBE_ERR_OVERTEMP)
   3124 					break;
   3125 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   3126 				device_printf(adapter->dev, "System shutdown required!\n");
   3127 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   3128 				break;
   3129 			}
   3130 		}
   3131 
   3132 		/* Check for VF message */
   3133 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   3134 		    (eicr & IXGBE_EICR_MAILBOX))
   3135 			softint_schedule(adapter->mbx_si);
   3136 	}
   3137 
   3138 	if (ixgbe_is_sfp(hw)) {
   3139 		/* Pluggable optics-related interrupt */
   3140 		if (hw->mac.type >= ixgbe_mac_X540)
   3141 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   3142 		else
   3143 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   3144 
   3145 		if (eicr & eicr_mask) {
   3146 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   3147 			softint_schedule(adapter->mod_si);
   3148 		}
   3149 
   3150 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   3151 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   3152 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   3153 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   3154 			softint_schedule(adapter->msf_si);
   3155 		}
   3156 	}
   3157 
   3158 	/* Check for fan failure */
   3159 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   3160 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   3161 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   3162 	}
   3163 
   3164 	/* External PHY interrupt */
   3165 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   3166 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   3167 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   3168 		softint_schedule(adapter->phy_si);
   3169 	}
   3170 
   3171 	/* Re-enable other interrupts */
   3172 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   3173 	return 1;
   3174 } /* ixgbe_msix_link */
   3175 
   3176 static void
   3177 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   3178 {
   3179 
   3180 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3181 		itr |= itr << 16;
   3182 	else
   3183 		itr |= IXGBE_EITR_CNT_WDIS;
   3184 
   3185 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
   3186 }
   3187 
   3188 
   3189 /************************************************************************
   3190  * ixgbe_sysctl_interrupt_rate_handler
   3191  ************************************************************************/
   3192 static int
   3193 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   3194 {
   3195 	struct sysctlnode node = *rnode;
   3196 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   3197 	struct adapter	*adapter;
   3198 	uint32_t reg, usec, rate;
   3199 	int error;
   3200 
   3201 	if (que == NULL)
   3202 		return 0;
   3203 
   3204 	adapter = que->adapter;
   3205 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   3206 		return (EPERM);
   3207 
   3208 	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
   3209 	usec = ((reg & 0x0FF8) >> 3);
   3210 	if (usec > 0)
   3211 		rate = 500000 / usec;
   3212 	else
   3213 		rate = 0;
   3214 	node.sysctl_data = &rate;
   3215 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3216 	if (error || newp == NULL)
   3217 		return error;
   3218 	reg &= ~0xfff; /* default, no limitation */
   3219 	if (rate > 0 && rate < 500000) {
   3220 		if (rate < 1000)
   3221 			rate = 1000;
   3222 		reg |= ((4000000/rate) & 0xff8);
   3223 		/*
   3224 		 * When RSC is used, ITR interval must be larger than
   3225 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   3226 		 * The minimum value is always greater than 2us on 100M
   3227 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   3228 		 */
   3229 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3230 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3231 			if ((adapter->num_queues > 1)
   3232 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3233 				return EINVAL;
   3234 		}
   3235 		ixgbe_max_interrupt_rate = rate;
   3236 	} else
   3237 		ixgbe_max_interrupt_rate = 0;
   3238 	ixgbe_eitr_write(adapter, que->msix, reg);
   3239 
   3240 	return (0);
   3241 } /* ixgbe_sysctl_interrupt_rate_handler */
   3242 
   3243 const struct sysctlnode *
   3244 ixgbe_sysctl_instance(struct adapter *adapter)
   3245 {
   3246 	const char *dvname;
   3247 	struct sysctllog **log;
   3248 	int rc;
   3249 	const struct sysctlnode *rnode;
   3250 
   3251 	if (adapter->sysctltop != NULL)
   3252 		return adapter->sysctltop;
   3253 
   3254 	log = &adapter->sysctllog;
   3255 	dvname = device_xname(adapter->dev);
   3256 
   3257 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3258 	    0, CTLTYPE_NODE, dvname,
   3259 	    SYSCTL_DESCR("ixgbe information and settings"),
   3260 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3261 		goto err;
   3262 
   3263 	return rnode;
   3264 err:
   3265 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3266 	return NULL;
   3267 }
   3268 
   3269 /************************************************************************
   3270  * ixgbe_add_device_sysctls
   3271  ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t	       dev = adapter->dev;
	struct ixgbe_hw	       *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* All nodes hang off the per-device root (hw.<devname>). */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Adaptive interrupt moderation; seeded from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" were toggled directly by the
	 * sysctl handler, a queue could flip-flop between softint and
	 * workqueue mode within one deferred-processing run, which would
	 * require preempt_disable()/preempt_enable() in
	 * ixgbe_sched_handle_que() to avoid tripping the KASSERT in
	 * softint_schedule().  Instead, the adapter-wide flag set here is
	 * copied into "que->txrx_use_workqueue" from the interrupt
	 * handler, which is cheaper than toggling preemption on every
	 * ixgbe_sched_handle_que() call.
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* External-PHY sysctls live under their own "phy" subnode. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* X550EM_a with firmware-managed PHY only */
	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
   3425 
   3426 /************************************************************************
   3427  * ixgbe_allocate_pci_resources
   3428  ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* The device registers are all behind memory-space BAR0. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Register accesses must not be prefetched/cached, so strip
		 * the prefetchable flag before mapping even if the BAR
		 * advertises it.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "nothing mapped" for teardown. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
   3475 
/*
 * ixgbe_free_softint - Tear down all softints and workqueues used for
 * deferred Tx/Rx, link, module, PHY, flow-director and mailbox handling.
 * Every handle is NULL-checked first, so this is safe to call with
 * partially-established state (e.g. from an attach failure path).
 */
static void
ixgbe_free_softint(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	/* Per-queue softints; txr_si only exists on the non-legacy Tx path. */
	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	/* Workqueue counterparts (used when txrx_use_workqueue is set). */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Link queue */
	if (adapter->link_si != NULL) {
		softint_disestablish(adapter->link_si);
		adapter->link_si = NULL;
	}
	if (adapter->mod_si != NULL) {
		softint_disestablish(adapter->mod_si);
		adapter->mod_si = NULL;
	}
	if (adapter->msf_si != NULL) {
		softint_disestablish(adapter->msf_si);
		adapter->msf_si = NULL;
	}
	if (adapter->phy_si != NULL) {
		softint_disestablish(adapter->phy_si);
		adapter->phy_si = NULL;
	}
	/* Optional softints, only established when the feature is active. */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
		if (adapter->fdir_si != NULL) {
			softint_disestablish(adapter->fdir_si);
			adapter->fdir_si = NULL;
		}
	}
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		if (adapter->mbx_si != NULL) {
			softint_disestablish(adapter->mbx_si);
			adapter->mbx_si = NULL;
		}
	}
} /* ixgbe_free_softint */
   3528 
   3529 /************************************************************************
   3530  * ixgbe_detach - Device removal routine
   3531  *
   3532  *   Called when the driver is being removed.
   3533  *   Stops the adapter and deallocates all the resources
   3534  *   that were allocated for driver operation.
   3535  *
   3536  *   return 0 on success, positive on failure
   3537  ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32	ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
	 * so it's not required to call ixgbe_stop() directly.
	 */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/*
		 * NOTE(review): the interface was already stopped and put
		 * into low-power mode above; refusing the detach here
		 * leaves it in that state — confirm this is intended.
		 */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Softints/workqueues must go before the interrupt sources do. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for any in-flight callouts to finish before freeing state. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/*
	 * Detach every event counter.  This list must mirror the
	 * evcnt_attach calls made in the attach path, including the
	 * per-MAC-generation conditionals below.
	 */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-TC (traffic class) counters. */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters (txr/rxr walk in lockstep with the index). */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally release descriptor rings, queue state and locks. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
   3730 
   3731 /************************************************************************
   3732  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3733  *
   3734  *   Prepare the adapter/port for LPLU and/or WoL
   3735  ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	s32		error = 0;

	/* Caller must hold the core lock (also required by ixgbe_stop()). */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wakeup state. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	/* 0 on success, or the error from enter_lplu() above. */
	return error;
} /* ixgbe_setup_low_power_mode */
   3790 
   3791 /************************************************************************
   3792  * ixgbe_shutdown - Shutdown entry point
   3793  ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/*
 * FreeBSD-style shutdown method: place the adapter into its low-power
 * state when the system goes down.  Disabled in this port; the same
 * transition is presumably reached via the pmf(9) suspend hook
 * (ixgbe_suspend) — confirm before enabling.
 */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
   3810 
   3811 /************************************************************************
   3812  * ixgbe_suspend
   3813  *
   3814  *   From D0 to D3
   3815  ************************************************************************/
   3816 static bool
   3817 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3818 {
   3819 	struct adapter *adapter = device_private(dev);
   3820 	int	       error = 0;
   3821 
   3822 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3823 
   3824 	IXGBE_CORE_LOCK(adapter);
   3825 
   3826 	error = ixgbe_setup_low_power_mode(adapter);
   3827 
   3828 	IXGBE_CORE_UNLOCK(adapter);
   3829 
   3830 	return (error);
   3831 } /* ixgbe_suspend */
   3832 
   3833 /************************************************************************
   3834  * ixgbe_resume
   3835  *
   3836  *   From D3 to D0
   3837  ************************************************************************/
   3838 static bool
   3839 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3840 {
   3841 	struct adapter	*adapter = device_private(dev);
   3842 	struct ifnet	*ifp = adapter->ifp;
   3843 	struct ixgbe_hw *hw = &adapter->hw;
   3844 	u32		wus;
   3845 
   3846 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3847 
   3848 	IXGBE_CORE_LOCK(adapter);
   3849 
   3850 	/* Read & clear WUS register */
   3851 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3852 	if (wus)
   3853 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3854 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3855 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3856 	/* And clear WUFC until next low-power transition */
   3857 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3858 
   3859 	/*
   3860 	 * Required after D3->D0 transition;
   3861 	 * will re-advertise all previous advertised speeds
   3862 	 */
   3863 	if (ifp->if_flags & IFF_UP)
   3864 		ixgbe_init_locked(adapter);
   3865 
   3866 	IXGBE_CORE_UNLOCK(adapter);
   3867 
   3868 	return true;
   3869 } /* ixgbe_resume */
   3870 
   3871 /*
   3872  * Set the various hardware offload abilities.
   3873  *
   3874  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3875  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3876  * mbuf offload flags the driver will understand.
   3877  */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty: the NetBSD port has no if_hwassist
	 * field to program (FreeBSD heritage).  Offload flags are
	 * presumably handled via if_capenable elsewhere — kept as a
	 * placeholder so the structure matches the FreeBSD driver.
	 */
}
   3883 
   3884 /************************************************************************
   3885  * ixgbe_init_locked - Init entry point
   3886  *
   3887  *   Used in two ways: It is used by the stack as an init
   3888  *   entry point in network interface structure. It is also
   3889  *   used by the driver as a hw/sw initialization routine to
   3890  *   get to a consistent state.
   3891  *
   3892  *   return 0 on success, positive on failure
   3893  ************************************************************************/
   3894 static void
   3895 ixgbe_init_locked(struct adapter *adapter)
   3896 {
   3897 	struct ifnet   *ifp = adapter->ifp;
   3898 	device_t	dev = adapter->dev;
   3899 	struct ixgbe_hw *hw = &adapter->hw;
   3900 	struct ix_queue *que;
   3901 	struct tx_ring	*txr;
   3902 	struct rx_ring	*rxr;
   3903 	u32		txdctl, mhadd;
   3904 	u32		rxdctl, rxctrl;
   3905 	u32		ctrl_ext;
   3906 	int		i, j, err;
   3907 
   3908 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3909 
   3910 	KASSERT(mutex_owned(&adapter->core_mtx));
   3911 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3912 
   3913 	hw->adapter_stopped = FALSE;
   3914 	ixgbe_stop_adapter(hw);
   3915 	callout_stop(&adapter->timer);
   3916 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
   3917 		que->disabled_count = 0;
   3918 
   3919 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3920 	adapter->max_frame_size =
   3921 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3922 
   3923 	/* Queue indices may change with IOV mode */
   3924 	ixgbe_align_all_queue_indices(adapter);
   3925 
   3926 	/* reprogram the RAR[0] in case user changed it. */
   3927 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3928 
   3929 	/* Get the latest mac address, User can use a LAA */
   3930 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3931 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3932 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3933 	hw->addr_ctrl.rar_used_count = 1;
   3934 
   3935 	/* Set hardware offload abilities from ifnet flags */
   3936 	ixgbe_set_if_hwassist(adapter);
   3937 
   3938 	/* Prepare transmit descriptors and buffers */
   3939 	if (ixgbe_setup_transmit_structures(adapter)) {
   3940 		device_printf(dev, "Could not setup transmit structures\n");
   3941 		ixgbe_stop(adapter);
   3942 		return;
   3943 	}
   3944 
   3945 	ixgbe_init_hw(hw);
   3946 
   3947 	ixgbe_initialize_iov(adapter);
   3948 
   3949 	ixgbe_initialize_transmit_units(adapter);
   3950 
   3951 	/* Setup Multicast table */
   3952 	ixgbe_set_multi(adapter);
   3953 
   3954 	/* Determine the correct mbuf pool, based on frame size */
   3955 	if (adapter->max_frame_size <= MCLBYTES)
   3956 		adapter->rx_mbuf_sz = MCLBYTES;
   3957 	else
   3958 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3959 
   3960 	/* Prepare receive descriptors and buffers */
   3961 	if (ixgbe_setup_receive_structures(adapter)) {
   3962 		device_printf(dev, "Could not setup receive structures\n");
   3963 		ixgbe_stop(adapter);
   3964 		return;
   3965 	}
   3966 
   3967 	/* Configure RX settings */
   3968 	ixgbe_initialize_receive_units(adapter);
   3969 
   3970 	/* Enable SDP & MSI-X interrupts based on adapter */
   3971 	ixgbe_config_gpie(adapter);
   3972 
   3973 	/* Set MTU size */
   3974 	if (ifp->if_mtu > ETHERMTU) {
   3975 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3976 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3977 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3978 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3979 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3980 	}
   3981 
   3982 	/* Now enable all the queues */
   3983 	for (i = 0; i < adapter->num_queues; i++) {
   3984 		txr = &adapter->tx_rings[i];
   3985 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3986 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3987 		/* Set WTHRESH to 8, burst writeback */
   3988 		txdctl |= (8 << 16);
   3989 		/*
   3990 		 * When the internal queue falls below PTHRESH (32),
   3991 		 * start prefetching as long as there are at least
   3992 		 * HTHRESH (1) buffers ready. The values are taken
   3993 		 * from the Intel linux driver 3.8.21.
   3994 		 * Prefetching enables tx line rate even with 1 queue.
   3995 		 */
   3996 		txdctl |= (32 << 0) | (1 << 8);
   3997 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3998 	}
   3999 
   4000 	for (i = 0; i < adapter->num_queues; i++) {
   4001 		rxr = &adapter->rx_rings[i];
   4002 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   4003 		if (hw->mac.type == ixgbe_mac_82598EB) {
   4004 			/*
   4005 			 * PTHRESH = 21
   4006 			 * HTHRESH = 4
   4007 			 * WTHRESH = 8
   4008 			 */
   4009 			rxdctl &= ~0x3FFFFF;
   4010 			rxdctl |= 0x080420;
   4011 		}
   4012 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   4013 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   4014 		for (j = 0; j < 10; j++) {
   4015 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   4016 			    IXGBE_RXDCTL_ENABLE)
   4017 				break;
   4018 			else
   4019 				msec_delay(1);
   4020 		}
   4021 		wmb();
   4022 
   4023 		/*
   4024 		 * In netmap mode, we must preserve the buffers made
   4025 		 * available to userspace before the if_init()
   4026 		 * (this is true by default on the TX side, because
   4027 		 * init makes all buffers available to userspace).
   4028 		 *
   4029 		 * netmap_reset() and the device specific routines
   4030 		 * (e.g. ixgbe_setup_receive_rings()) map these
   4031 		 * buffers at the end of the NIC ring, so here we
   4032 		 * must set the RDT (tail) register to make sure
   4033 		 * they are not overwritten.
   4034 		 *
   4035 		 * In this driver the NIC ring starts at RDH = 0,
   4036 		 * RDT points to the last slot available for reception (?),
   4037 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   4038 		 */
   4039 #ifdef DEV_NETMAP
   4040 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   4041 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   4042 			struct netmap_adapter *na = NA(adapter->ifp);
   4043 			struct netmap_kring *kring = na->rx_rings[i];
   4044 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   4045 
   4046 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   4047 		} else
   4048 #endif /* DEV_NETMAP */
   4049 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   4050 			    adapter->num_rx_desc - 1);
   4051 	}
   4052 
   4053 	/* Enable Receive engine */
   4054 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4055 	if (hw->mac.type == ixgbe_mac_82598EB)
   4056 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   4057 	rxctrl |= IXGBE_RXCTRL_RXEN;
   4058 	ixgbe_enable_rx_dma(hw, rxctrl);
   4059 
   4060 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4061 
   4062 	/* Set up MSI/MSI-X routing */
   4063 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4064 		ixgbe_configure_ivars(adapter);
   4065 		/* Set up auto-mask */
   4066 		if (hw->mac.type == ixgbe_mac_82598EB)
   4067 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4068 		else {
   4069 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   4070 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   4071 		}
   4072 	} else {  /* Simple settings for Legacy/MSI */
   4073 		ixgbe_set_ivar(adapter, 0, 0, 0);
   4074 		ixgbe_set_ivar(adapter, 0, 0, 1);
   4075 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4076 	}
   4077 
   4078 	ixgbe_init_fdir(adapter);
   4079 
   4080 	/*
   4081 	 * Check on any SFP devices that
   4082 	 * need to be kick-started
   4083 	 */
   4084 	if (hw->phy.type == ixgbe_phy_none) {
   4085 		err = hw->phy.ops.identify(hw);
   4086 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4087 			device_printf(dev,
   4088 			    "Unsupported SFP+ module type was detected.\n");
   4089 			return;
   4090 		}
   4091 	}
   4092 
   4093 	/* Set moderation on the Link interrupt */
   4094 	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
   4095 
   4096 	/* Enable EEE power saving */
   4097 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   4098 		hw->mac.ops.setup_eee(hw,
   4099 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   4100 
   4101 	/* Enable power to the phy. */
   4102 	ixgbe_set_phy_power(hw, TRUE);
   4103 
   4104 	/* Config/Enable Link */
   4105 	ixgbe_config_link(adapter);
   4106 
   4107 	/* Hardware Packet Buffer & Flow Control setup */
   4108 	ixgbe_config_delay_values(adapter);
   4109 
   4110 	/* Initialize the FC settings */
   4111 	ixgbe_start_hw(hw);
   4112 
   4113 	/* Set up VLAN support and filter */
   4114 	ixgbe_setup_vlan_hw_support(adapter);
   4115 
   4116 	/* Setup DMA Coalescing */
   4117 	ixgbe_config_dmac(adapter);
   4118 
   4119 	/* And now turn on interrupts */
   4120 	ixgbe_enable_intr(adapter);
   4121 
   4122 	/* Enable the use of the MBX by the VF's */
   4123 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   4124 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   4125 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   4126 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   4127 	}
   4128 
   4129 	/* Update saved flags. See ixgbe_ifflags_cb() */
   4130 	adapter->if_flags = ifp->if_flags;
   4131 
   4132 	/* Now inform the stack we're ready */
   4133 	ifp->if_flags |= IFF_RUNNING;
   4134 
   4135 	return;
   4136 } /* ixgbe_init_locked */
   4137 
   4138 /************************************************************************
   4139  * ixgbe_init
   4140  ************************************************************************/
   4141 static int
   4142 ixgbe_init(struct ifnet *ifp)
   4143 {
   4144 	struct adapter *adapter = ifp->if_softc;
   4145 
   4146 	IXGBE_CORE_LOCK(adapter);
   4147 	ixgbe_init_locked(adapter);
   4148 	IXGBE_CORE_UNLOCK(adapter);
   4149 
   4150 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   4151 } /* ixgbe_init */
   4152 
/************************************************************************
 * ixgbe_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *     (yes this is all very magic and confusing :)
 *    - entry is the register array entry
 *    - vector is the MSI-X vector for this queue
 *    - type is RX (0), TX (1), or MISC/other-causes (-1)
 *
 *   Each 32-bit IVAR register packs several 8-bit vector entries;
 *   the layout differs between 82598 and the later MACs.
 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Entries are ignored by hardware unless the valid bit is set */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: one flat table of 8-bit entries, four per IVAR
		 * register.  TX entries live 64 slots past the RX ones
		 * (entry += type * 64); type == -1 selects the dedicated
		 * "other causes" slot.
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		/* Register index = entry / 4, byte lane = entry % 4 */
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		/* Read-modify-write only this entry's byte lane */
		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
		ivar |= ((u32)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			/* IVAR_MISC holds two 8-bit entries (bit 0 of entry picks one) */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {	/* RX/TX IVARS */
			/*
			 * Two queues per IVAR register; within a queue's
			 * 16-bit half, RX is the low byte and TX the high.
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		/* Unknown MAC: silently do nothing */
		break;
	}
} /* ixgbe_set_ivar */
   4205 
/************************************************************************
 * ixgbe_configure_ivars
 *
 *   Map every RX/TX queue pair and the link interrupt to their MSI-X
 *   vectors, and program an initial interrupt-moderation (EITR) value
 *   derived from the ixgbe_max_interrupt_rate tunable.
 ************************************************************************/
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	u32		newitr;

	if (ixgbe_max_interrupt_rate > 0)
		/* Convert interrupts/sec to an EITR interval (masked to valid bits) */
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		adapter->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		struct tx_ring *txr = &adapter->tx_rings[i];
		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
		/* Set an Initial EITR value */
		ixgbe_eitr_write(adapter, que->msix, newitr);
		/*
		 * To eliminate influence of the previous state.
		 * At this point, Tx/Rx interrupt handler
		 * (ixgbe_msix_que()) cannot be called, so  both
		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
		 */
		que->eitr_setting = 0;
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */
   4247 
/************************************************************************
 * ixgbe_config_gpie
 *
 *   Program the General Purpose Interrupt Enable (GPIE) register:
 *   MSI-X mode bits plus the SDP pins used for fan-failure, thermal
 *   sensor, and SFP module/link detection, depending on MAC type and
 *   enabled features.
 ************************************************************************/
static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		gpie;

	/* Read-modify-write: preserve bits we don't manage here */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		     |	IXGBE_GPIE_EIAME
		     |	IXGBE_GPIE_PBA_SUPPORT
		     |	IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection: which SDP pins signal it depends on the MAC */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

} /* ixgbe_config_gpie */
   4291 
/************************************************************************
 * ixgbe_config_delay_values
 *
 *   Compute and store flow-control high/low water marks, pause time,
 *   and XON behavior from the maximum frame size.
 *
 *   Requires adapter->max_frame_size to be set.
 ************************************************************************/
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/* Calculate High Water: delay value in bit times, MAC-specific formula */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	/* Convert bit times to KB and subtract from the RX packet buffer size */
	size = IXGBE_BT2KB(tmp);
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
} /* ixgbe_config_delay_values */
   4338 
/************************************************************************
 * ixgbe_set_multi - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 *
 *   Walks the ethercom multicast list into the adapter's MTA array and
 *   programs the hardware filter.  Falls back to ALLMULTI when the list
 *   overflows MAX_NUM_MULTICAST_ADDRESSES or contains an address range.
 *   Caller must hold the core lock.
 ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Too many entries, or an address range (addrlo != addrhi,
		 * which the hardware filter cannot express): accept all
		 * multicast instead.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Mirror PROMISC/ALLMULTI interface flags into FCTRL */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Only program exact-match entries if the list didn't overflow */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
   4397 
   4398 /************************************************************************
   4399  * ixgbe_mc_array_itr
   4400  *
   4401  *   An iterator function needed by the multicast shared code.
   4402  *   It feeds the shared code routine the addresses in the
   4403  *   array of ixgbe_set_multi() one by one.
   4404  ************************************************************************/
   4405 static u8 *
   4406 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4407 {
   4408 	struct ixgbe_mc_addr *mta;
   4409 
   4410 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4411 	*vmdq = mta->vmdq;
   4412 
   4413 	*update_ptr = (u8*)(mta + 1);
   4414 
   4415 	return (mta->addr);
   4416 } /* ixgbe_mc_array_itr */
   4417 
/************************************************************************
 * ixgbe_local_timer - Timer routine
 *
 *   Callout entry point; acquires the core lock and runs the real
 *   periodic work (link/stats/watchdog) in ixgbe_local_timer1().
 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
   4433 
/*
 * Periodic housekeeping, called from ixgbe_local_timer() with the core
 * lock held: probes pluggable optics, refreshes link state and stats,
 * aggregates per-queue TX error counters, and runs the TX watchdog.
 * Resets the hardware only if ALL queues appear hung.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues with pending work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;	/* number of queues declared hung */
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Update some event counters: sum the per-queue TX error
	 * counters into the adapter-wide event counters.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog (full reset) if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm ourselves for the next tick */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: count the event and reinitialize the adapter */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
   4538 
/************************************************************************
 * ixgbe_recovery_mode_timer - Recovery mode timer routine
 *
 *   Periodically polls the firmware recovery-mode bit.  On the 0->1
 *   transition it warns once and stops the adapter; on 1->0 it clears
 *   the flag.  Re-arms itself every second.
 ************************************************************************/
static void
ixgbe_recovery_mode_timer(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	if (ixgbe_fw_recovery_mode(hw)) {
		/* atomic_cas_uint returns the OLD value: 0 means we won the 0->1 transition */
		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
			/* Firmware error detected, entering recovery mode */
			device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

			if (hw->adapter_stopped == FALSE)
				ixgbe_stop(adapter);
		}
	} else
		/* Firmware left recovery mode: clear our flag (1 -> 0) */
		atomic_cas_uint(&adapter->recovery_mode, 1, 0);

	callout_reset(&adapter->recovery_mode_timer, hz,
	    ixgbe_recovery_mode_timer, adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_recovery_mode_timer */
   4564 
   4565 /************************************************************************
   4566  * ixgbe_sfp_probe
   4567  *
   4568  *   Determine if a port had optics inserted.
   4569  ************************************************************************/
   4570 static bool
   4571 ixgbe_sfp_probe(struct adapter *adapter)
   4572 {
   4573 	struct ixgbe_hw	*hw = &adapter->hw;
   4574 	device_t	dev = adapter->dev;
   4575 	bool		result = FALSE;
   4576 
   4577 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4578 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4579 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4580 		if (ret)
   4581 			goto out;
   4582 		ret = hw->phy.ops.reset(hw);
   4583 		adapter->sfp_probe = FALSE;
   4584 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4585 			device_printf(dev,"Unsupported SFP+ module detected!");
   4586 			device_printf(dev,
   4587 			    "Reload driver with supported module.\n");
   4588 			goto out;
   4589 		} else
   4590 			device_printf(dev, "SFP+ module detected!\n");
   4591 		/* We now have supported optics */
   4592 		result = TRUE;
   4593 	}
   4594 out:
   4595 
   4596 	return (result);
   4597 } /* ixgbe_sfp_probe */
   4598 
/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module insertion/removal interrupts
 *
 *   Identifies the (possibly new) SFP+ module, performs the MAC-specific
 *   SFP setup, and on success schedules the MSF tasklet to renegotiate
 *   the link.
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32		err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		/*
		 * Crosstalk workaround: the module-present SDP pin differs
		 * per MAC; bail out early if the cage is actually empty.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* 82598 resets the PHY; later MACs run the SFP setup routine */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Kick the multispeed-fiber tasklet to bring the link up */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
   4649 
   4650 
/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 *
 *   Re-reads the supported PHY layer, renegotiates/sets up the link,
 *   and rebuilds the ifmedia list to match the (possibly changed)
 *   module capabilities.
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg;
	bool		negotiate;

	IXGBE_CORE_LOCK(adapter);
	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* If nothing is advertised, query the hardware for link capabilities */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_msf */
   4681 
/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 *
 *   Services a Link Alarm Status Interrupt (LASI) from the external
 *   PHY, logging over-temperature conditions and other errors.
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	int error;

	++adapter->phy_sicount.ev_count;
	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(adapter->dev,
		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
		    " PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(adapter->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */
   4702 
   4703 static void
   4704 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4705 {
   4706 	struct adapter *adapter = ifp->if_softc;
   4707 
   4708 	IXGBE_CORE_LOCK(adapter);
   4709 	ixgbe_stop(adapter);
   4710 	IXGBE_CORE_UNLOCK(adapter);
   4711 }
   4712 
/************************************************************************
 * ixgbe_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 *
 *   Caller must hold the core lock.
 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the periodic timer before touching the MAC */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so ixgbe_stop_adapter() actually runs
	 * its stop sequence instead of short-circuiting.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
   4754 
/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *	 The real check of the hardware only happens with
 *	 a link interrupt.
 *
 * Acts only on transitions of adapter->link_active (to UP or to DOWN),
 * so repeated calls in the same state are cheap no-ops.
 * Caller must hold the core lock.
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 *  Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);

			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
			/* Flush any pending TX work now that the link is gone */
			ixgbe_drain_all(adapter);
		}
	}
} /* ixgbe_update_link_status */
   4848 
/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 *
 *   Only supported on X550 and later MACs that provide a dmac_config
 *   op; reprograms the hardware only when the watchdog timer or link
 *   speed has actually changed.
 ************************************************************************/
static void
ixgbe_config_dmac(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

	/* XOR used as an inequality test: reconfigure only on change */
	if (dcfg->watchdog_timer ^ adapter->dmac ||
	    dcfg->link_speed ^ adapter->link_speed) {
		dcfg->watchdog_timer = adapter->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = adapter->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */
   4874 
   4875 /************************************************************************
   4876  * ixgbe_enable_intr
   4877  ************************************************************************/
/*
 * Build and program the interrupt-enable masks (EIMS/EIAC) and enable
 * every queue interrupt.  Non-queue causes are selected per MAC type.
 */
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all enableable causes minus the per-queue RX/TX bits */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add MAC-specific "other" causes (ECC, thermal sensor, SDP pins) */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
   4958 
   4959 /************************************************************************
   4960  * ixgbe_disable_intr_internal
   4961  ************************************************************************/
/*
 * Mask all adapter interrupts: the non-queue causes via EIMC, MSI-X
 * auto-clear via EIAC, then each queue individually.  'nestok'
 * is passed through to ixgbe_disable_queue_internal().
 */
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue	*que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Turn off MSI-X auto-clear so nothing re-fires behind our back */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
   4979 
   4980 /************************************************************************
   4981  * ixgbe_disable_intr
   4982  ************************************************************************/
/* Disable all interrupts; nested disable is allowed (nestok = true). */
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
   4989 
   4990 /************************************************************************
   4991  * ixgbe_ensure_disabled_intr
   4992  ************************************************************************/
/* Disable all interrupts without permitting nesting (nestok = false). */
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
   4999 
   5000 /************************************************************************
   5001  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   5002  ************************************************************************/
/*
 * Legacy (INTx/MSI) interrupt handler.  Reads and dispatches all EICR
 * causes: queue RX/TX work, fan failure, link state change, SFP module
 * insertion/removal and external PHY events.  Returns 0 when the
 * interrupt was not ours (EICR == 0), 1 otherwise.
 */
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct		tx_ring *txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR identifies (and clears) the pending causes */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Shared interrupt that wasn't ours; re-arm and bail */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		/* TX completion is cheap enough to run here under the lock */
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: defer to the mod softint */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed-fiber events on 82599: defer to the msf softint */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Either hand remaining RX/TX work to the deferred context
	 * or re-enable interrupts now that everything is handled. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
   5093 
   5094 /************************************************************************
   5095  * ixgbe_free_pciintr_resources
   5096  ************************************************************************/
/*
 * Disestablish all established interrupt handlers (queue vectors first,
 * then the legacy/link vector) and release the PCI interrupt resources.
 * Safe to call when some or all handlers were never established.
 */
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int		rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Finally give back the interrupt vectors themselves */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
   5132 
   5133 /************************************************************************
   5134  * ixgbe_free_pci_resources
   5135  ************************************************************************/
/*
 * Release all PCI resources: interrupt handlers/vectors, then the
 * memory-mapped register window (if one was mapped).
 */
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{

	ixgbe_free_pciintr_resources(adapter);

	/* mem_size != 0 means the register BAR is currently mapped */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

} /* ixgbe_free_pci_resources */
   5149 
   5150 /************************************************************************
   5151  * ixgbe_set_sysctl_value
   5152  ************************************************************************/
/*
 * Create a read-write integer sysctl node named 'name' under this
 * adapter's sysctl subtree, backed by *limit, and initialize *limit
 * to 'value'.  Errors are reported but otherwise non-fatal;
 * *limit is set to 'value' regardless.
 */
static void
ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	device_t dev =	adapter->dev;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */

	log = &adapter->sysctllog;
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    name, SYSCTL_DESCR(description),
		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
	*limit = value;
} /* ixgbe_set_sysctl_value */
   5178 
   5179 /************************************************************************
   5180  * ixgbe_sysctl_flowcntl
   5181  *
   5182  *   SYSCTL wrapper around setting Flow Control
   5183  ************************************************************************/
   5184 static int
   5185 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   5186 {
   5187 	struct sysctlnode node = *rnode;
   5188 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5189 	int error, fc;
   5190 
   5191 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5192 		return (EPERM);
   5193 
   5194 	fc = adapter->hw.fc.current_mode;
   5195 	node.sysctl_data = &fc;
   5196 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5197 	if (error != 0 || newp == NULL)
   5198 		return error;
   5199 
   5200 	/* Don't bother if it's not changed */
   5201 	if (fc == adapter->hw.fc.current_mode)
   5202 		return (0);
   5203 
   5204 	return ixgbe_set_flowcntl(adapter, fc);
   5205 } /* ixgbe_sysctl_flowcntl */
   5206 
   5207 /************************************************************************
   5208  * ixgbe_set_flowcntl - Set flow control
   5209  *
   5210  *   Flow control values:
   5211  *     0 - off
   5212  *     1 - rx pause
   5213  *     2 - tx pause
   5214  *     3 - full
   5215  ************************************************************************/
   5216 static int
   5217 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   5218 {
   5219 	switch (fc) {
   5220 		case ixgbe_fc_rx_pause:
   5221 		case ixgbe_fc_tx_pause:
   5222 		case ixgbe_fc_full:
   5223 			adapter->hw.fc.requested_mode = fc;
   5224 			if (adapter->num_queues > 1)
   5225 				ixgbe_disable_rx_drop(adapter);
   5226 			break;
   5227 		case ixgbe_fc_none:
   5228 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5229 			if (adapter->num_queues > 1)
   5230 				ixgbe_enable_rx_drop(adapter);
   5231 			break;
   5232 		default:
   5233 			return (EINVAL);
   5234 	}
   5235 
   5236 #if 0 /* XXX NetBSD */
   5237 	/* Don't autoneg if forcing a value */
   5238 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   5239 #endif
   5240 	ixgbe_fc_enable(&adapter->hw);
   5241 
   5242 	return (0);
   5243 } /* ixgbe_set_flowcntl */
   5244 
   5245 /************************************************************************
   5246  * ixgbe_enable_rx_drop
   5247  *
   5248  *   Enable the hardware to drop packets when the buffer is
   5249  *   full. This is useful with multiqueue, so that no single
   5250  *   queue being full stalls the entire RX engine. We only
   5251  *   enable this when Multiqueue is enabled AND Flow Control
   5252  *   is disabled.
   5253  ************************************************************************/
static void
ixgbe_enable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		srrctl;

	/* Set the DROP_EN bit in each RX queue's SRRCTL register */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* enable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */
   5275 
   5276 /************************************************************************
   5277  * ixgbe_disable_rx_drop
   5278  ************************************************************************/
/* Inverse of ixgbe_enable_rx_drop(): clear DROP_EN on every queue/VF. */
static void
ixgbe_disable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		srrctl;

	/* Clear the DROP_EN bit in each RX queue's SRRCTL register */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* disable drop for each vf */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */
   5299 
   5300 /************************************************************************
   5301  * ixgbe_sysctl_advertise
   5302  *
   5303  *   SYSCTL wrapper around setting advertised speed
   5304  ************************************************************************/
   5305 static int
   5306 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   5307 {
   5308 	struct sysctlnode node = *rnode;
   5309 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5310 	int	       error = 0, advertise;
   5311 
   5312 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5313 		return (EPERM);
   5314 
   5315 	advertise = adapter->advertise;
   5316 	node.sysctl_data = &advertise;
   5317 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5318 	if (error != 0 || newp == NULL)
   5319 		return error;
   5320 
   5321 	return ixgbe_set_advertise(adapter, advertise);
   5322 } /* ixgbe_sysctl_advertise */
   5323 
   5324 /************************************************************************
   5325  * ixgbe_set_advertise - Control advertised link speed
   5326  *
   5327  *   Flags:
   5328  *     0x00 - Default (all capable link speed)
   5329  *     0x01 - advertise 100 Mb
   5330  *     0x02 - advertise 1G
   5331  *     0x04 - advertise 10G
   5332  *     0x08 - advertise 10 Mb
   5333  *     0x10 - advertise 2.5G
   5334  *     0x20 - advertise 5G
   5335  ************************************************************************/
   5336 static int
   5337 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5338 {
   5339 	device_t	 dev;
   5340 	struct ixgbe_hw	 *hw;
   5341 	ixgbe_link_speed speed = 0;
   5342 	ixgbe_link_speed link_caps = 0;
   5343 	s32		 err = IXGBE_NOT_IMPLEMENTED;
   5344 	bool		 negotiate = FALSE;
   5345 
   5346 	/* Checks to validate new value */
   5347 	if (adapter->advertise == advertise) /* no change */
   5348 		return (0);
   5349 
   5350 	dev = adapter->dev;
   5351 	hw = &adapter->hw;
   5352 
   5353 	/* No speed changes for backplane media */
   5354 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5355 		return (ENODEV);
   5356 
   5357 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5358 	    (hw->phy.multispeed_fiber))) {
   5359 		device_printf(dev,
   5360 		    "Advertised speed can only be set on copper or "
   5361 		    "multispeed fiber media types.\n");
   5362 		return (EINVAL);
   5363 	}
   5364 
   5365 	if (advertise < 0x0 || advertise > 0x2f) {
   5366 		device_printf(dev,
   5367 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5368 		return (EINVAL);
   5369 	}
   5370 
   5371 	if (hw->mac.ops.get_link_capabilities) {
   5372 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5373 		    &negotiate);
   5374 		if (err != IXGBE_SUCCESS) {
   5375 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5376 			return (ENODEV);
   5377 		}
   5378 	}
   5379 
   5380 	/* Set new value and report new advertised mode */
   5381 	if (advertise & 0x1) {
   5382 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5383 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5384 			return (EINVAL);
   5385 		}
   5386 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5387 	}
   5388 	if (advertise & 0x2) {
   5389 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5390 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5391 			return (EINVAL);
   5392 		}
   5393 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5394 	}
   5395 	if (advertise & 0x4) {
   5396 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5397 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5398 			return (EINVAL);
   5399 		}
   5400 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5401 	}
   5402 	if (advertise & 0x8) {
   5403 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5404 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5405 			return (EINVAL);
   5406 		}
   5407 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5408 	}
   5409 	if (advertise & 0x10) {
   5410 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5411 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5412 			return (EINVAL);
   5413 		}
   5414 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5415 	}
   5416 	if (advertise & 0x20) {
   5417 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5418 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5419 			return (EINVAL);
   5420 		}
   5421 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5422 	}
   5423 	if (advertise == 0)
   5424 		speed = link_caps; /* All capable link speed */
   5425 
   5426 	hw->mac.autotry_restart = TRUE;
   5427 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5428 	adapter->advertise = advertise;
   5429 
   5430 	return (0);
   5431 } /* ixgbe_set_advertise */
   5432 
   5433 /************************************************************************
   5434  * ixgbe_get_advertise - Get current advertised speed settings
   5435  *
   5436  *   Formatted for sysctl usage.
   5437  *   Flags:
   5438  *     0x01 - advertise 100 Mb
   5439  *     0x02 - advertise 1G
   5440  *     0x04 - advertise 10G
   5441  *     0x08 - advertise 10 Mb (yes, Mb)
   5442  *     0x10 - advertise 2.5G
   5443  *     0x20 - advertise 5G
   5444  ************************************************************************/
   5445 static int
   5446 ixgbe_get_advertise(struct adapter *adapter)
   5447 {
   5448 	struct ixgbe_hw	 *hw = &adapter->hw;
   5449 	int		 speed;
   5450 	ixgbe_link_speed link_caps = 0;
   5451 	s32		 err;
   5452 	bool		 negotiate = FALSE;
   5453 
   5454 	/*
   5455 	 * Advertised speed means nothing unless it's copper or
   5456 	 * multi-speed fiber
   5457 	 */
   5458 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5459 	    !(hw->phy.multispeed_fiber))
   5460 		return (0);
   5461 
   5462 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5463 	if (err != IXGBE_SUCCESS)
   5464 		return (0);
   5465 
   5466 	speed =
   5467 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5468 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5469 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5470 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5471 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5472 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5473 
   5474 	return speed;
   5475 } /* ixgbe_get_advertise */
   5476 
   5477 /************************************************************************
   5478  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5479  *
   5480  *   Control values:
   5481  *     0/1 - off / on (use default value of 1000)
   5482  *
   5483  *     Legal timer values are:
   5484  *     50,100,250,500,1000,2000,5000,10000
   5485  *
   5486  *     Turning off interrupt moderation will also turn this off.
   5487  ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet   *ifp = adapter->ifp;
	int	       error;
	int	       newval;

	/* No sysctl writes while firmware recovery is in progress */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	newval = adapter->dmac;
	node.sysctl_data = &newval;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	/* Only the values documented in the banner comment are accepted */
	switch (newval) {
	case 0:
		/* Disabled */
		adapter->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		adapter->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		adapter->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */
   5537 
   5538 #ifdef IXGBE_DEBUG
   5539 /************************************************************************
   5540  * ixgbe_sysctl_power_state
   5541  *
   5542  *   Sysctl to test power states
   5543  *   Values:
   5544  *     0      - set device to D0
   5545  *     3      - set device to D3
   5546  *     (none) - get current device power state
   5547  ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * FreeBSD-derived body, not yet ported: it still references
	 * req->newp, pci_get_powerstate() and DEVICE_SUSPEND/RESUME,
	 * which are not NetBSD interfaces.  Compiled out until ported.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t       dev =  adapter->dev;
	int	       curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Power-state switching is not implemented on NetBSD yet */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
   5583 #endif
   5584 
   5585 /************************************************************************
   5586  * ixgbe_sysctl_wol_enable
   5587  *
   5588  *   Sysctl to enable/disable the WoL capability,
   5589  *   if supported by the adapter.
   5590  *
   5591  *   Values:
   5592  *     0 - disabled
   5593  *     1 - enabled
   5594  ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool		new_wol_enabled;
	int		error = 0;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	/* Nothing to do when the value is unchanged */
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* WoL may only be turned on if the adapter actually supports it */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
   5623 
   5624 /************************************************************************
   5625  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5626  *
   5627  *   Sysctl to enable/disable the types of packets that the
   5628  *   adapter will wake up on upon receipt.
   5629  *   Flags:
   5630  *     0x1  - Link Status Change
   5631  *     0x2  - Magic Packet
   5632  *     0x4  - Direct Exact
   5633  *     0x8  - Directed Multicast
   5634  *     0x10 - Broadcast
   5635  *     0x20 - ARP/IPv4 Request Packet
   5636  *     0x40 - Direct IPv4 Packet
   5637  *     0x80 - Direct IPv6 Packet
   5638  *
   5639  *   Settings not listed above will cause the sysctl to return an error.
   5640  ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */
	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the low byte holds user-settable flags */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * NOTE(review): ORing in (0xffffff & adapter->wufc) merges the OLD
	 * low 24 bits back in, so a flag bit that is currently set can
	 * apparently never be cleared via this sysctl.  The mask was
	 * presumably meant to be 0xffffff00 (preserve only the non-flag
	 * high bits) -- TODO confirm against upstream before changing.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
   5670 
   5671 #ifdef IXGBE_DEBUG
   5672 /************************************************************************
   5673  * ixgbe_sysctl_print_rss_config
   5674  ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * FreeBSD-derived body, compiled out: it relies on sbuf(9) and
	 * the FreeBSD sysctl 'req' handle, neither of which exist here.
	 */
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	struct sbuf	*buf;
	int		error = 0, reta_size;
	u32		reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
   5731 #endif /* IXGBE_DEBUG */
   5732 
   5733 /************************************************************************
   5734  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5735  *
   5736  *   For X552/X557-AT devices using an external PHY
   5737  ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	int val;
	u16 reg;
	int		error;

	/* Reading the PHY touches hardware; refuse during FW recovery */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Only the X550EM_X_10G_T exposes this external sensor */
	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(adapter->dev,
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(adapter->dev,
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	node.sysctl_data = &val;

	/* Shift temp for output */
	val = reg >> 8;

	/* Read-only node: the lookup just copies 'val' out to userland */
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	return (0);
} /* ixgbe_sysctl_phy_temp */
   5775 
   5776 /************************************************************************
   5777  * ixgbe_sysctl_phy_overtemp_occurred
   5778  *
   5779  *   Reports (directly from the PHY) whether the current PHY
   5780  *   temperature is over the overtemp threshold.
   5781  ************************************************************************/
   5782 static int
   5783 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5784 {
   5785 	struct sysctlnode node = *rnode;
   5786 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5787 	struct ixgbe_hw *hw = &adapter->hw;
   5788 	int val, error;
   5789 	u16 reg;
   5790 
   5791 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5792 		return (EPERM);
   5793 
   5794 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5795 		device_printf(adapter->dev,
   5796 		    "Device has no supported external thermal sensor.\n");
   5797 		return (ENODEV);
   5798 	}
   5799 
   5800 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5801 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5802 		device_printf(adapter->dev,
   5803 		    "Error reading from PHY's temperature status register\n");
   5804 		return (EAGAIN);
   5805 	}
   5806 
   5807 	node.sysctl_data = &val;
   5808 
   5809 	/* Get occurrence bit */
   5810 	val = !!(reg & 0x4000);
   5811 
   5812 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5813 	if ((error) || (newp == NULL))
   5814 		return (error);
   5815 
   5816 	return (0);
   5817 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5818 
   5819 /************************************************************************
   5820  * ixgbe_sysctl_eee_state
   5821  *
   5822  *   Sysctl to set EEE power saving feature
   5823  *   Values:
   5824  *     0      - disable EEE
   5825  *     1      - enable EEE
   5826  *     (none) - get current device EEE state
   5827  ************************************************************************/
   5828 static int
   5829 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5830 {
   5831 	struct sysctlnode node = *rnode;
   5832 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5833 	struct ifnet   *ifp = adapter->ifp;
   5834 	device_t       dev = adapter->dev;
   5835 	int	       curr_eee, new_eee, error = 0;
   5836 	s32	       retval;
   5837 
   5838 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5839 		return (EPERM);
   5840 
   5841 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5842 	node.sysctl_data = &new_eee;
   5843 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5844 	if ((error) || (newp == NULL))
   5845 		return (error);
   5846 
   5847 	/* Nothing to do */
   5848 	if (new_eee == curr_eee)
   5849 		return (0);
   5850 
   5851 	/* Not supported */
   5852 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5853 		return (EINVAL);
   5854 
   5855 	/* Bounds checking */
   5856 	if ((new_eee < 0) || (new_eee > 1))
   5857 		return (EINVAL);
   5858 
   5859 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5860 	if (retval) {
   5861 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5862 		return (EINVAL);
   5863 	}
   5864 
   5865 	/* Restart auto-neg */
   5866 	ifp->if_init(ifp);
   5867 
   5868 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5869 
   5870 	/* Cache new value */
   5871 	if (new_eee)
   5872 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5873 	else
   5874 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5875 
   5876 	return (error);
   5877 } /* ixgbe_sysctl_eee_state */
   5878 
/*
 * PRINTQS(adapter, regname) - dump the per-queue register "regname" for
 * every queue on a single line, prefixed with the device name.
 * Debug helper used by ixgbe_print_debug_info().
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
   5892 
   5893 /************************************************************************
   5894  * ixgbe_print_debug_info
   5895  *
   5896  *   Called only when em_display_debug_stats is enabled.
   5897  *   Provides a way to take a look at important statistics
   5898  *   maintained by the driver and hardware.
   5899  ************************************************************************/
   5900 static void
   5901 ixgbe_print_debug_info(struct adapter *adapter)
   5902 {
   5903 	device_t	dev = adapter->dev;
   5904 	struct ixgbe_hw *hw = &adapter->hw;
   5905 	int table_size;
   5906 	int i;
   5907 
   5908 	switch (adapter->hw.mac.type) {
   5909 	case ixgbe_mac_X550:
   5910 	case ixgbe_mac_X550EM_x:
   5911 	case ixgbe_mac_X550EM_a:
   5912 		table_size = 128;
   5913 		break;
   5914 	default:
   5915 		table_size = 32;
   5916 		break;
   5917 	}
   5918 
   5919 	device_printf(dev, "[E]RETA:\n");
   5920 	for (i = 0; i < table_size; i++) {
   5921 		if (i < 32)
   5922 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5923 				IXGBE_RETA(i)));
   5924 		else
   5925 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5926 				IXGBE_ERETA(i - 32)));
   5927 	}
   5928 
   5929 	device_printf(dev, "queue:");
   5930 	for (i = 0; i < adapter->num_queues; i++) {
   5931 		printf((i == 0) ? "\t" : " ");
   5932 		printf("%8d", i);
   5933 	}
   5934 	printf("\n");
   5935 	PRINTQS(adapter, RDBAL);
   5936 	PRINTQS(adapter, RDBAH);
   5937 	PRINTQS(adapter, RDLEN);
   5938 	PRINTQS(adapter, SRRCTL);
   5939 	PRINTQS(adapter, RDH);
   5940 	PRINTQS(adapter, RDT);
   5941 	PRINTQS(adapter, RXDCTL);
   5942 
   5943 	device_printf(dev, "RQSMR:");
   5944 	for (i = 0; i < adapter->num_queues / 4; i++) {
   5945 		printf((i == 0) ? "\t" : " ");
   5946 		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
   5947 	}
   5948 	printf("\n");
   5949 
   5950 	device_printf(dev, "disabled_count:");
   5951 	for (i = 0; i < adapter->num_queues; i++) {
   5952 		printf((i == 0) ? "\t" : " ");
   5953 		printf("%8d", adapter->queues[i].disabled_count);
   5954 	}
   5955 	printf("\n");
   5956 
   5957 	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
   5958 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5959 		device_printf(dev, "EIMS_EX(0):\t%08x\n",
   5960 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
   5961 		device_printf(dev, "EIMS_EX(1):\t%08x\n",
   5962 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
   5963 	}
   5964 } /* ixgbe_print_debug_info */
   5965 
   5966 /************************************************************************
   5967  * ixgbe_sysctl_debug
   5968  ************************************************************************/
   5969 static int
   5970 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
   5971 {
   5972 	struct sysctlnode node = *rnode;
   5973 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5974 	int	       error, result = 0;
   5975 
   5976 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5977 		return (EPERM);
   5978 
   5979 	node.sysctl_data = &result;
   5980 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5981 
   5982 	if (error || newp == NULL)
   5983 		return error;
   5984 
   5985 	if (result == 1)
   5986 		ixgbe_print_debug_info(adapter);
   5987 
   5988 	return 0;
   5989 } /* ixgbe_sysctl_debug */
   5990 
   5991 /************************************************************************
   5992  * ixgbe_init_device_features
   5993  ************************************************************************/
   5994 static void
   5995 ixgbe_init_device_features(struct adapter *adapter)
   5996 {
   5997 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5998 			  | IXGBE_FEATURE_RSS
   5999 			  | IXGBE_FEATURE_MSI
   6000 			  | IXGBE_FEATURE_MSIX
   6001 			  | IXGBE_FEATURE_LEGACY_IRQ
   6002 			  | IXGBE_FEATURE_LEGACY_TX;
   6003 
   6004 	/* Set capabilities first... */
   6005 	switch (adapter->hw.mac.type) {
   6006 	case ixgbe_mac_82598EB:
   6007 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   6008 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   6009 		break;
   6010 	case ixgbe_mac_X540:
   6011 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6012 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6013 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   6014 		    (adapter->hw.bus.func == 0))
   6015 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6016 		break;
   6017 	case ixgbe_mac_X550:
   6018 		/*
   6019 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6020 		 * NVM Image version.
   6021 		 */
   6022 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6023 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6024 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6025 		break;
   6026 	case ixgbe_mac_X550EM_x:
   6027 		/*
   6028 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6029 		 * NVM Image version.
   6030 		 */
   6031 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6032 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6033 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   6034 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6035 		break;
   6036 	case ixgbe_mac_X550EM_a:
   6037 		/*
   6038 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6039 		 * NVM Image version.
   6040 		 */
   6041 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6042 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6043 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6044 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   6045 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   6046 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6047 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6048 		}
   6049 		break;
   6050 	case ixgbe_mac_82599EB:
   6051 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6052 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6053 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   6054 		    (adapter->hw.bus.func == 0))
   6055 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6056 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   6057 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6058 		break;
   6059 	default:
   6060 		break;
   6061 	}
   6062 
   6063 	/* Enabled by default... */
   6064 	/* Fan failure detection */
   6065 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   6066 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   6067 	/* Netmap */
   6068 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   6069 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   6070 	/* EEE */
   6071 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   6072 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   6073 	/* Thermal Sensor */
   6074 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   6075 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   6076 	/*
   6077 	 * Recovery mode:
   6078 	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
   6079 	 * NVM Image version.
   6080 	 */
   6081 
   6082 	/* Enabled via global sysctl... */
   6083 	/* Flow Director */
   6084 	if (ixgbe_enable_fdir) {
   6085 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   6086 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   6087 		else
   6088 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   6089 	}
   6090 	/* Legacy (single queue) transmit */
   6091 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   6092 	    ixgbe_enable_legacy_tx)
   6093 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   6094 	/*
   6095 	 * Message Signal Interrupts - Extended (MSI-X)
   6096 	 * Normal MSI is only enabled if MSI-X calls fail.
   6097 	 */
   6098 	if (!ixgbe_enable_msix)
   6099 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   6100 	/* Receive-Side Scaling (RSS) */
   6101 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   6102 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   6103 
   6104 	/* Disable features with unmet dependencies... */
   6105 	/* No MSI-X */
   6106 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   6107 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6108 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6109 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   6110 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   6111 	}
   6112 } /* ixgbe_init_device_features */
   6113 
   6114 /************************************************************************
   6115  * ixgbe_probe - Device identification routine
   6116  *
   6117  *   Determines if the driver should be loaded on
   6118  *   adapter based on its PCI vendor/device ID.
   6119  *
   6120  *   return BUS_PROBE_DEFAULT on success, positive on failure
   6121  ************************************************************************/
   6122 static int
   6123 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   6124 {
   6125 	const struct pci_attach_args *pa = aux;
   6126 
   6127 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   6128 }
   6129 
   6130 static const ixgbe_vendor_info_t *
   6131 ixgbe_lookup(const struct pci_attach_args *pa)
   6132 {
   6133 	const ixgbe_vendor_info_t *ent;
   6134 	pcireg_t subid;
   6135 
   6136 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   6137 
   6138 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   6139 		return NULL;
   6140 
   6141 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   6142 
   6143 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   6144 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   6145 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   6146 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   6147 			(ent->subvendor_id == 0)) &&
   6148 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   6149 			(ent->subdevice_id == 0))) {
   6150 			return ent;
   6151 		}
   6152 	}
   6153 	return NULL;
   6154 }
   6155 
   6156 static int
   6157 ixgbe_ifflags_cb(struct ethercom *ec)
   6158 {
   6159 	struct ifnet *ifp = &ec->ec_if;
   6160 	struct adapter *adapter = ifp->if_softc;
   6161 	int change, rv = 0;
   6162 
   6163 	IXGBE_CORE_LOCK(adapter);
   6164 
   6165 	change = ifp->if_flags ^ adapter->if_flags;
   6166 	if (change != 0)
   6167 		adapter->if_flags = ifp->if_flags;
   6168 
   6169 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   6170 		rv = ENETRESET;
   6171 		goto out;
   6172 	} else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   6173 		ixgbe_set_promisc(adapter);
   6174 
   6175 	/* Set up VLAN support and filter */
   6176 	ixgbe_setup_vlan_hw_support(adapter);
   6177 
   6178 out:
   6179 	IXGBE_CORE_UNLOCK(adapter);
   6180 
   6181 	return rv;
   6182 }
   6183 
   6184 /************************************************************************
   6185  * ixgbe_ioctl - Ioctl entry point
   6186  *
   6187  *   Called when the user wants to configure the interface.
   6188  *
   6189  *   return 0 on success, positive on failure
   6190  ************************************************************************/
   6191 static int
   6192 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   6193 {
   6194 	struct adapter	*adapter = ifp->if_softc;
   6195 	struct ixgbe_hw *hw = &adapter->hw;
   6196 	struct ifcapreq *ifcr = data;
   6197 	struct ifreq	*ifr = data;
   6198 	int		error = 0;
   6199 	int l4csum_en;
   6200 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   6201 	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   6202 
   6203 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   6204 		return (EPERM);
   6205 
   6206 	switch (command) {
   6207 	case SIOCSIFFLAGS:
   6208 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   6209 		break;
   6210 	case SIOCADDMULTI:
   6211 	case SIOCDELMULTI:
   6212 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   6213 		break;
   6214 	case SIOCSIFMEDIA:
   6215 	case SIOCGIFMEDIA:
   6216 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   6217 		break;
   6218 	case SIOCSIFCAP:
   6219 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   6220 		break;
   6221 	case SIOCSIFMTU:
   6222 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   6223 		break;
   6224 #ifdef __NetBSD__
   6225 	case SIOCINITIFADDR:
   6226 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   6227 		break;
   6228 	case SIOCGIFFLAGS:
   6229 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   6230 		break;
   6231 	case SIOCGIFAFLAG_IN:
   6232 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   6233 		break;
   6234 	case SIOCGIFADDR:
   6235 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   6236 		break;
   6237 	case SIOCGIFMTU:
   6238 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   6239 		break;
   6240 	case SIOCGIFCAP:
   6241 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   6242 		break;
   6243 	case SIOCGETHERCAP:
   6244 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   6245 		break;
   6246 	case SIOCGLIFADDR:
   6247 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   6248 		break;
   6249 	case SIOCZIFDATA:
   6250 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   6251 		hw->mac.ops.clear_hw_cntrs(hw);
   6252 		ixgbe_clear_evcnt(adapter);
   6253 		break;
   6254 	case SIOCAIFADDR:
   6255 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   6256 		break;
   6257 #endif
   6258 	default:
   6259 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   6260 		break;
   6261 	}
   6262 
   6263 	switch (command) {
   6264 	case SIOCSIFMEDIA:
   6265 	case SIOCGIFMEDIA:
   6266 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   6267 	case SIOCGI2C:
   6268 	{
   6269 		struct ixgbe_i2c_req	i2c;
   6270 
   6271 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   6272 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   6273 		if (error != 0)
   6274 			break;
   6275 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   6276 			error = EINVAL;
   6277 			break;
   6278 		}
   6279 		if (i2c.len > sizeof(i2c.data)) {
   6280 			error = EINVAL;
   6281 			break;
   6282 		}
   6283 
   6284 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   6285 		    i2c.dev_addr, i2c.data);
   6286 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   6287 		break;
   6288 	}
   6289 	case SIOCSIFCAP:
   6290 		/* Layer-4 Rx checksum offload has to be turned on and
   6291 		 * off as a unit.
   6292 		 */
   6293 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   6294 		if (l4csum_en != l4csum && l4csum_en != 0)
   6295 			return EINVAL;
   6296 		/*FALLTHROUGH*/
   6297 	case SIOCADDMULTI:
   6298 	case SIOCDELMULTI:
   6299 	case SIOCSIFFLAGS:
   6300 	case SIOCSIFMTU:
   6301 	default:
   6302 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   6303 			return error;
   6304 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   6305 			;
   6306 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   6307 			IXGBE_CORE_LOCK(adapter);
   6308 			if ((ifp->if_flags & IFF_RUNNING) != 0)
   6309 				ixgbe_init_locked(adapter);
   6310 			ixgbe_recalculate_max_frame(adapter);
   6311 			IXGBE_CORE_UNLOCK(adapter);
   6312 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   6313 			/*
   6314 			 * Multicast list has changed; set the hardware filter
   6315 			 * accordingly.
   6316 			 */
   6317 			IXGBE_CORE_LOCK(adapter);
   6318 			ixgbe_disable_intr(adapter);
   6319 			ixgbe_set_multi(adapter);
   6320 			ixgbe_enable_intr(adapter);
   6321 			IXGBE_CORE_UNLOCK(adapter);
   6322 		}
   6323 		return 0;
   6324 	}
   6325 
   6326 	return error;
   6327 } /* ixgbe_ioctl */
   6328 
   6329 /************************************************************************
   6330  * ixgbe_check_fan_failure
   6331  ************************************************************************/
   6332 static void
   6333 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   6334 {
   6335 	u32 mask;
   6336 
   6337 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   6338 	    IXGBE_ESDP_SDP1;
   6339 
   6340 	if (reg & mask)
   6341 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   6342 } /* ixgbe_check_fan_failure */
   6343 
   6344 /************************************************************************
   6345  * ixgbe_handle_que
   6346  ************************************************************************/
   6347 static void
   6348 ixgbe_handle_que(void *context)
   6349 {
   6350 	struct ix_queue *que = context;
   6351 	struct adapter	*adapter = que->adapter;
   6352 	struct tx_ring	*txr = que->txr;
   6353 	struct ifnet	*ifp = adapter->ifp;
   6354 	bool		more = false;
   6355 
   6356 	que->handleq.ev_count++;
   6357 
   6358 	if (ifp->if_flags & IFF_RUNNING) {
   6359 		more = ixgbe_rxeof(que);
   6360 		IXGBE_TX_LOCK(txr);
   6361 		more |= ixgbe_txeof(txr);
   6362 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6363 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   6364 				ixgbe_mq_start_locked(ifp, txr);
   6365 		/* Only for queue 0 */
   6366 		/* NetBSD still needs this for CBQ */
   6367 		if ((&adapter->queues[0] == que)
   6368 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   6369 			ixgbe_legacy_start_locked(ifp, txr);
   6370 		IXGBE_TX_UNLOCK(txr);
   6371 	}
   6372 
   6373 	if (more) {
   6374 		que->req.ev_count++;
   6375 		ixgbe_sched_handle_que(adapter, que);
   6376 	} else if (que->res != NULL) {
   6377 		/* Re-enable this interrupt */
   6378 		ixgbe_enable_queue(adapter, que->msix);
   6379 	} else
   6380 		ixgbe_enable_intr(adapter);
   6381 
   6382 	return;
   6383 } /* ixgbe_handle_que */
   6384 
   6385 /************************************************************************
   6386  * ixgbe_handle_que_work
   6387  ************************************************************************/
   6388 static void
   6389 ixgbe_handle_que_work(struct work *wk, void *context)
   6390 {
   6391 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   6392 
   6393 	/*
   6394 	 * "enqueued flag" is not required here.
   6395 	 * See ixgbe_msix_que().
   6396 	 */
   6397 	ixgbe_handle_que(que);
   6398 }
   6399 
   6400 /************************************************************************
   6401  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   6402  ************************************************************************/
   6403 static int
   6404 ixgbe_allocate_legacy(struct adapter *adapter,
   6405     const struct pci_attach_args *pa)
   6406 {
   6407 	device_t	dev = adapter->dev;
   6408 	struct ix_queue *que = adapter->queues;
   6409 	struct tx_ring	*txr = adapter->tx_rings;
   6410 	int		counts[PCI_INTR_TYPE_SIZE];
   6411 	pci_intr_type_t intr_type, max_type;
   6412 	char		intrbuf[PCI_INTRSTR_LEN];
   6413 	const char	*intrstr = NULL;
   6414 
   6415 	/* We allocate a single interrupt resource */
   6416 	max_type = PCI_INTR_TYPE_MSI;
   6417 	counts[PCI_INTR_TYPE_MSIX] = 0;
   6418 	counts[PCI_INTR_TYPE_MSI] =
   6419 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   6420 	/* Check not feat_en but feat_cap to fallback to INTx */
   6421 	counts[PCI_INTR_TYPE_INTX] =
   6422 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   6423 
   6424 alloc_retry:
   6425 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   6426 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   6427 		return ENXIO;
   6428 	}
   6429 	adapter->osdep.nintrs = 1;
   6430 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   6431 	    intrbuf, sizeof(intrbuf));
   6432 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   6433 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   6434 	    device_xname(dev));
   6435 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   6436 	if (adapter->osdep.ihs[0] == NULL) {
   6437 		aprint_error_dev(dev,"unable to establish %s\n",
   6438 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6439 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6440 		adapter->osdep.intrs = NULL;
   6441 		switch (intr_type) {
   6442 		case PCI_INTR_TYPE_MSI:
   6443 			/* The next try is for INTx: Disable MSI */
   6444 			max_type = PCI_INTR_TYPE_INTX;
   6445 			counts[PCI_INTR_TYPE_INTX] = 1;
   6446 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6447 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   6448 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6449 				goto alloc_retry;
   6450 			} else
   6451 				break;
   6452 		case PCI_INTR_TYPE_INTX:
   6453 		default:
   6454 			/* See below */
   6455 			break;
   6456 		}
   6457 	}
   6458 	if (intr_type == PCI_INTR_TYPE_INTX) {
   6459 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6460 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6461 	}
   6462 	if (adapter->osdep.ihs[0] == NULL) {
   6463 		aprint_error_dev(dev,
   6464 		    "couldn't establish interrupt%s%s\n",
   6465 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   6466 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6467 		adapter->osdep.intrs = NULL;
   6468 		return ENXIO;
   6469 	}
   6470 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   6471 	/*
   6472 	 * Try allocating a fast interrupt and the associated deferred
   6473 	 * processing contexts.
   6474 	 */
   6475 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6476 		txr->txr_si =
   6477 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6478 			ixgbe_deferred_mq_start, txr);
   6479 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6480 	    ixgbe_handle_que, que);
   6481 
   6482 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   6483 		& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   6484 		aprint_error_dev(dev,
   6485 		    "could not establish software interrupts\n");
   6486 
   6487 		return ENXIO;
   6488 	}
   6489 	/* For simplicity in the handlers */
   6490 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   6491 
   6492 	return (0);
   6493 } /* ixgbe_allocate_legacy */
   6494 
   6495 /************************************************************************
   6496  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6497  ************************************************************************/
   6498 static int
   6499 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6500 {
   6501 	device_t	dev = adapter->dev;
   6502 	struct		ix_queue *que = adapter->queues;
   6503 	struct		tx_ring *txr = adapter->tx_rings;
   6504 	pci_chipset_tag_t pc;
   6505 	char		intrbuf[PCI_INTRSTR_LEN];
   6506 	char		intr_xname[32];
   6507 	char		wqname[MAXCOMLEN];
   6508 	const char	*intrstr = NULL;
   6509 	int		error, vector = 0;
   6510 	int		cpu_id = 0;
   6511 	kcpuset_t	*affinity;
   6512 #ifdef RSS
   6513 	unsigned int	rss_buckets = 0;
   6514 	kcpuset_t	cpu_mask;
   6515 #endif
   6516 
   6517 	pc = adapter->osdep.pc;
   6518 #ifdef	RSS
   6519 	/*
   6520 	 * If we're doing RSS, the number of queues needs to
   6521 	 * match the number of RSS buckets that are configured.
   6522 	 *
   6523 	 * + If there's more queues than RSS buckets, we'll end
   6524 	 *   up with queues that get no traffic.
   6525 	 *
   6526 	 * + If there's more RSS buckets than queues, we'll end
   6527 	 *   up having multiple RSS buckets map to the same queue,
   6528 	 *   so there'll be some contention.
   6529 	 */
   6530 	rss_buckets = rss_getnumbuckets();
   6531 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6532 	    (adapter->num_queues != rss_buckets)) {
   6533 		device_printf(dev,
   6534 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6535 		    "; performance will be impacted.\n",
   6536 		    __func__, adapter->num_queues, rss_buckets);
   6537 	}
   6538 #endif
   6539 
   6540 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6541 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6542 	    adapter->osdep.nintrs) != 0) {
   6543 		aprint_error_dev(dev,
   6544 		    "failed to allocate MSI-X interrupt\n");
   6545 		return (ENXIO);
   6546 	}
   6547 
   6548 	kcpuset_create(&affinity, false);
   6549 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6550 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6551 		    device_xname(dev), i);
   6552 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6553 		    sizeof(intrbuf));
   6554 #ifdef IXGBE_MPSAFE
   6555 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6556 		    true);
   6557 #endif
   6558 		/* Set the handler function */
   6559 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6560 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6561 		    intr_xname);
   6562 		if (que->res == NULL) {
   6563 			aprint_error_dev(dev,
   6564 			    "Failed to register QUE handler\n");
   6565 			error = ENXIO;
   6566 			goto err_out;
   6567 		}
   6568 		que->msix = vector;
   6569 		adapter->active_queues |= 1ULL << que->msix;
   6570 
   6571 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6572 #ifdef	RSS
   6573 			/*
   6574 			 * The queue ID is used as the RSS layer bucket ID.
   6575 			 * We look up the queue ID -> RSS CPU ID and select
   6576 			 * that.
   6577 			 */
   6578 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6579 			CPU_SETOF(cpu_id, &cpu_mask);
   6580 #endif
   6581 		} else {
   6582 			/*
   6583 			 * Bind the MSI-X vector, and thus the
   6584 			 * rings to the corresponding CPU.
   6585 			 *
   6586 			 * This just happens to match the default RSS
   6587 			 * round-robin bucket -> queue -> CPU allocation.
   6588 			 */
   6589 			if (adapter->num_queues > 1)
   6590 				cpu_id = i;
   6591 		}
   6592 		/* Round-robin affinity */
   6593 		kcpuset_zero(affinity);
   6594 		kcpuset_set(affinity, cpu_id % ncpu);
   6595 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6596 		    NULL);
   6597 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6598 		    intrstr);
   6599 		if (error == 0) {
   6600 #if 1 /* def IXGBE_DEBUG */
   6601 #ifdef	RSS
   6602 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6603 			    cpu_id % ncpu);
   6604 #else
   6605 			aprint_normal(", bound queue %d to cpu %d", i,
   6606 			    cpu_id % ncpu);
   6607 #endif
   6608 #endif /* IXGBE_DEBUG */
   6609 		}
   6610 		aprint_normal("\n");
   6611 
   6612 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6613 			txr->txr_si = softint_establish(
   6614 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6615 				ixgbe_deferred_mq_start, txr);
   6616 			if (txr->txr_si == NULL) {
   6617 				aprint_error_dev(dev,
   6618 				    "couldn't establish software interrupt\n");
   6619 				error = ENXIO;
   6620 				goto err_out;
   6621 			}
   6622 		}
   6623 		que->que_si
   6624 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6625 			ixgbe_handle_que, que);
   6626 		if (que->que_si == NULL) {
   6627 			aprint_error_dev(dev,
   6628 			    "couldn't establish software interrupt\n");
   6629 			error = ENXIO;
   6630 			goto err_out;
   6631 		}
   6632 	}
   6633 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6634 	error = workqueue_create(&adapter->txr_wq, wqname,
   6635 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6636 	    IXGBE_WORKQUEUE_FLAGS);
   6637 	if (error) {
   6638 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6639 		goto err_out;
   6640 	}
   6641 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6642 
   6643 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6644 	error = workqueue_create(&adapter->que_wq, wqname,
   6645 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6646 	    IXGBE_WORKQUEUE_FLAGS);
   6647 	if (error) {
   6648 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6649 		goto err_out;
   6650 	}
   6651 
   6652 	/* and Link */
   6653 	cpu_id++;
   6654 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6655 	adapter->vector = vector;
   6656 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6657 	    sizeof(intrbuf));
   6658 #ifdef IXGBE_MPSAFE
   6659 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6660 	    true);
   6661 #endif
   6662 	/* Set the link handler function */
   6663 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6664 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6665 	    intr_xname);
   6666 	if (adapter->osdep.ihs[vector] == NULL) {
   6667 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6668 		error = ENXIO;
   6669 		goto err_out;
   6670 	}
   6671 	/* Round-robin affinity */
   6672 	kcpuset_zero(affinity);
   6673 	kcpuset_set(affinity, cpu_id % ncpu);
   6674 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6675 	    NULL);
   6676 
   6677 	aprint_normal_dev(dev,
   6678 	    "for link, interrupting at %s", intrstr);
   6679 	if (error == 0)
   6680 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6681 	else
   6682 		aprint_normal("\n");
   6683 
   6684 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6685 		adapter->mbx_si =
   6686 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6687 			ixgbe_handle_mbx, adapter);
   6688 		if (adapter->mbx_si == NULL) {
   6689 			aprint_error_dev(dev,
   6690 			    "could not establish software interrupts\n");
   6691 
   6692 			error = ENXIO;
   6693 			goto err_out;
   6694 		}
   6695 	}
   6696 
   6697 	kcpuset_destroy(affinity);
   6698 	aprint_normal_dev(dev,
   6699 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6700 
   6701 	return (0);
   6702 
   6703 err_out:
   6704 	kcpuset_destroy(affinity);
   6705 	ixgbe_free_softint(adapter);
   6706 	ixgbe_free_pciintr_resources(adapter);
   6707 	return (error);
   6708 } /* ixgbe_allocate_msix */
   6709 
   6710 /************************************************************************
   6711  * ixgbe_configure_interrupts
   6712  *
   6713  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6714  *   This will also depend on user settings.
   6715  ************************************************************************/
   6716 static int
   6717 ixgbe_configure_interrupts(struct adapter *adapter)
   6718 {
   6719 	device_t dev = adapter->dev;
   6720 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6721 	int want, queues, msgs;
   6722 
   6723 	/* Default to 1 queue if MSI-X setup fails */
   6724 	adapter->num_queues = 1;
   6725 
   6726 	/* Override by tuneable */
   6727 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6728 		goto msi;
   6729 
   6730 	/*
   6731 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6732 	 * interrupt slot.
   6733 	 */
   6734 	if (ncpu == 1)
   6735 		goto msi;
   6736 
   6737 	/* First try MSI-X */
   6738 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6739 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6740 	if (msgs < 2)
   6741 		goto msi;
   6742 
   6743 	adapter->msix_mem = (void *)1; /* XXX */
   6744 
   6745 	/* Figure out a reasonable auto config value */
   6746 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6747 
   6748 #ifdef	RSS
   6749 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6750 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6751 		queues = min(queues, rss_getnumbuckets());
   6752 #endif
   6753 	if (ixgbe_num_queues > queues) {
   6754 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6755 		ixgbe_num_queues = queues;
   6756 	}
   6757 
   6758 	if (ixgbe_num_queues != 0)
   6759 		queues = ixgbe_num_queues;
   6760 	else
   6761 		queues = min(queues,
   6762 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6763 
   6764 	/* reflect correct sysctl value */
   6765 	ixgbe_num_queues = queues;
   6766 
   6767 	/*
   6768 	 * Want one vector (RX/TX pair) per queue
   6769 	 * plus an additional for Link.
   6770 	 */
   6771 	want = queues + 1;
   6772 	if (msgs >= want)
   6773 		msgs = want;
   6774 	else {
   6775 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6776 		    "%d vectors but %d queues wanted!\n",
   6777 		    msgs, want);
   6778 		goto msi;
   6779 	}
   6780 	adapter->num_queues = queues;
   6781 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6782 	return (0);
   6783 
   6784 	/*
   6785 	 * MSI-X allocation failed or provided us with
   6786 	 * less vectors than needed. Free MSI-X resources
   6787 	 * and we'll try enabling MSI.
   6788 	 */
   6789 msi:
   6790 	/* Without MSI-X, some features are no longer supported */
   6791 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6792 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6793 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6794 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6795 
   6796 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6797 	adapter->msix_mem = NULL; /* XXX */
   6798 	if (msgs > 1)
   6799 		msgs = 1;
   6800 	if (msgs != 0) {
   6801 		msgs = 1;
   6802 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6803 		return (0);
   6804 	}
   6805 
   6806 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6807 		aprint_error_dev(dev,
   6808 		    "Device does not support legacy interrupts.\n");
   6809 		return 1;
   6810 	}
   6811 
   6812 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6813 
   6814 	return (0);
   6815 } /* ixgbe_configure_interrupts */
   6816 
   6817 
   6818 /************************************************************************
   6819  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6820  *
   6821  *   Done outside of interrupt context since the driver might sleep
   6822  ************************************************************************/
   6823 static void
   6824 ixgbe_handle_link(void *context)
   6825 {
   6826 	struct adapter	*adapter = context;
   6827 	struct ixgbe_hw *hw = &adapter->hw;
   6828 
   6829 	IXGBE_CORE_LOCK(adapter);
   6830 	++adapter->link_sicount.ev_count;
   6831 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6832 	ixgbe_update_link_status(adapter);
   6833 
   6834 	/* Re-enable link interrupts */
   6835 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6836 
   6837 	IXGBE_CORE_UNLOCK(adapter);
   6838 } /* ixgbe_handle_link */
   6839 
   6840 /************************************************************************
   6841  * ixgbe_rearm_queues
   6842  ************************************************************************/
   6843 static __inline void
   6844 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6845 {
   6846 	u32 mask;
   6847 
   6848 	switch (adapter->hw.mac.type) {
   6849 	case ixgbe_mac_82598EB:
   6850 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6851 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6852 		break;
   6853 	case ixgbe_mac_82599EB:
   6854 	case ixgbe_mac_X540:
   6855 	case ixgbe_mac_X550:
   6856 	case ixgbe_mac_X550EM_x:
   6857 	case ixgbe_mac_X550EM_a:
   6858 		mask = (queues & 0xFFFFFFFF);
   6859 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6860 		mask = (queues >> 32);
   6861 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6862 		break;
   6863 	default:
   6864 		break;
   6865 	}
   6866 } /* ixgbe_rearm_queues */
   6867