Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe.c revision 1.199
      1 /* $NetBSD: ixgbe.c,v 1.199 2019/07/30 08:44:28 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
     80 /************************************************************************
     81  * Driver version
     82  ************************************************************************/
/* Upstream (FreeBSD) driver version this code is derived from. */
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 / X550EM */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass adapters */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
    150 
    151 /************************************************************************
    152  * Table of branding strings
    153  ************************************************************************/
/* Indexed by the last field of ixgbe_vendor_info_t (all entries use index 0). */
static const char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
    157 
    158 /************************************************************************
    159  * Function prototypes
    160  ************************************************************************/
    161 static int	ixgbe_probe(device_t, cfdata_t, void *);
    162 static void	ixgbe_attach(device_t, device_t, void *);
    163 static int	ixgbe_detach(device_t, int);
    164 #if 0
    165 static int	ixgbe_shutdown(device_t);
    166 #endif
    167 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    168 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    169 static int	ixgbe_ifflags_cb(struct ethercom *);
    170 static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
    171 static void	ixgbe_ifstop(struct ifnet *, int);
    172 static int	ixgbe_init(struct ifnet *);
    173 static void	ixgbe_init_locked(struct adapter *);
    174 static void	ixgbe_stop(void *);
    175 static void	ixgbe_init_device_features(struct adapter *);
    176 static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
    177 static void	ixgbe_add_media_types(struct adapter *);
    178 static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    179 static int	ixgbe_media_change(struct ifnet *);
    180 static int	ixgbe_allocate_pci_resources(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static void	ixgbe_free_softint(struct adapter *);
    183 static void	ixgbe_get_slot_info(struct adapter *);
    184 static int	ixgbe_allocate_msix(struct adapter *,
    185 		    const struct pci_attach_args *);
    186 static int	ixgbe_allocate_legacy(struct adapter *,
    187 		    const struct pci_attach_args *);
    188 static int	ixgbe_configure_interrupts(struct adapter *);
    189 static void	ixgbe_free_pciintr_resources(struct adapter *);
    190 static void	ixgbe_free_pci_resources(struct adapter *);
    191 static void	ixgbe_local_timer(void *);
    192 static void	ixgbe_local_timer1(void *);
    193 static void	ixgbe_recovery_mode_timer(void *);
    194 static int	ixgbe_setup_interface(device_t, struct adapter *);
    195 static void	ixgbe_config_gpie(struct adapter *);
    196 static void	ixgbe_config_dmac(struct adapter *);
    197 static void	ixgbe_config_delay_values(struct adapter *);
    198 static void	ixgbe_config_link(struct adapter *);
    199 static void	ixgbe_check_wol_support(struct adapter *);
    200 static int	ixgbe_setup_low_power_mode(struct adapter *);
    201 #if 0
    202 static void	ixgbe_rearm_queues(struct adapter *, u64);
    203 #endif
    204 
    205 static void	ixgbe_initialize_transmit_units(struct adapter *);
    206 static void	ixgbe_initialize_receive_units(struct adapter *);
    207 static void	ixgbe_enable_rx_drop(struct adapter *);
    208 static void	ixgbe_disable_rx_drop(struct adapter *);
    209 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    210 
    211 static void	ixgbe_enable_intr(struct adapter *);
    212 static void	ixgbe_disable_intr(struct adapter *);
    213 static void	ixgbe_update_stats_counters(struct adapter *);
    214 static void	ixgbe_set_promisc(struct adapter *);
    215 static void	ixgbe_set_multi(struct adapter *);
    216 static void	ixgbe_update_link_status(struct adapter *);
    217 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    218 static void	ixgbe_configure_ivars(struct adapter *);
    219 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    220 static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
    221 
    222 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    223 static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
    224 static int	ixgbe_register_vlan(void *, struct ifnet *, u16);
    225 static int	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    226 
    227 static void	ixgbe_add_device_sysctls(struct adapter *);
    228 static void	ixgbe_add_hw_stats(struct adapter *);
    229 static void	ixgbe_clear_evcnt(struct adapter *);
    230 static int	ixgbe_set_flowcntl(struct adapter *, int);
    231 static int	ixgbe_set_advertise(struct adapter *, int);
    232 static int	ixgbe_get_advertise(struct adapter *);
    233 
    234 /* Sysctl handlers */
    235 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    236 		     const char *, int *, int);
    237 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    238 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    239 static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    242 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    243 #ifdef IXGBE_DEBUG
    244 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    245 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    246 #endif
    247 static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    248 static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    249 static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    250 static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    251 static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    252 static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    253 static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
    254 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    255 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    256 
    257 /* Support for pluggable optic modules */
    258 static bool	ixgbe_sfp_probe(struct adapter *);
    259 
    260 /* Legacy (single vector) interrupt handler */
    261 static int	ixgbe_legacy_irq(void *);
    262 
    263 /* The MSI/MSI-X Interrupt handlers */
    264 static int	ixgbe_msix_que(void *);
    265 static int	ixgbe_msix_link(void *);
    266 
    267 /* Software interrupts for deferred work */
    268 static void	ixgbe_handle_que(void *);
    269 static void	ixgbe_handle_link(void *);
    270 static void	ixgbe_handle_msf(void *);
    271 static void	ixgbe_handle_mod(void *);
    272 static void	ixgbe_handle_phy(void *);
    273 
    274 /* Workqueue handler for deferred work */
    275 static void	ixgbe_handle_que_work(struct work *, void *);
    276 
    277 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    278 
    279 /************************************************************************
    280  *  NetBSD Device Interface Entry Points
    281  ************************************************************************/
/*
 * autoconf(9) attachment glue: registers the "ixg" driver with its
 * match/attach/detach entry points.  DVF_DETACH_SHUTDOWN lets the
 * device be detached at system shutdown.
 */
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);
    285 
    286 #if 0
    287 devclass_t ix_devclass;
    288 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    289 
    290 MODULE_DEPEND(ix, pci, 1, 1, 1);
    291 MODULE_DEPEND(ix, ether, 1, 1, 1);
    292 #ifdef DEV_NETMAP
    293 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    294 #endif
    295 #endif
    296 
    297 /*
    298  * TUNEABLE PARAMETERS:
    299  */
    300 
    301 /*
    302  * AIM: Adaptive Interrupt Moderation
    303  * which means that the interrupt rate
    304  * is varied over time based on the
    305  * traffic for that interrupt vector
    306  */
static bool ixgbe_enable_aim = true;
/*
 * NetBSD: FreeBSD's SYSCTL_INT registration macro is stubbed out to a
 * no-op below, so none of the SYSCTL_INT() lines in this file register
 * anything; the description strings are kept for reference only.
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overriden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
/* NetBSD: FreeBSD's TUNABLE_INT is likewise stubbed to a no-op. */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * Flags passed to callout(9), softint(9) and workqueue(9) depend on
 * whether the network stack is built MP-safe (NET_MPSAFE option).
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    419 
    420 /************************************************************************
    421  * ixgbe_initialize_rss_mapping
    422  ************************************************************************/
/*
 * Program the RSS redirection table (RETA/ERETA), hash key (RSSRK)
 * and hash-type enables (MRQC) so received flows are spread across
 * the configured RX queues.
 */
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 spaces queue indices; multiply to hit valid ones. */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 512-entry table (extra entries in ERETA). */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the active queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		/* Four 8-bit entries packed per 32-bit register. */
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				/* Entries 128..511 live in ERETA. */
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6
				| RSS_HASHTYPE_RSS_IPV6_EX
				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the generic hash types into MRQC field-enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
    536 
    537 /************************************************************************
    538  * ixgbe_initialize_receive_units - Setup receive registers and features.
    539  ************************************************************************/
    540 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    541 
/*
 * Program every RX queue's descriptor ring (base/length/head/tail),
 * per-queue SRRCTL and statistics mapping, then global filter
 * (FCTRL/HLREG0), RSS and RXCSUM settings.  Called with RX disabled.
 */
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: discard pause frames, pass MAC control frames */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size in SRRCTL units (1 KB), rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split receive types (non-82598 only) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
    666 
    667 /************************************************************************
    668  * ixgbe_initialize_transmit_units - Enable transmit units.
    669  ************************************************************************/
/*
 * Program every TX queue's descriptor ring (base/length/head/tail),
 * statistics mapping and head-writeback disable, then enable the TX
 * DMA engine (non-82598) and set the multiqueue transmit mode.
 */
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware queue index */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Ring was just reset: clear the no-descriptor-space latch */
		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the TX DMA engine */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
    758 
    759 /************************************************************************
    760  * ixgbe_attach - Device initialization routine
    761  *
    762  *   Called when the driver is being loaded.
    763  *   Identifies the type of hardware, allocates all resources
    764  *   and initializes the hardware.
    765  *
    766  *   return 0 on success, positive on failure
    767  ************************************************************************/
    768 static void
    769 ixgbe_attach(device_t parent, device_t dev, void *aux)
    770 {
    771 	struct adapter	*adapter;
    772 	struct ixgbe_hw *hw;
    773 	int		error = -1;
    774 	u32		ctrl_ext;
    775 	u16		high, low, nvmreg;
    776 	pcireg_t	id, subid;
    777 	const ixgbe_vendor_info_t *ent;
    778 	struct pci_attach_args *pa = aux;
    779 	const char *str;
    780 	char buf[256];
    781 
    782 	INIT_DEBUGOUT("ixgbe_attach: begin");
    783 
    784 	/* Allocate, clear, and link in our adapter structure */
    785 	adapter = device_private(dev);
    786 	adapter->hw.back = adapter;
    787 	adapter->dev = dev;
    788 	hw = &adapter->hw;
    789 	adapter->osdep.pc = pa->pa_pc;
    790 	adapter->osdep.tag = pa->pa_tag;
    791 	if (pci_dma64_available(pa))
    792 		adapter->osdep.dmat = pa->pa_dmat64;
    793 	else
    794 		adapter->osdep.dmat = pa->pa_dmat;
    795 	adapter->osdep.attached = false;
    796 
    797 	ent = ixgbe_lookup(pa);
    798 
    799 	KASSERT(ent != NULL);
    800 
    801 	aprint_normal(": %s, Version - %s\n",
    802 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    803 
    804 	/* Core Lock Init*/
    805 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    806 
    807 	/* Set up the timer callout */
    808 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    809 
    810 	/* Determine hardware revision */
    811 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    812 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    813 
    814 	hw->vendor_id = PCI_VENDOR(id);
    815 	hw->device_id = PCI_PRODUCT(id);
    816 	hw->revision_id =
    817 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    818 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    819 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    820 
    821 	/*
    822 	 * Make sure BUSMASTER is set
    823 	 */
    824 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    825 
    826 	/* Do base PCI setup - map BAR0 */
    827 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    828 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    829 		error = ENXIO;
    830 		goto err_out;
    831 	}
    832 
    833 	/* let hardware know driver is loaded */
    834 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    835 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    836 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    837 
    838 	/*
    839 	 * Initialize the shared code
    840 	 */
    841 	if (ixgbe_init_shared_code(hw) != 0) {
    842 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    843 		error = ENXIO;
    844 		goto err_out;
    845 	}
    846 
    847 	switch (hw->mac.type) {
    848 	case ixgbe_mac_82598EB:
    849 		str = "82598EB";
    850 		break;
    851 	case ixgbe_mac_82599EB:
    852 		str = "82599EB";
    853 		break;
    854 	case ixgbe_mac_X540:
    855 		str = "X540";
    856 		break;
    857 	case ixgbe_mac_X550:
    858 		str = "X550";
    859 		break;
    860 	case ixgbe_mac_X550EM_x:
    861 		str = "X550EM";
    862 		break;
    863 	case ixgbe_mac_X550EM_a:
    864 		str = "X550EM A";
    865 		break;
    866 	default:
    867 		str = "Unknown";
    868 		break;
    869 	}
    870 	aprint_normal_dev(dev, "device %s\n", str);
    871 
    872 	if (hw->mbx.ops.init_params)
    873 		hw->mbx.ops.init_params(hw);
    874 
    875 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    876 
    877 	/* Pick up the 82599 settings */
    878 	if (hw->mac.type != ixgbe_mac_82598EB) {
    879 		hw->phy.smart_speed = ixgbe_smart_speed;
    880 		adapter->num_segs = IXGBE_82599_SCATTER;
    881 	} else
    882 		adapter->num_segs = IXGBE_82598_SCATTER;
    883 
    884 	/* Ensure SW/FW semaphore is free */
    885 	ixgbe_init_swfw_semaphore(hw);
    886 
    887 	hw->mac.ops.set_lan_id(hw);
    888 	ixgbe_init_device_features(adapter);
    889 
    890 	if (ixgbe_configure_interrupts(adapter)) {
    891 		error = ENXIO;
    892 		goto err_out;
    893 	}
    894 
    895 	/* Allocate multicast array memory. */
    896 	adapter->mta = malloc(sizeof(*adapter->mta) *
    897 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    898 	if (adapter->mta == NULL) {
    899 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    900 		error = ENOMEM;
    901 		goto err_out;
    902 	}
    903 
    904 	/* Enable WoL (if supported) */
    905 	ixgbe_check_wol_support(adapter);
    906 
    907 	/* Register for VLAN events */
    908 	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
    909 
    910 	/* Verify adapter fan is still functional (if applicable) */
    911 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    912 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    913 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    914 	}
    915 
    916 	/* Set an initial default flow control value */
    917 	hw->fc.requested_mode = ixgbe_flow_control;
    918 
    919 	/* Sysctls for limiting the amount of work done in the taskqueues */
    920 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    921 	    "max number of rx packets to process",
    922 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    923 
    924 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    925 	    "max number of tx packets to process",
    926 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    927 
    928 	/* Do descriptor calc and sanity checks */
    929 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    930 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    931 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    932 		adapter->num_tx_desc = DEFAULT_TXD;
    933 	} else
    934 		adapter->num_tx_desc = ixgbe_txd;
    935 
    936 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    937 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    938 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    939 		adapter->num_rx_desc = DEFAULT_RXD;
    940 	} else
    941 		adapter->num_rx_desc = ixgbe_rxd;
    942 
    943 	/* Allocate our TX/RX Queues */
    944 	if (ixgbe_allocate_queues(adapter)) {
    945 		error = ENOMEM;
    946 		goto err_out;
    947 	}
    948 
    949 	hw->phy.reset_if_overtemp = TRUE;
    950 	error = ixgbe_reset_hw(hw);
    951 	hw->phy.reset_if_overtemp = FALSE;
    952 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    953 		/*
    954 		 * No optics in this port, set up
    955 		 * so the timer routine will probe
    956 		 * for later insertion.
    957 		 */
    958 		adapter->sfp_probe = TRUE;
    959 		error = IXGBE_SUCCESS;
    960 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    961 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    962 		error = EIO;
    963 		goto err_late;
    964 	} else if (error) {
    965 		aprint_error_dev(dev, "Hardware initialization failed\n");
    966 		error = EIO;
    967 		goto err_late;
    968 	}
    969 
    970 	/* Make sure we have a good EEPROM before we read from it */
    971 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    972 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    973 		error = EIO;
    974 		goto err_late;
    975 	}
    976 
    977 	aprint_normal("%s:", device_xname(dev));
    978 	/* NVM Image Version */
    979 	high = low = 0;
    980 	switch (hw->mac.type) {
    981 	case ixgbe_mac_X540:
    982 	case ixgbe_mac_X550EM_a:
    983 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    984 		if (nvmreg == 0xffff)
    985 			break;
    986 		high = (nvmreg >> 12) & 0x0f;
    987 		low = (nvmreg >> 4) & 0xff;
    988 		id = nvmreg & 0x0f;
    989 		aprint_normal(" NVM Image Version %u.", high);
    990 		if (hw->mac.type == ixgbe_mac_X540)
    991 			str = "%x";
    992 		else
    993 			str = "%02x";
    994 		aprint_normal(str, low);
    995 		aprint_normal(" ID 0x%x,", id);
    996 		break;
    997 	case ixgbe_mac_X550EM_x:
    998 	case ixgbe_mac_X550:
    999 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
   1000 		if (nvmreg == 0xffff)
   1001 			break;
   1002 		high = (nvmreg >> 12) & 0x0f;
   1003 		low = nvmreg & 0xff;
   1004 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
   1005 		break;
   1006 	default:
   1007 		break;
   1008 	}
   1009 	hw->eeprom.nvm_image_ver_high = high;
   1010 	hw->eeprom.nvm_image_ver_low = low;
   1011 
   1012 	/* PHY firmware revision */
   1013 	switch (hw->mac.type) {
   1014 	case ixgbe_mac_X540:
   1015 	case ixgbe_mac_X550:
   1016 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1017 		if (nvmreg == 0xffff)
   1018 			break;
   1019 		high = (nvmreg >> 12) & 0x0f;
   1020 		low = (nvmreg >> 4) & 0xff;
   1021 		id = nvmreg & 0x000f;
   1022 		aprint_normal(" PHY FW Revision %u.", high);
   1023 		if (hw->mac.type == ixgbe_mac_X540)
   1024 			str = "%x";
   1025 		else
   1026 			str = "%02x";
   1027 		aprint_normal(str, low);
   1028 		aprint_normal(" ID 0x%x,", id);
   1029 		break;
   1030 	default:
   1031 		break;
   1032 	}
   1033 
   1034 	/* NVM Map version & OEM NVM Image version */
   1035 	switch (hw->mac.type) {
   1036 	case ixgbe_mac_X550:
   1037 	case ixgbe_mac_X550EM_x:
   1038 	case ixgbe_mac_X550EM_a:
   1039 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1040 		if (nvmreg != 0xffff) {
   1041 			high = (nvmreg >> 12) & 0x0f;
   1042 			low = nvmreg & 0x00ff;
   1043 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1044 		}
   1045 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1046 		if (nvmreg != 0xffff) {
   1047 			high = (nvmreg >> 12) & 0x0f;
   1048 			low = nvmreg & 0x00ff;
   1049 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1050 			    low);
   1051 		}
   1052 		break;
   1053 	default:
   1054 		break;
   1055 	}
   1056 
   1057 	/* Print the ETrackID */
   1058 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1059 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1060 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1061 
   1062 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1063 		error = ixgbe_allocate_msix(adapter, pa);
   1064 		if (error) {
   1065 			/* Free allocated queue structures first */
   1066 			ixgbe_free_transmit_structures(adapter);
   1067 			ixgbe_free_receive_structures(adapter);
   1068 			free(adapter->queues, M_DEVBUF);
   1069 
   1070 			/* Fallback to legacy interrupt */
   1071 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1072 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1073 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1074 			adapter->num_queues = 1;
   1075 
   1076 			/* Allocate our TX/RX Queues again */
   1077 			if (ixgbe_allocate_queues(adapter)) {
   1078 				error = ENOMEM;
   1079 				goto err_out;
   1080 			}
   1081 		}
   1082 	}
   1083 	/* Recovery mode */
   1084 	switch (adapter->hw.mac.type) {
   1085 	case ixgbe_mac_X550:
   1086 	case ixgbe_mac_X550EM_x:
   1087 	case ixgbe_mac_X550EM_a:
   1088 		/* >= 2.00 */
   1089 		if (hw->eeprom.nvm_image_ver_high >= 2) {
   1090 			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
   1091 			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
   1092 		}
   1093 		break;
   1094 	default:
   1095 		break;
   1096 	}
   1097 
   1098 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1099 		error = ixgbe_allocate_legacy(adapter, pa);
   1100 	if (error)
   1101 		goto err_late;
   1102 
   1103 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1104 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1105 	    ixgbe_handle_link, adapter);
   1106 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1107 	    ixgbe_handle_mod, adapter);
   1108 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1109 	    ixgbe_handle_msf, adapter);
   1110 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1111 	    ixgbe_handle_phy, adapter);
   1112 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1113 		adapter->fdir_si =
   1114 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1115 			ixgbe_reinit_fdir, adapter);
   1116 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1117 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1118 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1119 		&& (adapter->fdir_si == NULL))) {
   1120 		aprint_error_dev(dev,
   1121 		    "could not establish software interrupts ()\n");
   1122 		goto err_out;
   1123 	}
   1124 
   1125 	error = ixgbe_start_hw(hw);
   1126 	switch (error) {
   1127 	case IXGBE_ERR_EEPROM_VERSION:
   1128 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1129 		    "LOM.  Please be aware there may be issues associated "
   1130 		    "with your hardware.\nIf you are experiencing problems "
   1131 		    "please contact your Intel or hardware representative "
   1132 		    "who provided you with this hardware.\n");
   1133 		break;
   1134 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1135 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1136 		error = EIO;
   1137 		goto err_late;
   1138 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1139 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1140 		/* falls thru */
   1141 	default:
   1142 		break;
   1143 	}
   1144 
   1145 	/* Setup OS specific network interface */
   1146 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1147 		goto err_late;
   1148 
   1149 	/*
   1150 	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
   1151 	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
   1152 	 */
   1153 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1154 		uint16_t id1, id2;
   1155 		int oui, model, rev;
   1156 		const char *descr;
   1157 
   1158 		id1 = hw->phy.id >> 16;
   1159 		id2 = hw->phy.id & 0xffff;
   1160 		oui = MII_OUI(id1, id2);
   1161 		model = MII_MODEL(id2);
   1162 		rev = MII_REV(id2);
   1163 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1164 			aprint_normal_dev(dev,
   1165 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1166 			    descr, oui, model, rev);
   1167 		else
   1168 			aprint_normal_dev(dev,
   1169 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1170 			    oui, model, rev);
   1171 	}
   1172 
   1173 	/* Enable the optics for 82599 SFP+ fiber */
   1174 	ixgbe_enable_tx_laser(hw);
   1175 
   1176 	/* Enable EEE power saving */
   1177 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   1178 		hw->mac.ops.setup_eee(hw,
   1179 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   1180 
   1181 	/* Enable power to the phy. */
   1182 	ixgbe_set_phy_power(hw, TRUE);
   1183 
   1184 	/* Initialize statistics */
   1185 	ixgbe_update_stats_counters(adapter);
   1186 
   1187 	/* Check PCIE slot type/speed/width */
   1188 	ixgbe_get_slot_info(adapter);
   1189 
   1190 	/*
   1191 	 * Do time init and sysctl init here, but
   1192 	 * only on the first port of a bypass adapter.
   1193 	 */
   1194 	ixgbe_bypass_init(adapter);
   1195 
   1196 	/* Set an initial dmac value */
   1197 	adapter->dmac = 0;
   1198 	/* Set initial advertised speeds (if applicable) */
   1199 	adapter->advertise = ixgbe_get_advertise(adapter);
   1200 
   1201 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1202 		ixgbe_define_iov_schemas(dev, &error);
   1203 
   1204 	/* Add sysctls */
   1205 	ixgbe_add_device_sysctls(adapter);
   1206 	ixgbe_add_hw_stats(adapter);
   1207 
   1208 	/* For Netmap */
   1209 	adapter->init_locked = ixgbe_init_locked;
   1210 	adapter->stop_locked = ixgbe_stop;
   1211 
   1212 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1213 		ixgbe_netmap_attach(adapter);
   1214 
   1215 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1216 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1217 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1218 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1219 
   1220 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1221 		pmf_class_network_register(dev, adapter->ifp);
   1222 	else
   1223 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1224 
   1225 	/* Init recovery mode timer and state variable */
   1226 	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
   1227 		adapter->recovery_mode = 0;
   1228 
   1229 		/* Set up the timer callout */
   1230 		callout_init(&adapter->recovery_mode_timer,
   1231 		    IXGBE_CALLOUT_FLAGS);
   1232 
   1233 		/* Start the task */
   1234 		callout_reset(&adapter->recovery_mode_timer, hz,
   1235 		    ixgbe_recovery_mode_timer, adapter);
   1236 	}
   1237 
   1238 	INIT_DEBUGOUT("ixgbe_attach: end");
   1239 	adapter->osdep.attached = true;
   1240 
   1241 	return;
   1242 
   1243 err_late:
   1244 	ixgbe_free_transmit_structures(adapter);
   1245 	ixgbe_free_receive_structures(adapter);
   1246 	free(adapter->queues, M_DEVBUF);
   1247 err_out:
   1248 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1249 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1250 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1251 	ixgbe_free_softint(adapter);
   1252 	ixgbe_free_pci_resources(adapter);
   1253 	if (adapter->mta != NULL)
   1254 		free(adapter->mta, M_DEVBUF);
   1255 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1256 
   1257 	return;
   1258 } /* ixgbe_attach */
   1259 
   1260 /************************************************************************
   1261  * ixgbe_check_wol_support
   1262  *
   1263  *   Checks whether the adapter's ports are capable of
   1264  *   Wake On LAN by reading the adapter's NVM.
   1265  *
   1266  *   Sets each port's hw->wol_enabled value depending
   1267  *   on the value read here.
   1268  ************************************************************************/
   1269 static void
   1270 ixgbe_check_wol_support(struct adapter *adapter)
   1271 {
   1272 	struct ixgbe_hw *hw = &adapter->hw;
   1273 	u16		dev_caps = 0;
   1274 
   1275 	/* Find out WoL support for port */
   1276 	adapter->wol_support = hw->wol_enabled = 0;
   1277 	ixgbe_get_device_caps(hw, &dev_caps);
   1278 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1279 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1280 	     hw->bus.func == 0))
   1281 		adapter->wol_support = hw->wol_enabled = 1;
   1282 
   1283 	/* Save initial wake up filter configuration */
   1284 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1285 
   1286 	return;
   1287 } /* ixgbe_check_wol_support */
   1288 
   1289 /************************************************************************
   1290  * ixgbe_setup_interface
   1291  *
   1292  *   Setup networking device structure and register an interface.
   1293  ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Fill in the embedded ifnet before registering it. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit unless the legacy-TX feature is enabled. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * NetBSD ifnet bring-up ordering: if_initialize() first,
	 * then ether_ifattach(), then if_register() at the end.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     |	IFCAP_TXCSUM
			     |	IFCAP_TSOv4
			     |	IFCAP_TSOv6;
	/* Checksum offload and TSO are advertised but start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
   1397 
   1398 /************************************************************************
   1399  * ixgbe_add_media_types
   1400  ************************************************************************/
   1401 static void
   1402 ixgbe_add_media_types(struct adapter *adapter)
   1403 {
   1404 	struct ixgbe_hw *hw = &adapter->hw;
   1405 	device_t	dev = adapter->dev;
   1406 	u64		layer;
   1407 
   1408 	layer = adapter->phy_layer;
   1409 
   1410 #define	ADD(mm, dd)							\
   1411 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1412 
   1413 	ADD(IFM_NONE, 0);
   1414 
   1415 	/* Media types with matching NetBSD media defines */
   1416 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1417 		ADD(IFM_10G_T | IFM_FDX, 0);
   1418 	}
   1419 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1420 		ADD(IFM_1000_T | IFM_FDX, 0);
   1421 	}
   1422 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1423 		ADD(IFM_100_TX | IFM_FDX, 0);
   1424 	}
   1425 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1426 		ADD(IFM_10_T | IFM_FDX, 0);
   1427 	}
   1428 
   1429 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1430 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1431 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1432 	}
   1433 
   1434 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1435 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1436 		if (hw->phy.multispeed_fiber) {
   1437 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1438 		}
   1439 	}
   1440 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1441 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1442 		if (hw->phy.multispeed_fiber) {
   1443 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1444 		}
   1445 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1446 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1447 	}
   1448 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1449 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1450 	}
   1451 
   1452 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1453 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1454 	}
   1455 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1456 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1457 	}
   1458 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1459 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1460 	}
   1461 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1462 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1463 	}
   1464 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1465 		ADD(IFM_2500_T | IFM_FDX, 0);
   1466 	}
   1467 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1468 		ADD(IFM_5000_T | IFM_FDX, 0);
   1469 	}
   1470 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1471 		device_printf(dev, "Media supported: 1000baseBX\n");
   1472 	/* XXX no ifmedia_set? */
   1473 
   1474 	ADD(IFM_AUTO, 0);
   1475 
   1476 #undef ADD
   1477 } /* ixgbe_add_media_types */
   1478 
   1479 /************************************************************************
   1480  * ixgbe_is_sfp
   1481  ************************************************************************/
   1482 static inline bool
   1483 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1484 {
   1485 	switch (hw->mac.type) {
   1486 	case ixgbe_mac_82598EB:
   1487 		if (hw->phy.type == ixgbe_phy_nl)
   1488 			return (TRUE);
   1489 		return (FALSE);
   1490 	case ixgbe_mac_82599EB:
   1491 		switch (hw->mac.ops.get_media_type(hw)) {
   1492 		case ixgbe_media_type_fiber:
   1493 		case ixgbe_media_type_fiber_qsfp:
   1494 			return (TRUE);
   1495 		default:
   1496 			return (FALSE);
   1497 		}
   1498 	case ixgbe_mac_X550EM_x:
   1499 	case ixgbe_mac_X550EM_a:
   1500 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1501 			return (TRUE);
   1502 		return (FALSE);
   1503 	default:
   1504 		return (FALSE);
   1505 	}
   1506 } /* ixgbe_is_sfp */
   1507 
   1508 /************************************************************************
   1509  * ixgbe_config_link
   1510  ************************************************************************/
   1511 static void
   1512 ixgbe_config_link(struct adapter *adapter)
   1513 {
   1514 	struct ixgbe_hw *hw = &adapter->hw;
   1515 	u32		autoneg, err = 0;
   1516 	bool		sfp, negotiate = false;
   1517 
   1518 	sfp = ixgbe_is_sfp(hw);
   1519 
   1520 	if (sfp) {
   1521 		if (hw->phy.multispeed_fiber) {
   1522 			ixgbe_enable_tx_laser(hw);
   1523 			kpreempt_disable();
   1524 			softint_schedule(adapter->msf_si);
   1525 			kpreempt_enable();
   1526 		}
   1527 		kpreempt_disable();
   1528 		softint_schedule(adapter->mod_si);
   1529 		kpreempt_enable();
   1530 	} else {
   1531 		struct ifmedia	*ifm = &adapter->media;
   1532 
   1533 		if (hw->mac.ops.check_link)
   1534 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1535 			    &adapter->link_up, FALSE);
   1536 		if (err)
   1537 			return;
   1538 
   1539 		/*
   1540 		 * Check if it's the first call. If it's the first call,
   1541 		 * get value for auto negotiation.
   1542 		 */
   1543 		autoneg = hw->phy.autoneg_advertised;
   1544 		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
   1545 		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
   1546 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1547 			    &negotiate);
   1548 		if (err)
   1549 			return;
   1550 		if (hw->mac.ops.setup_link)
   1551 			err = hw->mac.ops.setup_link(hw, autoneg,
   1552 			    adapter->link_up);
   1553 	}
   1554 
   1555 } /* ixgbe_config_link */
   1556 
   1557 /************************************************************************
   1558  * ixgbe_update_stats_counters - Update board statistics counters.
   1559  ************************************************************************/
   1560 static void
   1561 ixgbe_update_stats_counters(struct adapter *adapter)
   1562 {
   1563 	struct ifnet	      *ifp = adapter->ifp;
   1564 	struct ixgbe_hw	      *hw = &adapter->hw;
   1565 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1566 	u32		      missed_rx = 0, bprc, lxon, lxoff, total;
   1567 	u64		      total_missed_rx = 0;
   1568 	uint64_t	      crcerrs, rlec;
   1569 	unsigned int	      queue_counters;
   1570 	int		      i;
   1571 
   1572 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1573 	stats->crcerrs.ev_count += crcerrs;
   1574 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1575 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1576 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1577 	if (hw->mac.type == ixgbe_mac_X550)
   1578 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1579 
   1580 	/* 16 registers exist */
   1581 	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
   1582 	for (i = 0; i < queue_counters; i++) {
   1583 		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1584 		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1585 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1586 			stats->qprdc[i].ev_count
   1587 			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1588 		}
   1589 	}
   1590 
   1591 	/* 8 registers exist */
   1592 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1593 		uint32_t mp;
   1594 
   1595 		/* MPC */
   1596 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1597 		/* global total per queue */
   1598 		stats->mpc[i].ev_count += mp;
   1599 		/* running comprehensive total for stats display */
   1600 		total_missed_rx += mp;
   1601 
   1602 		if (hw->mac.type == ixgbe_mac_82598EB)
   1603 			stats->rnbc[i].ev_count
   1604 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1605 
   1606 		stats->pxontxc[i].ev_count
   1607 		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   1608 		stats->pxofftxc[i].ev_count
   1609 		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   1610 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1611 			stats->pxonrxc[i].ev_count
   1612 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
   1613 			stats->pxoffrxc[i].ev_count
   1614 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
   1615 			stats->pxon2offc[i].ev_count
   1616 			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   1617 		} else {
   1618 			stats->pxonrxc[i].ev_count
   1619 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   1620 			stats->pxoffrxc[i].ev_count
   1621 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   1622 		}
   1623 	}
   1624 	stats->mpctotal.ev_count += total_missed_rx;
   1625 
   1626 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1627 	if ((adapter->link_active == LINK_STATE_UP)
   1628 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1629 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1630 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1631 	}
   1632 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1633 	stats->rlec.ev_count += rlec;
   1634 
   1635 	/* Hardware workaround, gprc counts missed packets */
   1636 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1637 
   1638 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1639 	stats->lxontxc.ev_count += lxon;
   1640 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1641 	stats->lxofftxc.ev_count += lxoff;
   1642 	total = lxon + lxoff;
   1643 
   1644 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1645 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1646 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1647 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1648 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1649 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1650 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1651 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1652 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1653 	} else {
   1654 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1655 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1656 		/* 82598 only has a counter in the high register */
   1657 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1658 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1659 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1660 	}
   1661 
   1662 	/*
   1663 	 * Workaround: mprc hardware is incorrectly counting
   1664 	 * broadcasts, so for now we subtract those.
   1665 	 */
   1666 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1667 	stats->bprc.ev_count += bprc;
   1668 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1669 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1670 
   1671 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1672 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1673 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1674 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1675 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1676 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1677 
   1678 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1679 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1680 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1681 
   1682 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1683 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1684 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1685 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1686 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1687 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1688 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1689 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1690 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1691 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1692 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1693 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1694 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1695 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1696 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1697 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1698 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1699 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1700 	/* Only read FCOE on 82599 */
   1701 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1702 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1703 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1704 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1705 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1706 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1707 	}
   1708 
   1709 	/* Fill out the OS statistics structure */
   1710 	/*
   1711 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1712 	 * adapter->stats counters. It's required to make ifconfig -z
   1713 	 * (SOICZIFDATA) work.
   1714 	 */
   1715 	ifp->if_collisions = 0;
   1716 
   1717 	/* Rx Errors */
   1718 	ifp->if_iqdrops += total_missed_rx;
   1719 	ifp->if_ierrors += crcerrs + rlec;
   1720 } /* ixgbe_update_stats_counters */
   1721 
   1722 /************************************************************************
   1723  * ixgbe_add_hw_stats
   1724  *
   1725  *   Add sysctl variables, one per statistic, to the system.
   1726  ************************************************************************/
   1727 static void
   1728 ixgbe_add_hw_stats(struct adapter *adapter)
   1729 {
   1730 	device_t dev = adapter->dev;
   1731 	const struct sysctlnode *rnode, *cnode;
   1732 	struct sysctllog **log = &adapter->sysctllog;
   1733 	struct tx_ring *txr = adapter->tx_rings;
   1734 	struct rx_ring *rxr = adapter->rx_rings;
   1735 	struct ixgbe_hw *hw = &adapter->hw;
   1736 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1737 	const char *xname = device_xname(dev);
   1738 	int i;
   1739 
   1740 	/* Driver Statistics */
   1741 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1742 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1743 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1744 	    NULL, xname, "m_defrag() failed");
   1745 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1746 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1747 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1748 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1749 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1750 	    NULL, xname, "Driver tx dma hard fail other");
   1751 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1752 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1753 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1754 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1755 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1756 	    NULL, xname, "Watchdog timeouts");
   1757 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1758 	    NULL, xname, "TSO errors");
   1759 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1760 	    NULL, xname, "Link MSI-X IRQ Handled");
   1761 	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
   1762 	    NULL, xname, "Link softint");
   1763 	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
   1764 	    NULL, xname, "module softint");
   1765 	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
   1766 	    NULL, xname, "multimode softint");
   1767 	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
   1768 	    NULL, xname, "external PHY softint");
   1769 
   1770 	/* Max number of traffic class is 8 */
   1771 	KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
   1772 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1773 		snprintf(adapter->tcs[i].evnamebuf,
   1774 		    sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
   1775 		    xname, i);
   1776 		if (i < __arraycount(stats->mpc)) {
   1777 			evcnt_attach_dynamic(&stats->mpc[i],
   1778 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1779 			    "RX Missed Packet Count");
   1780 			if (hw->mac.type == ixgbe_mac_82598EB)
   1781 				evcnt_attach_dynamic(&stats->rnbc[i],
   1782 				    EVCNT_TYPE_MISC, NULL,
   1783 				    adapter->tcs[i].evnamebuf,
   1784 				    "Receive No Buffers");
   1785 		}
   1786 		if (i < __arraycount(stats->pxontxc)) {
   1787 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1788 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1789 			    "pxontxc");
   1790 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1791 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1792 			    "pxonrxc");
   1793 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1794 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1795 			    "pxofftxc");
   1796 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1797 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1798 			    "pxoffrxc");
   1799 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1800 				evcnt_attach_dynamic(&stats->pxon2offc[i],
   1801 				    EVCNT_TYPE_MISC, NULL,
   1802 				    adapter->tcs[i].evnamebuf,
   1803 			    "pxon2offc");
   1804 		}
   1805 	}
   1806 
   1807 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1808 #ifdef LRO
   1809 		struct lro_ctrl *lro = &rxr->lro;
   1810 #endif /* LRO */
   1811 
   1812 		snprintf(adapter->queues[i].evnamebuf,
   1813 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1814 		    xname, i);
   1815 		snprintf(adapter->queues[i].namebuf,
   1816 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1817 
   1818 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1819 			aprint_error_dev(dev, "could not create sysctl root\n");
   1820 			break;
   1821 		}
   1822 
   1823 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1824 		    0, CTLTYPE_NODE,
   1825 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1826 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1827 			break;
   1828 
   1829 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1830 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1831 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1832 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1833 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1834 			break;
   1835 
   1836 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1837 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1838 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1839 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1840 		    0, CTL_CREATE, CTL_EOL) != 0)
   1841 			break;
   1842 
   1843 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1844 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1845 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1846 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1847 		    0, CTL_CREATE, CTL_EOL) != 0)
   1848 			break;
   1849 
   1850 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1851 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1852 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   1853 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1854 		    "Handled queue in softint");
   1855 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   1856 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   1857 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1858 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1859 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1860 		    NULL, adapter->queues[i].evnamebuf,
   1861 		    "Queue No Descriptor Available");
   1862 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1863 		    NULL, adapter->queues[i].evnamebuf,
   1864 		    "Queue Packets Transmitted");
   1865 #ifndef IXGBE_LEGACY_TX
   1866 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1867 		    NULL, adapter->queues[i].evnamebuf,
   1868 		    "Packets dropped in pcq");
   1869 #endif
   1870 
   1871 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1872 		    CTLFLAG_READONLY,
   1873 		    CTLTYPE_INT,
   1874 		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
   1875 			ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   1876 		    CTL_CREATE, CTL_EOL) != 0)
   1877 			break;
   1878 
   1879 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1880 		    CTLFLAG_READONLY,
   1881 		    CTLTYPE_INT,
   1882 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1883 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1884 		    CTL_CREATE, CTL_EOL) != 0)
   1885 			break;
   1886 
   1887 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1888 		    CTLFLAG_READONLY,
   1889 		    CTLTYPE_INT,
   1890 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1891 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1892 		    CTL_CREATE, CTL_EOL) != 0)
   1893 			break;
   1894 
   1895 		if (i < __arraycount(stats->qprc)) {
   1896 			evcnt_attach_dynamic(&stats->qprc[i],
   1897 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1898 			    "qprc");
   1899 			evcnt_attach_dynamic(&stats->qptc[i],
   1900 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1901 			    "qptc");
   1902 			evcnt_attach_dynamic(&stats->qbrc[i],
   1903 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1904 			    "qbrc");
   1905 			evcnt_attach_dynamic(&stats->qbtc[i],
   1906 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1907 			    "qbtc");
   1908 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1909 				evcnt_attach_dynamic(&stats->qprdc[i],
   1910 				    EVCNT_TYPE_MISC, NULL,
   1911 				    adapter->queues[i].evnamebuf, "qprdc");
   1912 		}
   1913 
   1914 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1915 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1916 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1917 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1918 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1919 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1920 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1921 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1922 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1923 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1924 #ifdef LRO
   1925 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1926 				CTLFLAG_RD, &lro->lro_queued, 0,
   1927 				"LRO Queued");
   1928 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1929 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1930 				"LRO Flushed");
   1931 #endif /* LRO */
   1932 	}
   1933 
   1934 	/* MAC stats get their own sub node */
   1935 
   1936 	snprintf(stats->namebuf,
   1937 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1938 
   1939 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1940 	    stats->namebuf, "rx csum offload - IP");
   1941 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "rx csum offload - L4");
   1943 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "rx csum offload - IP bad");
   1945 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "rx csum offload - L4 bad");
   1947 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "Interrupt conditions zero");
   1949 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1950 	    stats->namebuf, "Legacy interrupts");
   1951 
   1952 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1953 	    stats->namebuf, "CRC Errors");
   1954 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1955 	    stats->namebuf, "Illegal Byte Errors");
   1956 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1957 	    stats->namebuf, "Byte Errors");
   1958 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1959 	    stats->namebuf, "MAC Short Packets Discarded");
   1960 	if (hw->mac.type >= ixgbe_mac_X550)
   1961 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1962 		    stats->namebuf, "Bad SFD");
   1963 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1964 	    stats->namebuf, "Total Packets Missed");
   1965 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1966 	    stats->namebuf, "MAC Local Faults");
   1967 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1968 	    stats->namebuf, "MAC Remote Faults");
   1969 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1970 	    stats->namebuf, "Receive Length Errors");
   1971 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1972 	    stats->namebuf, "Link XON Transmitted");
   1973 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1974 	    stats->namebuf, "Link XON Received");
   1975 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1976 	    stats->namebuf, "Link XOFF Transmitted");
   1977 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1978 	    stats->namebuf, "Link XOFF Received");
   1979 
   1980 	/* Packet Reception Stats */
   1981 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1982 	    stats->namebuf, "Total Octets Received");
   1983 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1984 	    stats->namebuf, "Good Octets Received");
   1985 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1986 	    stats->namebuf, "Total Packets Received");
   1987 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1988 	    stats->namebuf, "Good Packets Received");
   1989 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1990 	    stats->namebuf, "Multicast Packets Received");
   1991 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1992 	    stats->namebuf, "Broadcast Packets Received");
   1993 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1994 	    stats->namebuf, "64 byte frames received ");
   1995 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1996 	    stats->namebuf, "65-127 byte frames received");
   1997 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1998 	    stats->namebuf, "128-255 byte frames received");
   1999 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   2000 	    stats->namebuf, "256-511 byte frames received");
   2001 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   2002 	    stats->namebuf, "512-1023 byte frames received");
   2003 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   2004 	    stats->namebuf, "1023-1522 byte frames received");
   2005 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   2006 	    stats->namebuf, "Receive Undersized");
   2007 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   2008 	    stats->namebuf, "Fragmented Packets Received ");
   2009 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   2010 	    stats->namebuf, "Oversized Packets Received");
   2011 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   2012 	    stats->namebuf, "Received Jabber");
   2013 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   2014 	    stats->namebuf, "Management Packets Received");
   2015 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   2016 	    stats->namebuf, "Management Packets Dropped");
   2017 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   2018 	    stats->namebuf, "Checksum Errors");
   2019 
   2020 	/* Packet Transmission Stats */
   2021 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   2022 	    stats->namebuf, "Good Octets Transmitted");
   2023 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   2024 	    stats->namebuf, "Total Packets Transmitted");
   2025 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   2026 	    stats->namebuf, "Good Packets Transmitted");
   2027 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   2028 	    stats->namebuf, "Broadcast Packets Transmitted");
   2029 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   2030 	    stats->namebuf, "Multicast Packets Transmitted");
   2031 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   2032 	    stats->namebuf, "Management Packets Transmitted");
   2033 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   2034 	    stats->namebuf, "64 byte frames transmitted ");
   2035 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   2036 	    stats->namebuf, "65-127 byte frames transmitted");
   2037 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   2038 	    stats->namebuf, "128-255 byte frames transmitted");
   2039 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   2040 	    stats->namebuf, "256-511 byte frames transmitted");
   2041 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   2042 	    stats->namebuf, "512-1023 byte frames transmitted");
   2043 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   2044 	    stats->namebuf, "1024-1522 byte frames transmitted");
   2045 } /* ixgbe_add_hw_stats */
   2046 
   2047 static void
   2048 ixgbe_clear_evcnt(struct adapter *adapter)
   2049 {
   2050 	struct tx_ring *txr = adapter->tx_rings;
   2051 	struct rx_ring *rxr = adapter->rx_rings;
   2052 	struct ixgbe_hw *hw = &adapter->hw;
   2053 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   2054 	int i;
   2055 
   2056 	adapter->efbig_tx_dma_setup.ev_count = 0;
   2057 	adapter->mbuf_defrag_failed.ev_count = 0;
   2058 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   2059 	adapter->einval_tx_dma_setup.ev_count = 0;
   2060 	adapter->other_tx_dma_setup.ev_count = 0;
   2061 	adapter->eagain_tx_dma_setup.ev_count = 0;
   2062 	adapter->enomem_tx_dma_setup.ev_count = 0;
   2063 	adapter->tso_err.ev_count = 0;
   2064 	adapter->watchdog_events.ev_count = 0;
   2065 	adapter->link_irq.ev_count = 0;
   2066 	adapter->link_sicount.ev_count = 0;
   2067 	adapter->mod_sicount.ev_count = 0;
   2068 	adapter->msf_sicount.ev_count = 0;
   2069 	adapter->phy_sicount.ev_count = 0;
   2070 
   2071 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   2072 		if (i < __arraycount(stats->mpc)) {
   2073 			stats->mpc[i].ev_count = 0;
   2074 			if (hw->mac.type == ixgbe_mac_82598EB)
   2075 				stats->rnbc[i].ev_count = 0;
   2076 		}
   2077 		if (i < __arraycount(stats->pxontxc)) {
   2078 			stats->pxontxc[i].ev_count = 0;
   2079 			stats->pxonrxc[i].ev_count = 0;
   2080 			stats->pxofftxc[i].ev_count = 0;
   2081 			stats->pxoffrxc[i].ev_count = 0;
   2082 			if (hw->mac.type >= ixgbe_mac_82599EB)
   2083 				stats->pxon2offc[i].ev_count = 0;
   2084 		}
   2085 	}
   2086 
   2087 	txr = adapter->tx_rings;
   2088 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2089 		adapter->queues[i].irqs.ev_count = 0;
   2090 		adapter->queues[i].handleq.ev_count = 0;
   2091 		adapter->queues[i].req.ev_count = 0;
   2092 		txr->no_desc_avail.ev_count = 0;
   2093 		txr->total_packets.ev_count = 0;
   2094 		txr->tso_tx.ev_count = 0;
   2095 #ifndef IXGBE_LEGACY_TX
   2096 		txr->pcq_drops.ev_count = 0;
   2097 #endif
   2098 		txr->q_efbig_tx_dma_setup = 0;
   2099 		txr->q_mbuf_defrag_failed = 0;
   2100 		txr->q_efbig2_tx_dma_setup = 0;
   2101 		txr->q_einval_tx_dma_setup = 0;
   2102 		txr->q_other_tx_dma_setup = 0;
   2103 		txr->q_eagain_tx_dma_setup = 0;
   2104 		txr->q_enomem_tx_dma_setup = 0;
   2105 		txr->q_tso_err = 0;
   2106 
   2107 		if (i < __arraycount(stats->qprc)) {
   2108 			stats->qprc[i].ev_count = 0;
   2109 			stats->qptc[i].ev_count = 0;
   2110 			stats->qbrc[i].ev_count = 0;
   2111 			stats->qbtc[i].ev_count = 0;
   2112 			if (hw->mac.type >= ixgbe_mac_82599EB)
   2113 				stats->qprdc[i].ev_count = 0;
   2114 		}
   2115 
   2116 		rxr->rx_packets.ev_count = 0;
   2117 		rxr->rx_bytes.ev_count = 0;
   2118 		rxr->rx_copies.ev_count = 0;
   2119 		rxr->no_jmbuf.ev_count = 0;
   2120 		rxr->rx_discarded.ev_count = 0;
   2121 	}
   2122 	stats->ipcs.ev_count = 0;
   2123 	stats->l4cs.ev_count = 0;
   2124 	stats->ipcs_bad.ev_count = 0;
   2125 	stats->l4cs_bad.ev_count = 0;
   2126 	stats->intzero.ev_count = 0;
   2127 	stats->legint.ev_count = 0;
   2128 	stats->crcerrs.ev_count = 0;
   2129 	stats->illerrc.ev_count = 0;
   2130 	stats->errbc.ev_count = 0;
   2131 	stats->mspdc.ev_count = 0;
   2132 	stats->mbsdc.ev_count = 0;
   2133 	stats->mpctotal.ev_count = 0;
   2134 	stats->mlfc.ev_count = 0;
   2135 	stats->mrfc.ev_count = 0;
   2136 	stats->rlec.ev_count = 0;
   2137 	stats->lxontxc.ev_count = 0;
   2138 	stats->lxonrxc.ev_count = 0;
   2139 	stats->lxofftxc.ev_count = 0;
   2140 	stats->lxoffrxc.ev_count = 0;
   2141 
   2142 	/* Packet Reception Stats */
   2143 	stats->tor.ev_count = 0;
   2144 	stats->gorc.ev_count = 0;
   2145 	stats->tpr.ev_count = 0;
   2146 	stats->gprc.ev_count = 0;
   2147 	stats->mprc.ev_count = 0;
   2148 	stats->bprc.ev_count = 0;
   2149 	stats->prc64.ev_count = 0;
   2150 	stats->prc127.ev_count = 0;
   2151 	stats->prc255.ev_count = 0;
   2152 	stats->prc511.ev_count = 0;
   2153 	stats->prc1023.ev_count = 0;
   2154 	stats->prc1522.ev_count = 0;
   2155 	stats->ruc.ev_count = 0;
   2156 	stats->rfc.ev_count = 0;
   2157 	stats->roc.ev_count = 0;
   2158 	stats->rjc.ev_count = 0;
   2159 	stats->mngprc.ev_count = 0;
   2160 	stats->mngpdc.ev_count = 0;
   2161 	stats->xec.ev_count = 0;
   2162 
   2163 	/* Packet Transmission Stats */
   2164 	stats->gotc.ev_count = 0;
   2165 	stats->tpt.ev_count = 0;
   2166 	stats->gptc.ev_count = 0;
   2167 	stats->bptc.ev_count = 0;
   2168 	stats->mptc.ev_count = 0;
   2169 	stats->mngptc.ev_count = 0;
   2170 	stats->ptc64.ev_count = 0;
   2171 	stats->ptc127.ev_count = 0;
   2172 	stats->ptc255.ev_count = 0;
   2173 	stats->ptc511.ev_count = 0;
   2174 	stats->ptc1023.ev_count = 0;
   2175 	stats->ptc1522.ev_count = 0;
   2176 }
   2177 
   2178 /************************************************************************
   2179  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2180  *
   2181  *   Retrieves the TDH value from the hardware
   2182  ************************************************************************/
   2183 static int
   2184 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2185 {
   2186 	struct sysctlnode node = *rnode;
   2187 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2188 	struct adapter *adapter;
   2189 	uint32_t val;
   2190 
   2191 	if (!txr)
   2192 		return (0);
   2193 
   2194 	adapter = txr->adapter;
   2195 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2196 		return (EPERM);
   2197 
   2198 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
   2199 	node.sysctl_data = &val;
   2200 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2201 } /* ixgbe_sysctl_tdh_handler */
   2202 
   2203 /************************************************************************
   2204  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2205  *
   2206  *   Retrieves the TDT value from the hardware
   2207  ************************************************************************/
   2208 static int
   2209 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2210 {
   2211 	struct sysctlnode node = *rnode;
   2212 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2213 	struct adapter *adapter;
   2214 	uint32_t val;
   2215 
   2216 	if (!txr)
   2217 		return (0);
   2218 
   2219 	adapter = txr->adapter;
   2220 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2221 		return (EPERM);
   2222 
   2223 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
   2224 	node.sysctl_data = &val;
   2225 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2226 } /* ixgbe_sysctl_tdt_handler */
   2227 
   2228 /************************************************************************
   2229  * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
   2230  * handler function
   2231  *
   2232  *   Retrieves the next_to_check value
   2233  ************************************************************************/
   2234 static int
   2235 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2236 {
   2237 	struct sysctlnode node = *rnode;
   2238 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2239 	struct adapter *adapter;
   2240 	uint32_t val;
   2241 
   2242 	if (!rxr)
   2243 		return (0);
   2244 
   2245 	adapter = rxr->adapter;
   2246 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2247 		return (EPERM);
   2248 
   2249 	val = rxr->next_to_check;
   2250 	node.sysctl_data = &val;
   2251 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2252 } /* ixgbe_sysctl_next_to_check_handler */
   2253 
   2254 /************************************************************************
   2255  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2256  *
   2257  *   Retrieves the RDH value from the hardware
   2258  ************************************************************************/
   2259 static int
   2260 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2261 {
   2262 	struct sysctlnode node = *rnode;
   2263 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2264 	struct adapter *adapter;
   2265 	uint32_t val;
   2266 
   2267 	if (!rxr)
   2268 		return (0);
   2269 
   2270 	adapter = rxr->adapter;
   2271 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2272 		return (EPERM);
   2273 
   2274 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
   2275 	node.sysctl_data = &val;
   2276 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2277 } /* ixgbe_sysctl_rdh_handler */
   2278 
   2279 /************************************************************************
   2280  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2281  *
   2282  *   Retrieves the RDT value from the hardware
   2283  ************************************************************************/
   2284 static int
   2285 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2286 {
   2287 	struct sysctlnode node = *rnode;
   2288 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2289 	struct adapter *adapter;
   2290 	uint32_t val;
   2291 
   2292 	if (!rxr)
   2293 		return (0);
   2294 
   2295 	adapter = rxr->adapter;
   2296 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2297 		return (EPERM);
   2298 
   2299 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
   2300 	node.sysctl_data = &val;
   2301 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2302 } /* ixgbe_sysctl_rdt_handler */
   2303 
   2304 static int
   2305 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
   2306 {
   2307 	struct ifnet *ifp = &ec->ec_if;
   2308 	int rv;
   2309 
   2310 	if (set)
   2311 		rv = ixgbe_register_vlan(ifp->if_softc, ifp, vid);
   2312 	else
   2313 		rv = ixgbe_unregister_vlan(ifp->if_softc, ifp, vid);
   2314 
   2315 	return rv;
   2316 }
   2317 
   2318 /************************************************************************
   2319  * ixgbe_register_vlan
   2320  *
   2321  *   Run via vlan config EVENT, it enables us to use the
   2322  *   HW Filter table since we can get the vlan id. This
   2323  *   just creates the entry in the soft version of the
   2324  *   VFTA, init will repopulate the real table.
   2325  ************************************************************************/
   2326 static int
   2327 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2328 {
   2329 	struct adapter	*adapter = ifp->if_softc;
   2330 	u16		index, bit;
   2331 	int		error;
   2332 
   2333 	if (ifp->if_softc != arg)   /* Not our event */
   2334 		return EINVAL;
   2335 
   2336 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2337 		return EINVAL;
   2338 
   2339 	IXGBE_CORE_LOCK(adapter);
   2340 	index = (vtag >> 5) & 0x7F;
   2341 	bit = vtag & 0x1F;
   2342 	adapter->shadow_vfta[index] |= ((u32)1 << bit);
   2343 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
   2344 	    true);
   2345 	IXGBE_CORE_UNLOCK(adapter);
   2346 	if (error != 0)
   2347 		error = EACCES;
   2348 
   2349 	return error;
   2350 } /* ixgbe_register_vlan */
   2351 
   2352 /************************************************************************
   2353  * ixgbe_unregister_vlan
   2354  *
   2355  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2356  ************************************************************************/
   2357 static int
   2358 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2359 {
   2360 	struct adapter	*adapter = ifp->if_softc;
   2361 	u16		index, bit;
   2362 	int		error;
   2363 
   2364 	if (ifp->if_softc != arg)
   2365 		return EINVAL;
   2366 
   2367 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2368 		return EINVAL;
   2369 
   2370 	IXGBE_CORE_LOCK(adapter);
   2371 	index = (vtag >> 5) & 0x7F;
   2372 	bit = vtag & 0x1F;
   2373 	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
   2374 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
   2375 	    true);
   2376 	IXGBE_CORE_UNLOCK(adapter);
   2377 	if (error != 0)
   2378 		error = EACCES;
   2379 
   2380 	return error;
   2381 } /* ixgbe_unregister_vlan */
   2382 
/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 *   Program the hardware VLAN state (tag stripping and the VLAN
 *   filter table) to match the current ethercom configuration.
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	int		i;
	u32		ctrl;
	struct vlanid_list *vlanidp;
	bool		hwtagging;

	/*
	 *  This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/* Enable HW tagging only if any vlan is attached */
	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
	    && VLAN_ATTACHED(ec);

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/*
		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
		 */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			if (hwtagging)
				ctrl |= IXGBE_RXDCTL_VME;
			else
				ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		/* Remember per-ring whether hardware strips the tag. */
		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
	}

	/* Cleanup shadow_vfta */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0;
	/* Generate shadow_vfta from ec_vids */
	mutex_enter(ec->ec_lock);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		uint32_t idx;

		/* Each VFTA word covers 32 consecutive VLAN IDs. */
		idx = vlanidp->vid / 32;
		KASSERT(idx < IXGBE_VFTA_SIZE);
		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
	}
	mutex_exit(ec->ec_lock);
	/* Push the rebuilt shadow table into the hardware VFTA. */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
		ctrl |= IXGBE_VLNCTRL_VFE;
	else
		ctrl &= ~IXGBE_VLNCTRL_VFE;
	/* VLAN hw tagging for 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598 has a single global VME bit instead of per-queue. */
		if (hwtagging)
			ctrl |= IXGBE_VLNCTRL_VME;
		else
			ctrl &= ~IXGBE_VLNCTRL_VME;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
   2451 
   2452 /************************************************************************
   2453  * ixgbe_get_slot_info
   2454  *
   2455  *   Get the width and transaction speed of
   2456  *   the slot this adapter is plugged into.
   2457  ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct ixgbe_hw		*hw = &adapter->hw;
	u32		      offset;
	u16			link;
	int		      bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		/* Quad-port parts: must walk up the PCI tree instead. */
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Integrated MACs: nothing useful to print. */
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	/* Decode negotiated link speed/width into hw->bus. */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)	? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000)	? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500)	? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot is too slow/narrow for full line rate. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
			(hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
			(hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
   2559 
   2560 /************************************************************************
   2561  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2562  ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64		queue = 1ULL << vector;
	u32		mask;

	/*
	 * Disable calls may nest (disabled_count), so only unmask the
	 * interrupt when this enable balances the outermost disable.
	 */
	mutex_enter(&que->dc_mtx);
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single 32-bit EIMS register. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Later MACs: 64 vectors split across EIMS_EX[0] and [1]. */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
   2589 
   2590 /************************************************************************
   2591  * ixgbe_disable_queue_internal
   2592  ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64		queue = 1ULL << vector;
	u32		mask;

	mutex_enter(&que->dc_mtx);

	/*
	 * Already masked: just bump the nesting count (when nesting is
	 * allowed) without touching the hardware again.
	 */
	if (que->disabled_count > 0) {
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: single 32-bit EIMC register. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* Later MACs: 64 vectors split across EIMC_EX[0] and [1]. */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
   2624 
   2625 /************************************************************************
   2626  * ixgbe_disable_queue
   2627  ************************************************************************/
/* Mask a queue's interrupt, allowing nested disable calls. */
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
   2634 
   2635 /************************************************************************
   2636  * ixgbe_sched_handle_que - schedule deferred packet processing
   2637  ************************************************************************/
   2638 static inline void
   2639 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
   2640 {
   2641 
   2642 	if (que->txrx_use_workqueue) {
   2643 		/*
   2644 		 * adapter->que_wq is bound to each CPU instead of
   2645 		 * each NIC queue to reduce workqueue kthread. As we
   2646 		 * should consider about interrupt affinity in this
   2647 		 * function, the workqueue kthread must be WQ_PERCPU.
   2648 		 * If create WQ_PERCPU workqueue kthread for each NIC
   2649 		 * queue, that number of created workqueue kthread is
   2650 		 * (number of used NIC queue) * (number of CPUs) =
   2651 		 * (number of CPUs) ^ 2 most often.
   2652 		 *
   2653 		 * The same NIC queue's interrupts are avoided by
   2654 		 * masking the queue's interrupt. And different
   2655 		 * NIC queue's interrupts use different struct work
   2656 		 * (que->wq_cookie). So, "enqueued flag" to avoid
   2657 		 * twice workqueue_enqueue() is not required .
   2658 		 */
   2659 		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
   2660 	} else {
   2661 		softint_schedule(que->que_si);
   2662 	}
   2663 }
   2664 
   2665 /************************************************************************
   2666  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2667  ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector until deferred processing re-enables it. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	/* TX completions are cheap enough to reap here. */
	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Use the larger of the TX and RX average packet sizes. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * More work pending: defer to softint/workqueue (which will
	 * re-enable the vector when done); otherwise unmask it now.
	 */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
   2766 
   2767 /************************************************************************
   2768  * ixgbe_media_status - Media Ioctl callback
   2769  *
   2770  *   Called whenever the user queries the status of
   2771  *   the interface using ifconfig.
   2772  ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting it. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Map (physical layer, link speed) to the ifmedia subtype. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	/* Derive if_baudrate from the media word just built. */
	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
   2914 
   2915 /************************************************************************
   2916  * ixgbe_media_change - Media Ioctl callback
   2917  *
   2918  *   Called when the user changes speed/duplex using
   2919  *   media/mediopt option with ifconfig.
   2920  ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter	 *adapter = ifp->if_softc;
	struct ifmedia	 *ifm = &adapter->media;
	struct ixgbe_hw	 *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be changed from software. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested ifmedia subtype to a link-speed mask. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the PHY supports. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the advertised speeds in the adapter's bitmap:
	 * bit 0 = 100M, 1 = 1G, 2 = 10G, 3 = 10M, 4 = 2.5G, 5 = 5G.
	 * 0 means "autoselect".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
   3020 
   3021 /************************************************************************
   3022  * ixgbe_set_promisc
   3023  ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int	     mcnt = 0;
	u32	     rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from unicast promiscuous off; re-add below if needed. */
	rctl &= (~IXGBE_FCTRL_UPE);
	ETHER_LOCK(ec);
	if (ec->ec_flags & ETHER_F_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count multicast entries, capped at the filter capacity. */
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
	}
	/* Filter table can hold them all: no multicast promiscuous. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		/* Full promiscuous: accept all unicast and multicast. */
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		/* All-multicast only: MPE on, UPE off. */
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
	ETHER_UNLOCK(ec);
} /* ixgbe_set_promisc */
   3063 
   3064 /************************************************************************
   3065  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   3066  ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		eicr, eicr_mask;
	s32		retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the softint handler re-enables it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Flow Director table reinit request */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599: multispeed fiber interrupt on SDP1 */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
   3188 
   3189 static void
   3190 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   3191 {
   3192 
   3193 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3194 		itr |= itr << 16;
   3195 	else
   3196 		itr |= IXGBE_EITR_CNT_WDIS;
   3197 
   3198 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
   3199 }
   3200 
   3201 
   3202 /************************************************************************
   3203  * ixgbe_sysctl_interrupt_rate_handler
   3204  ************************************************************************/
   3205 static int
   3206 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   3207 {
   3208 	struct sysctlnode node = *rnode;
   3209 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   3210 	struct adapter	*adapter;
   3211 	uint32_t reg, usec, rate;
   3212 	int error;
   3213 
   3214 	if (que == NULL)
   3215 		return 0;
   3216 
   3217 	adapter = que->adapter;
   3218 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   3219 		return (EPERM);
   3220 
   3221 	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
   3222 	usec = ((reg & 0x0FF8) >> 3);
   3223 	if (usec > 0)
   3224 		rate = 500000 / usec;
   3225 	else
   3226 		rate = 0;
   3227 	node.sysctl_data = &rate;
   3228 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3229 	if (error || newp == NULL)
   3230 		return error;
   3231 	reg &= ~0xfff; /* default, no limitation */
   3232 	if (rate > 0 && rate < 500000) {
   3233 		if (rate < 1000)
   3234 			rate = 1000;
   3235 		reg |= ((4000000/rate) & 0xff8);
   3236 		/*
   3237 		 * When RSC is used, ITR interval must be larger than
   3238 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   3239 		 * The minimum value is always greater than 2us on 100M
   3240 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   3241 		 */
   3242 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3243 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3244 			if ((adapter->num_queues > 1)
   3245 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3246 				return EINVAL;
   3247 		}
   3248 		ixgbe_max_interrupt_rate = rate;
   3249 	} else
   3250 		ixgbe_max_interrupt_rate = 0;
   3251 	ixgbe_eitr_write(adapter, que->msix, reg);
   3252 
   3253 	return (0);
   3254 } /* ixgbe_sysctl_interrupt_rate_handler */
   3255 
   3256 const struct sysctlnode *
   3257 ixgbe_sysctl_instance(struct adapter *adapter)
   3258 {
   3259 	const char *dvname;
   3260 	struct sysctllog **log;
   3261 	int rc;
   3262 	const struct sysctlnode *rnode;
   3263 
   3264 	if (adapter->sysctltop != NULL)
   3265 		return adapter->sysctltop;
   3266 
   3267 	log = &adapter->sysctllog;
   3268 	dvname = device_xname(adapter->dev);
   3269 
   3270 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3271 	    0, CTLTYPE_NODE, dvname,
   3272 	    SYSCTL_DESCR("ixgbe information and settings"),
   3273 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3274 		goto err;
   3275 
   3276 	return rnode;
   3277 err:
   3278 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3279 	return NULL;
   3280 }
   3281 
   3282 /************************************************************************
   3283  * ixgbe_add_device_sysctls
   3284  ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t	       dev = adapter->dev;
	struct ixgbe_hw	       *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* All nodes below hang off the per-device instance node. */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the knob from the global default before exporting it. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* These PHY sysctls get their own "phy" subtree. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
   3438 
   3439 /************************************************************************
   3440  * ixgbe_allocate_pci_resources
   3441  ************************************************************************/
   3442 static int
   3443 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3444     const struct pci_attach_args *pa)
   3445 {
   3446 	pcireg_t	memtype, csr;
   3447 	device_t dev = adapter->dev;
   3448 	bus_addr_t addr;
   3449 	int flags;
   3450 
   3451 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3452 	switch (memtype) {
   3453 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3454 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3455 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3456 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3457 		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3458 			goto map_err;
   3459 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3460 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3461 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3462 		}
   3463 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3464 		     adapter->osdep.mem_size, flags,
   3465 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3466 map_err:
   3467 			adapter->osdep.mem_size = 0;
   3468 			aprint_error_dev(dev, "unable to map BAR0\n");
   3469 			return ENXIO;
   3470 		}
   3471 		/*
   3472 		 * Enable address decoding for memory range in case BIOS or
   3473 		 * UEFI don't set it.
   3474 		 */
   3475 		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3476 		    PCI_COMMAND_STATUS_REG);
   3477 		csr |= PCI_COMMAND_MEM_ENABLE;
   3478 		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
   3479 		    csr);
   3480 		break;
   3481 	default:
   3482 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3483 		return ENXIO;
   3484 	}
   3485 
   3486 	return (0);
   3487 } /* ixgbe_allocate_pci_resources */
   3488 
   3489 static void
   3490 ixgbe_free_softint(struct adapter *adapter)
   3491 {
   3492 	struct ix_queue *que = adapter->queues;
   3493 	struct tx_ring *txr = adapter->tx_rings;
   3494 	int i;
   3495 
   3496 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3497 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3498 			if (txr->txr_si != NULL)
   3499 				softint_disestablish(txr->txr_si);
   3500 		}
   3501 		if (que->que_si != NULL)
   3502 			softint_disestablish(que->que_si);
   3503 	}
   3504 	if (adapter->txr_wq != NULL)
   3505 		workqueue_destroy(adapter->txr_wq);
   3506 	if (adapter->txr_wq_enqueued != NULL)
   3507 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
   3508 	if (adapter->que_wq != NULL)
   3509 		workqueue_destroy(adapter->que_wq);
   3510 
   3511 	/* Drain the Link queue */
   3512 	if (adapter->link_si != NULL) {
   3513 		softint_disestablish(adapter->link_si);
   3514 		adapter->link_si = NULL;
   3515 	}
   3516 	if (adapter->mod_si != NULL) {
   3517 		softint_disestablish(adapter->mod_si);
   3518 		adapter->mod_si = NULL;
   3519 	}
   3520 	if (adapter->msf_si != NULL) {
   3521 		softint_disestablish(adapter->msf_si);
   3522 		adapter->msf_si = NULL;
   3523 	}
   3524 	if (adapter->phy_si != NULL) {
   3525 		softint_disestablish(adapter->phy_si);
   3526 		adapter->phy_si = NULL;
   3527 	}
   3528 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3529 		if (adapter->fdir_si != NULL) {
   3530 			softint_disestablish(adapter->fdir_si);
   3531 			adapter->fdir_si = NULL;
   3532 		}
   3533 	}
   3534 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3535 		if (adapter->mbx_si != NULL) {
   3536 			softint_disestablish(adapter->mbx_si);
   3537 			adapter->mbx_si = NULL;
   3538 		}
   3539 	}
   3540 } /* ixgbe_free_softint */
   3541 
   3542 /************************************************************************
   3543  * ixgbe_detach - Device removal routine
   3544  *
   3545  *   Called when the driver is being removed.
   3546  *   Stops the adapter and deallocates all the resources
   3547  *   that were allocated for driver operation.
   3548  *
   3549  *   return 0 on success, positive on failure
   3550  ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32	ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	if (adapter->osdep.attached == false)
		return 0;

	/* VFs must be detached before the PF can go away. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
	 * so it's not required to call ixgbe_stop() directly.
	 */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Tear down softints/workqueues before freeing what they use. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for any in-flight callouts to finish. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Detach the adapter-wide event counters. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-traffic-class counters; some exist only on certain MACs. */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue and per-ring counters. */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally, release descriptor rings and per-queue state. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
   3743 
   3744 /************************************************************************
   3745  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3746  *
   3747  *   Prepare the adapter/port for LPLU and/or WoL
   3748  ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	s32		error = 0;

	/* Caller must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wakeup state. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
   3803 
   3804 /************************************************************************
   3805  * ixgbe_shutdown - Shutdown entry point
   3806  ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Same LPLU/WoL preparation as suspend, under the core lock. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
   3823 
   3824 /************************************************************************
   3825  * ixgbe_suspend
   3826  *
   3827  *   From D0 to D3
   3828  ************************************************************************/
   3829 static bool
   3830 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3831 {
   3832 	struct adapter *adapter = device_private(dev);
   3833 	int	       error = 0;
   3834 
   3835 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3836 
   3837 	IXGBE_CORE_LOCK(adapter);
   3838 
   3839 	error = ixgbe_setup_low_power_mode(adapter);
   3840 
   3841 	IXGBE_CORE_UNLOCK(adapter);
   3842 
   3843 	return (error);
   3844 } /* ixgbe_suspend */
   3845 
   3846 /************************************************************************
   3847  * ixgbe_resume
   3848  *
   3849  *   From D3 to D0
   3850  ************************************************************************/
   3851 static bool
   3852 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3853 {
   3854 	struct adapter	*adapter = device_private(dev);
   3855 	struct ifnet	*ifp = adapter->ifp;
   3856 	struct ixgbe_hw *hw = &adapter->hw;
   3857 	u32		wus;
   3858 
   3859 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3860 
   3861 	IXGBE_CORE_LOCK(adapter);
   3862 
   3863 	/* Read & clear WUS register */
   3864 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3865 	if (wus)
   3866 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3867 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3868 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3869 	/* And clear WUFC until next low-power transition */
   3870 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3871 
   3872 	/*
   3873 	 * Required after D3->D0 transition;
   3874 	 * will re-advertise all previous advertised speeds
   3875 	 */
   3876 	if (ifp->if_flags & IFF_UP)
   3877 		ixgbe_init_locked(adapter);
   3878 
   3879 	IXGBE_CORE_UNLOCK(adapter);
   3880 
   3881 	return true;
   3882 } /* ixgbe_resume */
   3883 
   3884 /*
   3885  * Set the various hardware offload abilities.
   3886  *
   3887  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3888  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3889  * mbuf offload flags the driver will understand.
   3890  */
   3891 static void
   3892 ixgbe_set_if_hwassist(struct adapter *adapter)
   3893 {
   3894 	/* XXX */
   3895 }
   3896 
   3897 /************************************************************************
   3898  * ixgbe_init_locked - Init entry point
   3899  *
   3900  *   Used in two ways: It is used by the stack as an init
   3901  *   entry point in network interface structure. It is also
   3902  *   used by the driver as a hw/sw initialization routine to
   3903  *   get to a consistent state.
   3904  *
   3905  *   return 0 on success, positive on failure
   3906  ************************************************************************/
   3907 static void
   3908 ixgbe_init_locked(struct adapter *adapter)
   3909 {
   3910 	struct ifnet   *ifp = adapter->ifp;
   3911 	device_t	dev = adapter->dev;
   3912 	struct ixgbe_hw *hw = &adapter->hw;
   3913 	struct ix_queue *que;
   3914 	struct tx_ring	*txr;
   3915 	struct rx_ring	*rxr;
   3916 	u32		txdctl, mhadd;
   3917 	u32		rxdctl, rxctrl;
   3918 	u32		ctrl_ext;
   3919 	int		i, j, err;
   3920 
   3921 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3922 
   3923 	KASSERT(mutex_owned(&adapter->core_mtx));
   3924 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3925 
   3926 	hw->adapter_stopped = FALSE;
   3927 	ixgbe_stop_adapter(hw);
   3928 	callout_stop(&adapter->timer);
   3929 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
   3930 		que->disabled_count = 0;
   3931 
   3932 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3933 	adapter->max_frame_size =
   3934 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3935 
   3936 	/* Queue indices may change with IOV mode */
   3937 	ixgbe_align_all_queue_indices(adapter);
   3938 
   3939 	/* reprogram the RAR[0] in case user changed it. */
   3940 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3941 
   3942 	/* Get the latest mac address, User can use a LAA */
   3943 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3944 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3945 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3946 	hw->addr_ctrl.rar_used_count = 1;
   3947 
   3948 	/* Set hardware offload abilities from ifnet flags */
   3949 	ixgbe_set_if_hwassist(adapter);
   3950 
   3951 	/* Prepare transmit descriptors and buffers */
   3952 	if (ixgbe_setup_transmit_structures(adapter)) {
   3953 		device_printf(dev, "Could not setup transmit structures\n");
   3954 		ixgbe_stop(adapter);
   3955 		return;
   3956 	}
   3957 
   3958 	ixgbe_init_hw(hw);
   3959 
   3960 	ixgbe_initialize_iov(adapter);
   3961 
   3962 	ixgbe_initialize_transmit_units(adapter);
   3963 
   3964 	/* Setup Multicast table */
   3965 	ixgbe_set_multi(adapter);
   3966 
   3967 	/* Determine the correct mbuf pool, based on frame size */
   3968 	if (adapter->max_frame_size <= MCLBYTES)
   3969 		adapter->rx_mbuf_sz = MCLBYTES;
   3970 	else
   3971 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3972 
   3973 	/* Prepare receive descriptors and buffers */
   3974 	if (ixgbe_setup_receive_structures(adapter)) {
   3975 		device_printf(dev, "Could not setup receive structures\n");
   3976 		ixgbe_stop(adapter);
   3977 		return;
   3978 	}
   3979 
   3980 	/* Configure RX settings */
   3981 	ixgbe_initialize_receive_units(adapter);
   3982 
   3983 	/* Enable SDP & MSI-X interrupts based on adapter */
   3984 	ixgbe_config_gpie(adapter);
   3985 
   3986 	/* Set MTU size */
   3987 	if (ifp->if_mtu > ETHERMTU) {
   3988 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3989 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3990 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3991 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3992 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3993 	}
   3994 
   3995 	/* Now enable all the queues */
   3996 	for (i = 0; i < adapter->num_queues; i++) {
   3997 		txr = &adapter->tx_rings[i];
   3998 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3999 		txdctl |= IXGBE_TXDCTL_ENABLE;
   4000 		/* Set WTHRESH to 8, burst writeback */
   4001 		txdctl |= (8 << 16);
   4002 		/*
   4003 		 * When the internal queue falls below PTHRESH (32),
   4004 		 * start prefetching as long as there are at least
   4005 		 * HTHRESH (1) buffers ready. The values are taken
   4006 		 * from the Intel linux driver 3.8.21.
   4007 		 * Prefetching enables tx line rate even with 1 queue.
   4008 		 */
   4009 		txdctl |= (32 << 0) | (1 << 8);
   4010 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   4011 	}
   4012 
   4013 	for (i = 0; i < adapter->num_queues; i++) {
   4014 		rxr = &adapter->rx_rings[i];
   4015 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   4016 		if (hw->mac.type == ixgbe_mac_82598EB) {
   4017 			/*
   4018 			 * PTHRESH = 21
   4019 			 * HTHRESH = 4
   4020 			 * WTHRESH = 8
   4021 			 */
   4022 			rxdctl &= ~0x3FFFFF;
   4023 			rxdctl |= 0x080420;
   4024 		}
   4025 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   4026 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   4027 		for (j = 0; j < 10; j++) {
   4028 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   4029 			    IXGBE_RXDCTL_ENABLE)
   4030 				break;
   4031 			else
   4032 				msec_delay(1);
   4033 		}
   4034 		wmb();
   4035 
   4036 		/*
   4037 		 * In netmap mode, we must preserve the buffers made
   4038 		 * available to userspace before the if_init()
   4039 		 * (this is true by default on the TX side, because
   4040 		 * init makes all buffers available to userspace).
   4041 		 *
   4042 		 * netmap_reset() and the device specific routines
   4043 		 * (e.g. ixgbe_setup_receive_rings()) map these
   4044 		 * buffers at the end of the NIC ring, so here we
   4045 		 * must set the RDT (tail) register to make sure
   4046 		 * they are not overwritten.
   4047 		 *
   4048 		 * In this driver the NIC ring starts at RDH = 0,
   4049 		 * RDT points to the last slot available for reception (?),
   4050 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   4051 		 */
   4052 #ifdef DEV_NETMAP
   4053 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   4054 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   4055 			struct netmap_adapter *na = NA(adapter->ifp);
   4056 			struct netmap_kring *kring = na->rx_rings[i];
   4057 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   4058 
   4059 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   4060 		} else
   4061 #endif /* DEV_NETMAP */
   4062 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   4063 			    adapter->num_rx_desc - 1);
   4064 	}
   4065 
   4066 	/* Enable Receive engine */
   4067 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4068 	if (hw->mac.type == ixgbe_mac_82598EB)
   4069 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   4070 	rxctrl |= IXGBE_RXCTRL_RXEN;
   4071 	ixgbe_enable_rx_dma(hw, rxctrl);
   4072 
   4073 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4074 
   4075 	/* Set up MSI/MSI-X routing */
   4076 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4077 		ixgbe_configure_ivars(adapter);
   4078 		/* Set up auto-mask */
   4079 		if (hw->mac.type == ixgbe_mac_82598EB)
   4080 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4081 		else {
   4082 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   4083 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   4084 		}
   4085 	} else {  /* Simple settings for Legacy/MSI */
   4086 		ixgbe_set_ivar(adapter, 0, 0, 0);
   4087 		ixgbe_set_ivar(adapter, 0, 0, 1);
   4088 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4089 	}
   4090 
   4091 	ixgbe_init_fdir(adapter);
   4092 
   4093 	/*
   4094 	 * Check on any SFP devices that
   4095 	 * need to be kick-started
   4096 	 */
   4097 	if (hw->phy.type == ixgbe_phy_none) {
   4098 		err = hw->phy.ops.identify(hw);
   4099 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4100 			device_printf(dev,
   4101 			    "Unsupported SFP+ module type was detected.\n");
   4102 			return;
   4103 		}
   4104 	}
   4105 
   4106 	/* Set moderation on the Link interrupt */
   4107 	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
   4108 
   4109 	/* Enable EEE power saving */
   4110 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   4111 		hw->mac.ops.setup_eee(hw,
   4112 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   4113 
   4114 	/* Enable power to the phy. */
   4115 	ixgbe_set_phy_power(hw, TRUE);
   4116 
   4117 	/* Config/Enable Link */
   4118 	ixgbe_config_link(adapter);
   4119 
   4120 	/* Hardware Packet Buffer & Flow Control setup */
   4121 	ixgbe_config_delay_values(adapter);
   4122 
   4123 	/* Initialize the FC settings */
   4124 	ixgbe_start_hw(hw);
   4125 
   4126 	/* Set up VLAN support and filter */
   4127 	ixgbe_setup_vlan_hw_support(adapter);
   4128 
   4129 	/* Setup DMA Coalescing */
   4130 	ixgbe_config_dmac(adapter);
   4131 
   4132 	/* And now turn on interrupts */
   4133 	ixgbe_enable_intr(adapter);
   4134 
   4135 	/* Enable the use of the MBX by the VF's */
   4136 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   4137 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   4138 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   4139 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   4140 	}
   4141 
   4142 	/* Update saved flags. See ixgbe_ifflags_cb() */
   4143 	adapter->if_flags = ifp->if_flags;
   4144 	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
   4145 
   4146 	/* Now inform the stack we're ready */
   4147 	ifp->if_flags |= IFF_RUNNING;
   4148 
   4149 	return;
   4150 } /* ixgbe_init_locked */
   4151 
   4152 /************************************************************************
   4153  * ixgbe_init
   4154  ************************************************************************/
   4155 static int
   4156 ixgbe_init(struct ifnet *ifp)
   4157 {
   4158 	struct adapter *adapter = ifp->if_softc;
   4159 
   4160 	IXGBE_CORE_LOCK(adapter);
   4161 	ixgbe_init_locked(adapter);
   4162 	IXGBE_CORE_UNLOCK(adapter);
   4163 
   4164 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   4165 } /* ixgbe_init */
   4166 
   4167 /************************************************************************
   4168  * ixgbe_set_ivar
   4169  *
   4170  *   Setup the correct IVAR register for a particular MSI-X interrupt
   4171  *     (yes this is all very magic and confusing :)
   4172  *    - entry is the register array entry
   4173  *    - vector is the MSI-X vector for this queue
   4174  *    - type is RX/TX/MISC
   4175  ************************************************************************/
   4176 static void
   4177 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4178 {
   4179 	struct ixgbe_hw *hw = &adapter->hw;
   4180 	u32 ivar, index;
   4181 
   4182 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4183 
   4184 	switch (hw->mac.type) {
   4185 	case ixgbe_mac_82598EB:
   4186 		if (type == -1)
   4187 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4188 		else
   4189 			entry += (type * 64);
   4190 		index = (entry >> 2) & 0x1F;
   4191 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4192 		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
   4193 		ivar |= ((u32)vector << (8 * (entry & 0x3)));
   4194 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4195 		break;
   4196 	case ixgbe_mac_82599EB:
   4197 	case ixgbe_mac_X540:
   4198 	case ixgbe_mac_X550:
   4199 	case ixgbe_mac_X550EM_x:
   4200 	case ixgbe_mac_X550EM_a:
   4201 		if (type == -1) { /* MISC IVAR */
   4202 			index = (entry & 1) * 8;
   4203 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4204 			ivar &= ~(0xffUL << index);
   4205 			ivar |= ((u32)vector << index);
   4206 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4207 		} else {	/* RX/TX IVARS */
   4208 			index = (16 * (entry & 1)) + (8 * type);
   4209 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4210 			ivar &= ~(0xffUL << index);
   4211 			ivar |= ((u32)vector << index);
   4212 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   4213 		}
   4214 		break;
   4215 	default:
   4216 		break;
   4217 	}
   4218 } /* ixgbe_set_ivar */
   4219 
   4220 /************************************************************************
   4221  * ixgbe_configure_ivars
   4222  ************************************************************************/
   4223 static void
   4224 ixgbe_configure_ivars(struct adapter *adapter)
   4225 {
   4226 	struct ix_queue *que = adapter->queues;
   4227 	u32		newitr;
   4228 
   4229 	if (ixgbe_max_interrupt_rate > 0)
   4230 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4231 	else {
   4232 		/*
   4233 		 * Disable DMA coalescing if interrupt moderation is
   4234 		 * disabled.
   4235 		 */
   4236 		adapter->dmac = 0;
   4237 		newitr = 0;
   4238 	}
   4239 
   4240 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4241 		struct rx_ring *rxr = &adapter->rx_rings[i];
   4242 		struct tx_ring *txr = &adapter->tx_rings[i];
   4243 		/* First the RX queue entry */
   4244 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   4245 		/* ... and the TX */
   4246 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   4247 		/* Set an Initial EITR value */
   4248 		ixgbe_eitr_write(adapter, que->msix, newitr);
   4249 		/*
   4250 		 * To eliminate influence of the previous state.
   4251 		 * At this point, Tx/Rx interrupt handler
   4252 		 * (ixgbe_msix_que()) cannot be called, so  both
   4253 		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
   4254 		 */
   4255 		que->eitr_setting = 0;
   4256 	}
   4257 
   4258 	/* For the Link interrupt */
   4259 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   4260 } /* ixgbe_configure_ivars */
   4261 
   4262 /************************************************************************
   4263  * ixgbe_config_gpie
   4264  ************************************************************************/
   4265 static void
   4266 ixgbe_config_gpie(struct adapter *adapter)
   4267 {
   4268 	struct ixgbe_hw *hw = &adapter->hw;
   4269 	u32		gpie;
   4270 
   4271 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   4272 
   4273 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4274 		/* Enable Enhanced MSI-X mode */
   4275 		gpie |= IXGBE_GPIE_MSIX_MODE
   4276 		     |	IXGBE_GPIE_EIAME
   4277 		     |	IXGBE_GPIE_PBA_SUPPORT
   4278 		     |	IXGBE_GPIE_OCD;
   4279 	}
   4280 
   4281 	/* Fan Failure Interrupt */
   4282 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4283 		gpie |= IXGBE_SDP1_GPIEN;
   4284 
   4285 	/* Thermal Sensor Interrupt */
   4286 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   4287 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4288 
   4289 	/* Link detection */
   4290 	switch (hw->mac.type) {
   4291 	case ixgbe_mac_82599EB:
   4292 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   4293 		break;
   4294 	case ixgbe_mac_X550EM_x:
   4295 	case ixgbe_mac_X550EM_a:
   4296 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4297 		break;
   4298 	default:
   4299 		break;
   4300 	}
   4301 
   4302 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   4303 
   4304 } /* ixgbe_config_gpie */
   4305 
   4306 /************************************************************************
   4307  * ixgbe_config_delay_values
   4308  *
   4309  *   Requires adapter->max_frame_size to be set.
   4310  ************************************************************************/
   4311 static void
   4312 ixgbe_config_delay_values(struct adapter *adapter)
   4313 {
   4314 	struct ixgbe_hw *hw = &adapter->hw;
   4315 	u32		rxpb, frame, size, tmp;
   4316 
   4317 	frame = adapter->max_frame_size;
   4318 
   4319 	/* Calculate High Water */
   4320 	switch (hw->mac.type) {
   4321 	case ixgbe_mac_X540:
   4322 	case ixgbe_mac_X550:
   4323 	case ixgbe_mac_X550EM_x:
   4324 	case ixgbe_mac_X550EM_a:
   4325 		tmp = IXGBE_DV_X540(frame, frame);
   4326 		break;
   4327 	default:
   4328 		tmp = IXGBE_DV(frame, frame);
   4329 		break;
   4330 	}
   4331 	size = IXGBE_BT2KB(tmp);
   4332 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4333 	hw->fc.high_water[0] = rxpb - size;
   4334 
   4335 	/* Now calculate Low Water */
   4336 	switch (hw->mac.type) {
   4337 	case ixgbe_mac_X540:
   4338 	case ixgbe_mac_X550:
   4339 	case ixgbe_mac_X550EM_x:
   4340 	case ixgbe_mac_X550EM_a:
   4341 		tmp = IXGBE_LOW_DV_X540(frame);
   4342 		break;
   4343 	default:
   4344 		tmp = IXGBE_LOW_DV(frame);
   4345 		break;
   4346 	}
   4347 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4348 
   4349 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4350 	hw->fc.send_xon = TRUE;
   4351 } /* ixgbe_config_delay_values */
   4352 
   4353 /************************************************************************
   4354  * ixgbe_set_multi - Multicast Update
   4355  *
   4356  *   Called whenever multicast address list is updated.
   4357  ************************************************************************/
   4358 static void
   4359 ixgbe_set_multi(struct adapter *adapter)
   4360 {
   4361 	struct ixgbe_mc_addr	*mta;
   4362 	struct ifnet		*ifp = adapter->ifp;
   4363 	u8			*update_ptr;
   4364 	int			mcnt = 0;
   4365 	u32			fctrl;
   4366 	struct ethercom		*ec = &adapter->osdep.ec;
   4367 	struct ether_multi	*enm;
   4368 	struct ether_multistep	step;
   4369 
   4370 	KASSERT(mutex_owned(&adapter->core_mtx));
   4371 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4372 
   4373 	mta = adapter->mta;
   4374 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4375 
   4376 	ETHER_LOCK(ec);
   4377 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4378 	ETHER_FIRST_MULTI(step, ec, enm);
   4379 	while (enm != NULL) {
   4380 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4381 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4382 			ETHER_ADDR_LEN) != 0)) {
   4383 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4384 			break;
   4385 		}
   4386 		bcopy(enm->enm_addrlo,
   4387 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4388 		mta[mcnt].vmdq = adapter->pool;
   4389 		mcnt++;
   4390 		ETHER_NEXT_MULTI(step, enm);
   4391 	}
   4392 
   4393 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4394 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4395 	if (ifp->if_flags & IFF_PROMISC)
   4396 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4397 	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
   4398 		fctrl |= IXGBE_FCTRL_MPE;
   4399 	}
   4400 	ETHER_UNLOCK(ec);
   4401 
   4402 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4403 
   4404 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4405 		update_ptr = (u8 *)mta;
   4406 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4407 		    ixgbe_mc_array_itr, TRUE);
   4408 	}
   4409 
   4410 } /* ixgbe_set_multi */
   4411 
   4412 /************************************************************************
   4413  * ixgbe_mc_array_itr
   4414  *
   4415  *   An iterator function needed by the multicast shared code.
   4416  *   It feeds the shared code routine the addresses in the
   4417  *   array of ixgbe_set_multi() one by one.
   4418  ************************************************************************/
   4419 static u8 *
   4420 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4421 {
   4422 	struct ixgbe_mc_addr *mta;
   4423 
   4424 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4425 	*vmdq = mta->vmdq;
   4426 
   4427 	*update_ptr = (u8*)(mta + 1);
   4428 
   4429 	return (mta->addr);
   4430 } /* ixgbe_mc_array_itr */
   4431 
   4432 /************************************************************************
   4433  * ixgbe_local_timer - Timer routine
   4434  *
   4435  *   Checks for link status, updates statistics,
   4436  *   and runs the watchdog check.
   4437  ************************************************************************/
   4438 static void
   4439 ixgbe_local_timer(void *arg)
   4440 {
   4441 	struct adapter *adapter = arg;
   4442 
   4443 	IXGBE_CORE_LOCK(adapter);
   4444 	ixgbe_local_timer1(adapter);
   4445 	IXGBE_CORE_UNLOCK(adapter);
   4446 }
   4447 
/*
 * ixgbe_local_timer1:
 *
 *   Core-locked body of the 1 Hz timer: probes for optics, refreshes
 *   link state and statistics, folds per-queue soft error counters into
 *   the adapter-wide event counters, and runs the TX watchdog.  Always
 *   re-arms itself via callout_reset() unless the watchdog fires, in
 *   which case the interface is re-initialized instead.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues with TX work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;	/* number of queues declared hung */
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics; do nothing else until one appears */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Sum the per-ring soft counters into the adapter-wide events */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm for the next tick */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* Every queue is hung: declare timeout and re-init the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
   4552 
   4553 /************************************************************************
   4554  * ixgbe_recovery_mode_timer - Recovery mode timer routine
   4555  ************************************************************************/
   4556 static void
   4557 ixgbe_recovery_mode_timer(void *arg)
   4558 {
   4559 	struct adapter *adapter = arg;
   4560 	struct ixgbe_hw *hw = &adapter->hw;
   4561 
   4562 	IXGBE_CORE_LOCK(adapter);
   4563 	if (ixgbe_fw_recovery_mode(hw)) {
   4564 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
   4565 			/* Firmware error detected, entering recovery mode */
   4566 			device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
   4567 
   4568 			if (hw->adapter_stopped == FALSE)
   4569 				ixgbe_stop(adapter);
   4570 		}
   4571 	} else
   4572 		atomic_cas_uint(&adapter->recovery_mode, 1, 0);
   4573 
   4574 	callout_reset(&adapter->recovery_mode_timer, hz,
   4575 	    ixgbe_recovery_mode_timer, adapter);
   4576 	IXGBE_CORE_UNLOCK(adapter);
   4577 } /* ixgbe_recovery_mode_timer */
   4578 
   4579 /************************************************************************
   4580  * ixgbe_sfp_probe
   4581  *
   4582  *   Determine if a port had optics inserted.
   4583  ************************************************************************/
   4584 static bool
   4585 ixgbe_sfp_probe(struct adapter *adapter)
   4586 {
   4587 	struct ixgbe_hw	*hw = &adapter->hw;
   4588 	device_t	dev = adapter->dev;
   4589 	bool		result = FALSE;
   4590 
   4591 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4592 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4593 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4594 		if (ret)
   4595 			goto out;
   4596 		ret = hw->phy.ops.reset(hw);
   4597 		adapter->sfp_probe = FALSE;
   4598 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4599 			device_printf(dev,"Unsupported SFP+ module detected!");
   4600 			device_printf(dev,
   4601 			    "Reload driver with supported module.\n");
   4602 			goto out;
   4603 		} else
   4604 			device_printf(dev, "SFP+ module detected!\n");
   4605 		/* We now have supported optics */
   4606 		result = TRUE;
   4607 	}
   4608 out:
   4609 
   4610 	return (result);
   4611 } /* ixgbe_sfp_probe */
   4612 
   4613 /************************************************************************
   4614  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4615  ************************************************************************/
   4616 static void
   4617 ixgbe_handle_mod(void *context)
   4618 {
   4619 	struct adapter	*adapter = context;
   4620 	struct ixgbe_hw *hw = &adapter->hw;
   4621 	device_t	dev = adapter->dev;
   4622 	u32		err, cage_full = 0;
   4623 
   4624 	++adapter->mod_sicount.ev_count;
   4625 	if (adapter->hw.need_crosstalk_fix) {
   4626 		switch (hw->mac.type) {
   4627 		case ixgbe_mac_82599EB:
   4628 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4629 			    IXGBE_ESDP_SDP2;
   4630 			break;
   4631 		case ixgbe_mac_X550EM_x:
   4632 		case ixgbe_mac_X550EM_a:
   4633 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4634 			    IXGBE_ESDP_SDP0;
   4635 			break;
   4636 		default:
   4637 			break;
   4638 		}
   4639 
   4640 		if (!cage_full)
   4641 			return;
   4642 	}
   4643 
   4644 	err = hw->phy.ops.identify_sfp(hw);
   4645 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4646 		device_printf(dev,
   4647 		    "Unsupported SFP+ module type was detected.\n");
   4648 		return;
   4649 	}
   4650 
   4651 	if (hw->mac.type == ixgbe_mac_82598EB)
   4652 		err = hw->phy.ops.reset(hw);
   4653 	else
   4654 		err = hw->mac.ops.setup_sfp(hw);
   4655 
   4656 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4657 		device_printf(dev,
   4658 		    "Setup failure - unsupported SFP+ module type.\n");
   4659 		return;
   4660 	}
   4661 	softint_schedule(adapter->msf_si);
   4662 } /* ixgbe_handle_mod */
   4663 
   4664 
   4665 /************************************************************************
   4666  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4667  ************************************************************************/
   4668 static void
   4669 ixgbe_handle_msf(void *context)
   4670 {
   4671 	struct adapter	*adapter = context;
   4672 	struct ixgbe_hw *hw = &adapter->hw;
   4673 	u32		autoneg;
   4674 	bool		negotiate;
   4675 
   4676 	IXGBE_CORE_LOCK(adapter);
   4677 	++adapter->msf_sicount.ev_count;
   4678 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4679 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4680 
   4681 	autoneg = hw->phy.autoneg_advertised;
   4682 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4683 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4684 	else
   4685 		negotiate = 0;
   4686 	if (hw->mac.ops.setup_link)
   4687 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4688 
   4689 	/* Adjust media types shown in ifconfig */
   4690 	ifmedia_removeall(&adapter->media);
   4691 	ixgbe_add_media_types(adapter);
   4692 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4693 	IXGBE_CORE_UNLOCK(adapter);
   4694 } /* ixgbe_handle_msf */
   4695 
   4696 /************************************************************************
   4697  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4698  ************************************************************************/
   4699 static void
   4700 ixgbe_handle_phy(void *context)
   4701 {
   4702 	struct adapter	*adapter = context;
   4703 	struct ixgbe_hw *hw = &adapter->hw;
   4704 	int error;
   4705 
   4706 	++adapter->phy_sicount.ev_count;
   4707 	error = hw->phy.ops.handle_lasi(hw);
   4708 	if (error == IXGBE_ERR_OVERTEMP)
   4709 		device_printf(adapter->dev,
   4710 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4711 		    " PHY will downshift to lower power state!\n");
   4712 	else if (error)
   4713 		device_printf(adapter->dev,
   4714 		    "Error handling LASI interrupt: %d\n", error);
   4715 } /* ixgbe_handle_phy */
   4716 
   4717 static void
   4718 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4719 {
   4720 	struct adapter *adapter = ifp->if_softc;
   4721 
   4722 	IXGBE_CORE_LOCK(adapter);
   4723 	ixgbe_stop(adapter);
   4724 	IXGBE_CORE_UNLOCK(adapter);
   4725 }
   4726 
   4727 /************************************************************************
   4728  * ixgbe_stop - Stop the hardware
   4729  *
   4730  *   Disables all traffic on the adapter by issuing a
   4731  *   global reset on the MAC and deallocates TX/RX buffers.
   4732  ************************************************************************/
   4733 static void
   4734 ixgbe_stop(void *arg)
   4735 {
   4736 	struct ifnet	*ifp;
   4737 	struct adapter	*adapter = arg;
   4738 	struct ixgbe_hw *hw = &adapter->hw;
   4739 
   4740 	ifp = adapter->ifp;
   4741 
   4742 	KASSERT(mutex_owned(&adapter->core_mtx));
   4743 
   4744 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4745 	ixgbe_disable_intr(adapter);
   4746 	callout_stop(&adapter->timer);
   4747 
   4748 	/* Let the stack know...*/
   4749 	ifp->if_flags &= ~IFF_RUNNING;
   4750 
   4751 	ixgbe_reset_hw(hw);
   4752 	hw->adapter_stopped = FALSE;
   4753 	ixgbe_stop_adapter(hw);
   4754 	if (hw->mac.type == ixgbe_mac_82599EB)
   4755 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4756 	/* Turn off the laser - noop with no optics */
   4757 	ixgbe_disable_tx_laser(hw);
   4758 
   4759 	/* Update the stack */
   4760 	adapter->link_up = FALSE;
   4761 	ixgbe_update_link_status(adapter);
   4762 
   4763 	/* reprogram the RAR[0] in case user changed it. */
   4764 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4765 
   4766 	return;
   4767 } /* ixgbe_stop */
   4768 
   4769 /************************************************************************
   4770  * ixgbe_update_link_status - Update OS on link state
   4771  *
   4772  * Note: Only updates the OS on the cached link state.
   4773  *	 The real check of the hardware only happens with
   4774  *	 a link interrupt.
   4775  ************************************************************************/
   4776 static void
   4777 ixgbe_update_link_status(struct adapter *adapter)
   4778 {
   4779 	struct ifnet	*ifp = adapter->ifp;
   4780 	device_t	dev = adapter->dev;
   4781 	struct ixgbe_hw *hw = &adapter->hw;
   4782 
   4783 	KASSERT(mutex_owned(&adapter->core_mtx));
   4784 
   4785 	if (adapter->link_up) {
   4786 		if (adapter->link_active != LINK_STATE_UP) {
   4787 			/*
   4788 			 * To eliminate influence of the previous state
   4789 			 * in the same way as ixgbe_init_locked().
   4790 			 */
   4791 			struct ix_queue	*que = adapter->queues;
   4792 			for (int i = 0; i < adapter->num_queues; i++, que++)
   4793 				que->eitr_setting = 0;
   4794 
   4795 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4796 				/*
   4797 				 *  Discard count for both MAC Local Fault and
   4798 				 * Remote Fault because those registers are
   4799 				 * valid only when the link speed is up and
   4800 				 * 10Gbps.
   4801 				 */
   4802 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4803 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4804 			}
   4805 
   4806 			if (bootverbose) {
   4807 				const char *bpsmsg;
   4808 
   4809 				switch (adapter->link_speed) {
   4810 				case IXGBE_LINK_SPEED_10GB_FULL:
   4811 					bpsmsg = "10 Gbps";
   4812 					break;
   4813 				case IXGBE_LINK_SPEED_5GB_FULL:
   4814 					bpsmsg = "5 Gbps";
   4815 					break;
   4816 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4817 					bpsmsg = "2.5 Gbps";
   4818 					break;
   4819 				case IXGBE_LINK_SPEED_1GB_FULL:
   4820 					bpsmsg = "1 Gbps";
   4821 					break;
   4822 				case IXGBE_LINK_SPEED_100_FULL:
   4823 					bpsmsg = "100 Mbps";
   4824 					break;
   4825 				case IXGBE_LINK_SPEED_10_FULL:
   4826 					bpsmsg = "10 Mbps";
   4827 					break;
   4828 				default:
   4829 					bpsmsg = "unknown speed";
   4830 					break;
   4831 				}
   4832 				device_printf(dev, "Link is up %s %s \n",
   4833 				    bpsmsg, "Full Duplex");
   4834 			}
   4835 			adapter->link_active = LINK_STATE_UP;
   4836 			/* Update any Flow Control changes */
   4837 			ixgbe_fc_enable(&adapter->hw);
   4838 			/* Update DMA coalescing config */
   4839 			ixgbe_config_dmac(adapter);
   4840 			if_link_state_change(ifp, LINK_STATE_UP);
   4841 
   4842 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4843 				ixgbe_ping_all_vfs(adapter);
   4844 		}
   4845 	} else {
   4846 		/*
   4847 		 * Do it when link active changes to DOWN. i.e.
   4848 		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
   4849 		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
   4850 		 */
   4851 		if (adapter->link_active != LINK_STATE_DOWN) {
   4852 			if (bootverbose)
   4853 				device_printf(dev, "Link is Down\n");
   4854 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4855 			adapter->link_active = LINK_STATE_DOWN;
   4856 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4857 				ixgbe_ping_all_vfs(adapter);
   4858 			ixgbe_drain_all(adapter);
   4859 		}
   4860 	}
   4861 } /* ixgbe_update_link_status */
   4862 
   4863 /************************************************************************
   4864  * ixgbe_config_dmac - Configure DMA Coalescing
   4865  ************************************************************************/
   4866 static void
   4867 ixgbe_config_dmac(struct adapter *adapter)
   4868 {
   4869 	struct ixgbe_hw *hw = &adapter->hw;
   4870 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4871 
   4872 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4873 		return;
   4874 
   4875 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4876 	    dcfg->link_speed ^ adapter->link_speed) {
   4877 		dcfg->watchdog_timer = adapter->dmac;
   4878 		dcfg->fcoe_en = false;
   4879 		dcfg->link_speed = adapter->link_speed;
   4880 		dcfg->num_tcs = 1;
   4881 
   4882 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4883 		    dcfg->watchdog_timer, dcfg->link_speed);
   4884 
   4885 		hw->mac.ops.dmac_config(hw);
   4886 	}
   4887 } /* ixgbe_config_dmac */
   4888 
   4889 /************************************************************************
   4890  * ixgbe_enable_intr
   4891  ************************************************************************/
/*
 * Build the EIMS mask of non-queue interrupt causes appropriate for this
 * MAC type, program EIMS (and EIAC for MSI-X auto-clear), then enable
 * every queue's interrupt vector and flush the writes.
 */
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all enable bits minus the per-queue RTX bits. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-specific causes (ECC, thermal sensor, SDP pins). */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */
   4972 
   4973 /************************************************************************
   4974  * ixgbe_disable_intr_internal
   4975  ************************************************************************/
/*
 * Mask all device interrupts: non-queue causes via EIMC, MSI-X auto-clear
 * via EIAC, then each queue vector.  The nestok flag is passed through to
 * ixgbe_disable_queue_internal (its nesting semantics live there).
 */
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue	*que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* Clear the MSI-X auto-clear mask so nothing re-arms itself. */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_do_disable_intr_internal */
   4993 
   4994 /************************************************************************
   4995  * ixgbe_disable_intr
   4996  ************************************************************************/
/* Disable all interrupts; nested disables are permitted (nestok = true). */
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
   5003 
   5004 /************************************************************************
   5005  * ixgbe_ensure_disabled_intr
   5006  ************************************************************************/
/*
 * Disable all interrupts unconditionally (nestok = false), regardless of
 * any prior disable nesting.
 */
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
   5013 
   5014 /************************************************************************
   5015  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   5016  ************************************************************************/
/*
 * Legacy (INTx/MSI) interrupt handler.  Reads and dispatches every cause
 * in EICR: TX/RX work, fan failure, link state change, SFP module/MSF
 * events and external PHY interrupts.  Returns 0 for a spurious interrupt
 * (EICR == 0), 1 otherwise.
 */
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	struct		tx_ring *txr = adapter->tx_rings;
	bool		more = false;
	u32		eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Spurious: count it, re-arm if the interface is up. */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack the cause, defer to softint. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed-fiber event on SDP1. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Either defer remaining RX/TX work or re-enable interrupts now. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
   5107 
   5108 /************************************************************************
   5109  * ixgbe_free_pciintr_resources
   5110  ************************************************************************/
/*
 * Tear down every established interrupt handler (queue vectors first,
 * then the link/legacy vector) and release the PCI interrupt resources.
 * Safe to call on a partially initialized adapter: each step is guarded
 * by a NULL check.
 */
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int		rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Finally hand the interrupt vectors themselves back to the bus. */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
   5146 
   5147 /************************************************************************
   5148  * ixgbe_free_pci_resources
   5149  ************************************************************************/
   5150 static void
   5151 ixgbe_free_pci_resources(struct adapter *adapter)
   5152 {
   5153 
   5154 	ixgbe_free_pciintr_resources(adapter);
   5155 
   5156 	if (adapter->osdep.mem_size != 0) {
   5157 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   5158 		    adapter->osdep.mem_bus_space_handle,
   5159 		    adapter->osdep.mem_size);
   5160 	}
   5161 
   5162 } /* ixgbe_free_pci_resources */
   5163 
   5164 /************************************************************************
   5165  * ixgbe_set_sysctl_value
   5166  ************************************************************************/
   5167 static void
   5168 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   5169     const char *description, int *limit, int value)
   5170 {
   5171 	device_t dev =	adapter->dev;
   5172 	struct sysctllog **log;
   5173 	const struct sysctlnode *rnode, *cnode;
   5174 
   5175 	/*
   5176 	 * It's not required to check recovery mode because this function never
   5177 	 * touches hardware.
   5178 	 */
   5179 
   5180 	log = &adapter->sysctllog;
   5181 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5182 		aprint_error_dev(dev, "could not create sysctl root\n");
   5183 		return;
   5184 	}
   5185 	if (sysctl_createv(log, 0, &rnode, &cnode,
   5186 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   5187 	    name, SYSCTL_DESCR(description),
   5188 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   5189 		aprint_error_dev(dev, "could not create sysctl\n");
   5190 	*limit = value;
   5191 } /* ixgbe_set_sysctl_value */
   5192 
   5193 /************************************************************************
   5194  * ixgbe_sysctl_flowcntl
   5195  *
   5196  *   SYSCTL wrapper around setting Flow Control
   5197  ************************************************************************/
   5198 static int
   5199 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   5200 {
   5201 	struct sysctlnode node = *rnode;
   5202 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5203 	int error, fc;
   5204 
   5205 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5206 		return (EPERM);
   5207 
   5208 	fc = adapter->hw.fc.current_mode;
   5209 	node.sysctl_data = &fc;
   5210 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5211 	if (error != 0 || newp == NULL)
   5212 		return error;
   5213 
   5214 	/* Don't bother if it's not changed */
   5215 	if (fc == adapter->hw.fc.current_mode)
   5216 		return (0);
   5217 
   5218 	return ixgbe_set_flowcntl(adapter, fc);
   5219 } /* ixgbe_sysctl_flowcntl */
   5220 
   5221 /************************************************************************
   5222  * ixgbe_set_flowcntl - Set flow control
   5223  *
   5224  *   Flow control values:
   5225  *     0 - off
   5226  *     1 - rx pause
   5227  *     2 - tx pause
   5228  *     3 - full
   5229  ************************************************************************/
   5230 static int
   5231 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   5232 {
   5233 	switch (fc) {
   5234 		case ixgbe_fc_rx_pause:
   5235 		case ixgbe_fc_tx_pause:
   5236 		case ixgbe_fc_full:
   5237 			adapter->hw.fc.requested_mode = fc;
   5238 			if (adapter->num_queues > 1)
   5239 				ixgbe_disable_rx_drop(adapter);
   5240 			break;
   5241 		case ixgbe_fc_none:
   5242 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5243 			if (adapter->num_queues > 1)
   5244 				ixgbe_enable_rx_drop(adapter);
   5245 			break;
   5246 		default:
   5247 			return (EINVAL);
   5248 	}
   5249 
   5250 #if 0 /* XXX NetBSD */
   5251 	/* Don't autoneg if forcing a value */
   5252 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   5253 #endif
   5254 	ixgbe_fc_enable(&adapter->hw);
   5255 
   5256 	return (0);
   5257 } /* ixgbe_set_flowcntl */
   5258 
   5259 /************************************************************************
   5260  * ixgbe_enable_rx_drop
   5261  *
   5262  *   Enable the hardware to drop packets when the buffer is
   5263  *   full. This is useful with multiqueue, so that no single
   5264  *   queue being full stalls the entire RX engine. We only
   5265  *   enable this when Multiqueue is enabled AND Flow Control
   5266  *   is disabled.
   5267  ************************************************************************/
   5268 static void
   5269 ixgbe_enable_rx_drop(struct adapter *adapter)
   5270 {
   5271 	struct ixgbe_hw *hw = &adapter->hw;
   5272 	struct rx_ring	*rxr;
   5273 	u32		srrctl;
   5274 
   5275 	for (int i = 0; i < adapter->num_queues; i++) {
   5276 		rxr = &adapter->rx_rings[i];
   5277 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5278 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   5279 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5280 	}
   5281 
   5282 	/* enable drop for each vf */
   5283 	for (int i = 0; i < adapter->num_vfs; i++) {
   5284 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5285 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   5286 		    IXGBE_QDE_ENABLE));
   5287 	}
   5288 } /* ixgbe_enable_rx_drop */
   5289 
   5290 /************************************************************************
   5291  * ixgbe_disable_rx_drop
   5292  ************************************************************************/
   5293 static void
   5294 ixgbe_disable_rx_drop(struct adapter *adapter)
   5295 {
   5296 	struct ixgbe_hw *hw = &adapter->hw;
   5297 	struct rx_ring	*rxr;
   5298 	u32		srrctl;
   5299 
   5300 	for (int i = 0; i < adapter->num_queues; i++) {
   5301 		rxr = &adapter->rx_rings[i];
   5302 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5303 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   5304 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5305 	}
   5306 
   5307 	/* disable drop for each vf */
   5308 	for (int i = 0; i < adapter->num_vfs; i++) {
   5309 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5310 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   5311 	}
   5312 } /* ixgbe_disable_rx_drop */
   5313 
   5314 /************************************************************************
   5315  * ixgbe_sysctl_advertise
   5316  *
   5317  *   SYSCTL wrapper around setting advertised speed
   5318  ************************************************************************/
   5319 static int
   5320 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   5321 {
   5322 	struct sysctlnode node = *rnode;
   5323 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5324 	int	       error = 0, advertise;
   5325 
   5326 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5327 		return (EPERM);
   5328 
   5329 	advertise = adapter->advertise;
   5330 	node.sysctl_data = &advertise;
   5331 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5332 	if (error != 0 || newp == NULL)
   5333 		return error;
   5334 
   5335 	return ixgbe_set_advertise(adapter, advertise);
   5336 } /* ixgbe_sysctl_advertise */
   5337 
   5338 /************************************************************************
   5339  * ixgbe_set_advertise - Control advertised link speed
   5340  *
   5341  *   Flags:
   5342  *     0x00 - Default (all capable link speed)
   5343  *     0x01 - advertise 100 Mb
   5344  *     0x02 - advertise 1G
   5345  *     0x04 - advertise 10G
   5346  *     0x08 - advertise 10 Mb
   5347  *     0x10 - advertise 2.5G
   5348  *     0x20 - advertise 5G
   5349  ************************************************************************/
   5350 static int
   5351 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5352 {
   5353 	device_t	 dev;
   5354 	struct ixgbe_hw	 *hw;
   5355 	ixgbe_link_speed speed = 0;
   5356 	ixgbe_link_speed link_caps = 0;
   5357 	s32		 err = IXGBE_NOT_IMPLEMENTED;
   5358 	bool		 negotiate = FALSE;
   5359 
   5360 	/* Checks to validate new value */
   5361 	if (adapter->advertise == advertise) /* no change */
   5362 		return (0);
   5363 
   5364 	dev = adapter->dev;
   5365 	hw = &adapter->hw;
   5366 
   5367 	/* No speed changes for backplane media */
   5368 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5369 		return (ENODEV);
   5370 
   5371 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5372 	    (hw->phy.multispeed_fiber))) {
   5373 		device_printf(dev,
   5374 		    "Advertised speed can only be set on copper or "
   5375 		    "multispeed fiber media types.\n");
   5376 		return (EINVAL);
   5377 	}
   5378 
   5379 	if (advertise < 0x0 || advertise > 0x2f) {
   5380 		device_printf(dev,
   5381 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5382 		return (EINVAL);
   5383 	}
   5384 
   5385 	if (hw->mac.ops.get_link_capabilities) {
   5386 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5387 		    &negotiate);
   5388 		if (err != IXGBE_SUCCESS) {
   5389 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5390 			return (ENODEV);
   5391 		}
   5392 	}
   5393 
   5394 	/* Set new value and report new advertised mode */
   5395 	if (advertise & 0x1) {
   5396 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5397 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5398 			return (EINVAL);
   5399 		}
   5400 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5401 	}
   5402 	if (advertise & 0x2) {
   5403 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5404 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5405 			return (EINVAL);
   5406 		}
   5407 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5408 	}
   5409 	if (advertise & 0x4) {
   5410 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5411 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5412 			return (EINVAL);
   5413 		}
   5414 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5415 	}
   5416 	if (advertise & 0x8) {
   5417 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5418 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5419 			return (EINVAL);
   5420 		}
   5421 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5422 	}
   5423 	if (advertise & 0x10) {
   5424 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5425 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5426 			return (EINVAL);
   5427 		}
   5428 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5429 	}
   5430 	if (advertise & 0x20) {
   5431 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5432 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5433 			return (EINVAL);
   5434 		}
   5435 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5436 	}
   5437 	if (advertise == 0)
   5438 		speed = link_caps; /* All capable link speed */
   5439 
   5440 	hw->mac.autotry_restart = TRUE;
   5441 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5442 	adapter->advertise = advertise;
   5443 
   5444 	return (0);
   5445 } /* ixgbe_set_advertise */
   5446 
   5447 /************************************************************************
   5448  * ixgbe_get_advertise - Get current advertised speed settings
   5449  *
   5450  *   Formatted for sysctl usage.
   5451  *   Flags:
   5452  *     0x01 - advertise 100 Mb
   5453  *     0x02 - advertise 1G
   5454  *     0x04 - advertise 10G
   5455  *     0x08 - advertise 10 Mb (yes, Mb)
   5456  *     0x10 - advertise 2.5G
   5457  *     0x20 - advertise 5G
   5458  ************************************************************************/
   5459 static int
   5460 ixgbe_get_advertise(struct adapter *adapter)
   5461 {
   5462 	struct ixgbe_hw	 *hw = &adapter->hw;
   5463 	int		 speed;
   5464 	ixgbe_link_speed link_caps = 0;
   5465 	s32		 err;
   5466 	bool		 negotiate = FALSE;
   5467 
   5468 	/*
   5469 	 * Advertised speed means nothing unless it's copper or
   5470 	 * multi-speed fiber
   5471 	 */
   5472 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5473 	    !(hw->phy.multispeed_fiber))
   5474 		return (0);
   5475 
   5476 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5477 	if (err != IXGBE_SUCCESS)
   5478 		return (0);
   5479 
   5480 	speed =
   5481 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5482 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5483 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5484 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5485 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5486 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5487 
   5488 	return speed;
   5489 } /* ixgbe_get_advertise */
   5490 
   5491 /************************************************************************
   5492  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5493  *
   5494  *   Control values:
   5495  *     0/1 - off / on (use default value of 1000)
   5496  *
   5497  *     Legal timer values are:
   5498  *     50,100,250,500,1000,2000,5000,10000
   5499  *
   5500  *     Turning off interrupt moderation will also turn this off.
   5501  ************************************************************************/
   5502 static int
   5503 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5504 {
   5505 	struct sysctlnode node = *rnode;
   5506 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5507 	struct ifnet   *ifp = adapter->ifp;
   5508 	int	       error;
   5509 	int	       newval;
   5510 
   5511 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5512 		return (EPERM);
   5513 
   5514 	newval = adapter->dmac;
   5515 	node.sysctl_data = &newval;
   5516 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5517 	if ((error) || (newp == NULL))
   5518 		return (error);
   5519 
   5520 	switch (newval) {
   5521 	case 0:
   5522 		/* Disabled */
   5523 		adapter->dmac = 0;
   5524 		break;
   5525 	case 1:
   5526 		/* Enable and use default */
   5527 		adapter->dmac = 1000;
   5528 		break;
   5529 	case 50:
   5530 	case 100:
   5531 	case 250:
   5532 	case 500:
   5533 	case 1000:
   5534 	case 2000:
   5535 	case 5000:
   5536 	case 10000:
   5537 		/* Legal values - allow */
   5538 		adapter->dmac = newval;
   5539 		break;
   5540 	default:
   5541 		/* Do nothing, illegal value */
   5542 		return (EINVAL);
   5543 	}
   5544 
   5545 	/* Re-initialize hardware if it's already running */
   5546 	if (ifp->if_flags & IFF_RUNNING)
   5547 		ifp->if_init(ifp);
   5548 
   5549 	return (0);
   5550 }
   5551 
   5552 #ifdef IXGBE_DEBUG
   5553 /************************************************************************
   5554  * ixgbe_sysctl_power_state
   5555  *
   5556  *   Sysctl to test power states
   5557  *   Values:
   5558  *     0      - set device to D0
   5559  *     3      - set device to D3
   5560  *     (none) - get current device power state
   5561  ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
/*
 * NOTE(review): the body below is FreeBSD-style code kept under
 * "notyet" (e.g. it reads req->newp and calls DEVICE_SUSPEND/RESUME,
 * none of which exist in the NetBSD sysctl/autoconf APIs).  It would
 * need porting before being enabled.  The compiled version is a stub
 * that always returns 0.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t       dev =  adapter->dev;
	int	       curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
   5597 #endif
   5598 
   5599 /************************************************************************
   5600  * ixgbe_sysctl_wol_enable
   5601  *
   5602  *   Sysctl to enable/disable the WoL capability,
   5603  *   if supported by the adapter.
   5604  *
   5605  *   Values:
   5606  *     0 - disabled
   5607  *     1 - enabled
   5608  ************************************************************************/
   5609 static int
   5610 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5611 {
   5612 	struct sysctlnode node = *rnode;
   5613 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5614 	struct ixgbe_hw *hw = &adapter->hw;
   5615 	bool		new_wol_enabled;
   5616 	int		error = 0;
   5617 
   5618 	/*
   5619 	 * It's not required to check recovery mode because this function never
   5620 	 * touches hardware.
   5621 	 */
   5622 	new_wol_enabled = hw->wol_enabled;
   5623 	node.sysctl_data = &new_wol_enabled;
   5624 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5625 	if ((error) || (newp == NULL))
   5626 		return (error);
   5627 	if (new_wol_enabled == hw->wol_enabled)
   5628 		return (0);
   5629 
   5630 	if (new_wol_enabled && !adapter->wol_support)
   5631 		return (ENODEV);
   5632 	else
   5633 		hw->wol_enabled = new_wol_enabled;
   5634 
   5635 	return (0);
   5636 } /* ixgbe_sysctl_wol_enable */
   5637 
   5638 /************************************************************************
   5639  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5640  *
   5641  *   Sysctl to enable/disable the types of packets that the
   5642  *   adapter will wake up on upon receipt.
   5643  *   Flags:
   5644  *     0x1  - Link Status Change
   5645  *     0x2  - Magic Packet
   5646  *     0x4  - Direct Exact
   5647  *     0x8  - Directed Multicast
   5648  *     0x10 - Broadcast
   5649  *     0x20 - ARP/IPv4 Request Packet
   5650  *     0x40 - Direct IPv4 Packet
   5651  *     0x80 - Direct IPv6 Packet
   5652  *
   5653  *   Settings not listed above will cause the sysctl to return an error.
   5654  ************************************************************************/
   5655 static int
   5656 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5657 {
   5658 	struct sysctlnode node = *rnode;
   5659 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5660 	int error = 0;
   5661 	u32 new_wufc;
   5662 
   5663 	/*
   5664 	 * It's not required to check recovery mode because this function never
   5665 	 * touches hardware.
   5666 	 */
   5667 	new_wufc = adapter->wufc;
   5668 	node.sysctl_data = &new_wufc;
   5669 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5670 	if ((error) || (newp == NULL))
   5671 		return (error);
   5672 	if (new_wufc == adapter->wufc)
   5673 		return (0);
   5674 
   5675 	if (new_wufc & 0xffffff00)
   5676 		return (EINVAL);
   5677 
   5678 	new_wufc &= 0xff;
   5679 	new_wufc |= (0xffffff & adapter->wufc);
   5680 	adapter->wufc = new_wufc;
   5681 
   5682 	return (0);
   5683 } /* ixgbe_sysctl_wufc */
   5684 
   5685 #ifdef IXGBE_DEBUG
   5686 /************************************************************************
   5687  * ixgbe_sysctl_print_rss_config
   5688  ************************************************************************/
   5689 static int
   5690 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5691 {
   5692 #ifdef notyet
   5693 	struct sysctlnode node = *rnode;
   5694 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5695 	struct ixgbe_hw *hw = &adapter->hw;
   5696 	device_t	dev = adapter->dev;
   5697 	struct sbuf	*buf;
   5698 	int		error = 0, reta_size;
   5699 	u32		reg;
   5700 
   5701 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5702 		return (EPERM);
   5703 
   5704 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5705 	if (!buf) {
   5706 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5707 		return (ENOMEM);
   5708 	}
   5709 
   5710 	// TODO: use sbufs to make a string to print out
   5711 	/* Set multiplier for RETA setup and table size based on MAC */
   5712 	switch (adapter->hw.mac.type) {
   5713 	case ixgbe_mac_X550:
   5714 	case ixgbe_mac_X550EM_x:
   5715 	case ixgbe_mac_X550EM_a:
   5716 		reta_size = 128;
   5717 		break;
   5718 	default:
   5719 		reta_size = 32;
   5720 		break;
   5721 	}
   5722 
   5723 	/* Print out the redirection table */
   5724 	sbuf_cat(buf, "\n");
   5725 	for (int i = 0; i < reta_size; i++) {
   5726 		if (i < 32) {
   5727 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5728 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5729 		} else {
   5730 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5731 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5732 		}
   5733 	}
   5734 
   5735 	// TODO: print more config
   5736 
   5737 	error = sbuf_finish(buf);
   5738 	if (error)
   5739 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5740 
   5741 	sbuf_delete(buf);
   5742 #endif
   5743 	return (0);
   5744 } /* ixgbe_sysctl_print_rss_config */
   5745 #endif /* IXGBE_DEBUG */
   5746 
   5747 /************************************************************************
   5748  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5749  *
   5750  *   For X552/X557-AT devices using an external PHY
   5751  ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	int val;
	u16 reg;
	int		error;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Only the X552/X557-AT (X550EM_X 10G-T) external PHY is supported. */
	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(adapter->dev,
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(adapter->dev,
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	node.sysctl_data = &val;

	/*
	 * Shift temp for output: the high byte of the register is exported
	 * (presumably the temperature value; confirm against the PHY
	 * datasheet).
	 */
	val = reg >> 8;

	/* Read-only node: report val; there is no value to write back. */
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	return (0);
} /* ixgbe_sysctl_phy_temp */
   5789 
   5790 /************************************************************************
   5791  * ixgbe_sysctl_phy_overtemp_occurred
   5792  *
   5793  *   Reports (directly from the PHY) whether the current PHY
   5794  *   temperature is over the overtemp threshold.
   5795  ************************************************************************/
   5796 static int
   5797 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5798 {
   5799 	struct sysctlnode node = *rnode;
   5800 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5801 	struct ixgbe_hw *hw = &adapter->hw;
   5802 	int val, error;
   5803 	u16 reg;
   5804 
   5805 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5806 		return (EPERM);
   5807 
   5808 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5809 		device_printf(adapter->dev,
   5810 		    "Device has no supported external thermal sensor.\n");
   5811 		return (ENODEV);
   5812 	}
   5813 
   5814 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5815 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5816 		device_printf(adapter->dev,
   5817 		    "Error reading from PHY's temperature status register\n");
   5818 		return (EAGAIN);
   5819 	}
   5820 
   5821 	node.sysctl_data = &val;
   5822 
   5823 	/* Get occurrence bit */
   5824 	val = !!(reg & 0x4000);
   5825 
   5826 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5827 	if ((error) || (newp == NULL))
   5828 		return (error);
   5829 
   5830 	return (0);
   5831 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5832 
   5833 /************************************************************************
   5834  * ixgbe_sysctl_eee_state
   5835  *
   5836  *   Sysctl to set EEE power saving feature
   5837  *   Values:
   5838  *     0      - disable EEE
   5839  *     1      - enable EEE
   5840  *     (none) - get current device EEE state
   5841  ************************************************************************/
static int
ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet   *ifp = adapter->ifp;
	device_t       dev = adapter->dev;
	int	       curr_eee, new_eee, error = 0;
	s32	       retval;

	/* Reject both reads and writes while firmware is recovering. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Report the cached EEE enable state (0/1) to the caller. */
	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
	node.sysctl_data = &new_eee;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* A read-only access (newp == NULL) ends here. */
	if ((error) || (newp == NULL))
		return (error);

	/* Nothing to do */
	if (new_eee == curr_eee)
		return (0);

	/* Not supported */
	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
		return (EINVAL);

	/* Bounds checking */
	if ((new_eee < 0) || (new_eee > 1))
		return (EINVAL);

	/* Program the MAC; on failure the cached state stays unchanged. */
	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
	if (retval) {
		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
		return (EINVAL);
	}

	/* Restart auto-neg */
	ifp->if_init(ifp);

	device_printf(dev, "New EEE state: %d\n", new_eee);

	/* Cache new value */
	if (new_eee)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	else
		adapter->feat_en &= ~IXGBE_FEATURE_EEE;

	return (error);
} /* ixgbe_sysctl_eee_state */
   5892 
/*
 * PRINTQS(adapter, regname): debug helper that prints the per-queue
 * register "regname" for every queue on a single console line, prefixed
 * with the device name.  Used by ixgbe_print_debug_info() below.
 * (Comments cannot go inside the macro body: they would break the
 * backslash line continuations.)
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
   5906 
   5907 /************************************************************************
   5908  * ixgbe_print_debug_info
   5909  *
   5910  *   Called only when em_display_debug_stats is enabled.
   5911  *   Provides a way to take a look at important statistics
   5912  *   maintained by the driver and hardware.
   5913  ************************************************************************/
   5914 static void
   5915 ixgbe_print_debug_info(struct adapter *adapter)
   5916 {
   5917 	device_t	dev = adapter->dev;
   5918 	struct ixgbe_hw *hw = &adapter->hw;
   5919 	int table_size;
   5920 	int i;
   5921 
   5922 	switch (adapter->hw.mac.type) {
   5923 	case ixgbe_mac_X550:
   5924 	case ixgbe_mac_X550EM_x:
   5925 	case ixgbe_mac_X550EM_a:
   5926 		table_size = 128;
   5927 		break;
   5928 	default:
   5929 		table_size = 32;
   5930 		break;
   5931 	}
   5932 
   5933 	device_printf(dev, "[E]RETA:\n");
   5934 	for (i = 0; i < table_size; i++) {
   5935 		if (i < 32)
   5936 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5937 				IXGBE_RETA(i)));
   5938 		else
   5939 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5940 				IXGBE_ERETA(i - 32)));
   5941 	}
   5942 
   5943 	device_printf(dev, "queue:");
   5944 	for (i = 0; i < adapter->num_queues; i++) {
   5945 		printf((i == 0) ? "\t" : " ");
   5946 		printf("%8d", i);
   5947 	}
   5948 	printf("\n");
   5949 	PRINTQS(adapter, RDBAL);
   5950 	PRINTQS(adapter, RDBAH);
   5951 	PRINTQS(adapter, RDLEN);
   5952 	PRINTQS(adapter, SRRCTL);
   5953 	PRINTQS(adapter, RDH);
   5954 	PRINTQS(adapter, RDT);
   5955 	PRINTQS(adapter, RXDCTL);
   5956 
   5957 	device_printf(dev, "RQSMR:");
   5958 	for (i = 0; i < adapter->num_queues / 4; i++) {
   5959 		printf((i == 0) ? "\t" : " ");
   5960 		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
   5961 	}
   5962 	printf("\n");
   5963 
   5964 	device_printf(dev, "disabled_count:");
   5965 	for (i = 0; i < adapter->num_queues; i++) {
   5966 		printf((i == 0) ? "\t" : " ");
   5967 		printf("%8d", adapter->queues[i].disabled_count);
   5968 	}
   5969 	printf("\n");
   5970 
   5971 	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
   5972 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5973 		device_printf(dev, "EIMS_EX(0):\t%08x\n",
   5974 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
   5975 		device_printf(dev, "EIMS_EX(1):\t%08x\n",
   5976 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
   5977 	}
   5978 } /* ixgbe_print_debug_info */
   5979 
   5980 /************************************************************************
   5981  * ixgbe_sysctl_debug
   5982  ************************************************************************/
   5983 static int
   5984 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
   5985 {
   5986 	struct sysctlnode node = *rnode;
   5987 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5988 	int	       error, result = 0;
   5989 
   5990 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5991 		return (EPERM);
   5992 
   5993 	node.sysctl_data = &result;
   5994 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5995 
   5996 	if (error || newp == NULL)
   5997 		return error;
   5998 
   5999 	if (result == 1)
   6000 		ixgbe_print_debug_info(adapter);
   6001 
   6002 	return 0;
   6003 } /* ixgbe_sysctl_debug */
   6004 
   6005 /************************************************************************
   6006  * ixgbe_init_device_features
   6007  ************************************************************************/
   6008 static void
   6009 ixgbe_init_device_features(struct adapter *adapter)
   6010 {
   6011 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   6012 			  | IXGBE_FEATURE_RSS
   6013 			  | IXGBE_FEATURE_MSI
   6014 			  | IXGBE_FEATURE_MSIX
   6015 			  | IXGBE_FEATURE_LEGACY_IRQ
   6016 			  | IXGBE_FEATURE_LEGACY_TX;
   6017 
   6018 	/* Set capabilities first... */
   6019 	switch (adapter->hw.mac.type) {
   6020 	case ixgbe_mac_82598EB:
   6021 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   6022 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   6023 		break;
   6024 	case ixgbe_mac_X540:
   6025 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6026 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6027 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   6028 		    (adapter->hw.bus.func == 0))
   6029 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6030 		break;
   6031 	case ixgbe_mac_X550:
   6032 		/*
   6033 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6034 		 * NVM Image version.
   6035 		 */
   6036 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6037 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6038 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6039 		break;
   6040 	case ixgbe_mac_X550EM_x:
   6041 		/*
   6042 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6043 		 * NVM Image version.
   6044 		 */
   6045 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6046 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6047 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   6048 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6049 		break;
   6050 	case ixgbe_mac_X550EM_a:
   6051 		/*
   6052 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6053 		 * NVM Image version.
   6054 		 */
   6055 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6056 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6057 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6058 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   6059 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   6060 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6061 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6062 		}
   6063 		break;
   6064 	case ixgbe_mac_82599EB:
   6065 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6066 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6067 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   6068 		    (adapter->hw.bus.func == 0))
   6069 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6070 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   6071 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6072 		break;
   6073 	default:
   6074 		break;
   6075 	}
   6076 
   6077 	/* Enabled by default... */
   6078 	/* Fan failure detection */
   6079 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   6080 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   6081 	/* Netmap */
   6082 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   6083 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   6084 	/* EEE */
   6085 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   6086 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   6087 	/* Thermal Sensor */
   6088 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   6089 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   6090 	/*
   6091 	 * Recovery mode:
   6092 	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
   6093 	 * NVM Image version.
   6094 	 */
   6095 
   6096 	/* Enabled via global sysctl... */
   6097 	/* Flow Director */
   6098 	if (ixgbe_enable_fdir) {
   6099 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   6100 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   6101 		else
   6102 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   6103 	}
   6104 	/* Legacy (single queue) transmit */
   6105 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   6106 	    ixgbe_enable_legacy_tx)
   6107 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   6108 	/*
   6109 	 * Message Signal Interrupts - Extended (MSI-X)
   6110 	 * Normal MSI is only enabled if MSI-X calls fail.
   6111 	 */
   6112 	if (!ixgbe_enable_msix)
   6113 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   6114 	/* Receive-Side Scaling (RSS) */
   6115 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   6116 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   6117 
   6118 	/* Disable features with unmet dependencies... */
   6119 	/* No MSI-X */
   6120 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   6121 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6122 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6123 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   6124 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   6125 	}
   6126 } /* ixgbe_init_device_features */
   6127 
   6128 /************************************************************************
   6129  * ixgbe_probe - Device identification routine
   6130  *
   6131  *   Determines if the driver should be loaded on
   6132  *   adapter based on its PCI vendor/device ID.
   6133  *
   6134  *   return BUS_PROBE_DEFAULT on success, positive on failure
   6135  ************************************************************************/
   6136 static int
   6137 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   6138 {
   6139 	const struct pci_attach_args *pa = aux;
   6140 
   6141 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   6142 }
   6143 
   6144 static const ixgbe_vendor_info_t *
   6145 ixgbe_lookup(const struct pci_attach_args *pa)
   6146 {
   6147 	const ixgbe_vendor_info_t *ent;
   6148 	pcireg_t subid;
   6149 
   6150 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   6151 
   6152 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   6153 		return NULL;
   6154 
   6155 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   6156 
   6157 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   6158 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   6159 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   6160 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   6161 			(ent->subvendor_id == 0)) &&
   6162 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   6163 			(ent->subdevice_id == 0))) {
   6164 			return ent;
   6165 		}
   6166 	}
   6167 	return NULL;
   6168 }
   6169 
static int
ixgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	int change, rv = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Which interface flags changed since the last call? */
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	/* Changes outside the masked set require a full reinit. */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		rv = ENETRESET;
		goto out;
	} else if ((change & IFF_PROMISC) != 0)
		ixgbe_set_promisc(adapter);

	/* Check for ec_capenable. */
	change = ec->ec_capenable ^ adapter->ec_capenable;
	adapter->ec_capenable = ec->ec_capenable;
	/* Capability changes we cannot handle in place: full reinit. */
	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
		rv = ENETRESET;
		goto out;
	}

	/*
	 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
	 */

	/* Set up VLAN support and filter */
	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
		ixgbe_setup_vlan_hw_support(adapter);

out:
	IXGBE_CORE_UNLOCK(adapter);

	return rv;
}
   6212 
   6213 /************************************************************************
   6214  * ixgbe_ioctl - Ioctl entry point
   6215  *
   6216  *   Called when the user wants to configure the interface.
   6217  *
   6218  *   return 0 on success, positive on failure
   6219  ************************************************************************/
   6220 static int
   6221 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   6222 {
   6223 	struct adapter	*adapter = ifp->if_softc;
   6224 	struct ixgbe_hw *hw = &adapter->hw;
   6225 	struct ifcapreq *ifcr = data;
   6226 	struct ifreq	*ifr = data;
   6227 	int		error = 0;
   6228 	int l4csum_en;
   6229 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   6230 	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   6231 
   6232 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   6233 		return (EPERM);
   6234 
   6235 	switch (command) {
   6236 	case SIOCSIFFLAGS:
   6237 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   6238 		break;
   6239 	case SIOCADDMULTI:
   6240 	case SIOCDELMULTI:
   6241 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   6242 		break;
   6243 	case SIOCSIFMEDIA:
   6244 	case SIOCGIFMEDIA:
   6245 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   6246 		break;
   6247 	case SIOCSIFCAP:
   6248 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   6249 		break;
   6250 	case SIOCSIFMTU:
   6251 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   6252 		break;
   6253 #ifdef __NetBSD__
   6254 	case SIOCINITIFADDR:
   6255 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   6256 		break;
   6257 	case SIOCGIFFLAGS:
   6258 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   6259 		break;
   6260 	case SIOCGIFAFLAG_IN:
   6261 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   6262 		break;
   6263 	case SIOCGIFADDR:
   6264 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   6265 		break;
   6266 	case SIOCGIFMTU:
   6267 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   6268 		break;
   6269 	case SIOCGIFCAP:
   6270 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   6271 		break;
   6272 	case SIOCGETHERCAP:
   6273 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   6274 		break;
   6275 	case SIOCGLIFADDR:
   6276 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   6277 		break;
   6278 	case SIOCZIFDATA:
   6279 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   6280 		hw->mac.ops.clear_hw_cntrs(hw);
   6281 		ixgbe_clear_evcnt(adapter);
   6282 		break;
   6283 	case SIOCAIFADDR:
   6284 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   6285 		break;
   6286 #endif
   6287 	default:
   6288 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   6289 		break;
   6290 	}
   6291 
   6292 	switch (command) {
   6293 	case SIOCGI2C:
   6294 	{
   6295 		struct ixgbe_i2c_req	i2c;
   6296 
   6297 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   6298 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   6299 		if (error != 0)
   6300 			break;
   6301 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   6302 			error = EINVAL;
   6303 			break;
   6304 		}
   6305 		if (i2c.len > sizeof(i2c.data)) {
   6306 			error = EINVAL;
   6307 			break;
   6308 		}
   6309 
   6310 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   6311 		    i2c.dev_addr, i2c.data);
   6312 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   6313 		break;
   6314 	}
   6315 	case SIOCSIFCAP:
   6316 		/* Layer-4 Rx checksum offload has to be turned on and
   6317 		 * off as a unit.
   6318 		 */
   6319 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   6320 		if (l4csum_en != l4csum && l4csum_en != 0)
   6321 			return EINVAL;
   6322 		/*FALLTHROUGH*/
   6323 	case SIOCADDMULTI:
   6324 	case SIOCDELMULTI:
   6325 	case SIOCSIFFLAGS:
   6326 	case SIOCSIFMTU:
   6327 	default:
   6328 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   6329 			return error;
   6330 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   6331 			;
   6332 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   6333 			IXGBE_CORE_LOCK(adapter);
   6334 			if ((ifp->if_flags & IFF_RUNNING) != 0)
   6335 				ixgbe_init_locked(adapter);
   6336 			ixgbe_recalculate_max_frame(adapter);
   6337 			IXGBE_CORE_UNLOCK(adapter);
   6338 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   6339 			/*
   6340 			 * Multicast list has changed; set the hardware filter
   6341 			 * accordingly.
   6342 			 */
   6343 			IXGBE_CORE_LOCK(adapter);
   6344 			ixgbe_disable_intr(adapter);
   6345 			ixgbe_set_multi(adapter);
   6346 			ixgbe_enable_intr(adapter);
   6347 			IXGBE_CORE_UNLOCK(adapter);
   6348 		}
   6349 		return 0;
   6350 	}
   6351 
   6352 	return error;
   6353 } /* ixgbe_ioctl */
   6354 
   6355 /************************************************************************
   6356  * ixgbe_check_fan_failure
   6357  ************************************************************************/
   6358 static void
   6359 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   6360 {
   6361 	u32 mask;
   6362 
   6363 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   6364 	    IXGBE_ESDP_SDP1;
   6365 
   6366 	if (reg & mask)
   6367 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   6368 } /* ixgbe_check_fan_failure */
   6369 
   6370 /************************************************************************
   6371  * ixgbe_handle_que
   6372  ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Clean RX first, then TX under the TX ring lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule instead of re-enabling IRQ. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* No per-queue vector (INTx/MSI): unmask everything. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
   6410 
   6411 /************************************************************************
   6412  * ixgbe_handle_que_work
   6413  ************************************************************************/
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	/* Recover the queue from the embedded workqueue cookie. */
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
   6425 
   6426 /************************************************************************
   6427  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   6428  ************************************************************************/
   6429 static int
   6430 ixgbe_allocate_legacy(struct adapter *adapter,
   6431     const struct pci_attach_args *pa)
   6432 {
   6433 	device_t	dev = adapter->dev;
   6434 	struct ix_queue *que = adapter->queues;
   6435 	struct tx_ring	*txr = adapter->tx_rings;
   6436 	int		counts[PCI_INTR_TYPE_SIZE];
   6437 	pci_intr_type_t intr_type, max_type;
   6438 	char		intrbuf[PCI_INTRSTR_LEN];
   6439 	const char	*intrstr = NULL;
   6440 
   6441 	/* We allocate a single interrupt resource */
   6442 	max_type = PCI_INTR_TYPE_MSI;
   6443 	counts[PCI_INTR_TYPE_MSIX] = 0;
   6444 	counts[PCI_INTR_TYPE_MSI] =
   6445 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   6446 	/* Check not feat_en but feat_cap to fallback to INTx */
   6447 	counts[PCI_INTR_TYPE_INTX] =
   6448 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   6449 
   6450 alloc_retry:
   6451 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   6452 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   6453 		return ENXIO;
   6454 	}
   6455 	adapter->osdep.nintrs = 1;
   6456 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   6457 	    intrbuf, sizeof(intrbuf));
   6458 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   6459 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   6460 	    device_xname(dev));
   6461 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   6462 	if (adapter->osdep.ihs[0] == NULL) {
   6463 		aprint_error_dev(dev,"unable to establish %s\n",
   6464 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6465 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6466 		adapter->osdep.intrs = NULL;
   6467 		switch (intr_type) {
   6468 		case PCI_INTR_TYPE_MSI:
   6469 			/* The next try is for INTx: Disable MSI */
   6470 			max_type = PCI_INTR_TYPE_INTX;
   6471 			counts[PCI_INTR_TYPE_INTX] = 1;
   6472 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6473 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   6474 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6475 				goto alloc_retry;
   6476 			} else
   6477 				break;
   6478 		case PCI_INTR_TYPE_INTX:
   6479 		default:
   6480 			/* See below */
   6481 			break;
   6482 		}
   6483 	}
   6484 	if (intr_type == PCI_INTR_TYPE_INTX) {
   6485 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6486 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6487 	}
   6488 	if (adapter->osdep.ihs[0] == NULL) {
   6489 		aprint_error_dev(dev,
   6490 		    "couldn't establish interrupt%s%s\n",
   6491 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   6492 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6493 		adapter->osdep.intrs = NULL;
   6494 		return ENXIO;
   6495 	}
   6496 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   6497 	/*
   6498 	 * Try allocating a fast interrupt and the associated deferred
   6499 	 * processing contexts.
   6500 	 */
   6501 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6502 		txr->txr_si =
   6503 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6504 			ixgbe_deferred_mq_start, txr);
   6505 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6506 	    ixgbe_handle_que, que);
   6507 
   6508 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   6509 		& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   6510 		aprint_error_dev(dev,
   6511 		    "could not establish software interrupts\n");
   6512 
   6513 		return ENXIO;
   6514 	}
   6515 	/* For simplicity in the handlers */
   6516 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   6517 
   6518 	return (0);
   6519 } /* ixgbe_allocate_legacy */
   6520 
   6521 /************************************************************************
   6522  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6523  ************************************************************************/
   6524 static int
   6525 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6526 {
   6527 	device_t	dev = adapter->dev;
   6528 	struct		ix_queue *que = adapter->queues;
   6529 	struct		tx_ring *txr = adapter->tx_rings;
   6530 	pci_chipset_tag_t pc;
   6531 	char		intrbuf[PCI_INTRSTR_LEN];
   6532 	char		intr_xname[32];
   6533 	char		wqname[MAXCOMLEN];
   6534 	const char	*intrstr = NULL;
   6535 	int		error, vector = 0;
   6536 	int		cpu_id = 0;
   6537 	kcpuset_t	*affinity;
   6538 #ifdef RSS
   6539 	unsigned int	rss_buckets = 0;
   6540 	kcpuset_t	cpu_mask;
   6541 #endif
   6542 
   6543 	pc = adapter->osdep.pc;
   6544 #ifdef	RSS
   6545 	/*
   6546 	 * If we're doing RSS, the number of queues needs to
   6547 	 * match the number of RSS buckets that are configured.
   6548 	 *
   6549 	 * + If there's more queues than RSS buckets, we'll end
   6550 	 *   up with queues that get no traffic.
   6551 	 *
   6552 	 * + If there's more RSS buckets than queues, we'll end
   6553 	 *   up having multiple RSS buckets map to the same queue,
   6554 	 *   so there'll be some contention.
   6555 	 */
   6556 	rss_buckets = rss_getnumbuckets();
   6557 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6558 	    (adapter->num_queues != rss_buckets)) {
   6559 		device_printf(dev,
   6560 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6561 		    "; performance will be impacted.\n",
   6562 		    __func__, adapter->num_queues, rss_buckets);
   6563 	}
   6564 #endif
   6565 
   6566 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6567 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6568 	    adapter->osdep.nintrs) != 0) {
   6569 		aprint_error_dev(dev,
   6570 		    "failed to allocate MSI-X interrupt\n");
   6571 		return (ENXIO);
   6572 	}
   6573 
   6574 	kcpuset_create(&affinity, false);
   6575 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6576 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6577 		    device_xname(dev), i);
   6578 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6579 		    sizeof(intrbuf));
   6580 #ifdef IXGBE_MPSAFE
   6581 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6582 		    true);
   6583 #endif
   6584 		/* Set the handler function */
   6585 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6586 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6587 		    intr_xname);
   6588 		if (que->res == NULL) {
   6589 			aprint_error_dev(dev,
   6590 			    "Failed to register QUE handler\n");
   6591 			error = ENXIO;
   6592 			goto err_out;
   6593 		}
   6594 		que->msix = vector;
   6595 		adapter->active_queues |= 1ULL << que->msix;
   6596 
   6597 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6598 #ifdef	RSS
   6599 			/*
   6600 			 * The queue ID is used as the RSS layer bucket ID.
   6601 			 * We look up the queue ID -> RSS CPU ID and select
   6602 			 * that.
   6603 			 */
   6604 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6605 			CPU_SETOF(cpu_id, &cpu_mask);
   6606 #endif
   6607 		} else {
   6608 			/*
   6609 			 * Bind the MSI-X vector, and thus the
   6610 			 * rings to the corresponding CPU.
   6611 			 *
   6612 			 * This just happens to match the default RSS
   6613 			 * round-robin bucket -> queue -> CPU allocation.
   6614 			 */
   6615 			if (adapter->num_queues > 1)
   6616 				cpu_id = i;
   6617 		}
   6618 		/* Round-robin affinity */
   6619 		kcpuset_zero(affinity);
   6620 		kcpuset_set(affinity, cpu_id % ncpu);
   6621 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6622 		    NULL);
   6623 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6624 		    intrstr);
   6625 		if (error == 0) {
   6626 #if 1 /* def IXGBE_DEBUG */
   6627 #ifdef	RSS
   6628 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6629 			    cpu_id % ncpu);
   6630 #else
   6631 			aprint_normal(", bound queue %d to cpu %d", i,
   6632 			    cpu_id % ncpu);
   6633 #endif
   6634 #endif /* IXGBE_DEBUG */
   6635 		}
   6636 		aprint_normal("\n");
   6637 
   6638 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6639 			txr->txr_si = softint_establish(
   6640 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6641 				ixgbe_deferred_mq_start, txr);
   6642 			if (txr->txr_si == NULL) {
   6643 				aprint_error_dev(dev,
   6644 				    "couldn't establish software interrupt\n");
   6645 				error = ENXIO;
   6646 				goto err_out;
   6647 			}
   6648 		}
   6649 		que->que_si
   6650 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6651 			ixgbe_handle_que, que);
   6652 		if (que->que_si == NULL) {
   6653 			aprint_error_dev(dev,
   6654 			    "couldn't establish software interrupt\n");
   6655 			error = ENXIO;
   6656 			goto err_out;
   6657 		}
   6658 	}
   6659 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6660 	error = workqueue_create(&adapter->txr_wq, wqname,
   6661 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6662 	    IXGBE_WORKQUEUE_FLAGS);
   6663 	if (error) {
   6664 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6665 		goto err_out;
   6666 	}
   6667 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6668 
   6669 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6670 	error = workqueue_create(&adapter->que_wq, wqname,
   6671 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6672 	    IXGBE_WORKQUEUE_FLAGS);
   6673 	if (error) {
   6674 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6675 		goto err_out;
   6676 	}
   6677 
   6678 	/* and Link */
   6679 	cpu_id++;
   6680 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6681 	adapter->vector = vector;
   6682 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6683 	    sizeof(intrbuf));
   6684 #ifdef IXGBE_MPSAFE
   6685 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6686 	    true);
   6687 #endif
   6688 	/* Set the link handler function */
   6689 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6690 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6691 	    intr_xname);
   6692 	if (adapter->osdep.ihs[vector] == NULL) {
   6693 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6694 		error = ENXIO;
   6695 		goto err_out;
   6696 	}
   6697 	/* Round-robin affinity */
   6698 	kcpuset_zero(affinity);
   6699 	kcpuset_set(affinity, cpu_id % ncpu);
   6700 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6701 	    NULL);
   6702 
   6703 	aprint_normal_dev(dev,
   6704 	    "for link, interrupting at %s", intrstr);
   6705 	if (error == 0)
   6706 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6707 	else
   6708 		aprint_normal("\n");
   6709 
   6710 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6711 		adapter->mbx_si =
   6712 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6713 			ixgbe_handle_mbx, adapter);
   6714 		if (adapter->mbx_si == NULL) {
   6715 			aprint_error_dev(dev,
   6716 			    "could not establish software interrupts\n");
   6717 
   6718 			error = ENXIO;
   6719 			goto err_out;
   6720 		}
   6721 	}
   6722 
   6723 	kcpuset_destroy(affinity);
   6724 	aprint_normal_dev(dev,
   6725 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6726 
   6727 	return (0);
   6728 
   6729 err_out:
   6730 	kcpuset_destroy(affinity);
   6731 	ixgbe_free_softint(adapter);
   6732 	ixgbe_free_pciintr_resources(adapter);
   6733 	return (error);
   6734 } /* ixgbe_allocate_msix */
   6735 
   6736 /************************************************************************
   6737  * ixgbe_configure_interrupts
   6738  *
   6739  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6740  *   This will also depend on user settings.
   6741  ************************************************************************/
   6742 static int
   6743 ixgbe_configure_interrupts(struct adapter *adapter)
   6744 {
   6745 	device_t dev = adapter->dev;
   6746 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6747 	int want, queues, msgs;
   6748 
   6749 	/* Default to 1 queue if MSI-X setup fails */
   6750 	adapter->num_queues = 1;
   6751 
   6752 	/* Override by tuneable */
   6753 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6754 		goto msi;
   6755 
   6756 	/*
   6757 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6758 	 * interrupt slot.
   6759 	 */
   6760 	if (ncpu == 1)
   6761 		goto msi;
   6762 
   6763 	/* First try MSI-X */
   6764 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6765 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6766 	if (msgs < 2)
   6767 		goto msi;
   6768 
   6769 	adapter->msix_mem = (void *)1; /* XXX */
   6770 
   6771 	/* Figure out a reasonable auto config value */
   6772 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6773 
   6774 #ifdef	RSS
   6775 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6776 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6777 		queues = uimin(queues, rss_getnumbuckets());
   6778 #endif
   6779 	if (ixgbe_num_queues > queues) {
   6780 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6781 		ixgbe_num_queues = queues;
   6782 	}
   6783 
   6784 	if (ixgbe_num_queues != 0)
   6785 		queues = ixgbe_num_queues;
   6786 	else
   6787 		queues = uimin(queues,
   6788 		    uimin(mac->max_tx_queues, mac->max_rx_queues));
   6789 
   6790 	/* reflect correct sysctl value */
   6791 	ixgbe_num_queues = queues;
   6792 
   6793 	/*
   6794 	 * Want one vector (RX/TX pair) per queue
   6795 	 * plus an additional for Link.
   6796 	 */
   6797 	want = queues + 1;
   6798 	if (msgs >= want)
   6799 		msgs = want;
   6800 	else {
   6801 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6802 		    "%d vectors but %d queues wanted!\n",
   6803 		    msgs, want);
   6804 		goto msi;
   6805 	}
   6806 	adapter->num_queues = queues;
   6807 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6808 	return (0);
   6809 
   6810 	/*
   6811 	 * MSI-X allocation failed or provided us with
   6812 	 * less vectors than needed. Free MSI-X resources
   6813 	 * and we'll try enabling MSI.
   6814 	 */
   6815 msi:
   6816 	/* Without MSI-X, some features are no longer supported */
   6817 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6818 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6819 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6820 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6821 
   6822 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6823 	adapter->msix_mem = NULL; /* XXX */
   6824 	if (msgs > 1)
   6825 		msgs = 1;
   6826 	if (msgs != 0) {
   6827 		msgs = 1;
   6828 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6829 		return (0);
   6830 	}
   6831 
   6832 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6833 		aprint_error_dev(dev,
   6834 		    "Device does not support legacy interrupts.\n");
   6835 		return 1;
   6836 	}
   6837 
   6838 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6839 
   6840 	return (0);
   6841 } /* ixgbe_configure_interrupts */
   6842 
   6843 
   6844 /************************************************************************
   6845  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6846  *
   6847  *   Done outside of interrupt context since the driver might sleep
   6848  ************************************************************************/
   6849 static void
   6850 ixgbe_handle_link(void *context)
   6851 {
   6852 	struct adapter	*adapter = context;
   6853 	struct ixgbe_hw *hw = &adapter->hw;
   6854 
   6855 	IXGBE_CORE_LOCK(adapter);
   6856 	++adapter->link_sicount.ev_count;
   6857 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6858 	ixgbe_update_link_status(adapter);
   6859 
   6860 	/* Re-enable link interrupts */
   6861 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6862 
   6863 	IXGBE_CORE_UNLOCK(adapter);
   6864 } /* ixgbe_handle_link */
   6865 
   6866 #if 0
   6867 /************************************************************************
   6868  * ixgbe_rearm_queues
   6869  ************************************************************************/
   6870 static __inline void
   6871 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6872 {
   6873 	u32 mask;
   6874 
   6875 	switch (adapter->hw.mac.type) {
   6876 	case ixgbe_mac_82598EB:
   6877 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6878 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6879 		break;
   6880 	case ixgbe_mac_82599EB:
   6881 	case ixgbe_mac_X540:
   6882 	case ixgbe_mac_X550:
   6883 	case ixgbe_mac_X550EM_x:
   6884 	case ixgbe_mac_X550EM_a:
   6885 		mask = (queues & 0xFFFFFFFF);
   6886 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6887 		mask = (queues >> 32);
   6888 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6889 		break;
   6890 	default:
   6891 		break;
   6892 	}
   6893 } /* ixgbe_rearm_queues */
   6894 #endif
   6895