/* NetBSD src: sys/dev/pci/ixgbe/ixgbe.c, revision 1.195 */
      1 /* $NetBSD: ixgbe.c,v 1.195 2019/07/25 09:01:56 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
     80 /************************************************************************
     81  * Driver version
     82  ************************************************************************/
     83 static const char ixgbe_driver_version[] = "4.0.1-k";
     84 /* XXX NetBSD: + 3.3.10 */
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
     95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     96 {
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    140 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    141 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    142 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    143 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    144 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    145 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    146 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    147 	/* required last entry */
    148 	{0, 0, 0, 0, 0}
    149 };
    150 
    151 /************************************************************************
    152  * Table of branding strings
    153  ************************************************************************/
    154 static const char    *ixgbe_strings[] = {
    155 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    156 };
    157 
    158 /************************************************************************
    159  * Function prototypes
    160  ************************************************************************/
    161 static int	ixgbe_probe(device_t, cfdata_t, void *);
    162 static void	ixgbe_attach(device_t, device_t, void *);
    163 static int	ixgbe_detach(device_t, int);
    164 #if 0
    165 static int	ixgbe_shutdown(device_t);
    166 #endif
    167 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    168 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    169 static int	ixgbe_ifflags_cb(struct ethercom *);
    170 static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
    171 static void	ixgbe_ifstop(struct ifnet *, int);
    172 static int	ixgbe_init(struct ifnet *);
    173 static void	ixgbe_init_locked(struct adapter *);
    174 static void	ixgbe_stop(void *);
    175 static void	ixgbe_init_device_features(struct adapter *);
    176 static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
    177 static void	ixgbe_add_media_types(struct adapter *);
    178 static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    179 static int	ixgbe_media_change(struct ifnet *);
    180 static int	ixgbe_allocate_pci_resources(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static void	ixgbe_free_softint(struct adapter *);
    183 static void	ixgbe_get_slot_info(struct adapter *);
    184 static int	ixgbe_allocate_msix(struct adapter *,
    185 		    const struct pci_attach_args *);
    186 static int	ixgbe_allocate_legacy(struct adapter *,
    187 		    const struct pci_attach_args *);
    188 static int	ixgbe_configure_interrupts(struct adapter *);
    189 static void	ixgbe_free_pciintr_resources(struct adapter *);
    190 static void	ixgbe_free_pci_resources(struct adapter *);
    191 static void	ixgbe_local_timer(void *);
    192 static void	ixgbe_local_timer1(void *);
    193 static void	ixgbe_recovery_mode_timer(void *);
    194 static int	ixgbe_setup_interface(device_t, struct adapter *);
    195 static void	ixgbe_config_gpie(struct adapter *);
    196 static void	ixgbe_config_dmac(struct adapter *);
    197 static void	ixgbe_config_delay_values(struct adapter *);
    198 static void	ixgbe_config_link(struct adapter *);
    199 static void	ixgbe_check_wol_support(struct adapter *);
    200 static int	ixgbe_setup_low_power_mode(struct adapter *);
    201 #if 0
    202 static void	ixgbe_rearm_queues(struct adapter *, u64);
    203 #endif
    204 
    205 static void	ixgbe_initialize_transmit_units(struct adapter *);
    206 static void	ixgbe_initialize_receive_units(struct adapter *);
    207 static void	ixgbe_enable_rx_drop(struct adapter *);
    208 static void	ixgbe_disable_rx_drop(struct adapter *);
    209 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    210 
    211 static void	ixgbe_enable_intr(struct adapter *);
    212 static void	ixgbe_disable_intr(struct adapter *);
    213 static void	ixgbe_update_stats_counters(struct adapter *);
    214 static void	ixgbe_set_promisc(struct adapter *);
    215 static void	ixgbe_set_multi(struct adapter *);
    216 static void	ixgbe_update_link_status(struct adapter *);
    217 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    218 static void	ixgbe_configure_ivars(struct adapter *);
    219 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    220 static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
    221 
    222 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    223 static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
    224 static int	ixgbe_register_vlan(void *, struct ifnet *, u16);
    225 static int	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    226 
    227 static void	ixgbe_add_device_sysctls(struct adapter *);
    228 static void	ixgbe_add_hw_stats(struct adapter *);
    229 static void	ixgbe_clear_evcnt(struct adapter *);
    230 static int	ixgbe_set_flowcntl(struct adapter *, int);
    231 static int	ixgbe_set_advertise(struct adapter *, int);
    232 static int	ixgbe_get_advertise(struct adapter *);
    233 
    234 /* Sysctl handlers */
    235 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    236 		     const char *, int *, int);
    237 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    238 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    239 static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    242 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    243 #ifdef IXGBE_DEBUG
    244 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    245 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    246 #endif
    247 static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    248 static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    249 static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    250 static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    251 static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    252 static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    253 static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
    254 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    255 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    256 
    257 /* Support for pluggable optic modules */
    258 static bool	ixgbe_sfp_probe(struct adapter *);
    259 
    260 /* Legacy (single vector) interrupt handler */
    261 static int	ixgbe_legacy_irq(void *);
    262 
    263 /* The MSI/MSI-X Interrupt handlers */
    264 static int	ixgbe_msix_que(void *);
    265 static int	ixgbe_msix_link(void *);
    266 
    267 /* Software interrupts for deferred work */
    268 static void	ixgbe_handle_que(void *);
    269 static void	ixgbe_handle_link(void *);
    270 static void	ixgbe_handle_msf(void *);
    271 static void	ixgbe_handle_mod(void *);
    272 static void	ixgbe_handle_phy(void *);
    273 
    274 /* Workqueue handler for deferred work */
    275 static void	ixgbe_handle_que_work(struct work *, void *);
    276 
    277 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    278 
    279 /************************************************************************
    280  *  NetBSD Device Interface Entry Points
    281  ************************************************************************/
    282 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    283     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    284     DVF_DETACH_SHUTDOWN);
    285 
    286 #if 0
    287 devclass_t ix_devclass;
    288 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    289 
    290 MODULE_DEPEND(ix, pci, 1, 1, 1);
    291 MODULE_DEPEND(ix, ether, 1, 1, 1);
    292 #ifdef DEV_NETMAP
    293 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    294 #endif
    295 #endif
    296 
    297 /*
    298  * TUNEABLE PARAMETERS:
    299  */
    300 
    301 /*
    302  * AIM: Adaptive Interrupt Moderation
    303  * which means that the interrupt rate
    304  * is varied over time based on the
    305  * traffic for that interrupt vector
    306  */
    307 static bool ixgbe_enable_aim = true;
    308 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    309 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    310     "Enable adaptive interrupt moderation");
    311 
    312 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    313 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    314     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    315 
    316 /* How many packets rxeof tries to clean at a time */
    317 static int ixgbe_rx_process_limit = 256;
    318 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    319     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    320 
    321 /* How many packets txeof tries to clean at a time */
    322 static int ixgbe_tx_process_limit = 256;
    323 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    324     &ixgbe_tx_process_limit, 0,
    325     "Maximum number of sent packets to process at a time, -1 means unlimited");
    326 
    327 /* Flow control setting, default to full */
    328 static int ixgbe_flow_control = ixgbe_fc_full;
    329 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    330     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    331 
    332 /* Which packet processing uses workqueue or softint */
    333 static bool ixgbe_txrx_workqueue = false;
    334 
    335 /*
    336  * Smart speed setting, default to on
    337  * this only works as a compile option
     338  * right now as it's during attach, set
    339  * this to 'ixgbe_smart_speed_off' to
    340  * disable.
    341  */
    342 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    343 
    344 /*
    345  * MSI-X should be the default for best performance,
    346  * but this allows it to be forced off for testing.
    347  */
    348 static int ixgbe_enable_msix = 1;
    349 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    350     "Enable MSI-X interrupts");
    351 
    352 /*
    353  * Number of Queues, can be set to 0,
    354  * it then autoconfigures based on the
    355  * number of cpus with a max of 8. This
     356  * can be overridden manually here.
    357  */
    358 static int ixgbe_num_queues = 0;
    359 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    360     "Number of queues to configure, 0 indicates autoconfigure");
    361 
    362 /*
    363  * Number of TX descriptors per ring,
    364  * setting higher than RX as this seems
    365  * the better performing choice.
    366  */
    367 static int ixgbe_txd = PERFORM_TXD;
    368 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    369     "Number of transmit descriptors per queue");
    370 
    371 /* Number of RX descriptors per ring */
    372 static int ixgbe_rxd = PERFORM_RXD;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    374     "Number of receive descriptors per queue");
    375 
    376 /*
    377  * Defining this on will allow the use
    378  * of unsupported SFP+ modules, note that
    379  * doing so you are on your own :)
    380  */
    381 static int allow_unsupported_sfp = false;
    382 #define TUNABLE_INT(__x, __y)
    383 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    384 
    385 /*
    386  * Not sure if Flow Director is fully baked,
    387  * so we'll default to turning it off.
    388  */
    389 static int ixgbe_enable_fdir = 0;
    390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    391     "Enable Flow Director");
    392 
    393 /* Legacy Transmit (single queue) */
    394 static int ixgbe_enable_legacy_tx = 0;
    395 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    396     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    397 
    398 /* Receive-Side Scaling */
    399 static int ixgbe_enable_rss = 1;
    400 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    401     "Enable Receive-Side Scaling (RSS)");
    402 
    403 #if 0
    404 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    405 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    406 #endif
    407 
    408 #ifdef NET_MPSAFE
    409 #define IXGBE_MPSAFE		1
    410 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    411 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    412 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    413 #else
    414 #define IXGBE_CALLOUT_FLAGS	0
    415 #define IXGBE_SOFTINFT_FLAGS	0
    416 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    417 #endif
    418 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    419 
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program Receive Side Scaling (RSS) for this adapter:
 *     - the redirection table (RETA, plus ERETA on X550-class MACs)
 *       mapping a packet's hash value to an RX queue,
 *     - the 40-byte hash key (RSSRK registers), and
 *     - the hash-field selection / RSS enable bits (MRQC).
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];	/* 10 * 32 bits = 40-byte key */
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD: always use the stack-wide key from the RSS framework. */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * NOTE(review): 82598 spaces queue ids 0x11 apart in the
		 * table; inherited from FreeBSD if_ix.c — confirm against
		 * the 82598 datasheet.
		 */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 512-entry indirection table. */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j walks the active queues round-robin. */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		/* Four 8-bit entries per 32-bit register: flush every 4th. */
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				/* Entries 128..511 live in ERETA (X550 only,
				 * since table_size > 128 only there). */
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6
				| RSS_HASHTYPE_RSS_IPV6_EX
				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the software hash-type flags into MRQC enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Fold in the SR-IOV pool-mode bits for the current IOV mode. */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
    536 
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Amount to add so a buffer size rounds up to the next
 * SRRCTL.BSIZEPKT granularity (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT). */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598-specific filter-control bits (DPF/PMCF). */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size in BSIZEPKT units, rounded up. */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Per-queue ring and statistics-mapping setup. */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;	/* descriptor ring DMA base */
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware ring index for this queue */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* PSRTYPE header-type bits; non-82598 MACs only.
		 * NOTE(review): selects header types recognized for packet
		 * split — confirm against the 82599/X550 datasheet. */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	/* Program RETA/RSSRK/MRQC before finalizing RXCSUM below. */
	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
    666 
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Program each TX ring's descriptor base/length, head/tail pointers
 *   and statistics mapping, then (non-82598) enable the DMA TX engine
 *   and set the multiple-TX-queue control under a disabled arbiter.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;	/* descriptor ring DMA base */
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware ring index for this queue */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Fresh ring: it cannot be out of descriptor space yet. */
		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			/* 82599 and later use a different register layout. */
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the DMA TX engine (82599 and later). */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter now that MTQC is programmed. */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
    758 
    759 /************************************************************************
    760  * ixgbe_attach - Device initialization routine
    761  *
    762  *   Called when the driver is being loaded.
    763  *   Identifies the type of hardware, allocates all resources
    764  *   and initializes the hardware.
    765  *
    766  *   return 0 on success, positive on failure
    767  ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter	*adapter;
	struct ixgbe_hw *hw;
	int		error = -1;
	u32		ctrl_ext;
	u16		high, low, nvmreg;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the bus supports it. */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	/* The match routine already accepted this device, so the
	 * lookup must succeed here. */
	ent = ixgbe_lookup(pa);

	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw) != 0) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	/* Map the MAC type to a human-readable name for the boot log. */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	/* Decide MSI-X/MSI/legacy and the queue count. */
	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Register for VLAN events */
	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	/* Ring sizes must keep the descriptor ring DBA_ALIGN-aligned. */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Reset the hardware; tolerate an over-temperature condition
	 * during this first reset only. */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		adapter->sfp_probe = TRUE;
		error = IXGBE_SUCCESS;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	/* The version register layout differs per MAC generation;
	 * 0xffff means the field is unprogrammed and is skipped. */
	high = low = 0;
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}
	/* Saved for the recovery-mode check below. */
	hw->eeprom.nvm_image_ver_high = high;
	hw->eeprom.nvm_image_ver_low = low;

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_transmit_structures(adapter);
			ixgbe_free_receive_structures(adapter);
			free(adapter->queues, M_DEVBUF);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	/* Recovery mode */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* >= 2.00 */
		if (hw->eeprom.nvm_image_ver_high >= 2) {
			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
		}
		break;
	default:
		break;
	}

	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_link, adapter);
	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_mod, adapter);
	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_msf, adapter);
	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_phy, adapter);
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		adapter->fdir_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_reinit_fdir, adapter);
	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
		&& (adapter->fdir_si == NULL))) {
		aprint_error_dev(dev,
		    "could not establish software interrupts ()\n");
		/*
		 * NOTE(review): this jumps to err_out, which does not free
		 * the TX/RX structures or adapter->queues allocated above —
		 * looks like it should go to err_late instead; confirm.
		 */
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		aprint_error_dev(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	/* Init recovery mode timer and state variable */
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
		adapter->recovery_mode = 0;

		/* Set up the timer callout */
		callout_init(&adapter->recovery_mode_timer,
		    IXGBE_CALLOUT_FLAGS);

		/* Start the task */
		callout_reset(&adapter->recovery_mode_timer, hz,
		    ixgbe_recovery_mode_timer, adapter);
	}

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

	/* err_late: queues were allocated; free them before common cleanup. */
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
	/* err_out: common cleanup — tell hardware the driver is unloading,
	 * then release interrupts, PCI resources, and the multicast array. */
err_out:
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_softint(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */
   1259 
   1260 /************************************************************************
   1261  * ixgbe_check_wol_support
   1262  *
   1263  *   Checks whether the adapter's ports are capable of
   1264  *   Wake On LAN by reading the adapter's NVM.
   1265  *
   1266  *   Sets each port's hw->wol_enabled value depending
   1267  *   on the value read here.
   1268  ************************************************************************/
   1269 static void
   1270 ixgbe_check_wol_support(struct adapter *adapter)
   1271 {
   1272 	struct ixgbe_hw *hw = &adapter->hw;
   1273 	u16		dev_caps = 0;
   1274 
   1275 	/* Find out WoL support for port */
   1276 	adapter->wol_support = hw->wol_enabled = 0;
   1277 	ixgbe_get_device_caps(hw, &dev_caps);
   1278 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1279 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1280 	     hw->bus.func == 0))
   1281 		adapter->wol_support = hw->wol_enabled = 1;
   1282 
   1283 	/* Save initial wake up filter configuration */
   1284 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1285 
   1286 	return;
   1287 } /* ixgbe_check_wol_support */
   1288 
   1289 /************************************************************************
   1290  * ixgbe_setup_interface
   1291  *
   1292  *   Setup networking device structure and register an interface.
   1293  ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Fill in the ifnet embedded in our ethercom before registering. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* if_transmit (multiqueue) is only hooked up when not in
	 * legacy-TX mode; if_start below serves the legacy path. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * NOTE: the sequence if_initialize -> if_percpuq_create ->
	 * ether_ifattach -> ... -> if_register is order-dependent;
	 * keep it intact.
	 */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     |	IFCAP_TXCSUM
			     |	IFCAP_TSOv4
			     |	IFCAP_TSOv6;
	/* Checksum/TSO offloads start disabled; user enables via ifconfig. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return (0);
} /* ixgbe_setup_interface */
   1397 
   1398 /************************************************************************
   1399  * ixgbe_add_media_types
   1400  ************************************************************************/
   1401 static void
   1402 ixgbe_add_media_types(struct adapter *adapter)
   1403 {
   1404 	struct ixgbe_hw *hw = &adapter->hw;
   1405 	device_t	dev = adapter->dev;
   1406 	u64		layer;
   1407 
   1408 	layer = adapter->phy_layer;
   1409 
   1410 #define	ADD(mm, dd)							\
   1411 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1412 
   1413 	ADD(IFM_NONE, 0);
   1414 
   1415 	/* Media types with matching NetBSD media defines */
   1416 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1417 		ADD(IFM_10G_T | IFM_FDX, 0);
   1418 	}
   1419 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1420 		ADD(IFM_1000_T | IFM_FDX, 0);
   1421 	}
   1422 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1423 		ADD(IFM_100_TX | IFM_FDX, 0);
   1424 	}
   1425 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1426 		ADD(IFM_10_T | IFM_FDX, 0);
   1427 	}
   1428 
   1429 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1430 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1431 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1432 	}
   1433 
   1434 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1435 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1436 		if (hw->phy.multispeed_fiber) {
   1437 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1438 		}
   1439 	}
   1440 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1441 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1442 		if (hw->phy.multispeed_fiber) {
   1443 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1444 		}
   1445 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1446 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1447 	}
   1448 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1449 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1450 	}
   1451 
   1452 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1453 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1454 	}
   1455 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1456 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1457 	}
   1458 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1459 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1460 	}
   1461 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1462 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1463 	}
   1464 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1465 		ADD(IFM_2500_T | IFM_FDX, 0);
   1466 	}
   1467 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1468 		ADD(IFM_5000_T | IFM_FDX, 0);
   1469 	}
   1470 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1471 		device_printf(dev, "Media supported: 1000baseBX\n");
   1472 	/* XXX no ifmedia_set? */
   1473 
   1474 	ADD(IFM_AUTO, 0);
   1475 
   1476 #undef ADD
   1477 } /* ixgbe_add_media_types */
   1478 
   1479 /************************************************************************
   1480  * ixgbe_is_sfp
   1481  ************************************************************************/
   1482 static inline bool
   1483 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1484 {
   1485 	switch (hw->mac.type) {
   1486 	case ixgbe_mac_82598EB:
   1487 		if (hw->phy.type == ixgbe_phy_nl)
   1488 			return (TRUE);
   1489 		return (FALSE);
   1490 	case ixgbe_mac_82599EB:
   1491 		switch (hw->mac.ops.get_media_type(hw)) {
   1492 		case ixgbe_media_type_fiber:
   1493 		case ixgbe_media_type_fiber_qsfp:
   1494 			return (TRUE);
   1495 		default:
   1496 			return (FALSE);
   1497 		}
   1498 	case ixgbe_mac_X550EM_x:
   1499 	case ixgbe_mac_X550EM_a:
   1500 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1501 			return (TRUE);
   1502 		return (FALSE);
   1503 	default:
   1504 		return (FALSE);
   1505 	}
   1506 } /* ixgbe_is_sfp */
   1507 
   1508 /************************************************************************
   1509  * ixgbe_config_link
   1510  ************************************************************************/
   1511 static void
   1512 ixgbe_config_link(struct adapter *adapter)
   1513 {
   1514 	struct ixgbe_hw *hw = &adapter->hw;
   1515 	u32		autoneg, err = 0;
   1516 	bool		sfp, negotiate = false;
   1517 
   1518 	sfp = ixgbe_is_sfp(hw);
   1519 
   1520 	if (sfp) {
   1521 		if (hw->phy.multispeed_fiber) {
   1522 			ixgbe_enable_tx_laser(hw);
   1523 			kpreempt_disable();
   1524 			softint_schedule(adapter->msf_si);
   1525 			kpreempt_enable();
   1526 		}
   1527 		kpreempt_disable();
   1528 		softint_schedule(adapter->mod_si);
   1529 		kpreempt_enable();
   1530 	} else {
   1531 		struct ifmedia	*ifm = &adapter->media;
   1532 
   1533 		if (hw->mac.ops.check_link)
   1534 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1535 			    &adapter->link_up, FALSE);
   1536 		if (err)
   1537 			return;
   1538 
   1539 		/*
   1540 		 * Check if it's the first call. If it's the first call,
   1541 		 * get value for auto negotiation.
   1542 		 */
   1543 		autoneg = hw->phy.autoneg_advertised;
   1544 		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
   1545 		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
   1546 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1547 			    &negotiate);
   1548 		if (err)
   1549 			return;
   1550 		if (hw->mac.ops.setup_link)
   1551 			err = hw->mac.ops.setup_link(hw, autoneg,
   1552 			    adapter->link_up);
   1553 	}
   1554 
   1555 } /* ixgbe_config_link */
   1556 
   1557 /************************************************************************
   1558  * ixgbe_update_stats_counters - Update board statistics counters.
   1559  ************************************************************************/
   1560 static void
   1561 ixgbe_update_stats_counters(struct adapter *adapter)
   1562 {
   1563 	struct ifnet	      *ifp = adapter->ifp;
   1564 	struct ixgbe_hw	      *hw = &adapter->hw;
   1565 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1566 	u32		      missed_rx = 0, bprc, lxon, lxoff, total;
   1567 	u64		      total_missed_rx = 0;
   1568 	uint64_t	      crcerrs, rlec;
   1569 	unsigned int	      queue_counters;
   1570 	int		      i;
   1571 
   1572 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1573 	stats->crcerrs.ev_count += crcerrs;
   1574 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1575 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1576 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1577 	if (hw->mac.type == ixgbe_mac_X550)
   1578 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1579 
   1580 	/* 16 registers exist */
   1581 	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
   1582 	for (i = 0; i < queue_counters; i++) {
   1583 		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1584 		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1585 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1586 			stats->qprdc[i].ev_count
   1587 			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1588 		}
   1589 	}
   1590 
   1591 	/* 8 registers exist */
   1592 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1593 		uint32_t mp;
   1594 
   1595 		/* MPC */
   1596 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1597 		/* global total per queue */
   1598 		stats->mpc[i].ev_count += mp;
   1599 		/* running comprehensive total for stats display */
   1600 		total_missed_rx += mp;
   1601 
   1602 		if (hw->mac.type == ixgbe_mac_82598EB)
   1603 			stats->rnbc[i].ev_count
   1604 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1605 
   1606 		stats->pxontxc[i].ev_count
   1607 		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   1608 		stats->pxofftxc[i].ev_count
   1609 		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   1610 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1611 			stats->pxonrxc[i].ev_count
   1612 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
   1613 			stats->pxoffrxc[i].ev_count
   1614 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
   1615 			stats->pxon2offc[i].ev_count
   1616 			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   1617 		} else {
   1618 			stats->pxonrxc[i].ev_count
   1619 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   1620 			stats->pxoffrxc[i].ev_count
   1621 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   1622 		}
   1623 	}
   1624 	stats->mpctotal.ev_count += total_missed_rx;
   1625 
   1626 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1627 	if ((adapter->link_active == LINK_STATE_UP)
   1628 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1629 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1630 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1631 	}
   1632 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1633 	stats->rlec.ev_count += rlec;
   1634 
   1635 	/* Hardware workaround, gprc counts missed packets */
   1636 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1637 
   1638 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1639 	stats->lxontxc.ev_count += lxon;
   1640 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1641 	stats->lxofftxc.ev_count += lxoff;
   1642 	total = lxon + lxoff;
   1643 
   1644 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1645 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1646 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1647 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1648 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1649 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1650 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1651 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1652 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1653 	} else {
   1654 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1655 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1656 		/* 82598 only has a counter in the high register */
   1657 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1658 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1659 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1660 	}
   1661 
   1662 	/*
   1663 	 * Workaround: mprc hardware is incorrectly counting
   1664 	 * broadcasts, so for now we subtract those.
   1665 	 */
   1666 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1667 	stats->bprc.ev_count += bprc;
   1668 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1669 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1670 
   1671 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1672 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1673 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1674 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1675 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1676 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1677 
   1678 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1679 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1680 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1681 
   1682 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1683 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1684 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1685 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1686 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1687 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1688 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1689 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1690 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1691 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1692 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1693 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1694 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1695 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1696 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1697 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1698 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1699 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1700 	/* Only read FCOE on 82599 */
   1701 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1702 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1703 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1704 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1705 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1706 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1707 	}
   1708 
   1709 	/* Fill out the OS statistics structure */
   1710 	/*
   1711 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1712 	 * adapter->stats counters. It's required to make ifconfig -z
   1713 	 * (SOICZIFDATA) work.
   1714 	 */
   1715 	ifp->if_collisions = 0;
   1716 
   1717 	/* Rx Errors */
   1718 	ifp->if_iqdrops += total_missed_rx;
   1719 	ifp->if_ierrors += crcerrs + rlec;
   1720 } /* ixgbe_update_stats_counters */
   1721 
   1722 /************************************************************************
   1723  * ixgbe_add_hw_stats
   1724  *
   1725  *   Add sysctl variables, one per statistic, to the system.
   1726  ************************************************************************/
   1727 static void
   1728 ixgbe_add_hw_stats(struct adapter *adapter)
   1729 {
   1730 	device_t dev = adapter->dev;
   1731 	const struct sysctlnode *rnode, *cnode;
   1732 	struct sysctllog **log = &adapter->sysctllog;
   1733 	struct tx_ring *txr = adapter->tx_rings;
   1734 	struct rx_ring *rxr = adapter->rx_rings;
   1735 	struct ixgbe_hw *hw = &adapter->hw;
   1736 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1737 	const char *xname = device_xname(dev);
   1738 	int i;
   1739 
   1740 	/* Driver Statistics */
   1741 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1742 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1743 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1744 	    NULL, xname, "m_defrag() failed");
   1745 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1746 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1747 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1748 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1749 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1750 	    NULL, xname, "Driver tx dma hard fail other");
   1751 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1752 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1753 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1754 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1755 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1756 	    NULL, xname, "Watchdog timeouts");
   1757 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1758 	    NULL, xname, "TSO errors");
   1759 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1760 	    NULL, xname, "Link MSI-X IRQ Handled");
   1761 	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
   1762 	    NULL, xname, "Link softint");
   1763 	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
   1764 	    NULL, xname, "module softint");
   1765 	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
   1766 	    NULL, xname, "multimode softint");
   1767 	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
   1768 	    NULL, xname, "external PHY softint");
   1769 
   1770 	/* Max number of traffic class is 8 */
   1771 	KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
   1772 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1773 		snprintf(adapter->tcs[i].evnamebuf,
   1774 		    sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
   1775 		    xname, i);
   1776 		if (i < __arraycount(stats->mpc)) {
   1777 			evcnt_attach_dynamic(&stats->mpc[i],
   1778 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1779 			    "RX Missed Packet Count");
   1780 			if (hw->mac.type == ixgbe_mac_82598EB)
   1781 				evcnt_attach_dynamic(&stats->rnbc[i],
   1782 				    EVCNT_TYPE_MISC, NULL,
   1783 				    adapter->tcs[i].evnamebuf,
   1784 				    "Receive No Buffers");
   1785 		}
   1786 		if (i < __arraycount(stats->pxontxc)) {
   1787 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1788 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1789 			    "pxontxc");
   1790 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1791 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1792 			    "pxonrxc");
   1793 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1794 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1795 			    "pxofftxc");
   1796 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1797 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1798 			    "pxoffrxc");
   1799 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1800 				evcnt_attach_dynamic(&stats->pxon2offc[i],
   1801 				    EVCNT_TYPE_MISC, NULL,
   1802 				    adapter->tcs[i].evnamebuf,
   1803 			    "pxon2offc");
   1804 		}
   1805 	}
   1806 
   1807 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1808 #ifdef LRO
   1809 		struct lro_ctrl *lro = &rxr->lro;
   1810 #endif /* LRO */
   1811 
   1812 		snprintf(adapter->queues[i].evnamebuf,
   1813 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1814 		    xname, i);
   1815 		snprintf(adapter->queues[i].namebuf,
   1816 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1817 
   1818 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1819 			aprint_error_dev(dev, "could not create sysctl root\n");
   1820 			break;
   1821 		}
   1822 
   1823 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1824 		    0, CTLTYPE_NODE,
   1825 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1826 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1827 			break;
   1828 
   1829 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1830 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1831 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1832 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1833 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1834 			break;
   1835 
   1836 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1837 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1838 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1839 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1840 		    0, CTL_CREATE, CTL_EOL) != 0)
   1841 			break;
   1842 
   1843 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1844 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1845 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1846 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1847 		    0, CTL_CREATE, CTL_EOL) != 0)
   1848 			break;
   1849 
   1850 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1851 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1852 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   1853 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1854 		    "Handled queue in softint");
   1855 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   1856 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   1857 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1858 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1859 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1860 		    NULL, adapter->queues[i].evnamebuf,
   1861 		    "Queue No Descriptor Available");
   1862 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1863 		    NULL, adapter->queues[i].evnamebuf,
   1864 		    "Queue Packets Transmitted");
   1865 #ifndef IXGBE_LEGACY_TX
   1866 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1867 		    NULL, adapter->queues[i].evnamebuf,
   1868 		    "Packets dropped in pcq");
   1869 #endif
   1870 
   1871 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1872 		    CTLFLAG_READONLY,
   1873 		    CTLTYPE_INT,
   1874 		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
   1875 			ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   1876 		    CTL_CREATE, CTL_EOL) != 0)
   1877 			break;
   1878 
   1879 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1880 		    CTLFLAG_READONLY,
   1881 		    CTLTYPE_INT,
   1882 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1883 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1884 		    CTL_CREATE, CTL_EOL) != 0)
   1885 			break;
   1886 
   1887 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1888 		    CTLFLAG_READONLY,
   1889 		    CTLTYPE_INT,
   1890 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1891 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1892 		    CTL_CREATE, CTL_EOL) != 0)
   1893 			break;
   1894 
   1895 		if (i < __arraycount(stats->qprc)) {
   1896 			evcnt_attach_dynamic(&stats->qprc[i],
   1897 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1898 			    "qprc");
   1899 			evcnt_attach_dynamic(&stats->qptc[i],
   1900 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1901 			    "qptc");
   1902 			evcnt_attach_dynamic(&stats->qbrc[i],
   1903 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1904 			    "qbrc");
   1905 			evcnt_attach_dynamic(&stats->qbtc[i],
   1906 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1907 			    "qbtc");
   1908 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1909 				evcnt_attach_dynamic(&stats->qprdc[i],
   1910 				    EVCNT_TYPE_MISC, NULL,
   1911 				    adapter->queues[i].evnamebuf, "qprdc");
   1912 		}
   1913 
   1914 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1915 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1916 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1917 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1918 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1919 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1920 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1921 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1922 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1923 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1924 #ifdef LRO
   1925 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1926 				CTLFLAG_RD, &lro->lro_queued, 0,
   1927 				"LRO Queued");
   1928 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1929 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1930 				"LRO Flushed");
   1931 #endif /* LRO */
   1932 	}
   1933 
   1934 	/* MAC stats get their own sub node */
   1935 
   1936 	snprintf(stats->namebuf,
   1937 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1938 
   1939 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1940 	    stats->namebuf, "rx csum offload - IP");
   1941 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "rx csum offload - L4");
   1943 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "rx csum offload - IP bad");
   1945 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "rx csum offload - L4 bad");
   1947 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "Interrupt conditions zero");
   1949 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1950 	    stats->namebuf, "Legacy interrupts");
   1951 
   1952 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1953 	    stats->namebuf, "CRC Errors");
   1954 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1955 	    stats->namebuf, "Illegal Byte Errors");
   1956 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1957 	    stats->namebuf, "Byte Errors");
   1958 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1959 	    stats->namebuf, "MAC Short Packets Discarded");
   1960 	if (hw->mac.type >= ixgbe_mac_X550)
   1961 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1962 		    stats->namebuf, "Bad SFD");
   1963 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1964 	    stats->namebuf, "Total Packets Missed");
   1965 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1966 	    stats->namebuf, "MAC Local Faults");
   1967 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1968 	    stats->namebuf, "MAC Remote Faults");
   1969 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1970 	    stats->namebuf, "Receive Length Errors");
   1971 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1972 	    stats->namebuf, "Link XON Transmitted");
   1973 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1974 	    stats->namebuf, "Link XON Received");
   1975 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1976 	    stats->namebuf, "Link XOFF Transmitted");
   1977 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1978 	    stats->namebuf, "Link XOFF Received");
   1979 
   1980 	/* Packet Reception Stats */
   1981 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1982 	    stats->namebuf, "Total Octets Received");
   1983 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1984 	    stats->namebuf, "Good Octets Received");
   1985 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1986 	    stats->namebuf, "Total Packets Received");
   1987 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1988 	    stats->namebuf, "Good Packets Received");
   1989 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1990 	    stats->namebuf, "Multicast Packets Received");
   1991 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1992 	    stats->namebuf, "Broadcast Packets Received");
   1993 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1994 	    stats->namebuf, "64 byte frames received ");
   1995 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1996 	    stats->namebuf, "65-127 byte frames received");
   1997 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1998 	    stats->namebuf, "128-255 byte frames received");
   1999 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   2000 	    stats->namebuf, "256-511 byte frames received");
   2001 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   2002 	    stats->namebuf, "512-1023 byte frames received");
   2003 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   2004 	    stats->namebuf, "1023-1522 byte frames received");
   2005 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   2006 	    stats->namebuf, "Receive Undersized");
   2007 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   2008 	    stats->namebuf, "Fragmented Packets Received ");
   2009 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   2010 	    stats->namebuf, "Oversized Packets Received");
   2011 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   2012 	    stats->namebuf, "Received Jabber");
   2013 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   2014 	    stats->namebuf, "Management Packets Received");
   2015 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   2016 	    stats->namebuf, "Management Packets Dropped");
   2017 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   2018 	    stats->namebuf, "Checksum Errors");
   2019 
   2020 	/* Packet Transmission Stats */
   2021 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   2022 	    stats->namebuf, "Good Octets Transmitted");
   2023 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   2024 	    stats->namebuf, "Total Packets Transmitted");
   2025 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   2026 	    stats->namebuf, "Good Packets Transmitted");
   2027 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   2028 	    stats->namebuf, "Broadcast Packets Transmitted");
   2029 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   2030 	    stats->namebuf, "Multicast Packets Transmitted");
   2031 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   2032 	    stats->namebuf, "Management Packets Transmitted");
   2033 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   2034 	    stats->namebuf, "64 byte frames transmitted ");
   2035 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   2036 	    stats->namebuf, "65-127 byte frames transmitted");
   2037 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   2038 	    stats->namebuf, "128-255 byte frames transmitted");
   2039 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   2040 	    stats->namebuf, "256-511 byte frames transmitted");
   2041 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   2042 	    stats->namebuf, "512-1023 byte frames transmitted");
   2043 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   2044 	    stats->namebuf, "1024-1522 byte frames transmitted");
   2045 } /* ixgbe_add_hw_stats */
   2046 
/************************************************************************
 * ixgbe_clear_evcnt - Reset the driver's event counters to zero.
 *
 *   Clears the driver soft-error counters, the per-TC and per-queue
 *   counters, and the MAC statistics counters.  Together with the
 *   "don't override if_data" policy in ixgbe_update_stats_counters,
 *   this makes ifconfig -z (SIOCZIFDATA) behave as expected.
 *
 *   NOTE(review): the FCoE counters (fcoerpdc/fcoeprc/fcoeptc/
 *   fcoedwrc/fcoedwtc) and fccrc/fclast are accumulated by
 *   ixgbe_update_stats_counters but are not reset here -- confirm
 *   whether that is intentional.
 ************************************************************************/
static void
ixgbe_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	int i;

	/* Driver soft-error and interrupt counters. */
	adapter->efbig_tx_dma_setup.ev_count = 0;
	adapter->mbuf_defrag_failed.ev_count = 0;
	adapter->efbig2_tx_dma_setup.ev_count = 0;
	adapter->einval_tx_dma_setup.ev_count = 0;
	adapter->other_tx_dma_setup.ev_count = 0;
	adapter->eagain_tx_dma_setup.ev_count = 0;
	adapter->enomem_tx_dma_setup.ev_count = 0;
	adapter->tso_err.ev_count = 0;
	adapter->watchdog_events.ev_count = 0;
	adapter->link_irq.ev_count = 0;
	adapter->link_sicount.ev_count = 0;
	adapter->mod_sicount.ev_count = 0;
	adapter->msf_sicount.ev_count = 0;
	adapter->phy_sicount.ev_count = 0;

	/* Per-traffic-class counters, bounds-guarded like the attach. */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			stats->mpc[i].ev_count = 0;
			if (hw->mac.type == ixgbe_mac_82598EB)
				stats->rnbc[i].ev_count = 0;
		}
		if (i < __arraycount(stats->pxontxc)) {
			stats->pxontxc[i].ev_count = 0;
			stats->pxonrxc[i].ev_count = 0;
			stats->pxofftxc[i].ev_count = 0;
			stats->pxoffrxc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->pxon2offc[i].ev_count = 0;
		}
	}

	/* Per-queue counters; re-point txr at the first ring. */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		adapter->queues[i].irqs.ev_count = 0;
		adapter->queues[i].handleq.ev_count = 0;
		adapter->queues[i].req.ev_count = 0;
		txr->no_desc_avail.ev_count = 0;
		txr->total_packets.ev_count = 0;
		txr->tso_tx.ev_count = 0;
#ifndef IXGBE_LEGACY_TX
		txr->pcq_drops.ev_count = 0;
#endif
		/* Plain per-ring tallies (not evcnts). */
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			stats->qprc[i].ev_count = 0;
			stats->qptc[i].ev_count = 0;
			stats->qbrc[i].ev_count = 0;
			stats->qbtc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->qprdc[i].ev_count = 0;
		}

		rxr->rx_packets.ev_count = 0;
		rxr->rx_bytes.ev_count = 0;
		rxr->rx_copies.ev_count = 0;
		rxr->no_jmbuf.ev_count = 0;
		rxr->rx_discarded.ev_count = 0;
	}
	/* MAC statistics counters. */
	stats->ipcs.ev_count = 0;
	stats->l4cs.ev_count = 0;
	stats->ipcs_bad.ev_count = 0;
	stats->l4cs_bad.ev_count = 0;
	stats->intzero.ev_count = 0;
	stats->legint.ev_count = 0;
	stats->crcerrs.ev_count = 0;
	stats->illerrc.ev_count = 0;
	stats->errbc.ev_count = 0;
	stats->mspdc.ev_count = 0;
	stats->mbsdc.ev_count = 0;
	stats->mpctotal.ev_count = 0;
	stats->mlfc.ev_count = 0;
	stats->mrfc.ev_count = 0;
	stats->rlec.ev_count = 0;
	stats->lxontxc.ev_count = 0;
	stats->lxonrxc.ev_count = 0;
	stats->lxofftxc.ev_count = 0;
	stats->lxoffrxc.ev_count = 0;

	/* Packet Reception Stats */
	stats->tor.ev_count = 0;
	stats->gorc.ev_count = 0;
	stats->tpr.ev_count = 0;
	stats->gprc.ev_count = 0;
	stats->mprc.ev_count = 0;
	stats->bprc.ev_count = 0;
	stats->prc64.ev_count = 0;
	stats->prc127.ev_count = 0;
	stats->prc255.ev_count = 0;
	stats->prc511.ev_count = 0;
	stats->prc1023.ev_count = 0;
	stats->prc1522.ev_count = 0;
	stats->ruc.ev_count = 0;
	stats->rfc.ev_count = 0;
	stats->roc.ev_count = 0;
	stats->rjc.ev_count = 0;
	stats->mngprc.ev_count = 0;
	stats->mngpdc.ev_count = 0;
	stats->xec.ev_count = 0;

	/* Packet Transmission Stats */
	stats->gotc.ev_count = 0;
	stats->tpt.ev_count = 0;
	stats->gptc.ev_count = 0;
	stats->bptc.ev_count = 0;
	stats->mptc.ev_count = 0;
	stats->mngptc.ev_count = 0;
	stats->ptc64.ev_count = 0;
	stats->ptc127.ev_count = 0;
	stats->ptc255.ev_count = 0;
	stats->ptc511.ev_count = 0;
	stats->ptc1023.ev_count = 0;
	stats->ptc1522.ev_count = 0;
}
   2177 
   2178 /************************************************************************
   2179  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2180  *
   2181  *   Retrieves the TDH value from the hardware
   2182  ************************************************************************/
   2183 static int
   2184 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2185 {
   2186 	struct sysctlnode node = *rnode;
   2187 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2188 	struct adapter *adapter;
   2189 	uint32_t val;
   2190 
   2191 	if (!txr)
   2192 		return (0);
   2193 
   2194 	adapter = txr->adapter;
   2195 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2196 		return (EPERM);
   2197 
   2198 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
   2199 	node.sysctl_data = &val;
   2200 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2201 } /* ixgbe_sysctl_tdh_handler */
   2202 
   2203 /************************************************************************
   2204  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2205  *
   2206  *   Retrieves the TDT value from the hardware
   2207  ************************************************************************/
   2208 static int
   2209 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2210 {
   2211 	struct sysctlnode node = *rnode;
   2212 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2213 	struct adapter *adapter;
   2214 	uint32_t val;
   2215 
   2216 	if (!txr)
   2217 		return (0);
   2218 
   2219 	adapter = txr->adapter;
   2220 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2221 		return (EPERM);
   2222 
   2223 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
   2224 	node.sysctl_data = &val;
   2225 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2226 } /* ixgbe_sysctl_tdt_handler */
   2227 
   2228 /************************************************************************
   2229  * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
   2230  * handler function
   2231  *
   2232  *   Retrieves the next_to_check value
   2233  ************************************************************************/
   2234 static int
   2235 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2236 {
   2237 	struct sysctlnode node = *rnode;
   2238 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2239 	struct adapter *adapter;
   2240 	uint32_t val;
   2241 
   2242 	if (!rxr)
   2243 		return (0);
   2244 
   2245 	adapter = rxr->adapter;
   2246 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2247 		return (EPERM);
   2248 
   2249 	val = rxr->next_to_check;
   2250 	node.sysctl_data = &val;
   2251 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2252 } /* ixgbe_sysctl_next_to_check_handler */
   2253 
   2254 /************************************************************************
   2255  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2256  *
   2257  *   Retrieves the RDH value from the hardware
   2258  ************************************************************************/
   2259 static int
   2260 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2261 {
   2262 	struct sysctlnode node = *rnode;
   2263 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2264 	struct adapter *adapter;
   2265 	uint32_t val;
   2266 
   2267 	if (!rxr)
   2268 		return (0);
   2269 
   2270 	adapter = rxr->adapter;
   2271 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2272 		return (EPERM);
   2273 
   2274 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
   2275 	node.sysctl_data = &val;
   2276 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2277 } /* ixgbe_sysctl_rdh_handler */
   2278 
   2279 /************************************************************************
   2280  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2281  *
   2282  *   Retrieves the RDT value from the hardware
   2283  ************************************************************************/
   2284 static int
   2285 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2286 {
   2287 	struct sysctlnode node = *rnode;
   2288 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2289 	struct adapter *adapter;
   2290 	uint32_t val;
   2291 
   2292 	if (!rxr)
   2293 		return (0);
   2294 
   2295 	adapter = rxr->adapter;
   2296 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2297 		return (EPERM);
   2298 
   2299 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
   2300 	node.sysctl_data = &val;
   2301 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2302 } /* ixgbe_sysctl_rdt_handler */
   2303 
   2304 static int
   2305 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
   2306 {
   2307 	struct ifnet *ifp = &ec->ec_if;
   2308 	int rv;
   2309 
   2310 	if (set)
   2311 		rv = ixgbe_register_vlan(ifp->if_softc, ifp, vid);
   2312 	else
   2313 		rv = ixgbe_unregister_vlan(ifp->if_softc, ifp, vid);
   2314 
   2315 	return rv;
   2316 }
   2317 
   2318 /************************************************************************
   2319  * ixgbe_register_vlan
   2320  *
   2321  *   Run via vlan config EVENT, it enables us to use the
   2322  *   HW Filter table since we can get the vlan id. This
   2323  *   just creates the entry in the soft version of the
   2324  *   VFTA, init will repopulate the real table.
   2325  ************************************************************************/
   2326 static int
   2327 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2328 {
   2329 	struct adapter	*adapter = ifp->if_softc;
   2330 	u16		index, bit;
   2331 	int		error;
   2332 
   2333 	if (ifp->if_softc != arg)   /* Not our event */
   2334 		return EINVAL;
   2335 
   2336 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2337 		return EINVAL;
   2338 
   2339 	IXGBE_CORE_LOCK(adapter);
   2340 	index = (vtag >> 5) & 0x7F;
   2341 	bit = vtag & 0x1F;
   2342 	adapter->shadow_vfta[index] |= ((u32)1 << bit);
   2343 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
   2344 	    true);
   2345 	IXGBE_CORE_UNLOCK(adapter);
   2346 	if (error != 0)
   2347 		error = EACCES;
   2348 
   2349 	return error;
   2350 } /* ixgbe_register_vlan */
   2351 
   2352 /************************************************************************
   2353  * ixgbe_unregister_vlan
   2354  *
   2355  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2356  ************************************************************************/
   2357 static int
   2358 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2359 {
   2360 	struct adapter	*adapter = ifp->if_softc;
   2361 	u16		index, bit;
   2362 	int		error;
   2363 
   2364 	if (ifp->if_softc != arg)
   2365 		return EINVAL;
   2366 
   2367 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2368 		return EINVAL;
   2369 
   2370 	IXGBE_CORE_LOCK(adapter);
   2371 	index = (vtag >> 5) & 0x7F;
   2372 	bit = vtag & 0x1F;
   2373 	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
   2374 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
   2375 	    true);
   2376 	IXGBE_CORE_UNLOCK(adapter);
   2377 	if (error != 0)
   2378 		error = EACCES;
   2379 
   2380 	return error;
   2381 } /* ixgbe_unregister_vlan */
   2382 
   2383 static void
   2384 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2385 {
   2386 	struct ethercom *ec = &adapter->osdep.ec;
   2387 	struct ixgbe_hw *hw = &adapter->hw;
   2388 	struct rx_ring	*rxr;
   2389 	int		i;
   2390 	u32		ctrl;
   2391 	struct vlanid_list *vlanidp;
   2392 	bool		hwtagging;
   2393 
   2394 	/*
   2395 	 *  This function is called from both if_init and ifflags_cb()
   2396 	 * on NetBSD.
   2397 	 */
   2398 
   2399 	/* Enable HW tagging only if any vlan is attached */
   2400 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   2401 	    && VLAN_ATTACHED(ec);
   2402 
   2403 	/* Setup the queues for vlans */
   2404 	for (i = 0; i < adapter->num_queues; i++) {
   2405 		rxr = &adapter->rx_rings[i];
   2406 		/*
   2407 		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
   2408 		 */
   2409 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2410 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2411 			if (hwtagging)
   2412 				ctrl |= IXGBE_RXDCTL_VME;
   2413 			else
   2414 				ctrl &= ~IXGBE_RXDCTL_VME;
   2415 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2416 		}
   2417 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2418 	}
   2419 
   2420 	/* Cleanup shadow_vfta */
   2421 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2422 		adapter->shadow_vfta[i] = 0;
   2423 	/* Generate shadow_vfta from ec_vids */
   2424 	mutex_enter(ec->ec_lock);
   2425 	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
   2426 		uint32_t idx;
   2427 
   2428 		idx = vlanidp->vid / 32;
   2429 		KASSERT(idx < IXGBE_VFTA_SIZE);
   2430 		adapter->shadow_vfta[idx] |= 1 << vlanidp->vid % 32;
   2431 	}
   2432 	mutex_exit(ec->ec_lock);
   2433 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2434 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
   2435 
   2436 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2437 	/* Enable the Filter Table if enabled */
   2438 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
   2439 		ctrl |= IXGBE_VLNCTRL_VFE;
   2440 	else
   2441 		ctrl &= ~IXGBE_VLNCTRL_VFE;
   2442 	/* VLAN hw tagging for 82598 */
   2443 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2444 		if (hwtagging)
   2445 			ctrl |= IXGBE_VLNCTRL_VME;
   2446 		else
   2447 			ctrl &= ~IXGBE_VLNCTRL_VME;
   2448 	}
   2449 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2450 } /* ixgbe_setup_vlan_hw_support */
   2451 
   2452 /************************************************************************
   2453  * ixgbe_get_slot_info
   2454  *
   2455  *   Get the width and transaction speed of
   2456  *   the slot this adapter is plugged into.
   2457  ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct ixgbe_hw		*hw = &adapter->hw;
	u32		      offset;
	u16			link;
	int		      bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	/* Decode the link status into hw->bus.speed/width */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)	? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000)	? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500)	? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	/* Warn when the slot cannot supply full bandwidth for this card */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
			(hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
			(hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
   2559 
   2560 /************************************************************************
   2561  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2562  ************************************************************************/
   2563 static inline void
   2564 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2565 {
   2566 	struct ixgbe_hw *hw = &adapter->hw;
   2567 	struct ix_queue *que = &adapter->queues[vector];
   2568 	u64		queue = (u64)(1ULL << vector);
   2569 	u32		mask;
   2570 
   2571 	mutex_enter(&que->dc_mtx);
   2572 	if (que->disabled_count > 0 && --que->disabled_count > 0)
   2573 		goto out;
   2574 
   2575 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2576 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2577 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2578 	} else {
   2579 		mask = (queue & 0xFFFFFFFF);
   2580 		if (mask)
   2581 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2582 		mask = (queue >> 32);
   2583 		if (mask)
   2584 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2585 	}
   2586 out:
   2587 	mutex_exit(&que->dc_mtx);
   2588 } /* ixgbe_enable_queue */
   2589 
   2590 /************************************************************************
   2591  * ixgbe_disable_queue_internal
   2592  ************************************************************************/
   2593 static inline void
   2594 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
   2595 {
   2596 	struct ixgbe_hw *hw = &adapter->hw;
   2597 	struct ix_queue *que = &adapter->queues[vector];
   2598 	u64		queue = (u64)(1ULL << vector);
   2599 	u32		mask;
   2600 
   2601 	mutex_enter(&que->dc_mtx);
   2602 
   2603 	if (que->disabled_count > 0) {
   2604 		if (nestok)
   2605 			que->disabled_count++;
   2606 		goto out;
   2607 	}
   2608 	que->disabled_count++;
   2609 
   2610 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2611 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2612 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2613 	} else {
   2614 		mask = (queue & 0xFFFFFFFF);
   2615 		if (mask)
   2616 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2617 		mask = (queue >> 32);
   2618 		if (mask)
   2619 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2620 	}
   2621 out:
   2622 	mutex_exit(&que->dc_mtx);
   2623 } /* ixgbe_disable_queue_internal */
   2624 
   2625 /************************************************************************
   2626  * ixgbe_disable_queue
   2627  ************************************************************************/
/*
 * Mask the given queue's interrupt, counting nested disables so a
 * matching number of ixgbe_enable_queue() calls is needed to unmask.
 */
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
   2634 
   2635 /************************************************************************
   2636  * ixgbe_sched_handle_que - schedule deferred packet processing
   2637  ************************************************************************/
   2638 static inline void
   2639 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
   2640 {
   2641 
   2642 	if (que->txrx_use_workqueue) {
   2643 		/*
   2644 		 * adapter->que_wq is bound to each CPU instead of
   2645 		 * each NIC queue to reduce workqueue kthread. As we
   2646 		 * should consider about interrupt affinity in this
   2647 		 * function, the workqueue kthread must be WQ_PERCPU.
   2648 		 * If create WQ_PERCPU workqueue kthread for each NIC
   2649 		 * queue, that number of created workqueue kthread is
   2650 		 * (number of used NIC queue) * (number of CPUs) =
   2651 		 * (number of CPUs) ^ 2 most often.
   2652 		 *
   2653 		 * The same NIC queue's interrupts are avoided by
   2654 		 * masking the queue's interrupt. And different
   2655 		 * NIC queue's interrupts use different struct work
   2656 		 * (que->wq_cookie). So, "enqueued flag" to avoid
   2657 		 * twice workqueue_enqueue() is not required .
   2658 		 */
   2659 		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
   2660 	} else {
   2661 		softint_schedule(que->que_si);
   2662 	}
   2663 }
   2664 
   2665 /************************************************************************
   2666  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2667  ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this vector until processing completes (re-enabled below) */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	/* TX completions are cheap enough to reap here under the TX lock */
	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = average packet size (bytes/packet) over the interval */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* More work pending: defer; otherwise unmask this vector now */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
   2766 
   2767 /************************************************************************
   2768  * ixgbe_media_status - Media Ioctl callback
   2769  *
   2770  *   Called whenever the user queries the status of
   2771  *   the interface using ifconfig.
   2772  ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting it */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map the (physical layer, link speed) pair onto an ifmedia
	 * subtype.  Copper/baseT layers first.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax) SFP+ */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
   2914 
   2915 /************************************************************************
   2916  * ixgbe_media_change - Media Ioctl callback
   2917  *
   2918  *   Called when the user changes speed/duplex using
   2919  *   media/mediopt option with ifconfig.
   2920  ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter	 *adapter = ifp->if_softc;
	struct ifmedia	 *ifm = &adapter->media;
	struct ixgbe_hw	 *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; changing it is not permitted */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the PHY can do */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	/* Fixed media: translate the ifmedia subtype to a link speed */
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Mirror the selected speeds into adapter->advertise using the
	 * sysctl bit encoding: 0x1=100M, 0x2=1G, 0x4=10G, 0x8=10M,
	 * 0x10=2.5G, 0x20=5G.  Zero means autonegotiate.
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
   3020 
   3021 /************************************************************************
   3022  * ixgbe_set_promisc
   3023  ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int	     mcnt = 0;
	u32	     rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	/* Core lock must already be held by the caller */
	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	ETHER_LOCK(ec);
	/*
	 * Count multicast memberships; ETHER_F_ALLMULTI is treated as
	 * "too many" so MPE stays set below.
	 */
	if (ec->ec_flags & ETHER_F_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
	}
	/* Filter table can hold all groups: no multicast promiscuity needed */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* FCTRL is written a second time if UPE/MPE must be turned on */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
	ETHER_UNLOCK(ec);
} /* ixgbe_set_promisc */
   3063 
   3064 /************************************************************************
   3065  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   3066  ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		eicr, eicr_mask;
	s32		retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change: mask LSC and defer to the link softint */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* The following causes only exist on 82599 and later */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed fiber (82599 only) */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
   3188 
   3189 static void
   3190 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   3191 {
   3192 
   3193 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3194 		itr |= itr << 16;
   3195 	else
   3196 		itr |= IXGBE_EITR_CNT_WDIS;
   3197 
   3198 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
   3199 }
   3200 
   3201 
   3202 /************************************************************************
   3203  * ixgbe_sysctl_interrupt_rate_handler
   3204  ************************************************************************/
/*
 * Sysctl handler for a queue's maximum interrupt rate (interrupts/sec).
 *
 * On read: converts the queue's current EITR interval field to a rate.
 * On write: validates the requested rate, converts it back to an EITR
 * interval, and programs the register via ixgbe_eitr_write().
 * Returns 0 on success, EPERM in firmware recovery mode, or EINVAL for
 * an interval that is too small for RSC on multi-queue 1G/10G links.
 */
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter	*adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* Register access is not allowed while in FW recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	/*
	 * Extract the interval field (bits 3..11).  The 500000/usec and
	 * 4000000/rate conversions below imply the field counts in 2us
	 * units (500 kHz base).
	 */
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;	/* interval 0 == throttling disabled */
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;	/* read-only access, or lookup failed */
	reg &= ~0xfff; /* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;	/* clamp to the lowest usable rate */
		/* 4000000/rate yields the interval pre-shifted into bits 3..11. */
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
   3255 
   3256 const struct sysctlnode *
   3257 ixgbe_sysctl_instance(struct adapter *adapter)
   3258 {
   3259 	const char *dvname;
   3260 	struct sysctllog **log;
   3261 	int rc;
   3262 	const struct sysctlnode *rnode;
   3263 
   3264 	if (adapter->sysctltop != NULL)
   3265 		return adapter->sysctltop;
   3266 
   3267 	log = &adapter->sysctllog;
   3268 	dvname = device_xname(adapter->dev);
   3269 
   3270 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3271 	    0, CTLTYPE_NODE, dvname,
   3272 	    SYSCTL_DESCR("ixgbe information and settings"),
   3273 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3274 		goto err;
   3275 
   3276 	return rnode;
   3277 err:
   3278 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3279 	return NULL;
   3280 }
   3281 
   3282 /************************************************************************
   3283  * ixgbe_add_device_sysctls
   3284  ************************************************************************/
/*
 * Create the per-device sysctl tree: debug knobs, descriptor/queue
 * counts, flow control, interrupt moderation, plus feature-conditional
 * nodes (DMA coalescing on X550+, WoL, external PHY temperature on
 * X552/X557-AT, forced 10/100 autonegotiation on X550EM_a fw PHYs, EEE).
 * Failures are logged but non-fatal: a missing sysctl never prevents
 * attach.
 */
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t	       dev = adapter->dev;
	struct ixgbe_hw	       *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the adaptive-moderation flag from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* PHY sysctls live under their own "phy" sub-node. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
   3438 
   3439 /************************************************************************
   3440  * ixgbe_allocate_pci_resources
   3441  ************************************************************************/
/*
 * Map BAR0 (the device register window) and make sure PCI memory-space
 * decoding is enabled.  Returns 0 on success, ENXIO if the BAR has an
 * unexpected type or cannot be mapped.
 */
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Registers must not be mapped prefetchable: reads and
		 * writes have side effects and must not be combined.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:		/* shared failure path for both mapreg_info and map */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
   3488 
   3489 static void
   3490 ixgbe_free_softint(struct adapter *adapter)
   3491 {
   3492 	struct ix_queue *que = adapter->queues;
   3493 	struct tx_ring *txr = adapter->tx_rings;
   3494 	int i;
   3495 
   3496 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3497 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3498 			if (txr->txr_si != NULL)
   3499 				softint_disestablish(txr->txr_si);
   3500 		}
   3501 		if (que->que_si != NULL)
   3502 			softint_disestablish(que->que_si);
   3503 	}
   3504 	if (adapter->txr_wq != NULL)
   3505 		workqueue_destroy(adapter->txr_wq);
   3506 	if (adapter->txr_wq_enqueued != NULL)
   3507 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
   3508 	if (adapter->que_wq != NULL)
   3509 		workqueue_destroy(adapter->que_wq);
   3510 
   3511 	/* Drain the Link queue */
   3512 	if (adapter->link_si != NULL) {
   3513 		softint_disestablish(adapter->link_si);
   3514 		adapter->link_si = NULL;
   3515 	}
   3516 	if (adapter->mod_si != NULL) {
   3517 		softint_disestablish(adapter->mod_si);
   3518 		adapter->mod_si = NULL;
   3519 	}
   3520 	if (adapter->msf_si != NULL) {
   3521 		softint_disestablish(adapter->msf_si);
   3522 		adapter->msf_si = NULL;
   3523 	}
   3524 	if (adapter->phy_si != NULL) {
   3525 		softint_disestablish(adapter->phy_si);
   3526 		adapter->phy_si = NULL;
   3527 	}
   3528 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3529 		if (adapter->fdir_si != NULL) {
   3530 			softint_disestablish(adapter->fdir_si);
   3531 			adapter->fdir_si = NULL;
   3532 		}
   3533 	}
   3534 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3535 		if (adapter->mbx_si != NULL) {
   3536 			softint_disestablish(adapter->mbx_si);
   3537 			adapter->mbx_si = NULL;
   3538 		}
   3539 	}
   3540 } /* ixgbe_free_softint */
   3541 
   3542 /************************************************************************
   3543  * ixgbe_detach - Device removal routine
   3544  *
   3545  *   Called when the driver is being removed.
   3546  *   Stops the adapter and deallocates all the resources
   3547  *   that were allocated for driver operation.
   3548  *
   3549  *   return 0 on success, positive on failure
   3550  ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32	ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to undo if attach never completed. */
	if (adapter->osdep.attached == false)
		return 0;

	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* Refuse a voluntary detach while VLANs are configured. */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for any running callouts to finish before freeing state. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Detach the event counters registered at attach time. */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-TC counters; some exist only on certain MAC generations. */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters (queue, TX ring, RX ring). */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally release ring memory, queue locks, and the core lock. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
   3742 
   3743 /************************************************************************
   3744  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3745  *
   3746  *   Prepare the adapter/port for LPLU and/or WoL
   3747  ************************************************************************/
/*
 * Stop the adapter and prepare it for a low-power state.
 *
 * X550EM baseT parts additionally enter LPLU through a PHY-specific
 * flow; all other parts are simply stopped.  Then either power the
 * PHY down (no WoL) or program the wake-up filter/control registers.
 * Caller must hold the core lock.  Returns 0 or the LPLU error code.
 */
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	s32		error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No wake-up wanted: power the PHY down, clear WUFC/WUC. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
   3802 
   3803 /************************************************************************
   3804  * ixgbe_shutdown - Shutdown entry point
   3805  ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* Currently compiled out: same LPLU/WoL preparation as suspend. */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
   3822 
   3823 /************************************************************************
   3824  * ixgbe_suspend
   3825  *
   3826  *   From D0 to D3
   3827  ************************************************************************/
   3828 static bool
   3829 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3830 {
   3831 	struct adapter *adapter = device_private(dev);
   3832 	int	       error = 0;
   3833 
   3834 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3835 
   3836 	IXGBE_CORE_LOCK(adapter);
   3837 
   3838 	error = ixgbe_setup_low_power_mode(adapter);
   3839 
   3840 	IXGBE_CORE_UNLOCK(adapter);
   3841 
   3842 	return (error);
   3843 } /* ixgbe_suspend */
   3844 
   3845 /************************************************************************
   3846  * ixgbe_resume
   3847  *
   3848  *   From D3 to D0
   3849  ************************************************************************/
   3850 static bool
   3851 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3852 {
   3853 	struct adapter	*adapter = device_private(dev);
   3854 	struct ifnet	*ifp = adapter->ifp;
   3855 	struct ixgbe_hw *hw = &adapter->hw;
   3856 	u32		wus;
   3857 
   3858 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3859 
   3860 	IXGBE_CORE_LOCK(adapter);
   3861 
   3862 	/* Read & clear WUS register */
   3863 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3864 	if (wus)
   3865 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3866 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3867 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3868 	/* And clear WUFC until next low-power transition */
   3869 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3870 
   3871 	/*
   3872 	 * Required after D3->D0 transition;
   3873 	 * will re-advertise all previous advertised speeds
   3874 	 */
   3875 	if (ifp->if_flags & IFF_UP)
   3876 		ixgbe_init_locked(adapter);
   3877 
   3878 	IXGBE_CORE_UNLOCK(adapter);
   3879 
   3880 	return true;
   3881 } /* ixgbe_resume */
   3882 
   3883 /*
   3884  * Set the various hardware offload abilities.
   3885  *
   3886  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3887  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3888  * mbuf offload flags the driver will understand.
   3889  */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
	/*
	 * Intentionally empty on NetBSD: if_hwassist is a FreeBSD
	 * concept (see the comment above this function).
	 */
}
   3895 
   3896 /************************************************************************
   3897  * ixgbe_init_locked - Init entry point
   3898  *
   3899  *   Used in two ways: It is used by the stack as an init
   3900  *   entry point in network interface structure. It is also
   3901  *   used by the driver as a hw/sw initialization routine to
   3902  *   get to a consistent state.
   3903  *
   3904  *   return 0 on success, positive on failure
   3905  ************************************************************************/
   3906 static void
   3907 ixgbe_init_locked(struct adapter *adapter)
   3908 {
   3909 	struct ifnet   *ifp = adapter->ifp;
   3910 	device_t	dev = adapter->dev;
   3911 	struct ixgbe_hw *hw = &adapter->hw;
   3912 	struct ix_queue *que;
   3913 	struct tx_ring	*txr;
   3914 	struct rx_ring	*rxr;
   3915 	u32		txdctl, mhadd;
   3916 	u32		rxdctl, rxctrl;
   3917 	u32		ctrl_ext;
   3918 	int		i, j, err;
   3919 
   3920 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3921 
   3922 	KASSERT(mutex_owned(&adapter->core_mtx));
   3923 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3924 
   3925 	hw->adapter_stopped = FALSE;
   3926 	ixgbe_stop_adapter(hw);
   3927 	callout_stop(&adapter->timer);
   3928 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
   3929 		que->disabled_count = 0;
   3930 
   3931 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3932 	adapter->max_frame_size =
   3933 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3934 
   3935 	/* Queue indices may change with IOV mode */
   3936 	ixgbe_align_all_queue_indices(adapter);
   3937 
   3938 	/* reprogram the RAR[0] in case user changed it. */
   3939 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3940 
   3941 	/* Get the latest mac address, User can use a LAA */
   3942 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3943 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3944 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3945 	hw->addr_ctrl.rar_used_count = 1;
   3946 
   3947 	/* Set hardware offload abilities from ifnet flags */
   3948 	ixgbe_set_if_hwassist(adapter);
   3949 
   3950 	/* Prepare transmit descriptors and buffers */
   3951 	if (ixgbe_setup_transmit_structures(adapter)) {
   3952 		device_printf(dev, "Could not setup transmit structures\n");
   3953 		ixgbe_stop(adapter);
   3954 		return;
   3955 	}
   3956 
   3957 	ixgbe_init_hw(hw);
   3958 
   3959 	ixgbe_initialize_iov(adapter);
   3960 
   3961 	ixgbe_initialize_transmit_units(adapter);
   3962 
   3963 	/* Setup Multicast table */
   3964 	ixgbe_set_multi(adapter);
   3965 
   3966 	/* Determine the correct mbuf pool, based on frame size */
   3967 	if (adapter->max_frame_size <= MCLBYTES)
   3968 		adapter->rx_mbuf_sz = MCLBYTES;
   3969 	else
   3970 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3971 
   3972 	/* Prepare receive descriptors and buffers */
   3973 	if (ixgbe_setup_receive_structures(adapter)) {
   3974 		device_printf(dev, "Could not setup receive structures\n");
   3975 		ixgbe_stop(adapter);
   3976 		return;
   3977 	}
   3978 
   3979 	/* Configure RX settings */
   3980 	ixgbe_initialize_receive_units(adapter);
   3981 
   3982 	/* Enable SDP & MSI-X interrupts based on adapter */
   3983 	ixgbe_config_gpie(adapter);
   3984 
   3985 	/* Set MTU size */
   3986 	if (ifp->if_mtu > ETHERMTU) {
   3987 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3988 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3989 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3990 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3991 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3992 	}
   3993 
   3994 	/* Now enable all the queues */
   3995 	for (i = 0; i < adapter->num_queues; i++) {
   3996 		txr = &adapter->tx_rings[i];
   3997 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3998 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3999 		/* Set WTHRESH to 8, burst writeback */
   4000 		txdctl |= (8 << 16);
   4001 		/*
   4002 		 * When the internal queue falls below PTHRESH (32),
   4003 		 * start prefetching as long as there are at least
   4004 		 * HTHRESH (1) buffers ready. The values are taken
   4005 		 * from the Intel linux driver 3.8.21.
   4006 		 * Prefetching enables tx line rate even with 1 queue.
   4007 		 */
   4008 		txdctl |= (32 << 0) | (1 << 8);
   4009 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   4010 	}
   4011 
   4012 	for (i = 0; i < adapter->num_queues; i++) {
   4013 		rxr = &adapter->rx_rings[i];
   4014 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   4015 		if (hw->mac.type == ixgbe_mac_82598EB) {
   4016 			/*
   4017 			 * PTHRESH = 21
   4018 			 * HTHRESH = 4
   4019 			 * WTHRESH = 8
   4020 			 */
   4021 			rxdctl &= ~0x3FFFFF;
   4022 			rxdctl |= 0x080420;
   4023 		}
   4024 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   4025 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   4026 		for (j = 0; j < 10; j++) {
   4027 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   4028 			    IXGBE_RXDCTL_ENABLE)
   4029 				break;
   4030 			else
   4031 				msec_delay(1);
   4032 		}
   4033 		wmb();
   4034 
   4035 		/*
   4036 		 * In netmap mode, we must preserve the buffers made
   4037 		 * available to userspace before the if_init()
   4038 		 * (this is true by default on the TX side, because
   4039 		 * init makes all buffers available to userspace).
   4040 		 *
   4041 		 * netmap_reset() and the device specific routines
   4042 		 * (e.g. ixgbe_setup_receive_rings()) map these
   4043 		 * buffers at the end of the NIC ring, so here we
   4044 		 * must set the RDT (tail) register to make sure
   4045 		 * they are not overwritten.
   4046 		 *
   4047 		 * In this driver the NIC ring starts at RDH = 0,
   4048 		 * RDT points to the last slot available for reception (?),
   4049 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   4050 		 */
   4051 #ifdef DEV_NETMAP
   4052 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   4053 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   4054 			struct netmap_adapter *na = NA(adapter->ifp);
   4055 			struct netmap_kring *kring = na->rx_rings[i];
   4056 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   4057 
   4058 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   4059 		} else
   4060 #endif /* DEV_NETMAP */
   4061 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   4062 			    adapter->num_rx_desc - 1);
   4063 	}
   4064 
   4065 	/* Enable Receive engine */
   4066 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4067 	if (hw->mac.type == ixgbe_mac_82598EB)
   4068 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   4069 	rxctrl |= IXGBE_RXCTRL_RXEN;
   4070 	ixgbe_enable_rx_dma(hw, rxctrl);
   4071 
   4072 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4073 
   4074 	/* Set up MSI/MSI-X routing */
   4075 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4076 		ixgbe_configure_ivars(adapter);
   4077 		/* Set up auto-mask */
   4078 		if (hw->mac.type == ixgbe_mac_82598EB)
   4079 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4080 		else {
   4081 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   4082 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   4083 		}
   4084 	} else {  /* Simple settings for Legacy/MSI */
   4085 		ixgbe_set_ivar(adapter, 0, 0, 0);
   4086 		ixgbe_set_ivar(adapter, 0, 0, 1);
   4087 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4088 	}
   4089 
   4090 	ixgbe_init_fdir(adapter);
   4091 
   4092 	/*
   4093 	 * Check on any SFP devices that
   4094 	 * need to be kick-started
   4095 	 */
   4096 	if (hw->phy.type == ixgbe_phy_none) {
   4097 		err = hw->phy.ops.identify(hw);
   4098 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4099 			device_printf(dev,
   4100 			    "Unsupported SFP+ module type was detected.\n");
   4101 			return;
   4102 		}
   4103 	}
   4104 
   4105 	/* Set moderation on the Link interrupt */
   4106 	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
   4107 
   4108 	/* Enable EEE power saving */
   4109 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   4110 		hw->mac.ops.setup_eee(hw,
   4111 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   4112 
   4113 	/* Enable power to the phy. */
   4114 	ixgbe_set_phy_power(hw, TRUE);
   4115 
   4116 	/* Config/Enable Link */
   4117 	ixgbe_config_link(adapter);
   4118 
   4119 	/* Hardware Packet Buffer & Flow Control setup */
   4120 	ixgbe_config_delay_values(adapter);
   4121 
   4122 	/* Initialize the FC settings */
   4123 	ixgbe_start_hw(hw);
   4124 
   4125 	/* Set up VLAN support and filter */
   4126 	ixgbe_setup_vlan_hw_support(adapter);
   4127 
   4128 	/* Setup DMA Coalescing */
   4129 	ixgbe_config_dmac(adapter);
   4130 
   4131 	/* And now turn on interrupts */
   4132 	ixgbe_enable_intr(adapter);
   4133 
   4134 	/* Enable the use of the MBX by the VF's */
   4135 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   4136 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   4137 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   4138 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   4139 	}
   4140 
   4141 	/* Update saved flags. See ixgbe_ifflags_cb() */
   4142 	adapter->if_flags = ifp->if_flags;
   4143 	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
   4144 
   4145 	/* Now inform the stack we're ready */
   4146 	ifp->if_flags |= IFF_RUNNING;
   4147 
   4148 	return;
   4149 } /* ixgbe_init_locked */
   4150 
   4151 /************************************************************************
   4152  * ixgbe_init
   4153  ************************************************************************/
   4154 static int
   4155 ixgbe_init(struct ifnet *ifp)
   4156 {
   4157 	struct adapter *adapter = ifp->if_softc;
   4158 
   4159 	IXGBE_CORE_LOCK(adapter);
   4160 	ixgbe_init_locked(adapter);
   4161 	IXGBE_CORE_UNLOCK(adapter);
   4162 
   4163 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   4164 } /* ixgbe_init */
   4165 
   4166 /************************************************************************
   4167  * ixgbe_set_ivar
   4168  *
   4169  *   Setup the correct IVAR register for a particular MSI-X interrupt
   4170  *     (yes this is all very magic and confusing :)
   4171  *    - entry is the register array entry
   4172  *    - vector is the MSI-X vector for this queue
   4173  *    - type is RX/TX/MISC
   4174  ************************************************************************/
   4175 static void
   4176 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4177 {
   4178 	struct ixgbe_hw *hw = &adapter->hw;
   4179 	u32 ivar, index;
   4180 
   4181 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4182 
   4183 	switch (hw->mac.type) {
   4184 	case ixgbe_mac_82598EB:
   4185 		if (type == -1)
   4186 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4187 		else
   4188 			entry += (type * 64);
   4189 		index = (entry >> 2) & 0x1F;
   4190 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4191 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   4192 		ivar |= (vector << (8 * (entry & 0x3)));
   4193 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4194 		break;
   4195 	case ixgbe_mac_82599EB:
   4196 	case ixgbe_mac_X540:
   4197 	case ixgbe_mac_X550:
   4198 	case ixgbe_mac_X550EM_x:
   4199 	case ixgbe_mac_X550EM_a:
   4200 		if (type == -1) { /* MISC IVAR */
   4201 			index = (entry & 1) * 8;
   4202 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4203 			ivar &= ~(0xffUL << index);
   4204 			ivar |= ((u32)vector << index);
   4205 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4206 		} else {	/* RX/TX IVARS */
   4207 			index = (16 * (entry & 1)) + (8 * type);
   4208 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4209 			ivar &= ~(0xffUL << index);
   4210 			ivar |= ((u32)vector << index);
   4211 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   4212 		}
   4213 		break;
   4214 	default:
   4215 		break;
   4216 	}
   4217 } /* ixgbe_set_ivar */
   4218 
   4219 /************************************************************************
   4220  * ixgbe_configure_ivars
   4221  ************************************************************************/
   4222 static void
   4223 ixgbe_configure_ivars(struct adapter *adapter)
   4224 {
   4225 	struct ix_queue *que = adapter->queues;
   4226 	u32		newitr;
   4227 
   4228 	if (ixgbe_max_interrupt_rate > 0)
   4229 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4230 	else {
   4231 		/*
   4232 		 * Disable DMA coalescing if interrupt moderation is
   4233 		 * disabled.
   4234 		 */
   4235 		adapter->dmac = 0;
   4236 		newitr = 0;
   4237 	}
   4238 
   4239 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4240 		struct rx_ring *rxr = &adapter->rx_rings[i];
   4241 		struct tx_ring *txr = &adapter->tx_rings[i];
   4242 		/* First the RX queue entry */
   4243 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   4244 		/* ... and the TX */
   4245 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   4246 		/* Set an Initial EITR value */
   4247 		ixgbe_eitr_write(adapter, que->msix, newitr);
   4248 		/*
   4249 		 * To eliminate influence of the previous state.
   4250 		 * At this point, Tx/Rx interrupt handler
   4251 		 * (ixgbe_msix_que()) cannot be called, so  both
   4252 		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
   4253 		 */
   4254 		que->eitr_setting = 0;
   4255 	}
   4256 
   4257 	/* For the Link interrupt */
   4258 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   4259 } /* ixgbe_configure_ivars */
   4260 
   4261 /************************************************************************
   4262  * ixgbe_config_gpie
   4263  ************************************************************************/
   4264 static void
   4265 ixgbe_config_gpie(struct adapter *adapter)
   4266 {
   4267 	struct ixgbe_hw *hw = &adapter->hw;
   4268 	u32		gpie;
   4269 
   4270 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   4271 
   4272 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4273 		/* Enable Enhanced MSI-X mode */
   4274 		gpie |= IXGBE_GPIE_MSIX_MODE
   4275 		     |	IXGBE_GPIE_EIAME
   4276 		     |	IXGBE_GPIE_PBA_SUPPORT
   4277 		     |	IXGBE_GPIE_OCD;
   4278 	}
   4279 
   4280 	/* Fan Failure Interrupt */
   4281 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4282 		gpie |= IXGBE_SDP1_GPIEN;
   4283 
   4284 	/* Thermal Sensor Interrupt */
   4285 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   4286 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4287 
   4288 	/* Link detection */
   4289 	switch (hw->mac.type) {
   4290 	case ixgbe_mac_82599EB:
   4291 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   4292 		break;
   4293 	case ixgbe_mac_X550EM_x:
   4294 	case ixgbe_mac_X550EM_a:
   4295 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4296 		break;
   4297 	default:
   4298 		break;
   4299 	}
   4300 
   4301 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   4302 
   4303 } /* ixgbe_config_gpie */
   4304 
   4305 /************************************************************************
   4306  * ixgbe_config_delay_values
   4307  *
   4308  *   Requires adapter->max_frame_size to be set.
   4309  ************************************************************************/
   4310 static void
   4311 ixgbe_config_delay_values(struct adapter *adapter)
   4312 {
   4313 	struct ixgbe_hw *hw = &adapter->hw;
   4314 	u32		rxpb, frame, size, tmp;
   4315 
   4316 	frame = adapter->max_frame_size;
   4317 
   4318 	/* Calculate High Water */
   4319 	switch (hw->mac.type) {
   4320 	case ixgbe_mac_X540:
   4321 	case ixgbe_mac_X550:
   4322 	case ixgbe_mac_X550EM_x:
   4323 	case ixgbe_mac_X550EM_a:
   4324 		tmp = IXGBE_DV_X540(frame, frame);
   4325 		break;
   4326 	default:
   4327 		tmp = IXGBE_DV(frame, frame);
   4328 		break;
   4329 	}
   4330 	size = IXGBE_BT2KB(tmp);
   4331 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4332 	hw->fc.high_water[0] = rxpb - size;
   4333 
   4334 	/* Now calculate Low Water */
   4335 	switch (hw->mac.type) {
   4336 	case ixgbe_mac_X540:
   4337 	case ixgbe_mac_X550:
   4338 	case ixgbe_mac_X550EM_x:
   4339 	case ixgbe_mac_X550EM_a:
   4340 		tmp = IXGBE_LOW_DV_X540(frame);
   4341 		break;
   4342 	default:
   4343 		tmp = IXGBE_LOW_DV(frame);
   4344 		break;
   4345 	}
   4346 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4347 
   4348 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4349 	hw->fc.send_xon = TRUE;
   4350 } /* ixgbe_config_delay_values */
   4351 
   4352 /************************************************************************
   4353  * ixgbe_set_multi - Multicast Update
   4354  *
   4355  *   Called whenever multicast address list is updated.
   4356  ************************************************************************/
   4357 static void
   4358 ixgbe_set_multi(struct adapter *adapter)
   4359 {
   4360 	struct ixgbe_mc_addr	*mta;
   4361 	struct ifnet		*ifp = adapter->ifp;
   4362 	u8			*update_ptr;
   4363 	int			mcnt = 0;
   4364 	u32			fctrl;
   4365 	struct ethercom		*ec = &adapter->osdep.ec;
   4366 	struct ether_multi	*enm;
   4367 	struct ether_multistep	step;
   4368 
   4369 	KASSERT(mutex_owned(&adapter->core_mtx));
   4370 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4371 
   4372 	mta = adapter->mta;
   4373 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4374 
   4375 	ETHER_LOCK(ec);
   4376 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4377 	ETHER_FIRST_MULTI(step, ec, enm);
   4378 	while (enm != NULL) {
   4379 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4380 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4381 			ETHER_ADDR_LEN) != 0)) {
   4382 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4383 			break;
   4384 		}
   4385 		bcopy(enm->enm_addrlo,
   4386 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4387 		mta[mcnt].vmdq = adapter->pool;
   4388 		mcnt++;
   4389 		ETHER_NEXT_MULTI(step, enm);
   4390 	}
   4391 
   4392 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4393 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4394 	if (ifp->if_flags & IFF_PROMISC)
   4395 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4396 	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
   4397 		fctrl |= IXGBE_FCTRL_MPE;
   4398 	}
   4399 	ETHER_UNLOCK(ec);
   4400 
   4401 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4402 
   4403 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4404 		update_ptr = (u8 *)mta;
   4405 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4406 		    ixgbe_mc_array_itr, TRUE);
   4407 	}
   4408 
   4409 } /* ixgbe_set_multi */
   4410 
   4411 /************************************************************************
   4412  * ixgbe_mc_array_itr
   4413  *
   4414  *   An iterator function needed by the multicast shared code.
   4415  *   It feeds the shared code routine the addresses in the
   4416  *   array of ixgbe_set_multi() one by one.
   4417  ************************************************************************/
   4418 static u8 *
   4419 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4420 {
   4421 	struct ixgbe_mc_addr *mta;
   4422 
   4423 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4424 	*vmdq = mta->vmdq;
   4425 
   4426 	*update_ptr = (u8*)(mta + 1);
   4427 
   4428 	return (mta->addr);
   4429 } /* ixgbe_mc_array_itr */
   4430 
   4431 /************************************************************************
   4432  * ixgbe_local_timer - Timer routine
   4433  *
   4434  *   Checks for link status, updates statistics,
   4435  *   and runs the watchdog check.
   4436  ************************************************************************/
   4437 static void
   4438 ixgbe_local_timer(void *arg)
   4439 {
   4440 	struct adapter *adapter = arg;
   4441 
   4442 	IXGBE_CORE_LOCK(adapter);
   4443 	ixgbe_local_timer1(adapter);
   4444 	IXGBE_CORE_UNLOCK(adapter);
   4445 }
   4446 
/*
 * Locked body of the once-per-second timer: probe optics, refresh
 * link state and statistics, aggregate per-queue error counters,
 * and run the Tx hang watchdog.  Core mutex must be held.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Sum the per-queue Tx DMA setup/TSO error counters into the
	 * adapter-wide event counters. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm for the next tick */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
   4551 
   4552 /************************************************************************
   4553  * ixgbe_recovery_mode_timer - Recovery mode timer routine
   4554  ************************************************************************/
   4555 static void
   4556 ixgbe_recovery_mode_timer(void *arg)
   4557 {
   4558 	struct adapter *adapter = arg;
   4559 	struct ixgbe_hw *hw = &adapter->hw;
   4560 
   4561 	IXGBE_CORE_LOCK(adapter);
   4562 	if (ixgbe_fw_recovery_mode(hw)) {
   4563 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
   4564 			/* Firmware error detected, entering recovery mode */
   4565 			device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
   4566 
   4567 			if (hw->adapter_stopped == FALSE)
   4568 				ixgbe_stop(adapter);
   4569 		}
   4570 	} else
   4571 		atomic_cas_uint(&adapter->recovery_mode, 1, 0);
   4572 
   4573 	callout_reset(&adapter->recovery_mode_timer, hz,
   4574 	    ixgbe_recovery_mode_timer, adapter);
   4575 	IXGBE_CORE_UNLOCK(adapter);
   4576 } /* ixgbe_recovery_mode_timer */
   4577 
   4578 /************************************************************************
   4579  * ixgbe_sfp_probe
   4580  *
   4581  *   Determine if a port had optics inserted.
   4582  ************************************************************************/
   4583 static bool
   4584 ixgbe_sfp_probe(struct adapter *adapter)
   4585 {
   4586 	struct ixgbe_hw	*hw = &adapter->hw;
   4587 	device_t	dev = adapter->dev;
   4588 	bool		result = FALSE;
   4589 
   4590 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4591 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4592 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4593 		if (ret)
   4594 			goto out;
   4595 		ret = hw->phy.ops.reset(hw);
   4596 		adapter->sfp_probe = FALSE;
   4597 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4598 			device_printf(dev,"Unsupported SFP+ module detected!");
   4599 			device_printf(dev,
   4600 			    "Reload driver with supported module.\n");
   4601 			goto out;
   4602 		} else
   4603 			device_printf(dev, "SFP+ module detected!\n");
   4604 		/* We now have supported optics */
   4605 		result = TRUE;
   4606 	}
   4607 out:
   4608 
   4609 	return (result);
   4610 } /* ixgbe_sfp_probe */
   4611 
   4612 /************************************************************************
   4613  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4614  ************************************************************************/
   4615 static void
   4616 ixgbe_handle_mod(void *context)
   4617 {
   4618 	struct adapter	*adapter = context;
   4619 	struct ixgbe_hw *hw = &adapter->hw;
   4620 	device_t	dev = adapter->dev;
   4621 	u32		err, cage_full = 0;
   4622 
   4623 	++adapter->mod_sicount.ev_count;
   4624 	if (adapter->hw.need_crosstalk_fix) {
   4625 		switch (hw->mac.type) {
   4626 		case ixgbe_mac_82599EB:
   4627 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4628 			    IXGBE_ESDP_SDP2;
   4629 			break;
   4630 		case ixgbe_mac_X550EM_x:
   4631 		case ixgbe_mac_X550EM_a:
   4632 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4633 			    IXGBE_ESDP_SDP0;
   4634 			break;
   4635 		default:
   4636 			break;
   4637 		}
   4638 
   4639 		if (!cage_full)
   4640 			return;
   4641 	}
   4642 
   4643 	err = hw->phy.ops.identify_sfp(hw);
   4644 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4645 		device_printf(dev,
   4646 		    "Unsupported SFP+ module type was detected.\n");
   4647 		return;
   4648 	}
   4649 
   4650 	if (hw->mac.type == ixgbe_mac_82598EB)
   4651 		err = hw->phy.ops.reset(hw);
   4652 	else
   4653 		err = hw->mac.ops.setup_sfp(hw);
   4654 
   4655 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4656 		device_printf(dev,
   4657 		    "Setup failure - unsupported SFP+ module type.\n");
   4658 		return;
   4659 	}
   4660 	softint_schedule(adapter->msf_si);
   4661 } /* ixgbe_handle_mod */
   4662 
   4663 
   4664 /************************************************************************
   4665  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4666  ************************************************************************/
   4667 static void
   4668 ixgbe_handle_msf(void *context)
   4669 {
   4670 	struct adapter	*adapter = context;
   4671 	struct ixgbe_hw *hw = &adapter->hw;
   4672 	u32		autoneg;
   4673 	bool		negotiate;
   4674 
   4675 	IXGBE_CORE_LOCK(adapter);
   4676 	++adapter->msf_sicount.ev_count;
   4677 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4678 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4679 
   4680 	autoneg = hw->phy.autoneg_advertised;
   4681 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4682 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4683 	else
   4684 		negotiate = 0;
   4685 	if (hw->mac.ops.setup_link)
   4686 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4687 
   4688 	/* Adjust media types shown in ifconfig */
   4689 	ifmedia_removeall(&adapter->media);
   4690 	ixgbe_add_media_types(adapter);
   4691 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4692 	IXGBE_CORE_UNLOCK(adapter);
   4693 } /* ixgbe_handle_msf */
   4694 
   4695 /************************************************************************
   4696  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4697  ************************************************************************/
   4698 static void
   4699 ixgbe_handle_phy(void *context)
   4700 {
   4701 	struct adapter	*adapter = context;
   4702 	struct ixgbe_hw *hw = &adapter->hw;
   4703 	int error;
   4704 
   4705 	++adapter->phy_sicount.ev_count;
   4706 	error = hw->phy.ops.handle_lasi(hw);
   4707 	if (error == IXGBE_ERR_OVERTEMP)
   4708 		device_printf(adapter->dev,
   4709 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4710 		    " PHY will downshift to lower power state!\n");
   4711 	else if (error)
   4712 		device_printf(adapter->dev,
   4713 		    "Error handling LASI interrupt: %d\n", error);
   4714 } /* ixgbe_handle_phy */
   4715 
   4716 static void
   4717 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4718 {
   4719 	struct adapter *adapter = ifp->if_softc;
   4720 
   4721 	IXGBE_CORE_LOCK(adapter);
   4722 	ixgbe_stop(adapter);
   4723 	IXGBE_CORE_UNLOCK(adapter);
   4724 }
   4725 
   4726 /************************************************************************
   4727  * ixgbe_stop - Stop the hardware
   4728  *
   4729  *   Disables all traffic on the adapter by issuing a
   4730  *   global reset on the MAC and deallocates TX/RX buffers.
   4731  ************************************************************************/
   4732 static void
   4733 ixgbe_stop(void *arg)
   4734 {
   4735 	struct ifnet	*ifp;
   4736 	struct adapter	*adapter = arg;
   4737 	struct ixgbe_hw *hw = &adapter->hw;
   4738 
   4739 	ifp = adapter->ifp;
   4740 
   4741 	KASSERT(mutex_owned(&adapter->core_mtx));
   4742 
   4743 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4744 	ixgbe_disable_intr(adapter);
   4745 	callout_stop(&adapter->timer);
   4746 
   4747 	/* Let the stack know...*/
   4748 	ifp->if_flags &= ~IFF_RUNNING;
   4749 
   4750 	ixgbe_reset_hw(hw);
   4751 	hw->adapter_stopped = FALSE;
   4752 	ixgbe_stop_adapter(hw);
   4753 	if (hw->mac.type == ixgbe_mac_82599EB)
   4754 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4755 	/* Turn off the laser - noop with no optics */
   4756 	ixgbe_disable_tx_laser(hw);
   4757 
   4758 	/* Update the stack */
   4759 	adapter->link_up = FALSE;
   4760 	ixgbe_update_link_status(adapter);
   4761 
   4762 	/* reprogram the RAR[0] in case user changed it. */
   4763 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4764 
   4765 	return;
   4766 } /* ixgbe_stop */
   4767 
   4768 /************************************************************************
   4769  * ixgbe_update_link_status - Update OS on link state
   4770  *
   4771  * Note: Only updates the OS on the cached link state.
   4772  *	 The real check of the hardware only happens with
   4773  *	 a link interrupt.
   4774  ************************************************************************/
   4775 static void
   4776 ixgbe_update_link_status(struct adapter *adapter)
   4777 {
   4778 	struct ifnet	*ifp = adapter->ifp;
   4779 	device_t	dev = adapter->dev;
   4780 	struct ixgbe_hw *hw = &adapter->hw;
   4781 
   4782 	KASSERT(mutex_owned(&adapter->core_mtx));
   4783 
   4784 	if (adapter->link_up) {
   4785 		if (adapter->link_active != LINK_STATE_UP) {
   4786 			/*
   4787 			 * To eliminate influence of the previous state
   4788 			 * in the same way as ixgbe_init_locked().
   4789 			 */
   4790 			struct ix_queue	*que = adapter->queues;
   4791 			for (int i = 0; i < adapter->num_queues; i++, que++)
   4792 				que->eitr_setting = 0;
   4793 
   4794 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4795 				/*
   4796 				 *  Discard count for both MAC Local Fault and
   4797 				 * Remote Fault because those registers are
   4798 				 * valid only when the link speed is up and
   4799 				 * 10Gbps.
   4800 				 */
   4801 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4802 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4803 			}
   4804 
   4805 			if (bootverbose) {
   4806 				const char *bpsmsg;
   4807 
   4808 				switch (adapter->link_speed) {
   4809 				case IXGBE_LINK_SPEED_10GB_FULL:
   4810 					bpsmsg = "10 Gbps";
   4811 					break;
   4812 				case IXGBE_LINK_SPEED_5GB_FULL:
   4813 					bpsmsg = "5 Gbps";
   4814 					break;
   4815 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4816 					bpsmsg = "2.5 Gbps";
   4817 					break;
   4818 				case IXGBE_LINK_SPEED_1GB_FULL:
   4819 					bpsmsg = "1 Gbps";
   4820 					break;
   4821 				case IXGBE_LINK_SPEED_100_FULL:
   4822 					bpsmsg = "100 Mbps";
   4823 					break;
   4824 				case IXGBE_LINK_SPEED_10_FULL:
   4825 					bpsmsg = "10 Mbps";
   4826 					break;
   4827 				default:
   4828 					bpsmsg = "unknown speed";
   4829 					break;
   4830 				}
   4831 				device_printf(dev, "Link is up %s %s \n",
   4832 				    bpsmsg, "Full Duplex");
   4833 			}
   4834 			adapter->link_active = LINK_STATE_UP;
   4835 			/* Update any Flow Control changes */
   4836 			ixgbe_fc_enable(&adapter->hw);
   4837 			/* Update DMA coalescing config */
   4838 			ixgbe_config_dmac(adapter);
   4839 			if_link_state_change(ifp, LINK_STATE_UP);
   4840 
   4841 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4842 				ixgbe_ping_all_vfs(adapter);
   4843 		}
   4844 	} else {
   4845 		/*
   4846 		 * Do it when link active changes to DOWN. i.e.
   4847 		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
   4848 		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
   4849 		 */
   4850 		if (adapter->link_active != LINK_STATE_DOWN) {
   4851 			if (bootverbose)
   4852 				device_printf(dev, "Link is Down\n");
   4853 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4854 			adapter->link_active = LINK_STATE_DOWN;
   4855 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4856 				ixgbe_ping_all_vfs(adapter);
   4857 			ixgbe_drain_all(adapter);
   4858 		}
   4859 	}
   4860 } /* ixgbe_update_link_status */
   4861 
   4862 /************************************************************************
   4863  * ixgbe_config_dmac - Configure DMA Coalescing
   4864  ************************************************************************/
   4865 static void
   4866 ixgbe_config_dmac(struct adapter *adapter)
   4867 {
   4868 	struct ixgbe_hw *hw = &adapter->hw;
   4869 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4870 
   4871 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4872 		return;
   4873 
   4874 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4875 	    dcfg->link_speed ^ adapter->link_speed) {
   4876 		dcfg->watchdog_timer = adapter->dmac;
   4877 		dcfg->fcoe_en = false;
   4878 		dcfg->link_speed = adapter->link_speed;
   4879 		dcfg->num_tcs = 1;
   4880 
   4881 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4882 		    dcfg->watchdog_timer, dcfg->link_speed);
   4883 
   4884 		hw->mac.ops.dmac_config(hw);
   4885 	}
   4886 } /* ixgbe_config_dmac */
   4887 
   4888 /************************************************************************
   4889  * ixgbe_enable_intr
   4890  ************************************************************************/
   4891 static void
   4892 ixgbe_enable_intr(struct adapter *adapter)
   4893 {
   4894 	struct ixgbe_hw	*hw = &adapter->hw;
   4895 	struct ix_queue	*que = adapter->queues;
   4896 	u32		mask, fwsm;
   4897 
   4898 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4899 
   4900 	switch (adapter->hw.mac.type) {
   4901 	case ixgbe_mac_82599EB:
   4902 		mask |= IXGBE_EIMS_ECC;
   4903 		/* Temperature sensor on some adapters */
   4904 		mask |= IXGBE_EIMS_GPI_SDP0;
   4905 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4906 		mask |= IXGBE_EIMS_GPI_SDP1;
   4907 		mask |= IXGBE_EIMS_GPI_SDP2;
   4908 		break;
   4909 	case ixgbe_mac_X540:
   4910 		/* Detect if Thermal Sensor is enabled */
   4911 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4912 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4913 			mask |= IXGBE_EIMS_TS;
   4914 		mask |= IXGBE_EIMS_ECC;
   4915 		break;
   4916 	case ixgbe_mac_X550:
   4917 		/* MAC thermal sensor is automatically enabled */
   4918 		mask |= IXGBE_EIMS_TS;
   4919 		mask |= IXGBE_EIMS_ECC;
   4920 		break;
   4921 	case ixgbe_mac_X550EM_x:
   4922 	case ixgbe_mac_X550EM_a:
   4923 		/* Some devices use SDP0 for important information */
   4924 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4925 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4926 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4927 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4928 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4929 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4930 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4931 		mask |= IXGBE_EIMS_ECC;
   4932 		break;
   4933 	default:
   4934 		break;
   4935 	}
   4936 
   4937 	/* Enable Fan Failure detection */
   4938 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4939 		mask |= IXGBE_EIMS_GPI_SDP1;
   4940 	/* Enable SR-IOV */
   4941 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4942 		mask |= IXGBE_EIMS_MAILBOX;
   4943 	/* Enable Flow Director */
   4944 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4945 		mask |= IXGBE_EIMS_FLOW_DIR;
   4946 
   4947 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4948 
   4949 	/* With MSI-X we use auto clear */
   4950 	if (adapter->msix_mem) {
   4951 		mask = IXGBE_EIMS_ENABLE_MASK;
   4952 		/* Don't autoclear Link */
   4953 		mask &= ~IXGBE_EIMS_OTHER;
   4954 		mask &= ~IXGBE_EIMS_LSC;
   4955 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4956 			mask &= ~IXGBE_EIMS_MAILBOX;
   4957 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4958 	}
   4959 
   4960 	/*
   4961 	 * Now enable all queues, this is done separately to
   4962 	 * allow for handling the extended (beyond 32) MSI-X
   4963 	 * vectors that can be used by 82599
   4964 	 */
   4965 	for (int i = 0; i < adapter->num_queues; i++, que++)
   4966 		ixgbe_enable_queue(adapter, que->msix);
   4967 
   4968 	IXGBE_WRITE_FLUSH(hw);
   4969 
   4970 } /* ixgbe_enable_intr */
   4971 
   4972 /************************************************************************
   4973  * ixgbe_disable_intr_internal
   4974  ************************************************************************/
   4975 static void
   4976 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
   4977 {
   4978 	struct ix_queue	*que = adapter->queues;
   4979 
   4980 	/* disable interrupts other than queues */
   4981 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
   4982 
   4983 	if (adapter->msix_mem)
   4984 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4985 
   4986 	for (int i = 0; i < adapter->num_queues; i++, que++)
   4987 		ixgbe_disable_queue_internal(adapter, que->msix, nestok);
   4988 
   4989 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4990 
   4991 } /* ixgbe_do_disable_intr_internal */
   4992 
   4993 /************************************************************************
   4994  * ixgbe_disable_intr
   4995  ************************************************************************/
   4996 static void
   4997 ixgbe_disable_intr(struct adapter *adapter)
   4998 {
   4999 
   5000 	ixgbe_disable_intr_internal(adapter, true);
   5001 } /* ixgbe_disable_intr */
   5002 
   5003 /************************************************************************
   5004  * ixgbe_ensure_disabled_intr
   5005  ************************************************************************/
   5006 void
   5007 ixgbe_ensure_disabled_intr(struct adapter *adapter)
   5008 {
   5009 
   5010 	ixgbe_disable_intr_internal(adapter, false);
   5011 } /* ixgbe_ensure_disabled_intr */
   5012 
   5013 /************************************************************************
   5014  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   5015  ************************************************************************/
   5016 static int
   5017 ixgbe_legacy_irq(void *arg)
   5018 {
   5019 	struct ix_queue *que = arg;
   5020 	struct adapter	*adapter = que->adapter;
   5021 	struct ixgbe_hw	*hw = &adapter->hw;
   5022 	struct ifnet	*ifp = adapter->ifp;
   5023 	struct		tx_ring *txr = adapter->tx_rings;
   5024 	bool		more = false;
   5025 	u32		eicr, eicr_mask;
   5026 
   5027 	/* Silicon errata #26 on 82598 */
   5028 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   5029 
   5030 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   5031 
   5032 	adapter->stats.pf.legint.ev_count++;
   5033 	++que->irqs.ev_count;
   5034 	if (eicr == 0) {
   5035 		adapter->stats.pf.intzero.ev_count++;
   5036 		if ((ifp->if_flags & IFF_UP) != 0)
   5037 			ixgbe_enable_intr(adapter);
   5038 		return 0;
   5039 	}
   5040 
   5041 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   5042 		/*
   5043 		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
   5044 		 */
   5045 		que->txrx_use_workqueue = adapter->txrx_use_workqueue;
   5046 
   5047 #ifdef __NetBSD__
   5048 		/* Don't run ixgbe_rxeof in interrupt context */
   5049 		more = true;
   5050 #else
   5051 		more = ixgbe_rxeof(que);
   5052 #endif
   5053 
   5054 		IXGBE_TX_LOCK(txr);
   5055 		ixgbe_txeof(txr);
   5056 #ifdef notyet
   5057 		if (!ixgbe_ring_empty(ifp, txr->br))
   5058 			ixgbe_start_locked(ifp, txr);
   5059 #endif
   5060 		IXGBE_TX_UNLOCK(txr);
   5061 	}
   5062 
   5063 	/* Check for fan failure */
   5064 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   5065 		ixgbe_check_fan_failure(adapter, eicr, true);
   5066 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   5067 	}
   5068 
   5069 	/* Link status change */
   5070 	if (eicr & IXGBE_EICR_LSC)
   5071 		softint_schedule(adapter->link_si);
   5072 
   5073 	if (ixgbe_is_sfp(hw)) {
   5074 		/* Pluggable optics-related interrupt */
   5075 		if (hw->mac.type >= ixgbe_mac_X540)
   5076 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   5077 		else
   5078 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   5079 
   5080 		if (eicr & eicr_mask) {
   5081 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   5082 			softint_schedule(adapter->mod_si);
   5083 		}
   5084 
   5085 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   5086 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   5087 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   5088 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   5089 			softint_schedule(adapter->msf_si);
   5090 		}
   5091 	}
   5092 
   5093 	/* External PHY interrupt */
   5094 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   5095 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   5096 		softint_schedule(adapter->phy_si);
   5097 
   5098 	if (more) {
   5099 		que->req.ev_count++;
   5100 		ixgbe_sched_handle_que(adapter, que);
   5101 	} else
   5102 		ixgbe_enable_intr(adapter);
   5103 
   5104 	return 1;
   5105 } /* ixgbe_legacy_irq */
   5106 
   5107 /************************************************************************
   5108  * ixgbe_free_pciintr_resources
   5109  ************************************************************************/
   5110 static void
   5111 ixgbe_free_pciintr_resources(struct adapter *adapter)
   5112 {
   5113 	struct ix_queue *que = adapter->queues;
   5114 	int		rid;
   5115 
   5116 	/*
   5117 	 * Release all msix queue resources:
   5118 	 */
   5119 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   5120 		if (que->res != NULL) {
   5121 			pci_intr_disestablish(adapter->osdep.pc,
   5122 			    adapter->osdep.ihs[i]);
   5123 			adapter->osdep.ihs[i] = NULL;
   5124 		}
   5125 	}
   5126 
   5127 	/* Clean the Legacy or Link interrupt last */
   5128 	if (adapter->vector) /* we are doing MSIX */
   5129 		rid = adapter->vector;
   5130 	else
   5131 		rid = 0;
   5132 
   5133 	if (adapter->osdep.ihs[rid] != NULL) {
   5134 		pci_intr_disestablish(adapter->osdep.pc,
   5135 		    adapter->osdep.ihs[rid]);
   5136 		adapter->osdep.ihs[rid] = NULL;
   5137 	}
   5138 
   5139 	if (adapter->osdep.intrs != NULL) {
   5140 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   5141 		    adapter->osdep.nintrs);
   5142 		adapter->osdep.intrs = NULL;
   5143 	}
   5144 } /* ixgbe_free_pciintr_resources */
   5145 
   5146 /************************************************************************
   5147  * ixgbe_free_pci_resources
   5148  ************************************************************************/
   5149 static void
   5150 ixgbe_free_pci_resources(struct adapter *adapter)
   5151 {
   5152 
   5153 	ixgbe_free_pciintr_resources(adapter);
   5154 
   5155 	if (adapter->osdep.mem_size != 0) {
   5156 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   5157 		    adapter->osdep.mem_bus_space_handle,
   5158 		    adapter->osdep.mem_size);
   5159 	}
   5160 
   5161 } /* ixgbe_free_pci_resources */
   5162 
   5163 /************************************************************************
   5164  * ixgbe_set_sysctl_value
   5165  ************************************************************************/
   5166 static void
   5167 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   5168     const char *description, int *limit, int value)
   5169 {
   5170 	device_t dev =	adapter->dev;
   5171 	struct sysctllog **log;
   5172 	const struct sysctlnode *rnode, *cnode;
   5173 
   5174 	/*
   5175 	 * It's not required to check recovery mode because this function never
   5176 	 * touches hardware.
   5177 	 */
   5178 
   5179 	log = &adapter->sysctllog;
   5180 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5181 		aprint_error_dev(dev, "could not create sysctl root\n");
   5182 		return;
   5183 	}
   5184 	if (sysctl_createv(log, 0, &rnode, &cnode,
   5185 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   5186 	    name, SYSCTL_DESCR(description),
   5187 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   5188 		aprint_error_dev(dev, "could not create sysctl\n");
   5189 	*limit = value;
   5190 } /* ixgbe_set_sysctl_value */
   5191 
   5192 /************************************************************************
   5193  * ixgbe_sysctl_flowcntl
   5194  *
   5195  *   SYSCTL wrapper around setting Flow Control
   5196  ************************************************************************/
   5197 static int
   5198 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   5199 {
   5200 	struct sysctlnode node = *rnode;
   5201 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5202 	int error, fc;
   5203 
   5204 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5205 		return (EPERM);
   5206 
   5207 	fc = adapter->hw.fc.current_mode;
   5208 	node.sysctl_data = &fc;
   5209 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5210 	if (error != 0 || newp == NULL)
   5211 		return error;
   5212 
   5213 	/* Don't bother if it's not changed */
   5214 	if (fc == adapter->hw.fc.current_mode)
   5215 		return (0);
   5216 
   5217 	return ixgbe_set_flowcntl(adapter, fc);
   5218 } /* ixgbe_sysctl_flowcntl */
   5219 
   5220 /************************************************************************
   5221  * ixgbe_set_flowcntl - Set flow control
   5222  *
   5223  *   Flow control values:
   5224  *     0 - off
   5225  *     1 - rx pause
   5226  *     2 - tx pause
   5227  *     3 - full
   5228  ************************************************************************/
   5229 static int
   5230 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   5231 {
   5232 	switch (fc) {
   5233 		case ixgbe_fc_rx_pause:
   5234 		case ixgbe_fc_tx_pause:
   5235 		case ixgbe_fc_full:
   5236 			adapter->hw.fc.requested_mode = fc;
   5237 			if (adapter->num_queues > 1)
   5238 				ixgbe_disable_rx_drop(adapter);
   5239 			break;
   5240 		case ixgbe_fc_none:
   5241 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5242 			if (adapter->num_queues > 1)
   5243 				ixgbe_enable_rx_drop(adapter);
   5244 			break;
   5245 		default:
   5246 			return (EINVAL);
   5247 	}
   5248 
   5249 #if 0 /* XXX NetBSD */
   5250 	/* Don't autoneg if forcing a value */
   5251 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   5252 #endif
   5253 	ixgbe_fc_enable(&adapter->hw);
   5254 
   5255 	return (0);
   5256 } /* ixgbe_set_flowcntl */
   5257 
   5258 /************************************************************************
   5259  * ixgbe_enable_rx_drop
   5260  *
   5261  *   Enable the hardware to drop packets when the buffer is
   5262  *   full. This is useful with multiqueue, so that no single
   5263  *   queue being full stalls the entire RX engine. We only
   5264  *   enable this when Multiqueue is enabled AND Flow Control
   5265  *   is disabled.
   5266  ************************************************************************/
   5267 static void
   5268 ixgbe_enable_rx_drop(struct adapter *adapter)
   5269 {
   5270 	struct ixgbe_hw *hw = &adapter->hw;
   5271 	struct rx_ring	*rxr;
   5272 	u32		srrctl;
   5273 
   5274 	for (int i = 0; i < adapter->num_queues; i++) {
   5275 		rxr = &adapter->rx_rings[i];
   5276 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5277 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   5278 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5279 	}
   5280 
   5281 	/* enable drop for each vf */
   5282 	for (int i = 0; i < adapter->num_vfs; i++) {
   5283 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5284 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   5285 		    IXGBE_QDE_ENABLE));
   5286 	}
   5287 } /* ixgbe_enable_rx_drop */
   5288 
   5289 /************************************************************************
   5290  * ixgbe_disable_rx_drop
   5291  ************************************************************************/
   5292 static void
   5293 ixgbe_disable_rx_drop(struct adapter *adapter)
   5294 {
   5295 	struct ixgbe_hw *hw = &adapter->hw;
   5296 	struct rx_ring	*rxr;
   5297 	u32		srrctl;
   5298 
   5299 	for (int i = 0; i < adapter->num_queues; i++) {
   5300 		rxr = &adapter->rx_rings[i];
   5301 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5302 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   5303 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5304 	}
   5305 
   5306 	/* disable drop for each vf */
   5307 	for (int i = 0; i < adapter->num_vfs; i++) {
   5308 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5309 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   5310 	}
   5311 } /* ixgbe_disable_rx_drop */
   5312 
   5313 /************************************************************************
   5314  * ixgbe_sysctl_advertise
   5315  *
   5316  *   SYSCTL wrapper around setting advertised speed
   5317  ************************************************************************/
   5318 static int
   5319 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   5320 {
   5321 	struct sysctlnode node = *rnode;
   5322 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5323 	int	       error = 0, advertise;
   5324 
   5325 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5326 		return (EPERM);
   5327 
   5328 	advertise = adapter->advertise;
   5329 	node.sysctl_data = &advertise;
   5330 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5331 	if (error != 0 || newp == NULL)
   5332 		return error;
   5333 
   5334 	return ixgbe_set_advertise(adapter, advertise);
   5335 } /* ixgbe_sysctl_advertise */
   5336 
   5337 /************************************************************************
   5338  * ixgbe_set_advertise - Control advertised link speed
   5339  *
   5340  *   Flags:
   5341  *     0x00 - Default (all capable link speed)
   5342  *     0x01 - advertise 100 Mb
   5343  *     0x02 - advertise 1G
   5344  *     0x04 - advertise 10G
   5345  *     0x08 - advertise 10 Mb
   5346  *     0x10 - advertise 2.5G
   5347  *     0x20 - advertise 5G
   5348  ************************************************************************/
   5349 static int
   5350 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5351 {
   5352 	device_t	 dev;
   5353 	struct ixgbe_hw	 *hw;
   5354 	ixgbe_link_speed speed = 0;
   5355 	ixgbe_link_speed link_caps = 0;
   5356 	s32		 err = IXGBE_NOT_IMPLEMENTED;
   5357 	bool		 negotiate = FALSE;
   5358 
   5359 	/* Checks to validate new value */
   5360 	if (adapter->advertise == advertise) /* no change */
   5361 		return (0);
   5362 
   5363 	dev = adapter->dev;
   5364 	hw = &adapter->hw;
   5365 
   5366 	/* No speed changes for backplane media */
   5367 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5368 		return (ENODEV);
   5369 
   5370 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5371 	    (hw->phy.multispeed_fiber))) {
   5372 		device_printf(dev,
   5373 		    "Advertised speed can only be set on copper or "
   5374 		    "multispeed fiber media types.\n");
   5375 		return (EINVAL);
   5376 	}
   5377 
   5378 	if (advertise < 0x0 || advertise > 0x2f) {
   5379 		device_printf(dev,
   5380 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5381 		return (EINVAL);
   5382 	}
   5383 
   5384 	if (hw->mac.ops.get_link_capabilities) {
   5385 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5386 		    &negotiate);
   5387 		if (err != IXGBE_SUCCESS) {
   5388 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5389 			return (ENODEV);
   5390 		}
   5391 	}
   5392 
   5393 	/* Set new value and report new advertised mode */
   5394 	if (advertise & 0x1) {
   5395 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5396 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5397 			return (EINVAL);
   5398 		}
   5399 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5400 	}
   5401 	if (advertise & 0x2) {
   5402 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5403 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5404 			return (EINVAL);
   5405 		}
   5406 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5407 	}
   5408 	if (advertise & 0x4) {
   5409 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5410 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5411 			return (EINVAL);
   5412 		}
   5413 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5414 	}
   5415 	if (advertise & 0x8) {
   5416 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5417 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5418 			return (EINVAL);
   5419 		}
   5420 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5421 	}
   5422 	if (advertise & 0x10) {
   5423 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5424 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5425 			return (EINVAL);
   5426 		}
   5427 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5428 	}
   5429 	if (advertise & 0x20) {
   5430 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5431 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5432 			return (EINVAL);
   5433 		}
   5434 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5435 	}
   5436 	if (advertise == 0)
   5437 		speed = link_caps; /* All capable link speed */
   5438 
   5439 	hw->mac.autotry_restart = TRUE;
   5440 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5441 	adapter->advertise = advertise;
   5442 
   5443 	return (0);
   5444 } /* ixgbe_set_advertise */
   5445 
   5446 /************************************************************************
   5447  * ixgbe_get_advertise - Get current advertised speed settings
   5448  *
   5449  *   Formatted for sysctl usage.
   5450  *   Flags:
   5451  *     0x01 - advertise 100 Mb
   5452  *     0x02 - advertise 1G
   5453  *     0x04 - advertise 10G
   5454  *     0x08 - advertise 10 Mb (yes, Mb)
   5455  *     0x10 - advertise 2.5G
   5456  *     0x20 - advertise 5G
   5457  ************************************************************************/
   5458 static int
   5459 ixgbe_get_advertise(struct adapter *adapter)
   5460 {
   5461 	struct ixgbe_hw	 *hw = &adapter->hw;
   5462 	int		 speed;
   5463 	ixgbe_link_speed link_caps = 0;
   5464 	s32		 err;
   5465 	bool		 negotiate = FALSE;
   5466 
   5467 	/*
   5468 	 * Advertised speed means nothing unless it's copper or
   5469 	 * multi-speed fiber
   5470 	 */
   5471 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5472 	    !(hw->phy.multispeed_fiber))
   5473 		return (0);
   5474 
   5475 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5476 	if (err != IXGBE_SUCCESS)
   5477 		return (0);
   5478 
   5479 	speed =
   5480 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5481 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5482 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5483 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5484 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5485 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5486 
   5487 	return speed;
   5488 } /* ixgbe_get_advertise */
   5489 
   5490 /************************************************************************
   5491  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5492  *
   5493  *   Control values:
   5494  *     0/1 - off / on (use default value of 1000)
   5495  *
   5496  *     Legal timer values are:
   5497  *     50,100,250,500,1000,2000,5000,10000
   5498  *
   5499  *     Turning off interrupt moderation will also turn this off.
   5500  ************************************************************************/
   5501 static int
   5502 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5503 {
   5504 	struct sysctlnode node = *rnode;
   5505 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5506 	struct ifnet   *ifp = adapter->ifp;
   5507 	int	       error;
   5508 	int	       newval;
   5509 
   5510 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5511 		return (EPERM);
   5512 
   5513 	newval = adapter->dmac;
   5514 	node.sysctl_data = &newval;
   5515 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5516 	if ((error) || (newp == NULL))
   5517 		return (error);
   5518 
   5519 	switch (newval) {
   5520 	case 0:
   5521 		/* Disabled */
   5522 		adapter->dmac = 0;
   5523 		break;
   5524 	case 1:
   5525 		/* Enable and use default */
   5526 		adapter->dmac = 1000;
   5527 		break;
   5528 	case 50:
   5529 	case 100:
   5530 	case 250:
   5531 	case 500:
   5532 	case 1000:
   5533 	case 2000:
   5534 	case 5000:
   5535 	case 10000:
   5536 		/* Legal values - allow */
   5537 		adapter->dmac = newval;
   5538 		break;
   5539 	default:
   5540 		/* Do nothing, illegal value */
   5541 		return (EINVAL);
   5542 	}
   5543 
   5544 	/* Re-initialize hardware if it's already running */
   5545 	if (ifp->if_flags & IFF_RUNNING)
   5546 		ifp->if_init(ifp);
   5547 
   5548 	return (0);
   5549 }
   5550 
   5551 #ifdef IXGBE_DEBUG
   5552 /************************************************************************
   5553  * ixgbe_sysctl_power_state
   5554  *
   5555  *   Sysctl to test power states
   5556  *   Values:
   5557  *     0      - set device to D0
   5558  *     3      - set device to D3
   5559  *     (none) - get current device power state
   5560  ************************************************************************/
   5561 static int
   5562 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   5563 {
   5564 #ifdef notyet
   5565 	struct sysctlnode node = *rnode;
   5566 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5567 	device_t       dev =  adapter->dev;
   5568 	int	       curr_ps, new_ps, error = 0;
   5569 
   5570 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5571 		return (EPERM);
   5572 
   5573 	curr_ps = new_ps = pci_get_powerstate(dev);
   5574 
   5575 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5576 	if ((error) || (req->newp == NULL))
   5577 		return (error);
   5578 
   5579 	if (new_ps == curr_ps)
   5580 		return (0);
   5581 
   5582 	if (new_ps == 3 && curr_ps == 0)
   5583 		error = DEVICE_SUSPEND(dev);
   5584 	else if (new_ps == 0 && curr_ps == 3)
   5585 		error = DEVICE_RESUME(dev);
   5586 	else
   5587 		return (EINVAL);
   5588 
   5589 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5590 
   5591 	return (error);
   5592 #else
   5593 	return 0;
   5594 #endif
   5595 } /* ixgbe_sysctl_power_state */
   5596 #endif
   5597 
   5598 /************************************************************************
   5599  * ixgbe_sysctl_wol_enable
   5600  *
   5601  *   Sysctl to enable/disable the WoL capability,
   5602  *   if supported by the adapter.
   5603  *
   5604  *   Values:
   5605  *     0 - disabled
   5606  *     1 - enabled
   5607  ************************************************************************/
   5608 static int
   5609 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5610 {
   5611 	struct sysctlnode node = *rnode;
   5612 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5613 	struct ixgbe_hw *hw = &adapter->hw;
   5614 	bool		new_wol_enabled;
   5615 	int		error = 0;
   5616 
   5617 	/*
   5618 	 * It's not required to check recovery mode because this function never
   5619 	 * touches hardware.
   5620 	 */
   5621 	new_wol_enabled = hw->wol_enabled;
   5622 	node.sysctl_data = &new_wol_enabled;
   5623 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5624 	if ((error) || (newp == NULL))
   5625 		return (error);
   5626 	if (new_wol_enabled == hw->wol_enabled)
   5627 		return (0);
   5628 
   5629 	if (new_wol_enabled && !adapter->wol_support)
   5630 		return (ENODEV);
   5631 	else
   5632 		hw->wol_enabled = new_wol_enabled;
   5633 
   5634 	return (0);
   5635 } /* ixgbe_sysctl_wol_enable */
   5636 
   5637 /************************************************************************
   5638  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5639  *
   5640  *   Sysctl to enable/disable the types of packets that the
   5641  *   adapter will wake up on upon receipt.
   5642  *   Flags:
   5643  *     0x1  - Link Status Change
   5644  *     0x2  - Magic Packet
   5645  *     0x4  - Direct Exact
   5646  *     0x8  - Directed Multicast
   5647  *     0x10 - Broadcast
   5648  *     0x20 - ARP/IPv4 Request Packet
   5649  *     0x40 - Direct IPv4 Packet
   5650  *     0x80 - Direct IPv6 Packet
   5651  *
   5652  *   Settings not listed above will cause the sysctl to return an error.
   5653  ************************************************************************/
   5654 static int
   5655 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5656 {
   5657 	struct sysctlnode node = *rnode;
   5658 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5659 	int error = 0;
   5660 	u32 new_wufc;
   5661 
   5662 	/*
   5663 	 * It's not required to check recovery mode because this function never
   5664 	 * touches hardware.
   5665 	 */
   5666 	new_wufc = adapter->wufc;
   5667 	node.sysctl_data = &new_wufc;
   5668 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5669 	if ((error) || (newp == NULL))
   5670 		return (error);
   5671 	if (new_wufc == adapter->wufc)
   5672 		return (0);
   5673 
   5674 	if (new_wufc & 0xffffff00)
   5675 		return (EINVAL);
   5676 
   5677 	new_wufc &= 0xff;
   5678 	new_wufc |= (0xffffff & adapter->wufc);
   5679 	adapter->wufc = new_wufc;
   5680 
   5681 	return (0);
   5682 } /* ixgbe_sysctl_wufc */
   5683 
   5684 #ifdef IXGBE_DEBUG
   5685 /************************************************************************
   5686  * ixgbe_sysctl_print_rss_config
   5687  ************************************************************************/
   5688 static int
   5689 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5690 {
   5691 #ifdef notyet
   5692 	struct sysctlnode node = *rnode;
   5693 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5694 	struct ixgbe_hw *hw = &adapter->hw;
   5695 	device_t	dev = adapter->dev;
   5696 	struct sbuf	*buf;
   5697 	int		error = 0, reta_size;
   5698 	u32		reg;
   5699 
   5700 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5701 		return (EPERM);
   5702 
   5703 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5704 	if (!buf) {
   5705 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5706 		return (ENOMEM);
   5707 	}
   5708 
   5709 	// TODO: use sbufs to make a string to print out
   5710 	/* Set multiplier for RETA setup and table size based on MAC */
   5711 	switch (adapter->hw.mac.type) {
   5712 	case ixgbe_mac_X550:
   5713 	case ixgbe_mac_X550EM_x:
   5714 	case ixgbe_mac_X550EM_a:
   5715 		reta_size = 128;
   5716 		break;
   5717 	default:
   5718 		reta_size = 32;
   5719 		break;
   5720 	}
   5721 
   5722 	/* Print out the redirection table */
   5723 	sbuf_cat(buf, "\n");
   5724 	for (int i = 0; i < reta_size; i++) {
   5725 		if (i < 32) {
   5726 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5727 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5728 		} else {
   5729 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5730 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5731 		}
   5732 	}
   5733 
   5734 	// TODO: print more config
   5735 
   5736 	error = sbuf_finish(buf);
   5737 	if (error)
   5738 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5739 
   5740 	sbuf_delete(buf);
   5741 #endif
   5742 	return (0);
   5743 } /* ixgbe_sysctl_print_rss_config */
   5744 #endif /* IXGBE_DEBUG */
   5745 
   5746 /************************************************************************
   5747  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5748  *
   5749  *   For X552/X557-AT devices using an external PHY
   5750  ************************************************************************/
   5751 static int
   5752 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5753 {
   5754 	struct sysctlnode node = *rnode;
   5755 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5756 	struct ixgbe_hw *hw = &adapter->hw;
   5757 	int val;
   5758 	u16 reg;
   5759 	int		error;
   5760 
   5761 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5762 		return (EPERM);
   5763 
   5764 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5765 		device_printf(adapter->dev,
   5766 		    "Device has no supported external thermal sensor.\n");
   5767 		return (ENODEV);
   5768 	}
   5769 
   5770 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5771 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5772 		device_printf(adapter->dev,
   5773 		    "Error reading from PHY's current temperature register\n");
   5774 		return (EAGAIN);
   5775 	}
   5776 
   5777 	node.sysctl_data = &val;
   5778 
   5779 	/* Shift temp for output */
   5780 	val = reg >> 8;
   5781 
   5782 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5783 	if ((error) || (newp == NULL))
   5784 		return (error);
   5785 
   5786 	return (0);
   5787 } /* ixgbe_sysctl_phy_temp */
   5788 
   5789 /************************************************************************
   5790  * ixgbe_sysctl_phy_overtemp_occurred
   5791  *
   5792  *   Reports (directly from the PHY) whether the current PHY
   5793  *   temperature is over the overtemp threshold.
   5794  ************************************************************************/
   5795 static int
   5796 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5797 {
   5798 	struct sysctlnode node = *rnode;
   5799 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5800 	struct ixgbe_hw *hw = &adapter->hw;
   5801 	int val, error;
   5802 	u16 reg;
   5803 
   5804 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5805 		return (EPERM);
   5806 
   5807 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5808 		device_printf(adapter->dev,
   5809 		    "Device has no supported external thermal sensor.\n");
   5810 		return (ENODEV);
   5811 	}
   5812 
   5813 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5814 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5815 		device_printf(adapter->dev,
   5816 		    "Error reading from PHY's temperature status register\n");
   5817 		return (EAGAIN);
   5818 	}
   5819 
   5820 	node.sysctl_data = &val;
   5821 
   5822 	/* Get occurrence bit */
   5823 	val = !!(reg & 0x4000);
   5824 
   5825 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5826 	if ((error) || (newp == NULL))
   5827 		return (error);
   5828 
   5829 	return (0);
   5830 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5831 
   5832 /************************************************************************
   5833  * ixgbe_sysctl_eee_state
   5834  *
   5835  *   Sysctl to set EEE power saving feature
   5836  *   Values:
   5837  *     0      - disable EEE
   5838  *     1      - enable EEE
   5839  *     (none) - get current device EEE state
   5840  ************************************************************************/
   5841 static int
   5842 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5843 {
   5844 	struct sysctlnode node = *rnode;
   5845 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5846 	struct ifnet   *ifp = adapter->ifp;
   5847 	device_t       dev = adapter->dev;
   5848 	int	       curr_eee, new_eee, error = 0;
   5849 	s32	       retval;
   5850 
   5851 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5852 		return (EPERM);
   5853 
   5854 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5855 	node.sysctl_data = &new_eee;
   5856 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5857 	if ((error) || (newp == NULL))
   5858 		return (error);
   5859 
   5860 	/* Nothing to do */
   5861 	if (new_eee == curr_eee)
   5862 		return (0);
   5863 
   5864 	/* Not supported */
   5865 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5866 		return (EINVAL);
   5867 
   5868 	/* Bounds checking */
   5869 	if ((new_eee < 0) || (new_eee > 1))
   5870 		return (EINVAL);
   5871 
   5872 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5873 	if (retval) {
   5874 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5875 		return (EINVAL);
   5876 	}
   5877 
   5878 	/* Restart auto-neg */
   5879 	ifp->if_init(ifp);
   5880 
   5881 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5882 
   5883 	/* Cache new value */
   5884 	if (new_eee)
   5885 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5886 	else
   5887 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5888 
   5889 	return (error);
   5890 } /* ixgbe_sysctl_eee_state */
   5891 
/*
 * PRINTQS - dump a per-queue register to the console.
 *
 * Emits one line of the form "<devname>: <regname>\t<val0> <val1> ...",
 * reading IXGBE_<regname>(i) for each of the adapter's queues.
 * Used by ixgbe_print_debug_info() below.
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
   5905 
   5906 /************************************************************************
   5907  * ixgbe_print_debug_info
   5908  *
   5909  *   Called only when em_display_debug_stats is enabled.
   5910  *   Provides a way to take a look at important statistics
   5911  *   maintained by the driver and hardware.
   5912  ************************************************************************/
   5913 static void
   5914 ixgbe_print_debug_info(struct adapter *adapter)
   5915 {
   5916 	device_t	dev = adapter->dev;
   5917 	struct ixgbe_hw *hw = &adapter->hw;
   5918 	int table_size;
   5919 	int i;
   5920 
   5921 	switch (adapter->hw.mac.type) {
   5922 	case ixgbe_mac_X550:
   5923 	case ixgbe_mac_X550EM_x:
   5924 	case ixgbe_mac_X550EM_a:
   5925 		table_size = 128;
   5926 		break;
   5927 	default:
   5928 		table_size = 32;
   5929 		break;
   5930 	}
   5931 
   5932 	device_printf(dev, "[E]RETA:\n");
   5933 	for (i = 0; i < table_size; i++) {
   5934 		if (i < 32)
   5935 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5936 				IXGBE_RETA(i)));
   5937 		else
   5938 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5939 				IXGBE_ERETA(i - 32)));
   5940 	}
   5941 
   5942 	device_printf(dev, "queue:");
   5943 	for (i = 0; i < adapter->num_queues; i++) {
   5944 		printf((i == 0) ? "\t" : " ");
   5945 		printf("%8d", i);
   5946 	}
   5947 	printf("\n");
   5948 	PRINTQS(adapter, RDBAL);
   5949 	PRINTQS(adapter, RDBAH);
   5950 	PRINTQS(adapter, RDLEN);
   5951 	PRINTQS(adapter, SRRCTL);
   5952 	PRINTQS(adapter, RDH);
   5953 	PRINTQS(adapter, RDT);
   5954 	PRINTQS(adapter, RXDCTL);
   5955 
   5956 	device_printf(dev, "RQSMR:");
   5957 	for (i = 0; i < adapter->num_queues / 4; i++) {
   5958 		printf((i == 0) ? "\t" : " ");
   5959 		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
   5960 	}
   5961 	printf("\n");
   5962 
   5963 	device_printf(dev, "disabled_count:");
   5964 	for (i = 0; i < adapter->num_queues; i++) {
   5965 		printf((i == 0) ? "\t" : " ");
   5966 		printf("%8d", adapter->queues[i].disabled_count);
   5967 	}
   5968 	printf("\n");
   5969 
   5970 	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
   5971 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5972 		device_printf(dev, "EIMS_EX(0):\t%08x\n",
   5973 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
   5974 		device_printf(dev, "EIMS_EX(1):\t%08x\n",
   5975 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
   5976 	}
   5977 } /* ixgbe_print_debug_info */
   5978 
   5979 /************************************************************************
   5980  * ixgbe_sysctl_debug
   5981  ************************************************************************/
   5982 static int
   5983 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
   5984 {
   5985 	struct sysctlnode node = *rnode;
   5986 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5987 	int	       error, result = 0;
   5988 
   5989 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5990 		return (EPERM);
   5991 
   5992 	node.sysctl_data = &result;
   5993 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5994 
   5995 	if (error || newp == NULL)
   5996 		return error;
   5997 
   5998 	if (result == 1)
   5999 		ixgbe_print_debug_info(adapter);
   6000 
   6001 	return 0;
   6002 } /* ixgbe_sysctl_debug */
   6003 
   6004 /************************************************************************
   6005  * ixgbe_init_device_features
   6006  ************************************************************************/
   6007 static void
   6008 ixgbe_init_device_features(struct adapter *adapter)
   6009 {
   6010 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   6011 			  | IXGBE_FEATURE_RSS
   6012 			  | IXGBE_FEATURE_MSI
   6013 			  | IXGBE_FEATURE_MSIX
   6014 			  | IXGBE_FEATURE_LEGACY_IRQ
   6015 			  | IXGBE_FEATURE_LEGACY_TX;
   6016 
   6017 	/* Set capabilities first... */
   6018 	switch (adapter->hw.mac.type) {
   6019 	case ixgbe_mac_82598EB:
   6020 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   6021 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   6022 		break;
   6023 	case ixgbe_mac_X540:
   6024 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6025 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6026 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   6027 		    (adapter->hw.bus.func == 0))
   6028 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6029 		break;
   6030 	case ixgbe_mac_X550:
   6031 		/*
   6032 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6033 		 * NVM Image version.
   6034 		 */
   6035 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6036 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6037 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6038 		break;
   6039 	case ixgbe_mac_X550EM_x:
   6040 		/*
   6041 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6042 		 * NVM Image version.
   6043 		 */
   6044 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6045 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6046 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   6047 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6048 		break;
   6049 	case ixgbe_mac_X550EM_a:
   6050 		/*
   6051 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6052 		 * NVM Image version.
   6053 		 */
   6054 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6055 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6056 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6057 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   6058 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   6059 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6060 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6061 		}
   6062 		break;
   6063 	case ixgbe_mac_82599EB:
   6064 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6065 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6066 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   6067 		    (adapter->hw.bus.func == 0))
   6068 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6069 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   6070 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6071 		break;
   6072 	default:
   6073 		break;
   6074 	}
   6075 
   6076 	/* Enabled by default... */
   6077 	/* Fan failure detection */
   6078 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   6079 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   6080 	/* Netmap */
   6081 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   6082 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   6083 	/* EEE */
   6084 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   6085 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   6086 	/* Thermal Sensor */
   6087 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   6088 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   6089 	/*
   6090 	 * Recovery mode:
   6091 	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
   6092 	 * NVM Image version.
   6093 	 */
   6094 
   6095 	/* Enabled via global sysctl... */
   6096 	/* Flow Director */
   6097 	if (ixgbe_enable_fdir) {
   6098 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   6099 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   6100 		else
   6101 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   6102 	}
   6103 	/* Legacy (single queue) transmit */
   6104 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   6105 	    ixgbe_enable_legacy_tx)
   6106 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   6107 	/*
   6108 	 * Message Signal Interrupts - Extended (MSI-X)
   6109 	 * Normal MSI is only enabled if MSI-X calls fail.
   6110 	 */
   6111 	if (!ixgbe_enable_msix)
   6112 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   6113 	/* Receive-Side Scaling (RSS) */
   6114 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   6115 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   6116 
   6117 	/* Disable features with unmet dependencies... */
   6118 	/* No MSI-X */
   6119 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   6120 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6121 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6122 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   6123 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   6124 	}
   6125 } /* ixgbe_init_device_features */
   6126 
   6127 /************************************************************************
   6128  * ixgbe_probe - Device identification routine
   6129  *
   6130  *   Determines if the driver should be loaded on
   6131  *   adapter based on its PCI vendor/device ID.
   6132  *
   6133  *   return BUS_PROBE_DEFAULT on success, positive on failure
   6134  ************************************************************************/
   6135 static int
   6136 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   6137 {
   6138 	const struct pci_attach_args *pa = aux;
   6139 
   6140 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   6141 }
   6142 
   6143 static const ixgbe_vendor_info_t *
   6144 ixgbe_lookup(const struct pci_attach_args *pa)
   6145 {
   6146 	const ixgbe_vendor_info_t *ent;
   6147 	pcireg_t subid;
   6148 
   6149 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   6150 
   6151 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   6152 		return NULL;
   6153 
   6154 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   6155 
   6156 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   6157 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   6158 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   6159 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   6160 			(ent->subvendor_id == 0)) &&
   6161 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   6162 			(ent->subdevice_id == 0))) {
   6163 			return ent;
   6164 		}
   6165 	}
   6166 	return NULL;
   6167 }
   6168 
   6169 static int
   6170 ixgbe_ifflags_cb(struct ethercom *ec)
   6171 {
   6172 	struct ifnet *ifp = &ec->ec_if;
   6173 	struct adapter *adapter = ifp->if_softc;
   6174 	int change, rv = 0;
   6175 
   6176 	IXGBE_CORE_LOCK(adapter);
   6177 
   6178 	change = ifp->if_flags ^ adapter->if_flags;
   6179 	if (change != 0)
   6180 		adapter->if_flags = ifp->if_flags;
   6181 
   6182 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   6183 		rv = ENETRESET;
   6184 		goto out;
   6185 	} else if ((change & IFF_PROMISC) != 0)
   6186 		ixgbe_set_promisc(adapter);
   6187 
   6188 	/* Check for ec_capenable. */
   6189 	change = ec->ec_capenable ^ adapter->ec_capenable;
   6190 	adapter->ec_capenable = ec->ec_capenable;
   6191 	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
   6192 	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
   6193 		rv = ENETRESET;
   6194 		goto out;
   6195 	}
   6196 
   6197 	/*
   6198 	 * Special handling is not required for ETHERCAP_VLAN_MTU.
   6199 	 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
   6200 	 */
   6201 
   6202 	/* Set up VLAN support and filter */
   6203 	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
   6204 		ixgbe_setup_vlan_hw_support(adapter);
   6205 
   6206 out:
   6207 	IXGBE_CORE_UNLOCK(adapter);
   6208 
   6209 	return rv;
   6210 }
   6211 
   6212 /************************************************************************
   6213  * ixgbe_ioctl - Ioctl entry point
   6214  *
   6215  *   Called when the user wants to configure the interface.
   6216  *
   6217  *   return 0 on success, positive on failure
   6218  ************************************************************************/
   6219 static int
   6220 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   6221 {
   6222 	struct adapter	*adapter = ifp->if_softc;
   6223 	struct ixgbe_hw *hw = &adapter->hw;
   6224 	struct ifcapreq *ifcr = data;
   6225 	struct ifreq	*ifr = data;
   6226 	int		error = 0;
   6227 	int l4csum_en;
   6228 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   6229 	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   6230 
   6231 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   6232 		return (EPERM);
   6233 
   6234 	switch (command) {
   6235 	case SIOCSIFFLAGS:
   6236 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   6237 		break;
   6238 	case SIOCADDMULTI:
   6239 	case SIOCDELMULTI:
   6240 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   6241 		break;
   6242 	case SIOCSIFMEDIA:
   6243 	case SIOCGIFMEDIA:
   6244 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   6245 		break;
   6246 	case SIOCSIFCAP:
   6247 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   6248 		break;
   6249 	case SIOCSIFMTU:
   6250 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   6251 		break;
   6252 #ifdef __NetBSD__
   6253 	case SIOCINITIFADDR:
   6254 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   6255 		break;
   6256 	case SIOCGIFFLAGS:
   6257 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   6258 		break;
   6259 	case SIOCGIFAFLAG_IN:
   6260 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   6261 		break;
   6262 	case SIOCGIFADDR:
   6263 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   6264 		break;
   6265 	case SIOCGIFMTU:
   6266 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   6267 		break;
   6268 	case SIOCGIFCAP:
   6269 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   6270 		break;
   6271 	case SIOCGETHERCAP:
   6272 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   6273 		break;
   6274 	case SIOCGLIFADDR:
   6275 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   6276 		break;
   6277 	case SIOCZIFDATA:
   6278 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   6279 		hw->mac.ops.clear_hw_cntrs(hw);
   6280 		ixgbe_clear_evcnt(adapter);
   6281 		break;
   6282 	case SIOCAIFADDR:
   6283 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   6284 		break;
   6285 #endif
   6286 	default:
   6287 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   6288 		break;
   6289 	}
   6290 
   6291 	switch (command) {
   6292 	case SIOCGI2C:
   6293 	{
   6294 		struct ixgbe_i2c_req	i2c;
   6295 
   6296 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   6297 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   6298 		if (error != 0)
   6299 			break;
   6300 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   6301 			error = EINVAL;
   6302 			break;
   6303 		}
   6304 		if (i2c.len > sizeof(i2c.data)) {
   6305 			error = EINVAL;
   6306 			break;
   6307 		}
   6308 
   6309 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   6310 		    i2c.dev_addr, i2c.data);
   6311 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   6312 		break;
   6313 	}
   6314 	case SIOCSIFCAP:
   6315 		/* Layer-4 Rx checksum offload has to be turned on and
   6316 		 * off as a unit.
   6317 		 */
   6318 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   6319 		if (l4csum_en != l4csum && l4csum_en != 0)
   6320 			return EINVAL;
   6321 		/*FALLTHROUGH*/
   6322 	case SIOCADDMULTI:
   6323 	case SIOCDELMULTI:
   6324 	case SIOCSIFFLAGS:
   6325 	case SIOCSIFMTU:
   6326 	default:
   6327 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   6328 			return error;
   6329 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   6330 			;
   6331 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   6332 			IXGBE_CORE_LOCK(adapter);
   6333 			if ((ifp->if_flags & IFF_RUNNING) != 0)
   6334 				ixgbe_init_locked(adapter);
   6335 			ixgbe_recalculate_max_frame(adapter);
   6336 			IXGBE_CORE_UNLOCK(adapter);
   6337 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   6338 			/*
   6339 			 * Multicast list has changed; set the hardware filter
   6340 			 * accordingly.
   6341 			 */
   6342 			IXGBE_CORE_LOCK(adapter);
   6343 			ixgbe_disable_intr(adapter);
   6344 			ixgbe_set_multi(adapter);
   6345 			ixgbe_enable_intr(adapter);
   6346 			IXGBE_CORE_UNLOCK(adapter);
   6347 		}
   6348 		return 0;
   6349 	}
   6350 
   6351 	return error;
   6352 } /* ixgbe_ioctl */
   6353 
   6354 /************************************************************************
   6355  * ixgbe_check_fan_failure
   6356  ************************************************************************/
   6357 static void
   6358 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   6359 {
   6360 	u32 mask;
   6361 
   6362 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   6363 	    IXGBE_ESDP_SDP1;
   6364 
   6365 	if (reg & mask)
   6366 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   6367 } /* ixgbe_check_fan_failure */
   6368 
   6369 /************************************************************************
   6370  * ixgbe_handle_que
   6371  ************************************************************************/
   6372 static void
   6373 ixgbe_handle_que(void *context)
   6374 {
   6375 	struct ix_queue *que = context;
   6376 	struct adapter	*adapter = que->adapter;
   6377 	struct tx_ring	*txr = que->txr;
   6378 	struct ifnet	*ifp = adapter->ifp;
   6379 	bool		more = false;
   6380 
   6381 	que->handleq.ev_count++;
   6382 
   6383 	if (ifp->if_flags & IFF_RUNNING) {
   6384 		more = ixgbe_rxeof(que);
   6385 		IXGBE_TX_LOCK(txr);
   6386 		more |= ixgbe_txeof(txr);
   6387 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6388 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   6389 				ixgbe_mq_start_locked(ifp, txr);
   6390 		/* Only for queue 0 */
   6391 		/* NetBSD still needs this for CBQ */
   6392 		if ((&adapter->queues[0] == que)
   6393 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   6394 			ixgbe_legacy_start_locked(ifp, txr);
   6395 		IXGBE_TX_UNLOCK(txr);
   6396 	}
   6397 
   6398 	if (more) {
   6399 		que->req.ev_count++;
   6400 		ixgbe_sched_handle_que(adapter, que);
   6401 	} else if (que->res != NULL) {
   6402 		/* Re-enable this interrupt */
   6403 		ixgbe_enable_queue(adapter, que->msix);
   6404 	} else
   6405 		ixgbe_enable_intr(adapter);
   6406 
   6407 	return;
   6408 } /* ixgbe_handle_que */
   6409 
   6410 /************************************************************************
   6411  * ixgbe_handle_que_work
   6412  ************************************************************************/
   6413 static void
   6414 ixgbe_handle_que_work(struct work *wk, void *context)
   6415 {
   6416 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   6417 
   6418 	/*
   6419 	 * "enqueued flag" is not required here.
   6420 	 * See ixgbe_msix_que().
   6421 	 */
   6422 	ixgbe_handle_que(que);
   6423 }
   6424 
   6425 /************************************************************************
   6426  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   6427  ************************************************************************/
   6428 static int
   6429 ixgbe_allocate_legacy(struct adapter *adapter,
   6430     const struct pci_attach_args *pa)
   6431 {
   6432 	device_t	dev = adapter->dev;
   6433 	struct ix_queue *que = adapter->queues;
   6434 	struct tx_ring	*txr = adapter->tx_rings;
   6435 	int		counts[PCI_INTR_TYPE_SIZE];
   6436 	pci_intr_type_t intr_type, max_type;
   6437 	char		intrbuf[PCI_INTRSTR_LEN];
   6438 	const char	*intrstr = NULL;
   6439 
   6440 	/* We allocate a single interrupt resource */
   6441 	max_type = PCI_INTR_TYPE_MSI;
   6442 	counts[PCI_INTR_TYPE_MSIX] = 0;
   6443 	counts[PCI_INTR_TYPE_MSI] =
   6444 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   6445 	/* Check not feat_en but feat_cap to fallback to INTx */
   6446 	counts[PCI_INTR_TYPE_INTX] =
   6447 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   6448 
   6449 alloc_retry:
   6450 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   6451 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   6452 		return ENXIO;
   6453 	}
   6454 	adapter->osdep.nintrs = 1;
   6455 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   6456 	    intrbuf, sizeof(intrbuf));
   6457 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   6458 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   6459 	    device_xname(dev));
   6460 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   6461 	if (adapter->osdep.ihs[0] == NULL) {
   6462 		aprint_error_dev(dev,"unable to establish %s\n",
   6463 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6464 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6465 		adapter->osdep.intrs = NULL;
   6466 		switch (intr_type) {
   6467 		case PCI_INTR_TYPE_MSI:
   6468 			/* The next try is for INTx: Disable MSI */
   6469 			max_type = PCI_INTR_TYPE_INTX;
   6470 			counts[PCI_INTR_TYPE_INTX] = 1;
   6471 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6472 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   6473 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6474 				goto alloc_retry;
   6475 			} else
   6476 				break;
   6477 		case PCI_INTR_TYPE_INTX:
   6478 		default:
   6479 			/* See below */
   6480 			break;
   6481 		}
   6482 	}
   6483 	if (intr_type == PCI_INTR_TYPE_INTX) {
   6484 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6485 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6486 	}
   6487 	if (adapter->osdep.ihs[0] == NULL) {
   6488 		aprint_error_dev(dev,
   6489 		    "couldn't establish interrupt%s%s\n",
   6490 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   6491 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6492 		adapter->osdep.intrs = NULL;
   6493 		return ENXIO;
   6494 	}
   6495 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   6496 	/*
   6497 	 * Try allocating a fast interrupt and the associated deferred
   6498 	 * processing contexts.
   6499 	 */
   6500 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6501 		txr->txr_si =
   6502 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6503 			ixgbe_deferred_mq_start, txr);
   6504 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6505 	    ixgbe_handle_que, que);
   6506 
   6507 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   6508 		& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   6509 		aprint_error_dev(dev,
   6510 		    "could not establish software interrupts\n");
   6511 
   6512 		return ENXIO;
   6513 	}
   6514 	/* For simplicity in the handlers */
   6515 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   6516 
   6517 	return (0);
   6518 } /* ixgbe_allocate_legacy */
   6519 
   6520 /************************************************************************
   6521  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6522  ************************************************************************/
   6523 static int
   6524 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6525 {
   6526 	device_t	dev = adapter->dev;
   6527 	struct		ix_queue *que = adapter->queues;
   6528 	struct		tx_ring *txr = adapter->tx_rings;
   6529 	pci_chipset_tag_t pc;
   6530 	char		intrbuf[PCI_INTRSTR_LEN];
   6531 	char		intr_xname[32];
   6532 	char		wqname[MAXCOMLEN];
   6533 	const char	*intrstr = NULL;
   6534 	int		error, vector = 0;
   6535 	int		cpu_id = 0;
   6536 	kcpuset_t	*affinity;
   6537 #ifdef RSS
   6538 	unsigned int	rss_buckets = 0;
   6539 	kcpuset_t	cpu_mask;
   6540 #endif
   6541 
   6542 	pc = adapter->osdep.pc;
   6543 #ifdef	RSS
   6544 	/*
   6545 	 * If we're doing RSS, the number of queues needs to
   6546 	 * match the number of RSS buckets that are configured.
   6547 	 *
   6548 	 * + If there's more queues than RSS buckets, we'll end
   6549 	 *   up with queues that get no traffic.
   6550 	 *
   6551 	 * + If there's more RSS buckets than queues, we'll end
   6552 	 *   up having multiple RSS buckets map to the same queue,
   6553 	 *   so there'll be some contention.
   6554 	 */
   6555 	rss_buckets = rss_getnumbuckets();
   6556 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6557 	    (adapter->num_queues != rss_buckets)) {
   6558 		device_printf(dev,
   6559 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6560 		    "; performance will be impacted.\n",
   6561 		    __func__, adapter->num_queues, rss_buckets);
   6562 	}
   6563 #endif
   6564 
   6565 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6566 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6567 	    adapter->osdep.nintrs) != 0) {
   6568 		aprint_error_dev(dev,
   6569 		    "failed to allocate MSI-X interrupt\n");
   6570 		return (ENXIO);
   6571 	}
   6572 
   6573 	kcpuset_create(&affinity, false);
   6574 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6575 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6576 		    device_xname(dev), i);
   6577 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6578 		    sizeof(intrbuf));
   6579 #ifdef IXGBE_MPSAFE
   6580 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6581 		    true);
   6582 #endif
   6583 		/* Set the handler function */
   6584 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6585 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6586 		    intr_xname);
   6587 		if (que->res == NULL) {
   6588 			aprint_error_dev(dev,
   6589 			    "Failed to register QUE handler\n");
   6590 			error = ENXIO;
   6591 			goto err_out;
   6592 		}
   6593 		que->msix = vector;
   6594 		adapter->active_queues |= 1ULL << que->msix;
   6595 
   6596 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6597 #ifdef	RSS
   6598 			/*
   6599 			 * The queue ID is used as the RSS layer bucket ID.
   6600 			 * We look up the queue ID -> RSS CPU ID and select
   6601 			 * that.
   6602 			 */
   6603 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6604 			CPU_SETOF(cpu_id, &cpu_mask);
   6605 #endif
   6606 		} else {
   6607 			/*
   6608 			 * Bind the MSI-X vector, and thus the
   6609 			 * rings to the corresponding CPU.
   6610 			 *
   6611 			 * This just happens to match the default RSS
   6612 			 * round-robin bucket -> queue -> CPU allocation.
   6613 			 */
   6614 			if (adapter->num_queues > 1)
   6615 				cpu_id = i;
   6616 		}
   6617 		/* Round-robin affinity */
   6618 		kcpuset_zero(affinity);
   6619 		kcpuset_set(affinity, cpu_id % ncpu);
   6620 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6621 		    NULL);
   6622 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6623 		    intrstr);
   6624 		if (error == 0) {
   6625 #if 1 /* def IXGBE_DEBUG */
   6626 #ifdef	RSS
   6627 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6628 			    cpu_id % ncpu);
   6629 #else
   6630 			aprint_normal(", bound queue %d to cpu %d", i,
   6631 			    cpu_id % ncpu);
   6632 #endif
   6633 #endif /* IXGBE_DEBUG */
   6634 		}
   6635 		aprint_normal("\n");
   6636 
   6637 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6638 			txr->txr_si = softint_establish(
   6639 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6640 				ixgbe_deferred_mq_start, txr);
   6641 			if (txr->txr_si == NULL) {
   6642 				aprint_error_dev(dev,
   6643 				    "couldn't establish software interrupt\n");
   6644 				error = ENXIO;
   6645 				goto err_out;
   6646 			}
   6647 		}
   6648 		que->que_si
   6649 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6650 			ixgbe_handle_que, que);
   6651 		if (que->que_si == NULL) {
   6652 			aprint_error_dev(dev,
   6653 			    "couldn't establish software interrupt\n");
   6654 			error = ENXIO;
   6655 			goto err_out;
   6656 		}
   6657 	}
   6658 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6659 	error = workqueue_create(&adapter->txr_wq, wqname,
   6660 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6661 	    IXGBE_WORKQUEUE_FLAGS);
   6662 	if (error) {
   6663 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6664 		goto err_out;
   6665 	}
   6666 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6667 
   6668 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6669 	error = workqueue_create(&adapter->que_wq, wqname,
   6670 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6671 	    IXGBE_WORKQUEUE_FLAGS);
   6672 	if (error) {
   6673 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6674 		goto err_out;
   6675 	}
   6676 
   6677 	/* and Link */
   6678 	cpu_id++;
   6679 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6680 	adapter->vector = vector;
   6681 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6682 	    sizeof(intrbuf));
   6683 #ifdef IXGBE_MPSAFE
   6684 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6685 	    true);
   6686 #endif
   6687 	/* Set the link handler function */
   6688 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6689 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6690 	    intr_xname);
   6691 	if (adapter->osdep.ihs[vector] == NULL) {
   6692 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6693 		error = ENXIO;
   6694 		goto err_out;
   6695 	}
   6696 	/* Round-robin affinity */
   6697 	kcpuset_zero(affinity);
   6698 	kcpuset_set(affinity, cpu_id % ncpu);
   6699 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6700 	    NULL);
   6701 
   6702 	aprint_normal_dev(dev,
   6703 	    "for link, interrupting at %s", intrstr);
   6704 	if (error == 0)
   6705 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6706 	else
   6707 		aprint_normal("\n");
   6708 
   6709 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6710 		adapter->mbx_si =
   6711 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6712 			ixgbe_handle_mbx, adapter);
   6713 		if (adapter->mbx_si == NULL) {
   6714 			aprint_error_dev(dev,
   6715 			    "could not establish software interrupts\n");
   6716 
   6717 			error = ENXIO;
   6718 			goto err_out;
   6719 		}
   6720 	}
   6721 
   6722 	kcpuset_destroy(affinity);
   6723 	aprint_normal_dev(dev,
   6724 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6725 
   6726 	return (0);
   6727 
   6728 err_out:
   6729 	kcpuset_destroy(affinity);
   6730 	ixgbe_free_softint(adapter);
   6731 	ixgbe_free_pciintr_resources(adapter);
   6732 	return (error);
   6733 } /* ixgbe_allocate_msix */
   6734 
   6735 /************************************************************************
   6736  * ixgbe_configure_interrupts
   6737  *
   6738  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6739  *   This will also depend on user settings.
   6740  ************************************************************************/
   6741 static int
   6742 ixgbe_configure_interrupts(struct adapter *adapter)
   6743 {
   6744 	device_t dev = adapter->dev;
   6745 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6746 	int want, queues, msgs;
   6747 
   6748 	/* Default to 1 queue if MSI-X setup fails */
   6749 	adapter->num_queues = 1;
   6750 
   6751 	/* Override by tuneable */
   6752 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6753 		goto msi;
   6754 
   6755 	/*
   6756 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6757 	 * interrupt slot.
   6758 	 */
   6759 	if (ncpu == 1)
   6760 		goto msi;
   6761 
   6762 	/* First try MSI-X */
   6763 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6764 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6765 	if (msgs < 2)
   6766 		goto msi;
   6767 
   6768 	adapter->msix_mem = (void *)1; /* XXX */
   6769 
   6770 	/* Figure out a reasonable auto config value */
   6771 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6772 
   6773 #ifdef	RSS
   6774 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6775 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6776 		queues = uimin(queues, rss_getnumbuckets());
   6777 #endif
   6778 	if (ixgbe_num_queues > queues) {
   6779 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6780 		ixgbe_num_queues = queues;
   6781 	}
   6782 
   6783 	if (ixgbe_num_queues != 0)
   6784 		queues = ixgbe_num_queues;
   6785 	else
   6786 		queues = uimin(queues,
   6787 		    uimin(mac->max_tx_queues, mac->max_rx_queues));
   6788 
   6789 	/* reflect correct sysctl value */
   6790 	ixgbe_num_queues = queues;
   6791 
   6792 	/*
   6793 	 * Want one vector (RX/TX pair) per queue
   6794 	 * plus an additional for Link.
   6795 	 */
   6796 	want = queues + 1;
   6797 	if (msgs >= want)
   6798 		msgs = want;
   6799 	else {
   6800 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6801 		    "%d vectors but %d queues wanted!\n",
   6802 		    msgs, want);
   6803 		goto msi;
   6804 	}
   6805 	adapter->num_queues = queues;
   6806 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6807 	return (0);
   6808 
   6809 	/*
   6810 	 * MSI-X allocation failed or provided us with
   6811 	 * less vectors than needed. Free MSI-X resources
   6812 	 * and we'll try enabling MSI.
   6813 	 */
   6814 msi:
   6815 	/* Without MSI-X, some features are no longer supported */
   6816 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6817 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6818 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6819 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6820 
   6821 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6822 	adapter->msix_mem = NULL; /* XXX */
   6823 	if (msgs > 1)
   6824 		msgs = 1;
   6825 	if (msgs != 0) {
   6826 		msgs = 1;
   6827 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6828 		return (0);
   6829 	}
   6830 
   6831 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6832 		aprint_error_dev(dev,
   6833 		    "Device does not support legacy interrupts.\n");
   6834 		return 1;
   6835 	}
   6836 
   6837 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6838 
   6839 	return (0);
   6840 } /* ixgbe_configure_interrupts */
   6841 
   6842 
   6843 /************************************************************************
   6844  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6845  *
   6846  *   Done outside of interrupt context since the driver might sleep
   6847  ************************************************************************/
   6848 static void
   6849 ixgbe_handle_link(void *context)
   6850 {
   6851 	struct adapter	*adapter = context;
   6852 	struct ixgbe_hw *hw = &adapter->hw;
   6853 
   6854 	IXGBE_CORE_LOCK(adapter);
   6855 	++adapter->link_sicount.ev_count;
   6856 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6857 	ixgbe_update_link_status(adapter);
   6858 
   6859 	/* Re-enable link interrupts */
   6860 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6861 
   6862 	IXGBE_CORE_UNLOCK(adapter);
   6863 } /* ixgbe_handle_link */
   6864 
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 lo, hi;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has a single 32-bit EICS covering all queues. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
		    (u32)(IXGBE_EIMS_RTX_QUEUE & queues));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Later MACs split the 64-bit queue mask across EICS_EX. */
		lo = (u32)(queues & 0xFFFFFFFF);
		hi = (u32)(queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), lo);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), hi);
		break;
	default:
		break;
	}
} /* ixgbe_rearm_queues */
#endif
   6894