Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe.c revision 1.211
      1 /* $NetBSD: ixgbe.c,v 1.211 2019/09/18 05:32:15 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
     80 /************************************************************************
     81  * Driver version
     82  ************************************************************************/
     83 static const char ixgbe_driver_version[] = "4.0.1-k";
     84 /* XXX NetBSD: + 3.3.10 */
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
     95 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     96 {
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    140 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    141 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    142 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    143 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    144 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    145 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    146 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    147 	/* required last entry */
    148 	{0, 0, 0, 0, 0}
    149 };
    150 
    151 /************************************************************************
    152  * Table of branding strings
    153  ************************************************************************/
    154 static const char    *ixgbe_strings[] = {
    155 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    156 };
    157 
    158 /************************************************************************
    159  * Function prototypes
    160  ************************************************************************/
    161 static int	ixgbe_probe(device_t, cfdata_t, void *);
    162 static void	ixgbe_attach(device_t, device_t, void *);
    163 static int	ixgbe_detach(device_t, int);
    164 #if 0
    165 static int	ixgbe_shutdown(device_t);
    166 #endif
    167 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    168 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    169 static int	ixgbe_ifflags_cb(struct ethercom *);
    170 static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
    171 static void	ixgbe_ifstop(struct ifnet *, int);
    172 static int	ixgbe_init(struct ifnet *);
    173 static void	ixgbe_init_locked(struct adapter *);
    174 static void	ixgbe_stop(void *);
    175 static void	ixgbe_init_device_features(struct adapter *);
    176 static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
    177 static void	ixgbe_add_media_types(struct adapter *);
    178 static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    179 static int	ixgbe_media_change(struct ifnet *);
    180 static int	ixgbe_allocate_pci_resources(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static void	ixgbe_free_softint(struct adapter *);
    183 static void	ixgbe_get_slot_info(struct adapter *);
    184 static int	ixgbe_allocate_msix(struct adapter *,
    185 		    const struct pci_attach_args *);
    186 static int	ixgbe_allocate_legacy(struct adapter *,
    187 		    const struct pci_attach_args *);
    188 static int	ixgbe_configure_interrupts(struct adapter *);
    189 static void	ixgbe_free_pciintr_resources(struct adapter *);
    190 static void	ixgbe_free_pci_resources(struct adapter *);
    191 static void	ixgbe_local_timer(void *);
    192 static void	ixgbe_local_timer1(void *);
    193 static void	ixgbe_recovery_mode_timer(void *);
    194 static int	ixgbe_setup_interface(device_t, struct adapter *);
    195 static void	ixgbe_config_gpie(struct adapter *);
    196 static void	ixgbe_config_dmac(struct adapter *);
    197 static void	ixgbe_config_delay_values(struct adapter *);
    198 static void	ixgbe_config_link(struct adapter *);
    199 static void	ixgbe_check_wol_support(struct adapter *);
    200 static int	ixgbe_setup_low_power_mode(struct adapter *);
    201 #if 0
    202 static void	ixgbe_rearm_queues(struct adapter *, u64);
    203 #endif
    204 
    205 static void	ixgbe_initialize_transmit_units(struct adapter *);
    206 static void	ixgbe_initialize_receive_units(struct adapter *);
    207 static void	ixgbe_enable_rx_drop(struct adapter *);
    208 static void	ixgbe_disable_rx_drop(struct adapter *);
    209 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    210 
    211 static void	ixgbe_enable_intr(struct adapter *);
    212 static void	ixgbe_disable_intr(struct adapter *);
    213 static void	ixgbe_update_stats_counters(struct adapter *);
    214 static void	ixgbe_set_promisc(struct adapter *);
    215 static void	ixgbe_set_multi(struct adapter *);
    216 static void	ixgbe_update_link_status(struct adapter *);
    217 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    218 static void	ixgbe_configure_ivars(struct adapter *);
    219 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    220 static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
    221 
    222 static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
    223 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    224 static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
    225 static int	ixgbe_register_vlan(struct adapter *, u16);
    226 static int	ixgbe_unregister_vlan(struct adapter *, u16);
    227 
    228 static void	ixgbe_add_device_sysctls(struct adapter *);
    229 static void	ixgbe_add_hw_stats(struct adapter *);
    230 static void	ixgbe_clear_evcnt(struct adapter *);
    231 static int	ixgbe_set_flowcntl(struct adapter *, int);
    232 static int	ixgbe_set_advertise(struct adapter *, int);
    233 static int	ixgbe_get_advertise(struct adapter *);
    234 
    235 /* Sysctl handlers */
    236 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    237 		     const char *, int *, int);
    238 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    239 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    242 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    243 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    244 #ifdef IXGBE_DEBUG
    245 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    246 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    247 #endif
    248 static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    249 static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    250 static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    251 static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    252 static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    253 static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    254 static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
    255 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    256 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    257 
    258 /* Support for pluggable optic modules */
    259 static bool	ixgbe_sfp_probe(struct adapter *);
    260 
    261 /* Legacy (single vector) interrupt handler */
    262 static int	ixgbe_legacy_irq(void *);
    263 
    264 /* The MSI/MSI-X Interrupt handlers */
    265 static int	ixgbe_msix_que(void *);
    266 static int	ixgbe_msix_link(void *);
    267 
    268 /* Software interrupts for deferred work */
    269 static void	ixgbe_handle_que(void *);
    270 static void	ixgbe_handle_link(void *);
    271 static void	ixgbe_handle_msf(void *);
    272 static void	ixgbe_handle_mod(void *);
    273 static void	ixgbe_handle_phy(void *);
    274 
    275 /* Workqueue handler for deferred work */
    276 static void	ixgbe_handle_que_work(struct work *, void *);
    277 
    278 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    279 
    280 /************************************************************************
    281  *  NetBSD Device Interface Entry Points
    282  ************************************************************************/
    283 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    284     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    285     DVF_DETACH_SHUTDOWN);
    286 
    287 #if 0
    288 devclass_t ix_devclass;
    289 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    290 
    291 MODULE_DEPEND(ix, pci, 1, 1, 1);
    292 MODULE_DEPEND(ix, ether, 1, 1, 1);
    293 #ifdef DEV_NETMAP
    294 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    295 #endif
    296 #endif
    297 
    298 /*
    299  * TUNEABLE PARAMETERS:
    300  */
    301 
    302 /*
    303  * AIM: Adaptive Interrupt Moderation
    304  * which means that the interrupt rate
    305  * is varied over time based on the
    306  * traffic for that interrupt vector
    307  */
    308 static bool ixgbe_enable_aim = true;
    309 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    310 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    311     "Enable adaptive interrupt moderation");
    312 
    313 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    314 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    315     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    316 
    317 /* How many packets rxeof tries to clean at a time */
    318 static int ixgbe_rx_process_limit = 256;
    319 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    320     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    321 
    322 /* How many packets txeof tries to clean at a time */
    323 static int ixgbe_tx_process_limit = 256;
    324 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    325     &ixgbe_tx_process_limit, 0,
    326     "Maximum number of sent packets to process at a time, -1 means unlimited");
    327 
    328 /* Flow control setting, default to full */
    329 static int ixgbe_flow_control = ixgbe_fc_full;
    330 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    331     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    332 
    333 /* Which packet processing uses workqueue or softint */
    334 static bool ixgbe_txrx_workqueue = false;
    335 
    336 /*
    337  * Smart speed setting, default to on
    338  * this only works as a compile option
    339  * right now as its during attach, set
    340  * this to 'ixgbe_smart_speed_off' to
    341  * disable.
    342  */
    343 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    344 
    345 /*
    346  * MSI-X should be the default for best performance,
    347  * but this allows it to be forced off for testing.
    348  */
    349 static int ixgbe_enable_msix = 1;
    350 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    351     "Enable MSI-X interrupts");
    352 
    353 /*
    354  * Number of Queues, can be set to 0,
    355  * it then autoconfigures based on the
    356  * number of cpus with a max of 8. This
    357  * can be overriden manually here.
    358  */
    359 static int ixgbe_num_queues = 0;
    360 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    361     "Number of queues to configure, 0 indicates autoconfigure");
    362 
    363 /*
    364  * Number of TX descriptors per ring,
    365  * setting higher than RX as this seems
    366  * the better performing choice.
    367  */
    368 static int ixgbe_txd = PERFORM_TXD;
    369 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    370     "Number of transmit descriptors per queue");
    371 
    372 /* Number of RX descriptors per ring */
    373 static int ixgbe_rxd = PERFORM_RXD;
    374 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    375     "Number of receive descriptors per queue");
    376 
    377 /*
    378  * Defining this on will allow the use
    379  * of unsupported SFP+ modules, note that
    380  * doing so you are on your own :)
    381  */
    382 static int allow_unsupported_sfp = false;
    383 #define TUNABLE_INT(__x, __y)
    384 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    385 
    386 /*
    387  * Not sure if Flow Director is fully baked,
    388  * so we'll default to turning it off.
    389  */
    390 static int ixgbe_enable_fdir = 0;
    391 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    392     "Enable Flow Director");
    393 
    394 /* Legacy Transmit (single queue) */
    395 static int ixgbe_enable_legacy_tx = 0;
    396 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    397     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    398 
    399 /* Receive-Side Scaling */
    400 static int ixgbe_enable_rss = 1;
    401 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    402     "Enable Receive-Side Scaling (RSS)");
    403 
    404 #if 0
    405 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    406 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    407 #endif
    408 
    409 #ifdef NET_MPSAFE
    410 #define IXGBE_MPSAFE		1
    411 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    412 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    413 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    414 #else
    415 #define IXGBE_CALLOUT_FLAGS	0
    416 #define IXGBE_SOFTINFT_FLAGS	0
    417 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    418 #endif
    419 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    420 
    421 /************************************************************************
    422  * ixgbe_initialize_rss_mapping
    423  ************************************************************************/
    424 static void
    425 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    426 {
    427 	struct ixgbe_hw	*hw = &adapter->hw;
    428 	u32		reta = 0, mrqc, rss_key[10];
    429 	int		queue_id, table_size, index_mult;
    430 	int		i, j;
    431 	u32		rss_hash_config;
    432 
    433 	/* force use default RSS key. */
    434 #ifdef __NetBSD__
    435 	rss_getkey((uint8_t *) &rss_key);
    436 #else
    437 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    438 		/* Fetch the configured RSS key */
    439 		rss_getkey((uint8_t *) &rss_key);
    440 	} else {
    441 		/* set up random bits */
    442 		cprng_fast(&rss_key, sizeof(rss_key));
    443 	}
    444 #endif
    445 
    446 	/* Set multiplier for RETA setup and table size based on MAC */
    447 	index_mult = 0x1;
    448 	table_size = 128;
    449 	switch (adapter->hw.mac.type) {
    450 	case ixgbe_mac_82598EB:
    451 		index_mult = 0x11;
    452 		break;
    453 	case ixgbe_mac_X550:
    454 	case ixgbe_mac_X550EM_x:
    455 	case ixgbe_mac_X550EM_a:
    456 		table_size = 512;
    457 		break;
    458 	default:
    459 		break;
    460 	}
    461 
    462 	/* Set up the redirection table */
    463 	for (i = 0, j = 0; i < table_size; i++, j++) {
    464 		if (j == adapter->num_queues)
    465 			j = 0;
    466 
    467 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    468 			/*
    469 			 * Fetch the RSS bucket id for the given indirection
    470 			 * entry. Cap it at the number of configured buckets
    471 			 * (which is num_queues.)
    472 			 */
    473 			queue_id = rss_get_indirection_to_bucket(i);
    474 			queue_id = queue_id % adapter->num_queues;
    475 		} else
    476 			queue_id = (j * index_mult);
    477 
    478 		/*
    479 		 * The low 8 bits are for hash value (n+0);
    480 		 * The next 8 bits are for hash value (n+1), etc.
    481 		 */
    482 		reta = reta >> 8;
    483 		reta = reta | (((uint32_t) queue_id) << 24);
    484 		if ((i & 3) == 3) {
    485 			if (i < 128)
    486 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    487 			else
    488 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    489 				    reta);
    490 			reta = 0;
    491 		}
    492 	}
    493 
    494 	/* Now fill our hash function seeds */
    495 	for (i = 0; i < 10; i++)
    496 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    497 
    498 	/* Perform hash on these packet types */
    499 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    500 		rss_hash_config = rss_gethashconfig();
    501 	else {
    502 		/*
    503 		 * Disable UDP - IP fragments aren't currently being handled
    504 		 * and so we end up with a mix of 2-tuple and 4-tuple
    505 		 * traffic.
    506 		 */
    507 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    508 				| RSS_HASHTYPE_RSS_TCP_IPV4
    509 				| RSS_HASHTYPE_RSS_IPV6
    510 				| RSS_HASHTYPE_RSS_TCP_IPV6
    511 				| RSS_HASHTYPE_RSS_IPV6_EX
    512 				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    513 	}
    514 
    515 	mrqc = IXGBE_MRQC_RSSEN;
    516 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    517 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    518 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    519 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    520 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    521 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    522 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    523 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    524 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    525 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    526 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    527 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    528 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    529 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    530 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    531 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    532 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    533 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    534 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    535 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    536 } /* ixgbe_initialize_rss_mapping */
    537 
    538 /************************************************************************
    539  * ixgbe_initialize_receive_units - Setup receive registers and features.
    540  ************************************************************************/
    541 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    542 
    543 static void
    544 ixgbe_initialize_receive_units(struct adapter *adapter)
    545 {
    546 	struct	rx_ring	*rxr = adapter->rx_rings;
    547 	struct ixgbe_hw	*hw = &adapter->hw;
    548 	struct ifnet	*ifp = adapter->ifp;
    549 	int		i, j;
    550 	u32		bufsz, fctrl, srrctl, rxcsum;
    551 	u32		hlreg;
    552 
    553 	/*
    554 	 * Make sure receives are disabled while
    555 	 * setting up the descriptor ring
    556 	 */
    557 	ixgbe_disable_rx(hw);
    558 
    559 	/* Enable broadcasts */
    560 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    561 	fctrl |= IXGBE_FCTRL_BAM;
    562 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    563 		fctrl |= IXGBE_FCTRL_DPF;
    564 		fctrl |= IXGBE_FCTRL_PMCF;
    565 	}
    566 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    567 
    568 	/* Set for Jumbo Frames? */
    569 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    570 	if (ifp->if_mtu > ETHERMTU)
    571 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    572 	else
    573 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    574 
    575 #ifdef DEV_NETMAP
    576 	/* CRC stripping is conditional in Netmap */
    577 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    578 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    579 	    !ix_crcstrip)
    580 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    581 	else
    582 #endif /* DEV_NETMAP */
    583 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    584 
    585 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    586 
    587 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    588 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    589 
    590 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    591 		u64 rdba = rxr->rxdma.dma_paddr;
    592 		u32 reg;
    593 		int regnum = i / 4;	/* 1 register per 4 queues */
    594 		int regshift = i % 4;	/* 4 bits per 1 queue */
    595 		j = rxr->me;
    596 
    597 		/* Setup the Base and Length of the Rx Descriptor Ring */
    598 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    599 		    (rdba & 0x00000000ffffffffULL));
    600 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    601 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    602 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    603 
    604 		/* Set up the SRRCTL register */
    605 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    606 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    607 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    608 		srrctl |= bufsz;
    609 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    610 
    611 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    612 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    613 		reg &= ~(0x000000ffUL << (regshift * 8));
    614 		reg |= i << (regshift * 8);
    615 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    616 
    617 		/*
    618 		 * Set DROP_EN iff we have no flow control and >1 queue.
    619 		 * Note that srrctl was cleared shortly before during reset,
    620 		 * so we do not need to clear the bit, but do it just in case
    621 		 * this code is moved elsewhere.
    622 		 */
    623 		if (adapter->num_queues > 1 &&
    624 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    625 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    626 		} else {
    627 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    628 		}
    629 
    630 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    631 
    632 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    633 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    634 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    635 
    636 		/* Set the driver rx tail address */
    637 		rxr->tail =  IXGBE_RDT(rxr->me);
    638 	}
    639 
    640 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    641 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    642 			    | IXGBE_PSRTYPE_UDPHDR
    643 			    | IXGBE_PSRTYPE_IPV4HDR
    644 			    | IXGBE_PSRTYPE_IPV6HDR;
    645 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    646 	}
    647 
    648 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    649 
    650 	ixgbe_initialize_rss_mapping(adapter);
    651 
    652 	if (adapter->num_queues > 1) {
    653 		/* RSS and RX IPP Checksum are mutually exclusive */
    654 		rxcsum |= IXGBE_RXCSUM_PCSD;
    655 	}
    656 
    657 	if (ifp->if_capenable & IFCAP_RXCSUM)
    658 		rxcsum |= IXGBE_RXCSUM_PCSD;
    659 
    660 	/* This is useful for calculating UDP/IP fragment checksums */
    661 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    662 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    663 
    664 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    665 
    666 } /* ixgbe_initialize_receive_units */
    667 
    668 /************************************************************************
    669  * ixgbe_initialize_transmit_units - Enable transmit units.
    670  ************************************************************************/
    671 static void
    672 ixgbe_initialize_transmit_units(struct adapter *adapter)
    673 {
    674 	struct tx_ring	*txr = adapter->tx_rings;
    675 	struct ixgbe_hw	*hw = &adapter->hw;
    676 	int i;
    677 
    678 	/* Setup the Base and Length of the Tx Descriptor Ring */
    679 	for (i = 0; i < adapter->num_queues; i++, txr++) {
    680 		u64 tdba = txr->txdma.dma_paddr;
    681 		u32 txctrl = 0;
    682 		u32 tqsmreg, reg;
    683 		int regnum = i / 4;	/* 1 register per 4 queues */
    684 		int regshift = i % 4;	/* 4 bits per 1 queue */
    685 		int j = txr->me;
    686 
    687 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    688 		    (tdba & 0x00000000ffffffffULL));
    689 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    690 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    691 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    692 
    693 		/*
    694 		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
    695 		 * Register location is different between 82598 and others.
    696 		 */
    697 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    698 			tqsmreg = IXGBE_TQSMR(regnum);
    699 		else
    700 			tqsmreg = IXGBE_TQSM(regnum);
    701 		reg = IXGBE_READ_REG(hw, tqsmreg);
    702 		reg &= ~(0x000000ffUL << (regshift * 8));
    703 		reg |= i << (regshift * 8);
    704 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    705 
    706 		/* Setup the HW Tx Head and Tail descriptor pointers */
    707 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    708 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    709 
    710 		/* Cache the tail address */
    711 		txr->tail = IXGBE_TDT(j);
    712 
    713 		txr->txr_no_space = false;
    714 
    715 		/* Disable Head Writeback */
    716 		/*
    717 		 * Note: for X550 series devices, these registers are actually
    718 		 * prefixed with TPH_ isntead of DCA_, but the addresses and
    719 		 * fields remain the same.
    720 		 */
    721 		switch (hw->mac.type) {
    722 		case ixgbe_mac_82598EB:
    723 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    724 			break;
    725 		default:
    726 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    727 			break;
    728 		}
    729 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    730 		switch (hw->mac.type) {
    731 		case ixgbe_mac_82598EB:
    732 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    733 			break;
    734 		default:
    735 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    736 			break;
    737 		}
    738 
    739 	}
    740 
    741 	if (hw->mac.type != ixgbe_mac_82598EB) {
    742 		u32 dmatxctl, rttdcs;
    743 
    744 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    745 		dmatxctl |= IXGBE_DMATXCTL_TE;
    746 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    747 		/* Disable arbiter to set MTQC */
    748 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    749 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    750 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    751 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    752 		    ixgbe_get_mtqc(adapter->iov_mode));
    753 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    754 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    755 	}
    756 
    757 	return;
    758 } /* ixgbe_initialize_transmit_units */
    759 
    760 /************************************************************************
    761  * ixgbe_attach - Device initialization routine
    762  *
    763  *   Called when the driver is being loaded.
    764  *   Identifies the type of hardware, allocates all resources
    765  *   and initializes the hardware.
    766  *
    767  *   return 0 on success, positive on failure
    768  ************************************************************************/
    769 static void
    770 ixgbe_attach(device_t parent, device_t dev, void *aux)
    771 {
    772 	struct adapter	*adapter;
    773 	struct ixgbe_hw *hw;
    774 	int		error = -1;
    775 	u32		ctrl_ext;
    776 	u16		high, low, nvmreg;
    777 	pcireg_t	id, subid;
    778 	const ixgbe_vendor_info_t *ent;
    779 	struct pci_attach_args *pa = aux;
    780 	const char *str;
    781 	char buf[256];
    782 
    783 	INIT_DEBUGOUT("ixgbe_attach: begin");
    784 
    785 	/* Allocate, clear, and link in our adapter structure */
    786 	adapter = device_private(dev);
    787 	adapter->hw.back = adapter;
    788 	adapter->dev = dev;
    789 	hw = &adapter->hw;
    790 	adapter->osdep.pc = pa->pa_pc;
    791 	adapter->osdep.tag = pa->pa_tag;
    792 	if (pci_dma64_available(pa))
    793 		adapter->osdep.dmat = pa->pa_dmat64;
    794 	else
    795 		adapter->osdep.dmat = pa->pa_dmat;
    796 	adapter->osdep.attached = false;
    797 
    798 	ent = ixgbe_lookup(pa);
    799 
    800 	KASSERT(ent != NULL);
    801 
    802 	aprint_normal(": %s, Version - %s\n",
    803 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    804 
    805 	/* Core Lock Init*/
    806 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    807 
    808 	/* Set up the timer callout */
    809 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    810 
    811 	/* Determine hardware revision */
    812 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    813 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    814 
    815 	hw->vendor_id = PCI_VENDOR(id);
    816 	hw->device_id = PCI_PRODUCT(id);
    817 	hw->revision_id =
    818 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    819 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    820 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    821 
    822 	/*
    823 	 * Make sure BUSMASTER is set
    824 	 */
    825 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    826 
    827 	/* Do base PCI setup - map BAR0 */
    828 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    829 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    830 		error = ENXIO;
    831 		goto err_out;
    832 	}
    833 
    834 	/* let hardware know driver is loaded */
    835 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    836 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    837 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    838 
    839 	/*
    840 	 * Initialize the shared code
    841 	 */
    842 	if (ixgbe_init_shared_code(hw) != 0) {
    843 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    844 		error = ENXIO;
    845 		goto err_out;
    846 	}
    847 
    848 	switch (hw->mac.type) {
    849 	case ixgbe_mac_82598EB:
    850 		str = "82598EB";
    851 		break;
    852 	case ixgbe_mac_82599EB:
    853 		str = "82599EB";
    854 		break;
    855 	case ixgbe_mac_X540:
    856 		str = "X540";
    857 		break;
    858 	case ixgbe_mac_X550:
    859 		str = "X550";
    860 		break;
    861 	case ixgbe_mac_X550EM_x:
    862 		str = "X550EM";
    863 		break;
    864 	case ixgbe_mac_X550EM_a:
    865 		str = "X550EM A";
    866 		break;
    867 	default:
    868 		str = "Unknown";
    869 		break;
    870 	}
    871 	aprint_normal_dev(dev, "device %s\n", str);
    872 
    873 	if (hw->mbx.ops.init_params)
    874 		hw->mbx.ops.init_params(hw);
    875 
    876 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    877 
    878 	/* Pick up the 82599 settings */
    879 	if (hw->mac.type != ixgbe_mac_82598EB) {
    880 		hw->phy.smart_speed = ixgbe_smart_speed;
    881 		adapter->num_segs = IXGBE_82599_SCATTER;
    882 	} else
    883 		adapter->num_segs = IXGBE_82598_SCATTER;
    884 
    885 	/* Ensure SW/FW semaphore is free */
    886 	ixgbe_init_swfw_semaphore(hw);
    887 
    888 	hw->mac.ops.set_lan_id(hw);
    889 	ixgbe_init_device_features(adapter);
    890 
    891 	if (ixgbe_configure_interrupts(adapter)) {
    892 		error = ENXIO;
    893 		goto err_out;
    894 	}
    895 
    896 	/* Allocate multicast array memory. */
    897 	adapter->mta = malloc(sizeof(*adapter->mta) *
    898 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    899 	if (adapter->mta == NULL) {
    900 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    901 		error = ENOMEM;
    902 		goto err_out;
    903 	}
    904 
    905 	/* Enable WoL (if supported) */
    906 	ixgbe_check_wol_support(adapter);
    907 
    908 	/* Register for VLAN events */
    909 	ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
    910 
    911 	/* Verify adapter fan is still functional (if applicable) */
    912 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    913 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    914 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    915 	}
    916 
    917 	/* Set an initial default flow control value */
    918 	hw->fc.requested_mode = ixgbe_flow_control;
    919 
    920 	/* Sysctls for limiting the amount of work done in the taskqueues */
    921 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    922 	    "max number of rx packets to process",
    923 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    924 
    925 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    926 	    "max number of tx packets to process",
    927 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    928 
    929 	/* Do descriptor calc and sanity checks */
    930 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    931 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    932 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    933 		adapter->num_tx_desc = DEFAULT_TXD;
    934 	} else
    935 		adapter->num_tx_desc = ixgbe_txd;
    936 
    937 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    938 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    939 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    940 		adapter->num_rx_desc = DEFAULT_RXD;
    941 	} else
    942 		adapter->num_rx_desc = ixgbe_rxd;
    943 
    944 	/* Allocate our TX/RX Queues */
    945 	if (ixgbe_allocate_queues(adapter)) {
    946 		error = ENOMEM;
    947 		goto err_out;
    948 	}
    949 
    950 	hw->phy.reset_if_overtemp = TRUE;
    951 	error = ixgbe_reset_hw(hw);
    952 	hw->phy.reset_if_overtemp = FALSE;
    953 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    954 		/*
    955 		 * No optics in this port, set up
    956 		 * so the timer routine will probe
    957 		 * for later insertion.
    958 		 */
    959 		adapter->sfp_probe = TRUE;
    960 		error = IXGBE_SUCCESS;
    961 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    962 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    963 		error = EIO;
    964 		goto err_late;
    965 	} else if (error) {
    966 		aprint_error_dev(dev, "Hardware initialization failed\n");
    967 		error = EIO;
    968 		goto err_late;
    969 	}
    970 
    971 	/* Make sure we have a good EEPROM before we read from it */
    972 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    973 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    974 		error = EIO;
    975 		goto err_late;
    976 	}
    977 
    978 	aprint_normal("%s:", device_xname(dev));
    979 	/* NVM Image Version */
    980 	high = low = 0;
    981 	switch (hw->mac.type) {
    982 	case ixgbe_mac_X540:
    983 	case ixgbe_mac_X550EM_a:
    984 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    985 		if (nvmreg == 0xffff)
    986 			break;
    987 		high = (nvmreg >> 12) & 0x0f;
    988 		low = (nvmreg >> 4) & 0xff;
    989 		id = nvmreg & 0x0f;
    990 		aprint_normal(" NVM Image Version %u.", high);
    991 		if (hw->mac.type == ixgbe_mac_X540)
    992 			str = "%x";
    993 		else
    994 			str = "%02x";
    995 		aprint_normal(str, low);
    996 		aprint_normal(" ID 0x%x,", id);
    997 		break;
    998 	case ixgbe_mac_X550EM_x:
    999 	case ixgbe_mac_X550:
   1000 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
   1001 		if (nvmreg == 0xffff)
   1002 			break;
   1003 		high = (nvmreg >> 12) & 0x0f;
   1004 		low = nvmreg & 0xff;
   1005 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
   1006 		break;
   1007 	default:
   1008 		break;
   1009 	}
   1010 	hw->eeprom.nvm_image_ver_high = high;
   1011 	hw->eeprom.nvm_image_ver_low = low;
   1012 
   1013 	/* PHY firmware revision */
   1014 	switch (hw->mac.type) {
   1015 	case ixgbe_mac_X540:
   1016 	case ixgbe_mac_X550:
   1017 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1018 		if (nvmreg == 0xffff)
   1019 			break;
   1020 		high = (nvmreg >> 12) & 0x0f;
   1021 		low = (nvmreg >> 4) & 0xff;
   1022 		id = nvmreg & 0x000f;
   1023 		aprint_normal(" PHY FW Revision %u.", high);
   1024 		if (hw->mac.type == ixgbe_mac_X540)
   1025 			str = "%x";
   1026 		else
   1027 			str = "%02x";
   1028 		aprint_normal(str, low);
   1029 		aprint_normal(" ID 0x%x,", id);
   1030 		break;
   1031 	default:
   1032 		break;
   1033 	}
   1034 
   1035 	/* NVM Map version & OEM NVM Image version */
   1036 	switch (hw->mac.type) {
   1037 	case ixgbe_mac_X550:
   1038 	case ixgbe_mac_X550EM_x:
   1039 	case ixgbe_mac_X550EM_a:
   1040 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1041 		if (nvmreg != 0xffff) {
   1042 			high = (nvmreg >> 12) & 0x0f;
   1043 			low = nvmreg & 0x00ff;
   1044 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1045 		}
   1046 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1047 		if (nvmreg != 0xffff) {
   1048 			high = (nvmreg >> 12) & 0x0f;
   1049 			low = nvmreg & 0x00ff;
   1050 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1051 			    low);
   1052 		}
   1053 		break;
   1054 	default:
   1055 		break;
   1056 	}
   1057 
   1058 	/* Print the ETrackID */
   1059 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1060 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1061 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1062 
   1063 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1064 		error = ixgbe_allocate_msix(adapter, pa);
   1065 		if (error) {
   1066 			/* Free allocated queue structures first */
   1067 			ixgbe_free_transmit_structures(adapter);
   1068 			ixgbe_free_receive_structures(adapter);
   1069 			free(adapter->queues, M_DEVBUF);
   1070 
   1071 			/* Fallback to legacy interrupt */
   1072 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1073 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1074 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1075 			adapter->num_queues = 1;
   1076 
   1077 			/* Allocate our TX/RX Queues again */
   1078 			if (ixgbe_allocate_queues(adapter)) {
   1079 				error = ENOMEM;
   1080 				goto err_out;
   1081 			}
   1082 		}
   1083 	}
   1084 	/* Recovery mode */
   1085 	switch (adapter->hw.mac.type) {
   1086 	case ixgbe_mac_X550:
   1087 	case ixgbe_mac_X550EM_x:
   1088 	case ixgbe_mac_X550EM_a:
   1089 		/* >= 2.00 */
   1090 		if (hw->eeprom.nvm_image_ver_high >= 2) {
   1091 			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
   1092 			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
   1093 		}
   1094 		break;
   1095 	default:
   1096 		break;
   1097 	}
   1098 
   1099 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1100 		error = ixgbe_allocate_legacy(adapter, pa);
   1101 	if (error)
   1102 		goto err_late;
   1103 
   1104 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1105 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1106 	    ixgbe_handle_link, adapter);
   1107 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1108 	    ixgbe_handle_mod, adapter);
   1109 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1110 	    ixgbe_handle_msf, adapter);
   1111 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1112 	    ixgbe_handle_phy, adapter);
   1113 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1114 		adapter->fdir_si =
   1115 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1116 			ixgbe_reinit_fdir, adapter);
   1117 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1118 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1119 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1120 		&& (adapter->fdir_si == NULL))) {
   1121 		aprint_error_dev(dev,
   1122 		    "could not establish software interrupts ()\n");
   1123 		goto err_out;
   1124 	}
   1125 
   1126 	error = ixgbe_start_hw(hw);
   1127 	switch (error) {
   1128 	case IXGBE_ERR_EEPROM_VERSION:
   1129 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1130 		    "LOM.  Please be aware there may be issues associated "
   1131 		    "with your hardware.\nIf you are experiencing problems "
   1132 		    "please contact your Intel or hardware representative "
   1133 		    "who provided you with this hardware.\n");
   1134 		break;
   1135 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1136 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1137 		error = EIO;
   1138 		goto err_late;
   1139 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1140 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1141 		/* falls thru */
   1142 	default:
   1143 		break;
   1144 	}
   1145 
   1146 	/* Setup OS specific network interface */
   1147 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1148 		goto err_late;
   1149 
   1150 	/*
   1151 	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
   1152 	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
   1153 	 */
   1154 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1155 		uint16_t id1, id2;
   1156 		int oui, model, rev;
   1157 		const char *descr;
   1158 
   1159 		id1 = hw->phy.id >> 16;
   1160 		id2 = hw->phy.id & 0xffff;
   1161 		oui = MII_OUI(id1, id2);
   1162 		model = MII_MODEL(id2);
   1163 		rev = MII_REV(id2);
   1164 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1165 			aprint_normal_dev(dev,
   1166 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1167 			    descr, oui, model, rev);
   1168 		else
   1169 			aprint_normal_dev(dev,
   1170 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1171 			    oui, model, rev);
   1172 	}
   1173 
   1174 	/* Enable the optics for 82599 SFP+ fiber */
   1175 	ixgbe_enable_tx_laser(hw);
   1176 
   1177 	/* Enable EEE power saving */
   1178 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   1179 		hw->mac.ops.setup_eee(hw,
   1180 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   1181 
   1182 	/* Enable power to the phy. */
   1183 	ixgbe_set_phy_power(hw, TRUE);
   1184 
   1185 	/* Initialize statistics */
   1186 	ixgbe_update_stats_counters(adapter);
   1187 
   1188 	/* Check PCIE slot type/speed/width */
   1189 	ixgbe_get_slot_info(adapter);
   1190 
   1191 	/*
   1192 	 * Do time init and sysctl init here, but
   1193 	 * only on the first port of a bypass adapter.
   1194 	 */
   1195 	ixgbe_bypass_init(adapter);
   1196 
   1197 	/* Set an initial dmac value */
   1198 	adapter->dmac = 0;
   1199 	/* Set initial advertised speeds (if applicable) */
   1200 	adapter->advertise = ixgbe_get_advertise(adapter);
   1201 
   1202 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1203 		ixgbe_define_iov_schemas(dev, &error);
   1204 
   1205 	/* Add sysctls */
   1206 	ixgbe_add_device_sysctls(adapter);
   1207 	ixgbe_add_hw_stats(adapter);
   1208 
   1209 	/* For Netmap */
   1210 	adapter->init_locked = ixgbe_init_locked;
   1211 	adapter->stop_locked = ixgbe_stop;
   1212 
   1213 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1214 		ixgbe_netmap_attach(adapter);
   1215 
   1216 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1217 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1218 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1219 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1220 
   1221 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1222 		pmf_class_network_register(dev, adapter->ifp);
   1223 	else
   1224 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1225 
   1226 	/* Init recovery mode timer and state variable */
   1227 	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
   1228 		adapter->recovery_mode = 0;
   1229 
   1230 		/* Set up the timer callout */
   1231 		callout_init(&adapter->recovery_mode_timer,
   1232 		    IXGBE_CALLOUT_FLAGS);
   1233 
   1234 		/* Start the task */
   1235 		callout_reset(&adapter->recovery_mode_timer, hz,
   1236 		    ixgbe_recovery_mode_timer, adapter);
   1237 	}
   1238 
   1239 	INIT_DEBUGOUT("ixgbe_attach: end");
   1240 	adapter->osdep.attached = true;
   1241 
   1242 	return;
   1243 
   1244 err_late:
   1245 	ixgbe_free_transmit_structures(adapter);
   1246 	ixgbe_free_receive_structures(adapter);
   1247 	free(adapter->queues, M_DEVBUF);
   1248 err_out:
   1249 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1250 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1251 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1252 	ixgbe_free_softint(adapter);
   1253 	ixgbe_free_pci_resources(adapter);
   1254 	if (adapter->mta != NULL)
   1255 		free(adapter->mta, M_DEVBUF);
   1256 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1257 
   1258 	return;
   1259 } /* ixgbe_attach */
   1260 
   1261 /************************************************************************
   1262  * ixgbe_check_wol_support
   1263  *
   1264  *   Checks whether the adapter's ports are capable of
   1265  *   Wake On LAN by reading the adapter's NVM.
   1266  *
   1267  *   Sets each port's hw->wol_enabled value depending
   1268  *   on the value read here.
   1269  ************************************************************************/
   1270 static void
   1271 ixgbe_check_wol_support(struct adapter *adapter)
   1272 {
   1273 	struct ixgbe_hw *hw = &adapter->hw;
   1274 	u16		dev_caps = 0;
   1275 
   1276 	/* Find out WoL support for port */
   1277 	adapter->wol_support = hw->wol_enabled = 0;
   1278 	ixgbe_get_device_caps(hw, &dev_caps);
   1279 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1280 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1281 	     hw->bus.func == 0))
   1282 		adapter->wol_support = hw->wol_enabled = 1;
   1283 
   1284 	/* Save initial wake up filter configuration */
   1285 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1286 
   1287 	return;
   1288 } /* ixgbe_check_wol_support */
   1289 
   1290 /************************************************************************
   1291  * ixgbe_setup_interface
   1292  *
   1293  *   Setup networking device structure and register an interface.
   1294  ************************************************************************/
   1295 static int
   1296 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1297 {
   1298 	struct ethercom *ec = &adapter->osdep.ec;
   1299 	struct ifnet   *ifp;
   1300 	int rv;
   1301 
   1302 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1303 
   1304 	ifp = adapter->ifp = &ec->ec_if;
   1305 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1306 	ifp->if_baudrate = IF_Gbps(10);
   1307 	ifp->if_init = ixgbe_init;
   1308 	ifp->if_stop = ixgbe_ifstop;
   1309 	ifp->if_softc = adapter;
   1310 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1311 #ifdef IXGBE_MPSAFE
   1312 	ifp->if_extflags = IFEF_MPSAFE;
   1313 #endif
   1314 	ifp->if_ioctl = ixgbe_ioctl;
   1315 #if __FreeBSD_version >= 1100045
   1316 	/* TSO parameters */
   1317 	ifp->if_hw_tsomax = 65518;
   1318 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1319 	ifp->if_hw_tsomaxsegsize = 2048;
   1320 #endif
   1321 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1322 #if 0
   1323 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1324 #endif
   1325 	} else {
   1326 		ifp->if_transmit = ixgbe_mq_start;
   1327 #if 0
   1328 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1329 #endif
   1330 	}
   1331 	ifp->if_start = ixgbe_legacy_start;
   1332 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1333 	IFQ_SET_READY(&ifp->if_snd);
   1334 
   1335 	rv = if_initialize(ifp);
   1336 	if (rv != 0) {
   1337 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1338 		return rv;
   1339 	}
   1340 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1341 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1342 	/*
   1343 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1344 	 * used.
   1345 	 */
   1346 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1347 
   1348 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1349 
   1350 	/*
   1351 	 * Tell the upper layer(s) we support long frames.
   1352 	 */
   1353 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1354 
   1355 	/* Set capability flags */
   1356 	ifp->if_capabilities |= IFCAP_RXCSUM
   1357 			     |	IFCAP_TXCSUM
   1358 			     |	IFCAP_TSOv4
   1359 			     |	IFCAP_TSOv6;
   1360 	ifp->if_capenable = 0;
   1361 
   1362 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1363 			    |  ETHERCAP_VLAN_HWCSUM
   1364 			    |  ETHERCAP_JUMBO_MTU
   1365 			    |  ETHERCAP_VLAN_MTU;
   1366 
   1367 	/* Enable the above capabilities by default */
   1368 	ec->ec_capenable = ec->ec_capabilities;
   1369 
   1370 	/*
   1371 	 * Don't turn this on by default, if vlans are
   1372 	 * created on another pseudo device (eg. lagg)
   1373 	 * then vlan events are not passed thru, breaking
   1374 	 * operation, but with HW FILTER off it works. If
   1375 	 * using vlans directly on the ixgbe driver you can
   1376 	 * enable this and get full hardware tag filtering.
   1377 	 */
   1378 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1379 
   1380 	/*
   1381 	 * Specify the media types supported by this adapter and register
   1382 	 * callbacks to update media and link information
   1383 	 */
   1384 	ec->ec_ifmedia = &adapter->media;
   1385 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1386 	    ixgbe_media_status);
   1387 
   1388 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1389 	ixgbe_add_media_types(adapter);
   1390 
   1391 	/* Set autoselect media by default */
   1392 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1393 
   1394 	if_register(ifp);
   1395 
   1396 	return (0);
   1397 } /* ixgbe_setup_interface */
   1398 
   1399 /************************************************************************
   1400  * ixgbe_add_media_types
   1401  ************************************************************************/
   1402 static void
   1403 ixgbe_add_media_types(struct adapter *adapter)
   1404 {
   1405 	struct ixgbe_hw *hw = &adapter->hw;
   1406 	u64		layer;
   1407 
   1408 	layer = adapter->phy_layer;
   1409 
   1410 #define	ADD(mm, dd)							\
   1411 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1412 
   1413 	ADD(IFM_NONE, 0);
   1414 
   1415 	/* Media types with matching NetBSD media defines */
   1416 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1417 		ADD(IFM_10G_T | IFM_FDX, 0);
   1418 	}
   1419 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1420 		ADD(IFM_1000_T | IFM_FDX, 0);
   1421 	}
   1422 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1423 		ADD(IFM_100_TX | IFM_FDX, 0);
   1424 	}
   1425 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1426 		ADD(IFM_10_T | IFM_FDX, 0);
   1427 	}
   1428 
   1429 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1430 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1431 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1432 	}
   1433 
   1434 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1435 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1436 		if (hw->phy.multispeed_fiber) {
   1437 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1438 		}
   1439 	}
   1440 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1441 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1442 		if (hw->phy.multispeed_fiber) {
   1443 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1444 		}
   1445 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1446 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1447 	}
   1448 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1449 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1450 	}
   1451 
   1452 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1453 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1454 	}
   1455 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1456 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1457 	}
   1458 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1459 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1460 	}
   1461 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1462 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1463 	}
   1464 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1465 		ADD(IFM_2500_T | IFM_FDX, 0);
   1466 	}
   1467 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1468 		ADD(IFM_5000_T | IFM_FDX, 0);
   1469 	}
   1470 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1471 		ADD(IFM_1000_BX10 | IFM_FDX, 0);
   1472 	/* XXX no ifmedia_set? */
   1473 
   1474 	ADD(IFM_AUTO, 0);
   1475 
   1476 #undef ADD
   1477 } /* ixgbe_add_media_types */
   1478 
   1479 /************************************************************************
   1480  * ixgbe_is_sfp
   1481  ************************************************************************/
   1482 static inline bool
   1483 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1484 {
   1485 	switch (hw->mac.type) {
   1486 	case ixgbe_mac_82598EB:
   1487 		if (hw->phy.type == ixgbe_phy_nl)
   1488 			return (TRUE);
   1489 		return (FALSE);
   1490 	case ixgbe_mac_82599EB:
   1491 	case ixgbe_mac_X550EM_x:
   1492 	case ixgbe_mac_X550EM_a:
   1493 		switch (hw->mac.ops.get_media_type(hw)) {
   1494 		case ixgbe_media_type_fiber:
   1495 		case ixgbe_media_type_fiber_qsfp:
   1496 			return (TRUE);
   1497 		default:
   1498 			return (FALSE);
   1499 		}
   1500 	default:
   1501 		return (FALSE);
   1502 	}
   1503 } /* ixgbe_is_sfp */
   1504 
   1505 /************************************************************************
   1506  * ixgbe_config_link
   1507  ************************************************************************/
   1508 static void
   1509 ixgbe_config_link(struct adapter *adapter)
   1510 {
   1511 	struct ixgbe_hw *hw = &adapter->hw;
   1512 	u32		autoneg, err = 0;
   1513 	bool		sfp, negotiate = false;
   1514 
   1515 	sfp = ixgbe_is_sfp(hw);
   1516 
   1517 	if (sfp) {
   1518 		if (hw->phy.multispeed_fiber) {
   1519 			ixgbe_enable_tx_laser(hw);
   1520 			kpreempt_disable();
   1521 			softint_schedule(adapter->msf_si);
   1522 			kpreempt_enable();
   1523 		}
   1524 		kpreempt_disable();
   1525 		softint_schedule(adapter->mod_si);
   1526 		kpreempt_enable();
   1527 	} else {
   1528 		struct ifmedia	*ifm = &adapter->media;
   1529 
   1530 		if (hw->mac.ops.check_link)
   1531 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1532 			    &adapter->link_up, FALSE);
   1533 		if (err)
   1534 			return;
   1535 
   1536 		/*
   1537 		 * Check if it's the first call. If it's the first call,
   1538 		 * get value for auto negotiation.
   1539 		 */
   1540 		autoneg = hw->phy.autoneg_advertised;
   1541 		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
   1542 		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
   1543 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1544 			    &negotiate);
   1545 		if (err)
   1546 			return;
   1547 		if (hw->mac.ops.setup_link)
   1548 			err = hw->mac.ops.setup_link(hw, autoneg,
   1549 			    adapter->link_up);
   1550 	}
   1551 
   1552 } /* ixgbe_config_link */
   1553 
   1554 /************************************************************************
   1555  * ixgbe_update_stats_counters - Update board statistics counters.
   1556  ************************************************************************/
   1557 static void
   1558 ixgbe_update_stats_counters(struct adapter *adapter)
   1559 {
   1560 	struct ifnet	      *ifp = adapter->ifp;
   1561 	struct ixgbe_hw	      *hw = &adapter->hw;
   1562 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1563 	u32		      missed_rx = 0, bprc, lxon, lxoff, total;
   1564 	u64		      total_missed_rx = 0;
   1565 	uint64_t	      crcerrs, rlec;
   1566 	unsigned int	      queue_counters;
   1567 	int		      i;
   1568 
   1569 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1570 	stats->crcerrs.ev_count += crcerrs;
   1571 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1572 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1573 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1574 	if (hw->mac.type >= ixgbe_mac_X550)
   1575 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1576 
   1577 	/* 16 registers exist */
   1578 	queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
   1579 	for (i = 0; i < queue_counters; i++) {
   1580 		stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1581 		stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1582 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1583 			stats->qprdc[i].ev_count
   1584 			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1585 		}
   1586 	}
   1587 
   1588 	/* 8 registers exist */
   1589 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1590 		uint32_t mp;
   1591 
   1592 		/* MPC */
   1593 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1594 		/* global total per queue */
   1595 		stats->mpc[i].ev_count += mp;
   1596 		/* running comprehensive total for stats display */
   1597 		total_missed_rx += mp;
   1598 
   1599 		if (hw->mac.type == ixgbe_mac_82598EB)
   1600 			stats->rnbc[i].ev_count
   1601 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1602 
   1603 		stats->pxontxc[i].ev_count
   1604 		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   1605 		stats->pxofftxc[i].ev_count
   1606 		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   1607 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1608 			stats->pxonrxc[i].ev_count
   1609 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
   1610 			stats->pxoffrxc[i].ev_count
   1611 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
   1612 			stats->pxon2offc[i].ev_count
   1613 			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   1614 		} else {
   1615 			stats->pxonrxc[i].ev_count
   1616 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   1617 			stats->pxoffrxc[i].ev_count
   1618 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   1619 		}
   1620 	}
   1621 	stats->mpctotal.ev_count += total_missed_rx;
   1622 
   1623 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1624 	if ((adapter->link_active == LINK_STATE_UP)
   1625 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1626 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1627 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1628 	}
   1629 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1630 	stats->rlec.ev_count += rlec;
   1631 
   1632 	/* Hardware workaround, gprc counts missed packets */
   1633 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1634 
   1635 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1636 	stats->lxontxc.ev_count += lxon;
   1637 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1638 	stats->lxofftxc.ev_count += lxoff;
   1639 	total = lxon + lxoff;
   1640 
   1641 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1642 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1643 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1644 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1645 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1646 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1647 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1648 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1649 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1650 	} else {
   1651 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1652 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1653 		/* 82598 only has a counter in the high register */
   1654 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1655 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1656 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1657 	}
   1658 
   1659 	/*
   1660 	 * Workaround: mprc hardware is incorrectly counting
   1661 	 * broadcasts, so for now we subtract those.
   1662 	 */
   1663 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1664 	stats->bprc.ev_count += bprc;
   1665 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1666 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1667 
   1668 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1669 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1670 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1671 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1672 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1673 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1674 
   1675 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1676 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1677 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1678 
   1679 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1680 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1681 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1682 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1683 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1684 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1685 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1686 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1687 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1688 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1689 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1690 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1691 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1692 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1693 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1694 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1695 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1696 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1697 	/* Only read FCOE on 82599 */
   1698 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1699 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1700 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1701 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1702 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1703 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1704 	}
   1705 
   1706 	/* Fill out the OS statistics structure */
   1707 	/*
   1708 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1709 	 * adapter->stats counters. It's required to make ifconfig -z
   1710 	 * (SOICZIFDATA) work.
   1711 	 */
   1712 	ifp->if_collisions = 0;
   1713 
   1714 	/* Rx Errors */
   1715 	ifp->if_iqdrops += total_missed_rx;
   1716 	ifp->if_ierrors += crcerrs + rlec;
   1717 } /* ixgbe_update_stats_counters */
   1718 
   1719 /************************************************************************
   1720  * ixgbe_add_hw_stats
   1721  *
   1722  *   Add sysctl variables, one per statistic, to the system.
   1723  ************************************************************************/
   1724 static void
   1725 ixgbe_add_hw_stats(struct adapter *adapter)
   1726 {
   1727 	device_t dev = adapter->dev;
   1728 	const struct sysctlnode *rnode, *cnode;
   1729 	struct sysctllog **log = &adapter->sysctllog;
   1730 	struct tx_ring *txr = adapter->tx_rings;
   1731 	struct rx_ring *rxr = adapter->rx_rings;
   1732 	struct ixgbe_hw *hw = &adapter->hw;
   1733 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1734 	const char *xname = device_xname(dev);
   1735 	int i;
   1736 
   1737 	/* Driver Statistics */
   1738 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1739 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1740 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1741 	    NULL, xname, "m_defrag() failed");
   1742 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1743 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1744 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1745 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1746 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1747 	    NULL, xname, "Driver tx dma hard fail other");
   1748 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1749 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1750 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1751 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1752 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1753 	    NULL, xname, "Watchdog timeouts");
   1754 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1755 	    NULL, xname, "TSO errors");
   1756 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1757 	    NULL, xname, "Link MSI-X IRQ Handled");
   1758 	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
   1759 	    NULL, xname, "Link softint");
   1760 	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
   1761 	    NULL, xname, "module softint");
   1762 	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
   1763 	    NULL, xname, "multimode softint");
   1764 	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
   1765 	    NULL, xname, "external PHY softint");
   1766 
   1767 	/* Max number of traffic class is 8 */
   1768 	KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
   1769 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   1770 		snprintf(adapter->tcs[i].evnamebuf,
   1771 		    sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
   1772 		    xname, i);
   1773 		if (i < __arraycount(stats->mpc)) {
   1774 			evcnt_attach_dynamic(&stats->mpc[i],
   1775 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1776 			    "RX Missed Packet Count");
   1777 			if (hw->mac.type == ixgbe_mac_82598EB)
   1778 				evcnt_attach_dynamic(&stats->rnbc[i],
   1779 				    EVCNT_TYPE_MISC, NULL,
   1780 				    adapter->tcs[i].evnamebuf,
   1781 				    "Receive No Buffers");
   1782 		}
   1783 		if (i < __arraycount(stats->pxontxc)) {
   1784 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1785 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1786 			    "pxontxc");
   1787 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1788 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1789 			    "pxonrxc");
   1790 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1791 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1792 			    "pxofftxc");
   1793 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1794 			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
   1795 			    "pxoffrxc");
   1796 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1797 				evcnt_attach_dynamic(&stats->pxon2offc[i],
   1798 				    EVCNT_TYPE_MISC, NULL,
   1799 				    adapter->tcs[i].evnamebuf,
   1800 			    "pxon2offc");
   1801 		}
   1802 	}
   1803 
   1804 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1805 #ifdef LRO
   1806 		struct lro_ctrl *lro = &rxr->lro;
   1807 #endif /* LRO */
   1808 
   1809 		snprintf(adapter->queues[i].evnamebuf,
   1810 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1811 		    xname, i);
   1812 		snprintf(adapter->queues[i].namebuf,
   1813 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1814 
   1815 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1816 			aprint_error_dev(dev, "could not create sysctl root\n");
   1817 			break;
   1818 		}
   1819 
   1820 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1821 		    0, CTLTYPE_NODE,
   1822 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1823 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1824 			break;
   1825 
   1826 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1827 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1828 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1829 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1830 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1831 			break;
   1832 
   1833 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1834 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1835 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1836 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1837 		    0, CTL_CREATE, CTL_EOL) != 0)
   1838 			break;
   1839 
   1840 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1841 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1842 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1843 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1844 		    0, CTL_CREATE, CTL_EOL) != 0)
   1845 			break;
   1846 
   1847 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1848 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1849 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   1850 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1851 		    "Handled queue in softint");
   1852 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   1853 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   1854 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1855 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1856 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1857 		    NULL, adapter->queues[i].evnamebuf,
   1858 		    "Queue No Descriptor Available");
   1859 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1860 		    NULL, adapter->queues[i].evnamebuf,
   1861 		    "Queue Packets Transmitted");
   1862 #ifndef IXGBE_LEGACY_TX
   1863 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1864 		    NULL, adapter->queues[i].evnamebuf,
   1865 		    "Packets dropped in pcq");
   1866 #endif
   1867 
   1868 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1869 		    CTLFLAG_READONLY,
   1870 		    CTLTYPE_INT,
   1871 		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
   1872 			ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   1873 		    CTL_CREATE, CTL_EOL) != 0)
   1874 			break;
   1875 
   1876 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1877 		    CTLFLAG_READONLY,
   1878 		    CTLTYPE_INT,
   1879 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1880 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1881 		    CTL_CREATE, CTL_EOL) != 0)
   1882 			break;
   1883 
   1884 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1885 		    CTLFLAG_READONLY,
   1886 		    CTLTYPE_INT,
   1887 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1888 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1889 		    CTL_CREATE, CTL_EOL) != 0)
   1890 			break;
   1891 
   1892 		if (i < __arraycount(stats->qprc)) {
   1893 			evcnt_attach_dynamic(&stats->qprc[i],
   1894 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1895 			    "qprc");
   1896 			evcnt_attach_dynamic(&stats->qptc[i],
   1897 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1898 			    "qptc");
   1899 			evcnt_attach_dynamic(&stats->qbrc[i],
   1900 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1901 			    "qbrc");
   1902 			evcnt_attach_dynamic(&stats->qbtc[i],
   1903 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1904 			    "qbtc");
   1905 			if (hw->mac.type >= ixgbe_mac_82599EB)
   1906 				evcnt_attach_dynamic(&stats->qprdc[i],
   1907 				    EVCNT_TYPE_MISC, NULL,
   1908 				    adapter->queues[i].evnamebuf, "qprdc");
   1909 		}
   1910 
   1911 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1912 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1913 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1914 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1915 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1916 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1917 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1918 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1919 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1920 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1921 #ifdef LRO
   1922 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1923 				CTLFLAG_RD, &lro->lro_queued, 0,
   1924 				"LRO Queued");
   1925 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1926 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1927 				"LRO Flushed");
   1928 #endif /* LRO */
   1929 	}
   1930 
   1931 	/* MAC stats get their own sub node */
   1932 
   1933 	snprintf(stats->namebuf,
   1934 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1935 
   1936 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1937 	    stats->namebuf, "rx csum offload - IP");
   1938 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1939 	    stats->namebuf, "rx csum offload - L4");
   1940 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1941 	    stats->namebuf, "rx csum offload - IP bad");
   1942 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1943 	    stats->namebuf, "rx csum offload - L4 bad");
   1944 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1945 	    stats->namebuf, "Interrupt conditions zero");
   1946 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1947 	    stats->namebuf, "Legacy interrupts");
   1948 
   1949 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1950 	    stats->namebuf, "CRC Errors");
   1951 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1952 	    stats->namebuf, "Illegal Byte Errors");
   1953 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1954 	    stats->namebuf, "Byte Errors");
   1955 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1956 	    stats->namebuf, "MAC Short Packets Discarded");
   1957 	if (hw->mac.type >= ixgbe_mac_X550)
   1958 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1959 		    stats->namebuf, "Bad SFD");
   1960 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1961 	    stats->namebuf, "Total Packets Missed");
   1962 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1963 	    stats->namebuf, "MAC Local Faults");
   1964 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1965 	    stats->namebuf, "MAC Remote Faults");
   1966 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1967 	    stats->namebuf, "Receive Length Errors");
   1968 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1969 	    stats->namebuf, "Link XON Transmitted");
   1970 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1971 	    stats->namebuf, "Link XON Received");
   1972 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1973 	    stats->namebuf, "Link XOFF Transmitted");
   1974 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1975 	    stats->namebuf, "Link XOFF Received");
   1976 
   1977 	/* Packet Reception Stats */
   1978 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1979 	    stats->namebuf, "Total Octets Received");
   1980 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1981 	    stats->namebuf, "Good Octets Received");
   1982 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1983 	    stats->namebuf, "Total Packets Received");
   1984 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1985 	    stats->namebuf, "Good Packets Received");
   1986 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1987 	    stats->namebuf, "Multicast Packets Received");
   1988 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1989 	    stats->namebuf, "Broadcast Packets Received");
   1990 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1991 	    stats->namebuf, "64 byte frames received ");
   1992 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1993 	    stats->namebuf, "65-127 byte frames received");
   1994 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1995 	    stats->namebuf, "128-255 byte frames received");
   1996 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1997 	    stats->namebuf, "256-511 byte frames received");
   1998 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1999 	    stats->namebuf, "512-1023 byte frames received");
   2000 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   2001 	    stats->namebuf, "1023-1522 byte frames received");
   2002 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   2003 	    stats->namebuf, "Receive Undersized");
   2004 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   2005 	    stats->namebuf, "Fragmented Packets Received ");
   2006 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   2007 	    stats->namebuf, "Oversized Packets Received");
   2008 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   2009 	    stats->namebuf, "Received Jabber");
   2010 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   2011 	    stats->namebuf, "Management Packets Received");
   2012 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   2013 	    stats->namebuf, "Management Packets Dropped");
   2014 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   2015 	    stats->namebuf, "Checksum Errors");
   2016 
   2017 	/* Packet Transmission Stats */
   2018 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   2019 	    stats->namebuf, "Good Octets Transmitted");
   2020 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   2021 	    stats->namebuf, "Total Packets Transmitted");
   2022 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   2023 	    stats->namebuf, "Good Packets Transmitted");
   2024 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   2025 	    stats->namebuf, "Broadcast Packets Transmitted");
   2026 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   2027 	    stats->namebuf, "Multicast Packets Transmitted");
   2028 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   2029 	    stats->namebuf, "Management Packets Transmitted");
   2030 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   2031 	    stats->namebuf, "64 byte frames transmitted ");
   2032 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   2033 	    stats->namebuf, "65-127 byte frames transmitted");
   2034 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   2035 	    stats->namebuf, "128-255 byte frames transmitted");
   2036 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   2037 	    stats->namebuf, "256-511 byte frames transmitted");
   2038 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   2039 	    stats->namebuf, "512-1023 byte frames transmitted");
   2040 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   2041 	    stats->namebuf, "1024-1522 byte frames transmitted");
   2042 } /* ixgbe_add_hw_stats */
   2043 
   2044 static void
   2045 ixgbe_clear_evcnt(struct adapter *adapter)
   2046 {
   2047 	struct tx_ring *txr = adapter->tx_rings;
   2048 	struct rx_ring *rxr = adapter->rx_rings;
   2049 	struct ixgbe_hw *hw = &adapter->hw;
   2050 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   2051 	int i;
   2052 
   2053 	adapter->efbig_tx_dma_setup.ev_count = 0;
   2054 	adapter->mbuf_defrag_failed.ev_count = 0;
   2055 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   2056 	adapter->einval_tx_dma_setup.ev_count = 0;
   2057 	adapter->other_tx_dma_setup.ev_count = 0;
   2058 	adapter->eagain_tx_dma_setup.ev_count = 0;
   2059 	adapter->enomem_tx_dma_setup.ev_count = 0;
   2060 	adapter->tso_err.ev_count = 0;
   2061 	adapter->watchdog_events.ev_count = 0;
   2062 	adapter->link_irq.ev_count = 0;
   2063 	adapter->link_sicount.ev_count = 0;
   2064 	adapter->mod_sicount.ev_count = 0;
   2065 	adapter->msf_sicount.ev_count = 0;
   2066 	adapter->phy_sicount.ev_count = 0;
   2067 
   2068 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   2069 		if (i < __arraycount(stats->mpc)) {
   2070 			stats->mpc[i].ev_count = 0;
   2071 			if (hw->mac.type == ixgbe_mac_82598EB)
   2072 				stats->rnbc[i].ev_count = 0;
   2073 		}
   2074 		if (i < __arraycount(stats->pxontxc)) {
   2075 			stats->pxontxc[i].ev_count = 0;
   2076 			stats->pxonrxc[i].ev_count = 0;
   2077 			stats->pxofftxc[i].ev_count = 0;
   2078 			stats->pxoffrxc[i].ev_count = 0;
   2079 			if (hw->mac.type >= ixgbe_mac_82599EB)
   2080 				stats->pxon2offc[i].ev_count = 0;
   2081 		}
   2082 	}
   2083 
   2084 	txr = adapter->tx_rings;
   2085 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2086 		adapter->queues[i].irqs.ev_count = 0;
   2087 		adapter->queues[i].handleq.ev_count = 0;
   2088 		adapter->queues[i].req.ev_count = 0;
   2089 		txr->no_desc_avail.ev_count = 0;
   2090 		txr->total_packets.ev_count = 0;
   2091 		txr->tso_tx.ev_count = 0;
   2092 #ifndef IXGBE_LEGACY_TX
   2093 		txr->pcq_drops.ev_count = 0;
   2094 #endif
   2095 		txr->q_efbig_tx_dma_setup = 0;
   2096 		txr->q_mbuf_defrag_failed = 0;
   2097 		txr->q_efbig2_tx_dma_setup = 0;
   2098 		txr->q_einval_tx_dma_setup = 0;
   2099 		txr->q_other_tx_dma_setup = 0;
   2100 		txr->q_eagain_tx_dma_setup = 0;
   2101 		txr->q_enomem_tx_dma_setup = 0;
   2102 		txr->q_tso_err = 0;
   2103 
   2104 		if (i < __arraycount(stats->qprc)) {
   2105 			stats->qprc[i].ev_count = 0;
   2106 			stats->qptc[i].ev_count = 0;
   2107 			stats->qbrc[i].ev_count = 0;
   2108 			stats->qbtc[i].ev_count = 0;
   2109 			if (hw->mac.type >= ixgbe_mac_82599EB)
   2110 				stats->qprdc[i].ev_count = 0;
   2111 		}
   2112 
   2113 		rxr->rx_packets.ev_count = 0;
   2114 		rxr->rx_bytes.ev_count = 0;
   2115 		rxr->rx_copies.ev_count = 0;
   2116 		rxr->no_jmbuf.ev_count = 0;
   2117 		rxr->rx_discarded.ev_count = 0;
   2118 	}
   2119 	stats->ipcs.ev_count = 0;
   2120 	stats->l4cs.ev_count = 0;
   2121 	stats->ipcs_bad.ev_count = 0;
   2122 	stats->l4cs_bad.ev_count = 0;
   2123 	stats->intzero.ev_count = 0;
   2124 	stats->legint.ev_count = 0;
   2125 	stats->crcerrs.ev_count = 0;
   2126 	stats->illerrc.ev_count = 0;
   2127 	stats->errbc.ev_count = 0;
   2128 	stats->mspdc.ev_count = 0;
   2129 	if (hw->mac.type >= ixgbe_mac_X550)
   2130 		stats->mbsdc.ev_count = 0;
   2131 	stats->mpctotal.ev_count = 0;
   2132 	stats->mlfc.ev_count = 0;
   2133 	stats->mrfc.ev_count = 0;
   2134 	stats->rlec.ev_count = 0;
   2135 	stats->lxontxc.ev_count = 0;
   2136 	stats->lxonrxc.ev_count = 0;
   2137 	stats->lxofftxc.ev_count = 0;
   2138 	stats->lxoffrxc.ev_count = 0;
   2139 
   2140 	/* Packet Reception Stats */
   2141 	stats->tor.ev_count = 0;
   2142 	stats->gorc.ev_count = 0;
   2143 	stats->tpr.ev_count = 0;
   2144 	stats->gprc.ev_count = 0;
   2145 	stats->mprc.ev_count = 0;
   2146 	stats->bprc.ev_count = 0;
   2147 	stats->prc64.ev_count = 0;
   2148 	stats->prc127.ev_count = 0;
   2149 	stats->prc255.ev_count = 0;
   2150 	stats->prc511.ev_count = 0;
   2151 	stats->prc1023.ev_count = 0;
   2152 	stats->prc1522.ev_count = 0;
   2153 	stats->ruc.ev_count = 0;
   2154 	stats->rfc.ev_count = 0;
   2155 	stats->roc.ev_count = 0;
   2156 	stats->rjc.ev_count = 0;
   2157 	stats->mngprc.ev_count = 0;
   2158 	stats->mngpdc.ev_count = 0;
   2159 	stats->xec.ev_count = 0;
   2160 
   2161 	/* Packet Transmission Stats */
   2162 	stats->gotc.ev_count = 0;
   2163 	stats->tpt.ev_count = 0;
   2164 	stats->gptc.ev_count = 0;
   2165 	stats->bptc.ev_count = 0;
   2166 	stats->mptc.ev_count = 0;
   2167 	stats->mngptc.ev_count = 0;
   2168 	stats->ptc64.ev_count = 0;
   2169 	stats->ptc127.ev_count = 0;
   2170 	stats->ptc255.ev_count = 0;
   2171 	stats->ptc511.ev_count = 0;
   2172 	stats->ptc1023.ev_count = 0;
   2173 	stats->ptc1522.ev_count = 0;
   2174 }
   2175 
   2176 /************************************************************************
   2177  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2178  *
   2179  *   Retrieves the TDH value from the hardware
   2180  ************************************************************************/
   2181 static int
   2182 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2183 {
   2184 	struct sysctlnode node = *rnode;
   2185 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2186 	struct adapter *adapter;
   2187 	uint32_t val;
   2188 
   2189 	if (!txr)
   2190 		return (0);
   2191 
   2192 	adapter = txr->adapter;
   2193 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2194 		return (EPERM);
   2195 
   2196 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
   2197 	node.sysctl_data = &val;
   2198 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2199 } /* ixgbe_sysctl_tdh_handler */
   2200 
   2201 /************************************************************************
   2202  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2203  *
   2204  *   Retrieves the TDT value from the hardware
   2205  ************************************************************************/
   2206 static int
   2207 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2208 {
   2209 	struct sysctlnode node = *rnode;
   2210 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2211 	struct adapter *adapter;
   2212 	uint32_t val;
   2213 
   2214 	if (!txr)
   2215 		return (0);
   2216 
   2217 	adapter = txr->adapter;
   2218 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2219 		return (EPERM);
   2220 
   2221 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
   2222 	node.sysctl_data = &val;
   2223 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2224 } /* ixgbe_sysctl_tdt_handler */
   2225 
   2226 /************************************************************************
   2227  * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
   2228  * handler function
   2229  *
   2230  *   Retrieves the next_to_check value
   2231  ************************************************************************/
   2232 static int
   2233 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2234 {
   2235 	struct sysctlnode node = *rnode;
   2236 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2237 	struct adapter *adapter;
   2238 	uint32_t val;
   2239 
   2240 	if (!rxr)
   2241 		return (0);
   2242 
   2243 	adapter = rxr->adapter;
   2244 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2245 		return (EPERM);
   2246 
   2247 	val = rxr->next_to_check;
   2248 	node.sysctl_data = &val;
   2249 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2250 } /* ixgbe_sysctl_next_to_check_handler */
   2251 
   2252 /************************************************************************
   2253  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2254  *
   2255  *   Retrieves the RDH value from the hardware
   2256  ************************************************************************/
   2257 static int
   2258 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2259 {
   2260 	struct sysctlnode node = *rnode;
   2261 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2262 	struct adapter *adapter;
   2263 	uint32_t val;
   2264 
   2265 	if (!rxr)
   2266 		return (0);
   2267 
   2268 	adapter = rxr->adapter;
   2269 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2270 		return (EPERM);
   2271 
   2272 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
   2273 	node.sysctl_data = &val;
   2274 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2275 } /* ixgbe_sysctl_rdh_handler */
   2276 
   2277 /************************************************************************
   2278  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2279  *
   2280  *   Retrieves the RDT value from the hardware
   2281  ************************************************************************/
   2282 static int
   2283 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2284 {
   2285 	struct sysctlnode node = *rnode;
   2286 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2287 	struct adapter *adapter;
   2288 	uint32_t val;
   2289 
   2290 	if (!rxr)
   2291 		return (0);
   2292 
   2293 	adapter = rxr->adapter;
   2294 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2295 		return (EPERM);
   2296 
   2297 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
   2298 	node.sysctl_data = &val;
   2299 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2300 } /* ixgbe_sysctl_rdt_handler */
   2301 
   2302 static int
   2303 ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
   2304 {
   2305 	struct ifnet *ifp = &ec->ec_if;
   2306 	struct adapter *adapter = ifp->if_softc;
   2307 	int rv;
   2308 
   2309 	if (set)
   2310 		rv = ixgbe_register_vlan(adapter, vid);
   2311 	else
   2312 		rv = ixgbe_unregister_vlan(adapter, vid);
   2313 
   2314 	if (rv != 0)
   2315 		return rv;
   2316 
   2317 	/*
   2318 	 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
   2319 	 * or 0 to 1.
   2320 	 */
   2321 	if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
   2322 		ixgbe_setup_vlan_hw_tagging(adapter);
   2323 
   2324 	return rv;
   2325 }
   2326 
   2327 /************************************************************************
   2328  * ixgbe_register_vlan
   2329  *
   2330  *   Run via vlan config EVENT, it enables us to use the
   2331  *   HW Filter table since we can get the vlan id. This
   2332  *   just creates the entry in the soft version of the
   2333  *   VFTA, init will repopulate the real table.
   2334  ************************************************************************/
   2335 static int
   2336 ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
   2337 {
   2338 	u16		index, bit;
   2339 	int		error;
   2340 
   2341 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2342 		return EINVAL;
   2343 
   2344 	IXGBE_CORE_LOCK(adapter);
   2345 	index = (vtag >> 5) & 0x7F;
   2346 	bit = vtag & 0x1F;
   2347 	adapter->shadow_vfta[index] |= ((u32)1 << bit);
   2348 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
   2349 	    true);
   2350 	IXGBE_CORE_UNLOCK(adapter);
   2351 	if (error != 0)
   2352 		error = EACCES;
   2353 
   2354 	return error;
   2355 } /* ixgbe_register_vlan */
   2356 
   2357 /************************************************************************
   2358  * ixgbe_unregister_vlan
   2359  *
   2360  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2361  ************************************************************************/
   2362 static int
   2363 ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
   2364 {
   2365 	u16		index, bit;
   2366 	int		error;
   2367 
   2368 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2369 		return EINVAL;
   2370 
   2371 	IXGBE_CORE_LOCK(adapter);
   2372 	index = (vtag >> 5) & 0x7F;
   2373 	bit = vtag & 0x1F;
   2374 	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
   2375 	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
   2376 	    true);
   2377 	IXGBE_CORE_UNLOCK(adapter);
   2378 	if (error != 0)
   2379 		error = EACCES;
   2380 
   2381 	return error;
   2382 } /* ixgbe_unregister_vlan */
   2383 
   2384 static void
   2385 ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
   2386 {
   2387 	struct ethercom *ec = &adapter->osdep.ec;
   2388 	struct ixgbe_hw *hw = &adapter->hw;
   2389 	struct rx_ring	*rxr;
   2390 	u32		ctrl;
   2391 	int		i;
   2392 	bool		hwtagging;
   2393 
   2394 	/* Enable HW tagging only if any vlan is attached */
   2395 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   2396 	    && VLAN_ATTACHED(ec);
   2397 
   2398 	/* Setup the queues for vlans */
   2399 	for (i = 0; i < adapter->num_queues; i++) {
   2400 		rxr = &adapter->rx_rings[i];
   2401 		/*
   2402 		 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
   2403 		 */
   2404 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2405 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2406 			if (hwtagging)
   2407 				ctrl |= IXGBE_RXDCTL_VME;
   2408 			else
   2409 				ctrl &= ~IXGBE_RXDCTL_VME;
   2410 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2411 		}
   2412 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2413 	}
   2414 
   2415 	/* VLAN hw tagging for 82598 */
   2416 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2417 		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2418 		if (hwtagging)
   2419 			ctrl |= IXGBE_VLNCTRL_VME;
   2420 		else
   2421 			ctrl &= ~IXGBE_VLNCTRL_VME;
   2422 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2423 	}
   2424 } /* ixgbe_setup_vlan_hw_tagging */
   2425 
   2426 static void
   2427 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2428 {
   2429 	struct ethercom *ec = &adapter->osdep.ec;
   2430 	struct ixgbe_hw *hw = &adapter->hw;
   2431 	int		i;
   2432 	u32		ctrl;
   2433 	struct vlanid_list *vlanidp;
   2434 
   2435 	/*
   2436 	 *  This function is called from both if_init and ifflags_cb()
   2437 	 * on NetBSD.
   2438 	 */
   2439 
   2440 	/*
   2441 	 * Part 1:
   2442 	 * Setup VLAN HW tagging
   2443 	 */
   2444 	ixgbe_setup_vlan_hw_tagging(adapter);
   2445 
   2446 	/*
   2447 	 * Part 2:
   2448 	 * Setup VLAN HW filter
   2449 	 */
   2450 	/* Cleanup shadow_vfta */
   2451 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2452 		adapter->shadow_vfta[i] = 0;
   2453 	/* Generate shadow_vfta from ec_vids */
   2454 	ETHER_LOCK(ec);
   2455 	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
   2456 		uint32_t idx;
   2457 
   2458 		idx = vlanidp->vid / 32;
   2459 		KASSERT(idx < IXGBE_VFTA_SIZE);
   2460 		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
   2461 	}
   2462 	ETHER_UNLOCK(ec);
   2463 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2464 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
   2465 
   2466 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2467 	/* Enable the Filter Table if enabled */
   2468 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
   2469 		ctrl |= IXGBE_VLNCTRL_VFE;
   2470 	else
   2471 		ctrl &= ~IXGBE_VLNCTRL_VFE;
   2472 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2473 } /* ixgbe_setup_vlan_hw_support */
   2474 
   2475 /************************************************************************
   2476  * ixgbe_get_slot_info
   2477  *
   2478  *   Get the width and transaction speed of
   2479  *   the slot this adapter is plugged into.
   2480  ************************************************************************/
   2481 static void
   2482 ixgbe_get_slot_info(struct adapter *adapter)
   2483 {
   2484 	device_t		dev = adapter->dev;
   2485 	struct ixgbe_hw		*hw = &adapter->hw;
   2486 	u32		      offset;
   2487 	u16			link;
   2488 	int		      bus_info_valid = TRUE;
   2489 
   2490 	/* Some devices are behind an internal bridge */
   2491 	switch (hw->device_id) {
   2492 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2493 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2494 		goto get_parent_info;
   2495 	default:
   2496 		break;
   2497 	}
   2498 
   2499 	ixgbe_get_bus_info(hw);
   2500 
   2501 	/*
   2502 	 * Some devices don't use PCI-E, but there is no need
   2503 	 * to display "Unknown" for bus speed and width.
   2504 	 */
   2505 	switch (hw->mac.type) {
   2506 	case ixgbe_mac_X550EM_x:
   2507 	case ixgbe_mac_X550EM_a:
   2508 		return;
   2509 	default:
   2510 		goto display;
   2511 	}
   2512 
   2513 get_parent_info:
   2514 	/*
   2515 	 * For the Quad port adapter we need to parse back
   2516 	 * up the PCI tree to find the speed of the expansion
   2517 	 * slot into which this adapter is plugged. A bit more work.
   2518 	 */
   2519 	dev = device_parent(device_parent(dev));
   2520 #if 0
   2521 #ifdef IXGBE_DEBUG
   2522 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2523 	    pci_get_slot(dev), pci_get_function(dev));
   2524 #endif
   2525 	dev = device_parent(device_parent(dev));
   2526 #ifdef IXGBE_DEBUG
   2527 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2528 	    pci_get_slot(dev), pci_get_function(dev));
   2529 #endif
   2530 #endif
   2531 	/* Now get the PCI Express Capabilities offset */
   2532 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2533 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2534 		/*
   2535 		 * Hmm...can't get PCI-Express capabilities.
   2536 		 * Falling back to default method.
   2537 		 */
   2538 		bus_info_valid = FALSE;
   2539 		ixgbe_get_bus_info(hw);
   2540 		goto display;
   2541 	}
   2542 	/* ...and read the Link Status Register */
   2543 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2544 	    offset + PCIE_LCSR) >> 16;
   2545 	ixgbe_set_pci_config_data_generic(hw, link);
   2546 
   2547 display:
   2548 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2549 	    ((hw->bus.speed == ixgbe_bus_speed_8000)	? "8.0GT/s" :
   2550 	     (hw->bus.speed == ixgbe_bus_speed_5000)	? "5.0GT/s" :
   2551 	     (hw->bus.speed == ixgbe_bus_speed_2500)	? "2.5GT/s" :
   2552 	     "Unknown"),
   2553 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2554 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2555 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2556 	     "Unknown"));
   2557 
   2558 	if (bus_info_valid) {
   2559 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2560 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2561 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2562 			device_printf(dev, "PCI-Express bandwidth available"
   2563 			    " for this card\n     is not sufficient for"
   2564 			    " optimal performance.\n");
   2565 			device_printf(dev, "For optimal performance a x8 "
   2566 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2567 		}
   2568 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2569 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2570 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2571 			device_printf(dev, "PCI-Express bandwidth available"
   2572 			    " for this card\n     is not sufficient for"
   2573 			    " optimal performance.\n");
   2574 			device_printf(dev, "For optimal performance a x8 "
   2575 			    "PCIE Gen3 slot is required.\n");
   2576 		}
   2577 	} else
   2578 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2579 
   2580 	return;
   2581 } /* ixgbe_get_slot_info */
   2582 
   2583 /************************************************************************
   2584  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2585  ************************************************************************/
   2586 static inline void
   2587 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2588 {
   2589 	struct ixgbe_hw *hw = &adapter->hw;
   2590 	struct ix_queue *que = &adapter->queues[vector];
   2591 	u64		queue = 1ULL << vector;
   2592 	u32		mask;
   2593 
   2594 	mutex_enter(&que->dc_mtx);
   2595 	if (que->disabled_count > 0 && --que->disabled_count > 0)
   2596 		goto out;
   2597 
   2598 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2599 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2600 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2601 	} else {
   2602 		mask = (queue & 0xFFFFFFFF);
   2603 		if (mask)
   2604 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2605 		mask = (queue >> 32);
   2606 		if (mask)
   2607 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2608 	}
   2609 out:
   2610 	mutex_exit(&que->dc_mtx);
   2611 } /* ixgbe_enable_queue */
   2612 
   2613 /************************************************************************
   2614  * ixgbe_disable_queue_internal
   2615  ************************************************************************/
   2616 static inline void
   2617 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
   2618 {
   2619 	struct ixgbe_hw *hw = &adapter->hw;
   2620 	struct ix_queue *que = &adapter->queues[vector];
   2621 	u64		queue = 1ULL << vector;
   2622 	u32		mask;
   2623 
   2624 	mutex_enter(&que->dc_mtx);
   2625 
   2626 	if (que->disabled_count > 0) {
   2627 		if (nestok)
   2628 			que->disabled_count++;
   2629 		goto out;
   2630 	}
   2631 	que->disabled_count++;
   2632 
   2633 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2634 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2635 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2636 	} else {
   2637 		mask = (queue & 0xFFFFFFFF);
   2638 		if (mask)
   2639 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2640 		mask = (queue >> 32);
   2641 		if (mask)
   2642 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2643 	}
   2644 out:
   2645 	mutex_exit(&que->dc_mtx);
   2646 } /* ixgbe_disable_queue_internal */
   2647 
   2648 /************************************************************************
   2649  * ixgbe_disable_queue
   2650  ************************************************************************/
   2651 static inline void
   2652 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2653 {
   2654 
   2655 	ixgbe_disable_queue_internal(adapter, vector, true);
   2656 } /* ixgbe_disable_queue */
   2657 
   2658 /************************************************************************
   2659  * ixgbe_sched_handle_que - schedule deferred packet processing
   2660  ************************************************************************/
   2661 static inline void
   2662 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
   2663 {
   2664 
   2665 	if (que->txrx_use_workqueue) {
   2666 		/*
   2667 		 * adapter->que_wq is bound to each CPU instead of
   2668 		 * each NIC queue to reduce workqueue kthread. As we
   2669 		 * should consider about interrupt affinity in this
   2670 		 * function, the workqueue kthread must be WQ_PERCPU.
   2671 		 * If create WQ_PERCPU workqueue kthread for each NIC
   2672 		 * queue, that number of created workqueue kthread is
   2673 		 * (number of used NIC queue) * (number of CPUs) =
   2674 		 * (number of CPUs) ^ 2 most often.
   2675 		 *
   2676 		 * The same NIC queue's interrupts are avoided by
   2677 		 * masking the queue's interrupt. And different
   2678 		 * NIC queue's interrupts use different struct work
   2679 		 * (que->wq_cookie). So, "enqueued flag" to avoid
   2680 		 * twice workqueue_enqueue() is not required .
   2681 		 */
   2682 		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
   2683 	} else {
   2684 		softint_schedule(que->que_si);
   2685 	}
   2686 }
   2687 
   2688 /************************************************************************
   2689  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2690  ************************************************************************/
   2691 static int
   2692 ixgbe_msix_que(void *arg)
   2693 {
   2694 	struct ix_queue	*que = arg;
   2695 	struct adapter	*adapter = que->adapter;
   2696 	struct ifnet	*ifp = adapter->ifp;
   2697 	struct tx_ring	*txr = que->txr;
   2698 	struct rx_ring	*rxr = que->rxr;
   2699 	bool		more;
   2700 	u32		newitr = 0;
   2701 
   2702 	/* Protect against spurious interrupts */
   2703 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2704 		return 0;
   2705 
   2706 	ixgbe_disable_queue(adapter, que->msix);
   2707 	++que->irqs.ev_count;
   2708 
   2709 	/*
   2710 	 * Don't change "que->txrx_use_workqueue" from this point to avoid
   2711 	 * flip-flopping softint/workqueue mode in one deferred processing.
   2712 	 */
   2713 	que->txrx_use_workqueue = adapter->txrx_use_workqueue;
   2714 
   2715 #ifdef __NetBSD__
   2716 	/* Don't run ixgbe_rxeof in interrupt context */
   2717 	more = true;
   2718 #else
   2719 	more = ixgbe_rxeof(que);
   2720 #endif
   2721 
   2722 	IXGBE_TX_LOCK(txr);
   2723 	ixgbe_txeof(txr);
   2724 	IXGBE_TX_UNLOCK(txr);
   2725 
   2726 	/* Do AIM now? */
   2727 
   2728 	if (adapter->enable_aim == false)
   2729 		goto no_calc;
   2730 	/*
   2731 	 * Do Adaptive Interrupt Moderation:
   2732 	 *  - Write out last calculated setting
   2733 	 *  - Calculate based on average size over
   2734 	 *    the last interval.
   2735 	 */
   2736 	if (que->eitr_setting)
   2737 		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
   2738 
   2739 	que->eitr_setting = 0;
   2740 
   2741 	/* Idle, do nothing */
   2742 	if ((txr->bytes == 0) && (rxr->bytes == 0))
   2743 		goto no_calc;
   2744 
   2745 	if ((txr->bytes) && (txr->packets))
   2746 		newitr = txr->bytes/txr->packets;
   2747 	if ((rxr->bytes) && (rxr->packets))
   2748 		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
   2749 	newitr += 24; /* account for hardware frame, crc */
   2750 
   2751 	/* set an upper boundary */
   2752 	newitr = uimin(newitr, 3000);
   2753 
   2754 	/* Be nice to the mid range */
   2755 	if ((newitr > 300) && (newitr < 1200))
   2756 		newitr = (newitr / 3);
   2757 	else
   2758 		newitr = (newitr / 2);
   2759 
   2760 	/*
   2761 	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
   2762 	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
   2763 	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
   2764 	 * on 1G and higher.
   2765 	 */
   2766 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2767 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2768 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
   2769 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
   2770 	}
   2771 
   2772 	/* save for next interrupt */
   2773 	que->eitr_setting = newitr;
   2774 
   2775 	/* Reset state */
   2776 	txr->bytes = 0;
   2777 	txr->packets = 0;
   2778 	rxr->bytes = 0;
   2779 	rxr->packets = 0;
   2780 
   2781 no_calc:
   2782 	if (more)
   2783 		ixgbe_sched_handle_que(adapter, que);
   2784 	else
   2785 		ixgbe_enable_queue(adapter, que->msix);
   2786 
   2787 	return 1;
   2788 } /* ixgbe_msix_que */
   2789 
   2790 /************************************************************************
   2791  * ixgbe_media_status - Media Ioctl callback
   2792  *
   2793  *   Called whenever the user queries the status of
   2794  *   the interface using ifconfig.
   2795  ************************************************************************/
   2796 static void
   2797 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2798 {
   2799 	struct adapter *adapter = ifp->if_softc;
   2800 	struct ixgbe_hw *hw = &adapter->hw;
   2801 	int layer;
   2802 
   2803 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2804 	IXGBE_CORE_LOCK(adapter);
   2805 	ixgbe_update_link_status(adapter);
   2806 
   2807 	ifmr->ifm_status = IFM_AVALID;
   2808 	ifmr->ifm_active = IFM_ETHER;
   2809 
   2810 	if (adapter->link_active != LINK_STATE_UP) {
   2811 		ifmr->ifm_active |= IFM_NONE;
   2812 		IXGBE_CORE_UNLOCK(adapter);
   2813 		return;
   2814 	}
   2815 
   2816 	ifmr->ifm_status |= IFM_ACTIVE;
   2817 	layer = adapter->phy_layer;
   2818 
   2819 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2820 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2821 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2822 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2823 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2824 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2825 		switch (adapter->link_speed) {
   2826 		case IXGBE_LINK_SPEED_10GB_FULL:
   2827 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2828 			break;
   2829 		case IXGBE_LINK_SPEED_5GB_FULL:
   2830 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2831 			break;
   2832 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2833 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2834 			break;
   2835 		case IXGBE_LINK_SPEED_1GB_FULL:
   2836 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2837 			break;
   2838 		case IXGBE_LINK_SPEED_100_FULL:
   2839 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2840 			break;
   2841 		case IXGBE_LINK_SPEED_10_FULL:
   2842 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2843 			break;
   2844 		}
   2845 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2846 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2847 		switch (adapter->link_speed) {
   2848 		case IXGBE_LINK_SPEED_10GB_FULL:
   2849 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2850 			break;
   2851 		}
   2852 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2853 		switch (adapter->link_speed) {
   2854 		case IXGBE_LINK_SPEED_10GB_FULL:
   2855 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2856 			break;
   2857 		case IXGBE_LINK_SPEED_1GB_FULL:
   2858 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2859 			break;
   2860 		}
   2861 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2862 		switch (adapter->link_speed) {
   2863 		case IXGBE_LINK_SPEED_10GB_FULL:
   2864 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2865 			break;
   2866 		case IXGBE_LINK_SPEED_1GB_FULL:
   2867 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2868 			break;
   2869 		}
   2870 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2871 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2872 		switch (adapter->link_speed) {
   2873 		case IXGBE_LINK_SPEED_10GB_FULL:
   2874 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2875 			break;
   2876 		case IXGBE_LINK_SPEED_1GB_FULL:
   2877 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2878 			break;
   2879 		}
   2880 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2881 		switch (adapter->link_speed) {
   2882 		case IXGBE_LINK_SPEED_10GB_FULL:
   2883 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2884 			break;
   2885 		}
   2886 	/*
   2887 	 * XXX: These need to use the proper media types once
   2888 	 * they're added.
   2889 	 */
   2890 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2891 		switch (adapter->link_speed) {
   2892 		case IXGBE_LINK_SPEED_10GB_FULL:
   2893 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2894 			break;
   2895 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2896 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2897 			break;
   2898 		case IXGBE_LINK_SPEED_1GB_FULL:
   2899 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2900 			break;
   2901 		}
   2902 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2903 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2904 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2905 		switch (adapter->link_speed) {
   2906 		case IXGBE_LINK_SPEED_10GB_FULL:
   2907 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2908 			break;
   2909 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2910 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2911 			break;
   2912 		case IXGBE_LINK_SPEED_1GB_FULL:
   2913 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2914 			break;
   2915 		}
   2916 
   2917 	/* If nothing is recognized... */
   2918 #if 0
   2919 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2920 		ifmr->ifm_active |= IFM_UNKNOWN;
   2921 #endif
   2922 
   2923 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2924 
   2925 	/* Display current flow control setting used on link */
   2926 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2927 	    hw->fc.current_mode == ixgbe_fc_full)
   2928 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2929 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2930 	    hw->fc.current_mode == ixgbe_fc_full)
   2931 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2932 
   2933 	IXGBE_CORE_UNLOCK(adapter);
   2934 
   2935 	return;
   2936 } /* ixgbe_media_status */
   2937 
   2938 /************************************************************************
   2939  * ixgbe_media_change - Media Ioctl callback
   2940  *
   2941  *   Called when the user changes speed/duplex using
   2942  *   media/mediopt option with ifconfig.
   2943  ************************************************************************/
   2944 static int
   2945 ixgbe_media_change(struct ifnet *ifp)
   2946 {
   2947 	struct adapter	 *adapter = ifp->if_softc;
   2948 	struct ifmedia	 *ifm = &adapter->media;
   2949 	struct ixgbe_hw	 *hw = &adapter->hw;
   2950 	ixgbe_link_speed speed = 0;
   2951 	ixgbe_link_speed link_caps = 0;
   2952 	bool negotiate = false;
   2953 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2954 
   2955 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2956 
   2957 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2958 		return (EINVAL);
   2959 
   2960 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2961 		return (EPERM);
   2962 
   2963 	IXGBE_CORE_LOCK(adapter);
   2964 	/*
   2965 	 * We don't actually need to check against the supported
   2966 	 * media types of the adapter; ifmedia will take care of
   2967 	 * that for us.
   2968 	 */
   2969 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2970 	case IFM_AUTO:
   2971 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2972 		    &negotiate);
   2973 		if (err != IXGBE_SUCCESS) {
   2974 			device_printf(adapter->dev, "Unable to determine "
   2975 			    "supported advertise speeds\n");
   2976 			IXGBE_CORE_UNLOCK(adapter);
   2977 			return (ENODEV);
   2978 		}
   2979 		speed |= link_caps;
   2980 		break;
   2981 	case IFM_10G_T:
   2982 	case IFM_10G_LRM:
   2983 	case IFM_10G_LR:
   2984 	case IFM_10G_TWINAX:
   2985 	case IFM_10G_SR:
   2986 	case IFM_10G_CX4:
   2987 	case IFM_10G_KR:
   2988 	case IFM_10G_KX4:
   2989 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2990 		break;
   2991 	case IFM_5000_T:
   2992 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2993 		break;
   2994 	case IFM_2500_T:
   2995 	case IFM_2500_KX:
   2996 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2997 		break;
   2998 	case IFM_1000_T:
   2999 	case IFM_1000_LX:
   3000 	case IFM_1000_SX:
   3001 	case IFM_1000_KX:
   3002 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   3003 		break;
   3004 	case IFM_100_TX:
   3005 		speed |= IXGBE_LINK_SPEED_100_FULL;
   3006 		break;
   3007 	case IFM_10_T:
   3008 		speed |= IXGBE_LINK_SPEED_10_FULL;
   3009 		break;
   3010 	case IFM_NONE:
   3011 		break;
   3012 	default:
   3013 		goto invalid;
   3014 	}
   3015 
   3016 	hw->mac.autotry_restart = TRUE;
   3017 	hw->mac.ops.setup_link(hw, speed, TRUE);
   3018 	adapter->advertise = 0;
   3019 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   3020 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   3021 			adapter->advertise |= 1 << 2;
   3022 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   3023 			adapter->advertise |= 1 << 1;
   3024 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   3025 			adapter->advertise |= 1 << 0;
   3026 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   3027 			adapter->advertise |= 1 << 3;
   3028 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   3029 			adapter->advertise |= 1 << 4;
   3030 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   3031 			adapter->advertise |= 1 << 5;
   3032 	}
   3033 
   3034 	IXGBE_CORE_UNLOCK(adapter);
   3035 	return (0);
   3036 
   3037 invalid:
   3038 	device_printf(adapter->dev, "Invalid media type!\n");
   3039 	IXGBE_CORE_UNLOCK(adapter);
   3040 
   3041 	return (EINVAL);
   3042 } /* ixgbe_media_change */
   3043 
   3044 /************************************************************************
   3045  * ixgbe_set_promisc
   3046  ************************************************************************/
   3047 static void
   3048 ixgbe_set_promisc(struct adapter *adapter)
   3049 {
   3050 	struct ifnet *ifp = adapter->ifp;
   3051 	int	     mcnt = 0;
   3052 	u32	     rctl;
   3053 	struct ether_multi *enm;
   3054 	struct ether_multistep step;
   3055 	struct ethercom *ec = &adapter->osdep.ec;
   3056 
   3057 	KASSERT(mutex_owned(&adapter->core_mtx));
   3058 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3059 	rctl &= (~IXGBE_FCTRL_UPE);
   3060 	ETHER_LOCK(ec);
   3061 	if (ec->ec_flags & ETHER_F_ALLMULTI)
   3062 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   3063 	else {
   3064 		ETHER_FIRST_MULTI(step, ec, enm);
   3065 		while (enm != NULL) {
   3066 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   3067 				break;
   3068 			mcnt++;
   3069 			ETHER_NEXT_MULTI(step, enm);
   3070 		}
   3071 	}
   3072 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   3073 		rctl &= (~IXGBE_FCTRL_MPE);
   3074 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3075 
   3076 	if (ifp->if_flags & IFF_PROMISC) {
   3077 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3078 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3079 	} else if (ec->ec_flags & ETHER_F_ALLMULTI) {
   3080 		rctl |= IXGBE_FCTRL_MPE;
   3081 		rctl &= ~IXGBE_FCTRL_UPE;
   3082 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3083 	}
   3084 	ETHER_UNLOCK(ec);
   3085 } /* ixgbe_set_promisc */
   3086 
   3087 /************************************************************************
   3088  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   3089  ************************************************************************/
   3090 static int
   3091 ixgbe_msix_link(void *arg)
   3092 {
   3093 	struct adapter	*adapter = arg;
   3094 	struct ixgbe_hw *hw = &adapter->hw;
   3095 	u32		eicr, eicr_mask;
   3096 	s32		retval;
   3097 
   3098 	++adapter->link_irq.ev_count;
   3099 
   3100 	/* Pause other interrupts */
   3101 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   3102 
   3103 	/* First get the cause */
   3104 	/*
   3105 	 * The specifications of 82598, 82599, X540 and X550 say EICS register
   3106 	 * is write only. However, Linux says it is a workaround for silicon
   3107 	 * errata to read EICS instead of EICR to get interrupt cause. It seems
   3108 	 * there is a problem about read clear mechanism for EICR register.
   3109 	 */
   3110 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   3111 	/* Be sure the queue bits are not cleared */
   3112 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   3113 	/* Clear interrupt with write */
   3114 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   3115 
   3116 	if (ixgbe_is_sfp(hw)) {
   3117 		/* Pluggable optics-related interrupt */
   3118 		if (hw->mac.type >= ixgbe_mac_X540)
   3119 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   3120 		else
   3121 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   3122 
   3123 		/*
   3124 		 *  An interrupt might not arrive when a module is inserted.
   3125 		 * When an link status change interrupt occurred and the driver
   3126 		 * still regard SFP as unplugged, issue the module softint
   3127 		 * and then issue LSC interrupt.
   3128 		 */
   3129 		if ((eicr & eicr_mask)
   3130 		    || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
   3131 			&& (eicr & IXGBE_EICR_LSC))) {
   3132 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   3133 			softint_schedule(adapter->mod_si);
   3134 		}
   3135 
   3136 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   3137 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   3138 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   3139 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   3140 			softint_schedule(adapter->msf_si);
   3141 		}
   3142 	}
   3143 
   3144 	/* Link status change */
   3145 	if (eicr & IXGBE_EICR_LSC) {
   3146 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   3147 		softint_schedule(adapter->link_si);
   3148 	}
   3149 
   3150 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   3151 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   3152 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   3153 			/* This is probably overkill :) */
   3154 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   3155 				return 1;
   3156 			/* Disable the interrupt */
   3157 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   3158 			softint_schedule(adapter->fdir_si);
   3159 		}
   3160 
   3161 		if (eicr & IXGBE_EICR_ECC) {
   3162 			device_printf(adapter->dev,
   3163 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   3164 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   3165 		}
   3166 
   3167 		/* Check for over temp condition */
   3168 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   3169 			switch (adapter->hw.mac.type) {
   3170 			case ixgbe_mac_X550EM_a:
   3171 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   3172 					break;
   3173 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   3174 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   3175 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   3176 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   3177 				retval = hw->phy.ops.check_overtemp(hw);
   3178 				if (retval != IXGBE_ERR_OVERTEMP)
   3179 					break;
   3180 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   3181 				device_printf(adapter->dev, "System shutdown required!\n");
   3182 				break;
   3183 			default:
   3184 				if (!(eicr & IXGBE_EICR_TS))
   3185 					break;
   3186 				retval = hw->phy.ops.check_overtemp(hw);
   3187 				if (retval != IXGBE_ERR_OVERTEMP)
   3188 					break;
   3189 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   3190 				device_printf(adapter->dev, "System shutdown required!\n");
   3191 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   3192 				break;
   3193 			}
   3194 		}
   3195 
   3196 		/* Check for VF message */
   3197 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   3198 		    (eicr & IXGBE_EICR_MAILBOX))
   3199 			softint_schedule(adapter->mbx_si);
   3200 	}
   3201 
   3202 	/* Check for fan failure */
   3203 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   3204 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   3205 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   3206 	}
   3207 
   3208 	/* External PHY interrupt */
   3209 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   3210 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   3211 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   3212 		softint_schedule(adapter->phy_si);
   3213 	}
   3214 
   3215 	/* Re-enable other interrupts */
   3216 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   3217 	return 1;
   3218 } /* ixgbe_msix_link */
   3219 
   3220 static void
   3221 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   3222 {
   3223 
   3224 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3225 		itr |= itr << 16;
   3226 	else
   3227 		itr |= IXGBE_EITR_CNT_WDIS;
   3228 
   3229 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
   3230 }
   3231 
   3232 
   3233 /************************************************************************
   3234  * ixgbe_sysctl_interrupt_rate_handler
   3235  ************************************************************************/
   3236 static int
   3237 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   3238 {
   3239 	struct sysctlnode node = *rnode;
   3240 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   3241 	struct adapter	*adapter;
   3242 	uint32_t reg, usec, rate;
   3243 	int error;
   3244 
   3245 	if (que == NULL)
   3246 		return 0;
   3247 
   3248 	adapter = que->adapter;
   3249 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   3250 		return (EPERM);
   3251 
   3252 	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
   3253 	usec = ((reg & 0x0FF8) >> 3);
   3254 	if (usec > 0)
   3255 		rate = 500000 / usec;
   3256 	else
   3257 		rate = 0;
   3258 	node.sysctl_data = &rate;
   3259 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3260 	if (error || newp == NULL)
   3261 		return error;
   3262 	reg &= ~0xfff; /* default, no limitation */
   3263 	if (rate > 0 && rate < 500000) {
   3264 		if (rate < 1000)
   3265 			rate = 1000;
   3266 		reg |= ((4000000/rate) & 0xff8);
   3267 		/*
   3268 		 * When RSC is used, ITR interval must be larger than
   3269 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   3270 		 * The minimum value is always greater than 2us on 100M
   3271 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   3272 		 */
   3273 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3274 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3275 			if ((adapter->num_queues > 1)
   3276 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3277 				return EINVAL;
   3278 		}
   3279 		ixgbe_max_interrupt_rate = rate;
   3280 	} else
   3281 		ixgbe_max_interrupt_rate = 0;
   3282 	ixgbe_eitr_write(adapter, que->msix, reg);
   3283 
   3284 	return (0);
   3285 } /* ixgbe_sysctl_interrupt_rate_handler */
   3286 
   3287 const struct sysctlnode *
   3288 ixgbe_sysctl_instance(struct adapter *adapter)
   3289 {
   3290 	const char *dvname;
   3291 	struct sysctllog **log;
   3292 	int rc;
   3293 	const struct sysctlnode *rnode;
   3294 
   3295 	if (adapter->sysctltop != NULL)
   3296 		return adapter->sysctltop;
   3297 
   3298 	log = &adapter->sysctllog;
   3299 	dvname = device_xname(adapter->dev);
   3300 
   3301 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3302 	    0, CTLTYPE_NODE, dvname,
   3303 	    SYSCTL_DESCR("ixgbe information and settings"),
   3304 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3305 		goto err;
   3306 
   3307 	return rnode;
   3308 err:
   3309 	device_printf(adapter->dev,
   3310 	    "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3311 	return NULL;
   3312 }
   3313 
   3314 /************************************************************************
   3315  * ixgbe_add_device_sysctls
   3316  ************************************************************************/
   3317 static void
   3318 ixgbe_add_device_sysctls(struct adapter *adapter)
   3319 {
   3320 	device_t	       dev = adapter->dev;
   3321 	struct ixgbe_hw	       *hw = &adapter->hw;
   3322 	struct sysctllog **log;
   3323 	const struct sysctlnode *rnode, *cnode;
   3324 
   3325 	log = &adapter->sysctllog;
   3326 
   3327 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3328 		aprint_error_dev(dev, "could not create sysctl root\n");
   3329 		return;
   3330 	}
   3331 
   3332 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3333 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3334 	    "debug", SYSCTL_DESCR("Debug Info"),
   3335 	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   3336 		aprint_error_dev(dev, "could not create sysctl\n");
   3337 
   3338 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3339 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3340 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3341 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3342 		aprint_error_dev(dev, "could not create sysctl\n");
   3343 
   3344 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3345 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3346 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3347 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3348 		aprint_error_dev(dev, "could not create sysctl\n");
   3349 
   3350 	/* Sysctls for all devices */
   3351 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3352 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3353 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3354 	    CTL_EOL) != 0)
   3355 		aprint_error_dev(dev, "could not create sysctl\n");
   3356 
   3357 	adapter->enable_aim = ixgbe_enable_aim;
   3358 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3359 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3360 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3361 		aprint_error_dev(dev, "could not create sysctl\n");
   3362 
   3363 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3364 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3365 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3366 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3367 	    CTL_EOL) != 0)
   3368 		aprint_error_dev(dev, "could not create sysctl\n");
   3369 
   3370 	/*
   3371 	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
   3372 	 * it causesflip-flopping softint/workqueue mode in one deferred
   3373 	 * processing. Therefore, preempt_disable()/preempt_enable() are
   3374 	 * required in ixgbe_sched_handle_que() to avoid
   3375 	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
   3376 	 * I think changing "que->txrx_use_workqueue" in interrupt handler
   3377 	 * is lighter than doing preempt_disable()/preempt_enable() in every
   3378 	 * ixgbe_sched_handle_que().
   3379 	 */
   3380 	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
   3381 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3382 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   3383 	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   3384 		aprint_error_dev(dev, "could not create sysctl\n");
   3385 
   3386 #ifdef IXGBE_DEBUG
   3387 	/* testing sysctls (for all devices) */
   3388 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3389 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3390 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3391 	    CTL_EOL) != 0)
   3392 		aprint_error_dev(dev, "could not create sysctl\n");
   3393 
   3394 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3395 	    CTLTYPE_STRING, "print_rss_config",
   3396 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3397 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3398 	    CTL_EOL) != 0)
   3399 		aprint_error_dev(dev, "could not create sysctl\n");
   3400 #endif
   3401 	/* for X550 series devices */
   3402 	if (hw->mac.type >= ixgbe_mac_X550)
   3403 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3404 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3405 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3406 		    CTL_EOL) != 0)
   3407 			aprint_error_dev(dev, "could not create sysctl\n");
   3408 
   3409 	/* for WoL-capable devices */
   3410 	if (adapter->wol_support) {
   3411 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3412 		    CTLTYPE_BOOL, "wol_enable",
   3413 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3414 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3415 		    CTL_EOL) != 0)
   3416 			aprint_error_dev(dev, "could not create sysctl\n");
   3417 
   3418 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3419 		    CTLTYPE_INT, "wufc",
   3420 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3421 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3422 		    CTL_EOL) != 0)
   3423 			aprint_error_dev(dev, "could not create sysctl\n");
   3424 	}
   3425 
   3426 	/* for X552/X557-AT devices */
   3427 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3428 		const struct sysctlnode *phy_node;
   3429 
   3430 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3431 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3432 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3433 			aprint_error_dev(dev, "could not create sysctl\n");
   3434 			return;
   3435 		}
   3436 
   3437 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3438 		    CTLTYPE_INT, "temp",
   3439 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3440 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3441 		    CTL_EOL) != 0)
   3442 			aprint_error_dev(dev, "could not create sysctl\n");
   3443 
   3444 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3445 		    CTLTYPE_INT, "overtemp_occurred",
   3446 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3447 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3448 		    CTL_CREATE, CTL_EOL) != 0)
   3449 			aprint_error_dev(dev, "could not create sysctl\n");
   3450 	}
   3451 
   3452 	if ((hw->mac.type == ixgbe_mac_X550EM_a)
   3453 	    && (hw->phy.type == ixgbe_phy_fw))
   3454 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3455 		    CTLTYPE_BOOL, "force_10_100_autonego",
   3456 		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
   3457 		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
   3458 		    CTL_CREATE, CTL_EOL) != 0)
   3459 			aprint_error_dev(dev, "could not create sysctl\n");
   3460 
   3461 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3462 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3463 		    CTLTYPE_INT, "eee_state",
   3464 		    SYSCTL_DESCR("EEE Power Save State"),
   3465 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3466 		    CTL_EOL) != 0)
   3467 			aprint_error_dev(dev, "could not create sysctl\n");
   3468 	}
   3469 } /* ixgbe_add_device_sysctls */
   3470 
   3471 /************************************************************************
   3472  * ixgbe_allocate_pci_resources
   3473  ************************************************************************/
   3474 static int
   3475 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3476     const struct pci_attach_args *pa)
   3477 {
   3478 	pcireg_t	memtype, csr;
   3479 	device_t dev = adapter->dev;
   3480 	bus_addr_t addr;
   3481 	int flags;
   3482 
   3483 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3484 	switch (memtype) {
   3485 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3486 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3487 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3488 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3489 		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3490 			goto map_err;
   3491 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3492 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3493 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3494 		}
   3495 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3496 		     adapter->osdep.mem_size, flags,
   3497 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3498 map_err:
   3499 			adapter->osdep.mem_size = 0;
   3500 			aprint_error_dev(dev, "unable to map BAR0\n");
   3501 			return ENXIO;
   3502 		}
   3503 		/*
   3504 		 * Enable address decoding for memory range in case BIOS or
   3505 		 * UEFI don't set it.
   3506 		 */
   3507 		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
   3508 		    PCI_COMMAND_STATUS_REG);
   3509 		csr |= PCI_COMMAND_MEM_ENABLE;
   3510 		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
   3511 		    csr);
   3512 		break;
   3513 	default:
   3514 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3515 		return ENXIO;
   3516 	}
   3517 
   3518 	return (0);
   3519 } /* ixgbe_allocate_pci_resources */
   3520 
   3521 static void
   3522 ixgbe_free_softint(struct adapter *adapter)
   3523 {
   3524 	struct ix_queue *que = adapter->queues;
   3525 	struct tx_ring *txr = adapter->tx_rings;
   3526 	int i;
   3527 
   3528 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3529 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3530 			if (txr->txr_si != NULL)
   3531 				softint_disestablish(txr->txr_si);
   3532 		}
   3533 		if (que->que_si != NULL)
   3534 			softint_disestablish(que->que_si);
   3535 	}
   3536 	if (adapter->txr_wq != NULL)
   3537 		workqueue_destroy(adapter->txr_wq);
   3538 	if (adapter->txr_wq_enqueued != NULL)
   3539 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
   3540 	if (adapter->que_wq != NULL)
   3541 		workqueue_destroy(adapter->que_wq);
   3542 
   3543 	/* Drain the Link queue */
   3544 	if (adapter->link_si != NULL) {
   3545 		softint_disestablish(adapter->link_si);
   3546 		adapter->link_si = NULL;
   3547 	}
   3548 	if (adapter->mod_si != NULL) {
   3549 		softint_disestablish(adapter->mod_si);
   3550 		adapter->mod_si = NULL;
   3551 	}
   3552 	if (adapter->msf_si != NULL) {
   3553 		softint_disestablish(adapter->msf_si);
   3554 		adapter->msf_si = NULL;
   3555 	}
   3556 	if (adapter->phy_si != NULL) {
   3557 		softint_disestablish(adapter->phy_si);
   3558 		adapter->phy_si = NULL;
   3559 	}
   3560 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3561 		if (adapter->fdir_si != NULL) {
   3562 			softint_disestablish(adapter->fdir_si);
   3563 			adapter->fdir_si = NULL;
   3564 		}
   3565 	}
   3566 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3567 		if (adapter->mbx_si != NULL) {
   3568 			softint_disestablish(adapter->mbx_si);
   3569 			adapter->mbx_si = NULL;
   3570 		}
   3571 	}
   3572 } /* ixgbe_free_softint */
   3573 
   3574 /************************************************************************
   3575  * ixgbe_detach - Device removal routine
   3576  *
   3577  *   Called when the driver is being removed.
   3578  *   Stops the adapter and deallocates all the resources
   3579  *   that were allocated for driver operation.
   3580  *
   3581  *   return 0 on success, positive on failure
   3582  ************************************************************************/
   3583 static int
   3584 ixgbe_detach(device_t dev, int flags)
   3585 {
   3586 	struct adapter *adapter = device_private(dev);
   3587 	struct rx_ring *rxr = adapter->rx_rings;
   3588 	struct tx_ring *txr = adapter->tx_rings;
   3589 	struct ixgbe_hw *hw = &adapter->hw;
   3590 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3591 	u32	ctrl_ext;
   3592 	int i;
   3593 
   3594 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3595 	if (adapter->osdep.attached == false)
   3596 		return 0;
   3597 
   3598 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3599 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3600 		return (EBUSY);
   3601 	}
   3602 
   3603 	/*
   3604 	 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
   3605 	 * so it's not required to call ixgbe_stop() directly.
   3606 	 */
   3607 	IXGBE_CORE_LOCK(adapter);
   3608 	ixgbe_setup_low_power_mode(adapter);
   3609 	IXGBE_CORE_UNLOCK(adapter);
   3610 #if NVLAN > 0
   3611 	/* Make sure VLANs are not using driver */
   3612 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3613 		;	/* nothing to do: no VLANs */
   3614 	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
   3615 		vlan_ifdetach(adapter->ifp);
   3616 	else {
   3617 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3618 		return (EBUSY);
   3619 	}
   3620 #endif
   3621 
   3622 	pmf_device_deregister(dev);
   3623 
   3624 	ether_ifdetach(adapter->ifp);
   3625 
   3626 	ixgbe_free_softint(adapter);
   3627 
   3628 	/* let hardware know driver is unloading */
   3629 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3630 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3631 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3632 
   3633 	callout_halt(&adapter->timer, NULL);
   3634 	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
   3635 		callout_halt(&adapter->recovery_mode_timer, NULL);
   3636 
   3637 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3638 		netmap_detach(adapter->ifp);
   3639 
   3640 	ixgbe_free_pci_resources(adapter);
   3641 #if 0	/* XXX the NetBSD port is probably missing something here */
   3642 	bus_generic_detach(dev);
   3643 #endif
   3644 	if_detach(adapter->ifp);
   3645 	if_percpuq_destroy(adapter->ipq);
   3646 
   3647 	sysctl_teardown(&adapter->sysctllog);
   3648 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3649 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3650 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3651 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3652 	evcnt_detach(&adapter->other_tx_dma_setup);
   3653 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3654 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3655 	evcnt_detach(&adapter->watchdog_events);
   3656 	evcnt_detach(&adapter->tso_err);
   3657 	evcnt_detach(&adapter->link_irq);
   3658 	evcnt_detach(&adapter->link_sicount);
   3659 	evcnt_detach(&adapter->mod_sicount);
   3660 	evcnt_detach(&adapter->msf_sicount);
   3661 	evcnt_detach(&adapter->phy_sicount);
   3662 
   3663 	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
   3664 		if (i < __arraycount(stats->mpc)) {
   3665 			evcnt_detach(&stats->mpc[i]);
   3666 			if (hw->mac.type == ixgbe_mac_82598EB)
   3667 				evcnt_detach(&stats->rnbc[i]);
   3668 		}
   3669 		if (i < __arraycount(stats->pxontxc)) {
   3670 			evcnt_detach(&stats->pxontxc[i]);
   3671 			evcnt_detach(&stats->pxonrxc[i]);
   3672 			evcnt_detach(&stats->pxofftxc[i]);
   3673 			evcnt_detach(&stats->pxoffrxc[i]);
   3674 			if (hw->mac.type >= ixgbe_mac_82599EB)
   3675 				evcnt_detach(&stats->pxon2offc[i]);
   3676 		}
   3677 	}
   3678 
   3679 	txr = adapter->tx_rings;
   3680 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3681 		evcnt_detach(&adapter->queues[i].irqs);
   3682 		evcnt_detach(&adapter->queues[i].handleq);
   3683 		evcnt_detach(&adapter->queues[i].req);
   3684 		evcnt_detach(&txr->no_desc_avail);
   3685 		evcnt_detach(&txr->total_packets);
   3686 		evcnt_detach(&txr->tso_tx);
   3687 #ifndef IXGBE_LEGACY_TX
   3688 		evcnt_detach(&txr->pcq_drops);
   3689 #endif
   3690 
   3691 		if (i < __arraycount(stats->qprc)) {
   3692 			evcnt_detach(&stats->qprc[i]);
   3693 			evcnt_detach(&stats->qptc[i]);
   3694 			evcnt_detach(&stats->qbrc[i]);
   3695 			evcnt_detach(&stats->qbtc[i]);
   3696 			if (hw->mac.type >= ixgbe_mac_82599EB)
   3697 				evcnt_detach(&stats->qprdc[i]);
   3698 		}
   3699 
   3700 		evcnt_detach(&rxr->rx_packets);
   3701 		evcnt_detach(&rxr->rx_bytes);
   3702 		evcnt_detach(&rxr->rx_copies);
   3703 		evcnt_detach(&rxr->no_jmbuf);
   3704 		evcnt_detach(&rxr->rx_discarded);
   3705 	}
   3706 	evcnt_detach(&stats->ipcs);
   3707 	evcnt_detach(&stats->l4cs);
   3708 	evcnt_detach(&stats->ipcs_bad);
   3709 	evcnt_detach(&stats->l4cs_bad);
   3710 	evcnt_detach(&stats->intzero);
   3711 	evcnt_detach(&stats->legint);
   3712 	evcnt_detach(&stats->crcerrs);
   3713 	evcnt_detach(&stats->illerrc);
   3714 	evcnt_detach(&stats->errbc);
   3715 	evcnt_detach(&stats->mspdc);
   3716 	if (hw->mac.type >= ixgbe_mac_X550)
   3717 		evcnt_detach(&stats->mbsdc);
   3718 	evcnt_detach(&stats->mpctotal);
   3719 	evcnt_detach(&stats->mlfc);
   3720 	evcnt_detach(&stats->mrfc);
   3721 	evcnt_detach(&stats->rlec);
   3722 	evcnt_detach(&stats->lxontxc);
   3723 	evcnt_detach(&stats->lxonrxc);
   3724 	evcnt_detach(&stats->lxofftxc);
   3725 	evcnt_detach(&stats->lxoffrxc);
   3726 
   3727 	/* Packet Reception Stats */
   3728 	evcnt_detach(&stats->tor);
   3729 	evcnt_detach(&stats->gorc);
   3730 	evcnt_detach(&stats->tpr);
   3731 	evcnt_detach(&stats->gprc);
   3732 	evcnt_detach(&stats->mprc);
   3733 	evcnt_detach(&stats->bprc);
   3734 	evcnt_detach(&stats->prc64);
   3735 	evcnt_detach(&stats->prc127);
   3736 	evcnt_detach(&stats->prc255);
   3737 	evcnt_detach(&stats->prc511);
   3738 	evcnt_detach(&stats->prc1023);
   3739 	evcnt_detach(&stats->prc1522);
   3740 	evcnt_detach(&stats->ruc);
   3741 	evcnt_detach(&stats->rfc);
   3742 	evcnt_detach(&stats->roc);
   3743 	evcnt_detach(&stats->rjc);
   3744 	evcnt_detach(&stats->mngprc);
   3745 	evcnt_detach(&stats->mngpdc);
   3746 	evcnt_detach(&stats->xec);
   3747 
   3748 	/* Packet Transmission Stats */
   3749 	evcnt_detach(&stats->gotc);
   3750 	evcnt_detach(&stats->tpt);
   3751 	evcnt_detach(&stats->gptc);
   3752 	evcnt_detach(&stats->bptc);
   3753 	evcnt_detach(&stats->mptc);
   3754 	evcnt_detach(&stats->mngptc);
   3755 	evcnt_detach(&stats->ptc64);
   3756 	evcnt_detach(&stats->ptc127);
   3757 	evcnt_detach(&stats->ptc255);
   3758 	evcnt_detach(&stats->ptc511);
   3759 	evcnt_detach(&stats->ptc1023);
   3760 	evcnt_detach(&stats->ptc1522);
   3761 
   3762 	ixgbe_free_transmit_structures(adapter);
   3763 	ixgbe_free_receive_structures(adapter);
   3764 	for (i = 0; i < adapter->num_queues; i++) {
   3765 		struct ix_queue * que = &adapter->queues[i];
   3766 		mutex_destroy(&que->dc_mtx);
   3767 	}
   3768 	free(adapter->queues, M_DEVBUF);
   3769 	free(adapter->mta, M_DEVBUF);
   3770 
   3771 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3772 
   3773 	return (0);
   3774 } /* ixgbe_detach */
   3775 
   3776 /************************************************************************
   3777  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3778  *
   3779  *   Prepare the adapter/port for LPLU and/or WoL
   3780  ************************************************************************/
   3781 static int
   3782 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3783 {
   3784 	struct ixgbe_hw *hw = &adapter->hw;
   3785 	device_t	dev = adapter->dev;
   3786 	s32		error = 0;
   3787 
   3788 	KASSERT(mutex_owned(&adapter->core_mtx));
   3789 
   3790 	/* Limit power management flow to X550EM baseT */
   3791 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3792 	    hw->phy.ops.enter_lplu) {
   3793 		/* X550EM baseT adapters need a special LPLU flow */
   3794 		hw->phy.reset_disable = true;
   3795 		ixgbe_stop(adapter);
   3796 		error = hw->phy.ops.enter_lplu(hw);
   3797 		if (error)
   3798 			device_printf(dev,
   3799 			    "Error entering LPLU: %d\n", error);
   3800 		hw->phy.reset_disable = false;
   3801 	} else {
   3802 		/* Just stop for other adapters */
   3803 		ixgbe_stop(adapter);
   3804 	}
   3805 
   3806 	if (!hw->wol_enabled) {
   3807 		ixgbe_set_phy_power(hw, FALSE);
   3808 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3809 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3810 	} else {
   3811 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3812 		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
   3813 		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
   3814 
   3815 		/*
   3816 		 * Clear Wake Up Status register to prevent any previous wakeup
   3817 		 * events from waking us up immediately after we suspend.
   3818 		 */
   3819 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3820 
   3821 		/*
   3822 		 * Program the Wakeup Filter Control register with user filter
   3823 		 * settings
   3824 		 */
   3825 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3826 
   3827 		/* Enable wakeups and power management in Wakeup Control */
   3828 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3829 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3830 
   3831 	}
   3832 
   3833 	return error;
   3834 } /* ixgbe_setup_low_power_mode */
   3835 
   3836 /************************************************************************
   3837  * ixgbe_shutdown - Shutdown entry point
   3838  ************************************************************************/
   3839 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3840 static int
   3841 ixgbe_shutdown(device_t dev)
   3842 {
   3843 	struct adapter *adapter = device_private(dev);
   3844 	int error = 0;
   3845 
   3846 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3847 
   3848 	IXGBE_CORE_LOCK(adapter);
   3849 	error = ixgbe_setup_low_power_mode(adapter);
   3850 	IXGBE_CORE_UNLOCK(adapter);
   3851 
   3852 	return (error);
   3853 } /* ixgbe_shutdown */
   3854 #endif
   3855 
   3856 /************************************************************************
   3857  * ixgbe_suspend
   3858  *
   3859  *   From D0 to D3
   3860  ************************************************************************/
   3861 static bool
   3862 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3863 {
   3864 	struct adapter *adapter = device_private(dev);
   3865 	int	       error = 0;
   3866 
   3867 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3868 
   3869 	IXGBE_CORE_LOCK(adapter);
   3870 
   3871 	error = ixgbe_setup_low_power_mode(adapter);
   3872 
   3873 	IXGBE_CORE_UNLOCK(adapter);
   3874 
   3875 	return (error);
   3876 } /* ixgbe_suspend */
   3877 
   3878 /************************************************************************
   3879  * ixgbe_resume
   3880  *
   3881  *   From D3 to D0
   3882  ************************************************************************/
   3883 static bool
   3884 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3885 {
   3886 	struct adapter	*adapter = device_private(dev);
   3887 	struct ifnet	*ifp = adapter->ifp;
   3888 	struct ixgbe_hw *hw = &adapter->hw;
   3889 	u32		wus;
   3890 
   3891 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3892 
   3893 	IXGBE_CORE_LOCK(adapter);
   3894 
   3895 	/* Read & clear WUS register */
   3896 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3897 	if (wus)
   3898 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3899 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3900 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3901 	/* And clear WUFC until next low-power transition */
   3902 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3903 
   3904 	/*
   3905 	 * Required after D3->D0 transition;
   3906 	 * will re-advertise all previous advertised speeds
   3907 	 */
   3908 	if (ifp->if_flags & IFF_UP)
   3909 		ixgbe_init_locked(adapter);
   3910 
   3911 	IXGBE_CORE_UNLOCK(adapter);
   3912 
   3913 	return true;
   3914 } /* ixgbe_resume */
   3915 
   3916 /*
   3917  * Set the various hardware offload abilities.
   3918  *
   3919  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3920  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3921  * mbuf offload flags the driver will understand.
   3922  */
   3923 static void
   3924 ixgbe_set_if_hwassist(struct adapter *adapter)
   3925 {
   3926 	/* XXX */
   3927 }
   3928 
   3929 /************************************************************************
   3930  * ixgbe_init_locked - Init entry point
   3931  *
   3932  *   Used in two ways: It is used by the stack as an init
   3933  *   entry point in network interface structure. It is also
   3934  *   used by the driver as a hw/sw initialization routine to
   3935  *   get to a consistent state.
   3936  *
   3937  *   return 0 on success, positive on failure
   3938  ************************************************************************/
   3939 static void
   3940 ixgbe_init_locked(struct adapter *adapter)
   3941 {
   3942 	struct ifnet   *ifp = adapter->ifp;
   3943 	device_t	dev = adapter->dev;
   3944 	struct ixgbe_hw *hw = &adapter->hw;
   3945 	struct ix_queue *que;
   3946 	struct tx_ring	*txr;
   3947 	struct rx_ring	*rxr;
   3948 	u32		txdctl, mhadd;
   3949 	u32		rxdctl, rxctrl;
   3950 	u32		ctrl_ext;
   3951 	int		i, j, err;
   3952 
   3953 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3954 
   3955 	KASSERT(mutex_owned(&adapter->core_mtx));
   3956 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3957 
   3958 	hw->adapter_stopped = FALSE;
   3959 	ixgbe_stop_adapter(hw);
   3960 	callout_stop(&adapter->timer);
   3961 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
   3962 		que->disabled_count = 0;
   3963 
   3964 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3965 	adapter->max_frame_size =
   3966 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3967 
   3968 	/* Queue indices may change with IOV mode */
   3969 	ixgbe_align_all_queue_indices(adapter);
   3970 
   3971 	/* reprogram the RAR[0] in case user changed it. */
   3972 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3973 
   3974 	/* Get the latest mac address, User can use a LAA */
   3975 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3976 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3977 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3978 	hw->addr_ctrl.rar_used_count = 1;
   3979 
   3980 	/* Set hardware offload abilities from ifnet flags */
   3981 	ixgbe_set_if_hwassist(adapter);
   3982 
   3983 	/* Prepare transmit descriptors and buffers */
   3984 	if (ixgbe_setup_transmit_structures(adapter)) {
   3985 		device_printf(dev, "Could not setup transmit structures\n");
   3986 		ixgbe_stop(adapter);
   3987 		return;
   3988 	}
   3989 
   3990 	ixgbe_init_hw(hw);
   3991 
   3992 	ixgbe_initialize_iov(adapter);
   3993 
   3994 	ixgbe_initialize_transmit_units(adapter);
   3995 
   3996 	/* Setup Multicast table */
   3997 	ixgbe_set_multi(adapter);
   3998 
   3999 	/* Determine the correct mbuf pool, based on frame size */
   4000 	if (adapter->max_frame_size <= MCLBYTES)
   4001 		adapter->rx_mbuf_sz = MCLBYTES;
   4002 	else
   4003 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   4004 
   4005 	/* Prepare receive descriptors and buffers */
   4006 	if (ixgbe_setup_receive_structures(adapter)) {
   4007 		device_printf(dev, "Could not setup receive structures\n");
   4008 		ixgbe_stop(adapter);
   4009 		return;
   4010 	}
   4011 
   4012 	/* Configure RX settings */
   4013 	ixgbe_initialize_receive_units(adapter);
   4014 
   4015 	/* Enable SDP & MSI-X interrupts based on adapter */
   4016 	ixgbe_config_gpie(adapter);
   4017 
   4018 	/* Set MTU size */
   4019 	if (ifp->if_mtu > ETHERMTU) {
   4020 		/* aka IXGBE_MAXFRS on 82599 and newer */
   4021 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   4022 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   4023 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   4024 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   4025 	}
   4026 
   4027 	/* Now enable all the queues */
   4028 	for (i = 0; i < adapter->num_queues; i++) {
   4029 		txr = &adapter->tx_rings[i];
   4030 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   4031 		txdctl |= IXGBE_TXDCTL_ENABLE;
   4032 		/* Set WTHRESH to 8, burst writeback */
   4033 		txdctl |= (8 << 16);
   4034 		/*
   4035 		 * When the internal queue falls below PTHRESH (32),
   4036 		 * start prefetching as long as there are at least
   4037 		 * HTHRESH (1) buffers ready. The values are taken
   4038 		 * from the Intel linux driver 3.8.21.
   4039 		 * Prefetching enables tx line rate even with 1 queue.
   4040 		 */
   4041 		txdctl |= (32 << 0) | (1 << 8);
   4042 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   4043 	}
   4044 
   4045 	for (i = 0; i < adapter->num_queues; i++) {
   4046 		rxr = &adapter->rx_rings[i];
   4047 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   4048 		if (hw->mac.type == ixgbe_mac_82598EB) {
   4049 			/*
   4050 			 * PTHRESH = 21
   4051 			 * HTHRESH = 4
   4052 			 * WTHRESH = 8
   4053 			 */
   4054 			rxdctl &= ~0x3FFFFF;
   4055 			rxdctl |= 0x080420;
   4056 		}
   4057 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   4058 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   4059 		for (j = 0; j < 10; j++) {
   4060 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   4061 			    IXGBE_RXDCTL_ENABLE)
   4062 				break;
   4063 			else
   4064 				msec_delay(1);
   4065 		}
   4066 		wmb();
   4067 
   4068 		/*
   4069 		 * In netmap mode, we must preserve the buffers made
   4070 		 * available to userspace before the if_init()
   4071 		 * (this is true by default on the TX side, because
   4072 		 * init makes all buffers available to userspace).
   4073 		 *
   4074 		 * netmap_reset() and the device specific routines
   4075 		 * (e.g. ixgbe_setup_receive_rings()) map these
   4076 		 * buffers at the end of the NIC ring, so here we
   4077 		 * must set the RDT (tail) register to make sure
   4078 		 * they are not overwritten.
   4079 		 *
   4080 		 * In this driver the NIC ring starts at RDH = 0,
   4081 		 * RDT points to the last slot available for reception (?),
   4082 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   4083 		 */
   4084 #ifdef DEV_NETMAP
   4085 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   4086 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   4087 			struct netmap_adapter *na = NA(adapter->ifp);
   4088 			struct netmap_kring *kring = na->rx_rings[i];
   4089 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   4090 
   4091 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   4092 		} else
   4093 #endif /* DEV_NETMAP */
   4094 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   4095 			    adapter->num_rx_desc - 1);
   4096 	}
   4097 
   4098 	/* Enable Receive engine */
   4099 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4100 	if (hw->mac.type == ixgbe_mac_82598EB)
   4101 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   4102 	rxctrl |= IXGBE_RXCTRL_RXEN;
   4103 	ixgbe_enable_rx_dma(hw, rxctrl);
   4104 
   4105 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4106 
   4107 	/* Set up MSI/MSI-X routing */
   4108 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4109 		ixgbe_configure_ivars(adapter);
   4110 		/* Set up auto-mask */
   4111 		if (hw->mac.type == ixgbe_mac_82598EB)
   4112 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4113 		else {
   4114 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   4115 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   4116 		}
   4117 	} else {  /* Simple settings for Legacy/MSI */
   4118 		ixgbe_set_ivar(adapter, 0, 0, 0);
   4119 		ixgbe_set_ivar(adapter, 0, 0, 1);
   4120 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4121 	}
   4122 
   4123 	ixgbe_init_fdir(adapter);
   4124 
   4125 	/*
   4126 	 * Check on any SFP devices that
   4127 	 * need to be kick-started
   4128 	 */
   4129 	if (hw->phy.type == ixgbe_phy_none) {
   4130 		err = hw->phy.ops.identify(hw);
   4131 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4132 			device_printf(dev,
   4133 			    "Unsupported SFP+ module type was detected.\n");
   4134 			return;
   4135 		}
   4136 	}
   4137 
   4138 	/* Set moderation on the Link interrupt */
   4139 	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
   4140 
   4141 	/* Enable EEE power saving */
   4142 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   4143 		hw->mac.ops.setup_eee(hw,
   4144 		    adapter->feat_en & IXGBE_FEATURE_EEE);
   4145 
   4146 	/* Enable power to the phy. */
   4147 	ixgbe_set_phy_power(hw, TRUE);
   4148 
   4149 	/* Config/Enable Link */
   4150 	ixgbe_config_link(adapter);
   4151 
   4152 	/* Hardware Packet Buffer & Flow Control setup */
   4153 	ixgbe_config_delay_values(adapter);
   4154 
   4155 	/* Initialize the FC settings */
   4156 	ixgbe_start_hw(hw);
   4157 
   4158 	/* Set up VLAN support and filter */
   4159 	ixgbe_setup_vlan_hw_support(adapter);
   4160 
   4161 	/* Setup DMA Coalescing */
   4162 	ixgbe_config_dmac(adapter);
   4163 
   4164 	/* And now turn on interrupts */
   4165 	ixgbe_enable_intr(adapter);
   4166 
   4167 	/* Enable the use of the MBX by the VF's */
   4168 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   4169 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   4170 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   4171 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   4172 	}
   4173 
   4174 	/* Update saved flags. See ixgbe_ifflags_cb() */
   4175 	adapter->if_flags = ifp->if_flags;
   4176 	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
   4177 
   4178 	/* Now inform the stack we're ready */
   4179 	ifp->if_flags |= IFF_RUNNING;
   4180 
   4181 	return;
   4182 } /* ixgbe_init_locked */
   4183 
   4184 /************************************************************************
   4185  * ixgbe_init
   4186  ************************************************************************/
   4187 static int
   4188 ixgbe_init(struct ifnet *ifp)
   4189 {
   4190 	struct adapter *adapter = ifp->if_softc;
   4191 
   4192 	IXGBE_CORE_LOCK(adapter);
   4193 	ixgbe_init_locked(adapter);
   4194 	IXGBE_CORE_UNLOCK(adapter);
   4195 
   4196 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   4197 } /* ixgbe_init */
   4198 
   4199 /************************************************************************
   4200  * ixgbe_set_ivar
   4201  *
   4202  *   Setup the correct IVAR register for a particular MSI-X interrupt
   4203  *     (yes this is all very magic and confusing :)
   4204  *    - entry is the register array entry
   4205  *    - vector is the MSI-X vector for this queue
   4206  *    - type is RX/TX/MISC
   4207  ************************************************************************/
   4208 static void
   4209 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4210 {
   4211 	struct ixgbe_hw *hw = &adapter->hw;
   4212 	u32 ivar, index;
   4213 
   4214 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4215 
   4216 	switch (hw->mac.type) {
   4217 	case ixgbe_mac_82598EB:
   4218 		if (type == -1)
   4219 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4220 		else
   4221 			entry += (type * 64);
   4222 		index = (entry >> 2) & 0x1F;
   4223 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4224 		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
   4225 		ivar |= ((u32)vector << (8 * (entry & 0x3)));
   4226 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4227 		break;
   4228 	case ixgbe_mac_82599EB:
   4229 	case ixgbe_mac_X540:
   4230 	case ixgbe_mac_X550:
   4231 	case ixgbe_mac_X550EM_x:
   4232 	case ixgbe_mac_X550EM_a:
   4233 		if (type == -1) { /* MISC IVAR */
   4234 			index = (entry & 1) * 8;
   4235 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4236 			ivar &= ~(0xffUL << index);
   4237 			ivar |= ((u32)vector << index);
   4238 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4239 		} else {	/* RX/TX IVARS */
   4240 			index = (16 * (entry & 1)) + (8 * type);
   4241 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4242 			ivar &= ~(0xffUL << index);
   4243 			ivar |= ((u32)vector << index);
   4244 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   4245 		}
   4246 		break;
   4247 	default:
   4248 		break;
   4249 	}
   4250 } /* ixgbe_set_ivar */
   4251 
   4252 /************************************************************************
   4253  * ixgbe_configure_ivars
   4254  ************************************************************************/
   4255 static void
   4256 ixgbe_configure_ivars(struct adapter *adapter)
   4257 {
   4258 	struct ix_queue *que = adapter->queues;
   4259 	u32		newitr;
   4260 
   4261 	if (ixgbe_max_interrupt_rate > 0)
   4262 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4263 	else {
   4264 		/*
   4265 		 * Disable DMA coalescing if interrupt moderation is
   4266 		 * disabled.
   4267 		 */
   4268 		adapter->dmac = 0;
   4269 		newitr = 0;
   4270 	}
   4271 
   4272 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4273 		struct rx_ring *rxr = &adapter->rx_rings[i];
   4274 		struct tx_ring *txr = &adapter->tx_rings[i];
   4275 		/* First the RX queue entry */
   4276 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   4277 		/* ... and the TX */
   4278 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   4279 		/* Set an Initial EITR value */
   4280 		ixgbe_eitr_write(adapter, que->msix, newitr);
   4281 		/*
   4282 		 * To eliminate influence of the previous state.
   4283 		 * At this point, Tx/Rx interrupt handler
   4284 		 * (ixgbe_msix_que()) cannot be called, so  both
   4285 		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
   4286 		 */
   4287 		que->eitr_setting = 0;
   4288 	}
   4289 
   4290 	/* For the Link interrupt */
   4291 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   4292 } /* ixgbe_configure_ivars */
   4293 
   4294 /************************************************************************
   4295  * ixgbe_config_gpie
   4296  ************************************************************************/
   4297 static void
   4298 ixgbe_config_gpie(struct adapter *adapter)
   4299 {
   4300 	struct ixgbe_hw *hw = &adapter->hw;
   4301 	u32		gpie;
   4302 
   4303 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   4304 
   4305 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4306 		/* Enable Enhanced MSI-X mode */
   4307 		gpie |= IXGBE_GPIE_MSIX_MODE
   4308 		     |	IXGBE_GPIE_EIAME
   4309 		     |	IXGBE_GPIE_PBA_SUPPORT
   4310 		     |	IXGBE_GPIE_OCD;
   4311 	}
   4312 
   4313 	/* Fan Failure Interrupt */
   4314 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4315 		gpie |= IXGBE_SDP1_GPIEN;
   4316 
   4317 	/* Thermal Sensor Interrupt */
   4318 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   4319 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4320 
   4321 	/* Link detection */
   4322 	switch (hw->mac.type) {
   4323 	case ixgbe_mac_82599EB:
   4324 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   4325 		break;
   4326 	case ixgbe_mac_X550EM_x:
   4327 	case ixgbe_mac_X550EM_a:
   4328 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4329 		break;
   4330 	default:
   4331 		break;
   4332 	}
   4333 
   4334 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   4335 
   4336 } /* ixgbe_config_gpie */
   4337 
   4338 /************************************************************************
   4339  * ixgbe_config_delay_values
   4340  *
   4341  *   Requires adapter->max_frame_size to be set.
   4342  ************************************************************************/
   4343 static void
   4344 ixgbe_config_delay_values(struct adapter *adapter)
   4345 {
   4346 	struct ixgbe_hw *hw = &adapter->hw;
   4347 	u32		rxpb, frame, size, tmp;
   4348 
   4349 	frame = adapter->max_frame_size;
   4350 
   4351 	/* Calculate High Water */
   4352 	switch (hw->mac.type) {
   4353 	case ixgbe_mac_X540:
   4354 	case ixgbe_mac_X550:
   4355 	case ixgbe_mac_X550EM_x:
   4356 	case ixgbe_mac_X550EM_a:
   4357 		tmp = IXGBE_DV_X540(frame, frame);
   4358 		break;
   4359 	default:
   4360 		tmp = IXGBE_DV(frame, frame);
   4361 		break;
   4362 	}
   4363 	size = IXGBE_BT2KB(tmp);
   4364 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4365 	hw->fc.high_water[0] = rxpb - size;
   4366 
   4367 	/* Now calculate Low Water */
   4368 	switch (hw->mac.type) {
   4369 	case ixgbe_mac_X540:
   4370 	case ixgbe_mac_X550:
   4371 	case ixgbe_mac_X550EM_x:
   4372 	case ixgbe_mac_X550EM_a:
   4373 		tmp = IXGBE_LOW_DV_X540(frame);
   4374 		break;
   4375 	default:
   4376 		tmp = IXGBE_LOW_DV(frame);
   4377 		break;
   4378 	}
   4379 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4380 
   4381 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4382 	hw->fc.send_xon = TRUE;
   4383 } /* ixgbe_config_delay_values */
   4384 
   4385 /************************************************************************
   4386  * ixgbe_set_multi - Multicast Update
   4387  *
   4388  *   Called whenever multicast address list is updated.
   4389  ************************************************************************/
   4390 static void
   4391 ixgbe_set_multi(struct adapter *adapter)
   4392 {
   4393 	struct ixgbe_mc_addr	*mta;
   4394 	struct ifnet		*ifp = adapter->ifp;
   4395 	u8			*update_ptr;
   4396 	int			mcnt = 0;
   4397 	u32			fctrl;
   4398 	struct ethercom		*ec = &adapter->osdep.ec;
   4399 	struct ether_multi	*enm;
   4400 	struct ether_multistep	step;
   4401 
   4402 	KASSERT(mutex_owned(&adapter->core_mtx));
   4403 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4404 
   4405 	mta = adapter->mta;
   4406 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4407 
   4408 	ETHER_LOCK(ec);
   4409 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   4410 	ETHER_FIRST_MULTI(step, ec, enm);
   4411 	while (enm != NULL) {
   4412 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4413 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4414 			ETHER_ADDR_LEN) != 0)) {
   4415 			ec->ec_flags |= ETHER_F_ALLMULTI;
   4416 			break;
   4417 		}
   4418 		bcopy(enm->enm_addrlo,
   4419 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4420 		mta[mcnt].vmdq = adapter->pool;
   4421 		mcnt++;
   4422 		ETHER_NEXT_MULTI(step, enm);
   4423 	}
   4424 
   4425 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4426 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4427 	if (ifp->if_flags & IFF_PROMISC)
   4428 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4429 	else if (ec->ec_flags & ETHER_F_ALLMULTI) {
   4430 		fctrl |= IXGBE_FCTRL_MPE;
   4431 	}
   4432 
   4433 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4434 
   4435 	/* Update multicast filter entries only when it's not ALLMULTI */
   4436 	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
   4437 		ETHER_UNLOCK(ec);
   4438 		update_ptr = (u8 *)mta;
   4439 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4440 		    ixgbe_mc_array_itr, TRUE);
   4441 	} else
   4442 		ETHER_UNLOCK(ec);
   4443 
   4444 } /* ixgbe_set_multi */
   4445 
   4446 /************************************************************************
   4447  * ixgbe_mc_array_itr
   4448  *
   4449  *   An iterator function needed by the multicast shared code.
   4450  *   It feeds the shared code routine the addresses in the
   4451  *   array of ixgbe_set_multi() one by one.
   4452  ************************************************************************/
   4453 static u8 *
   4454 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4455 {
   4456 	struct ixgbe_mc_addr *mta;
   4457 
   4458 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4459 	*vmdq = mta->vmdq;
   4460 
   4461 	*update_ptr = (u8*)(mta + 1);
   4462 
   4463 	return (mta->addr);
   4464 } /* ixgbe_mc_array_itr */
   4465 
   4466 /************************************************************************
   4467  * ixgbe_local_timer - Timer routine
   4468  *
   4469  *   Checks for link status, updates statistics,
   4470  *   and runs the watchdog check.
   4471  ************************************************************************/
   4472 static void
   4473 ixgbe_local_timer(void *arg)
   4474 {
   4475 	struct adapter *adapter = arg;
   4476 
   4477 	IXGBE_CORE_LOCK(adapter);
   4478 	ixgbe_local_timer1(adapter);
   4479 	IXGBE_CORE_UNLOCK(adapter);
   4480 }
   4481 
   4482 static void
   4483 ixgbe_local_timer1(void *arg)
   4484 {
   4485 	struct adapter	*adapter = arg;
   4486 	device_t	dev = adapter->dev;
   4487 	struct ix_queue *que = adapter->queues;
   4488 	u64		queues = 0;
   4489 	u64		v0, v1, v2, v3, v4, v5, v6, v7;
   4490 	int		hung = 0;
   4491 	int		i;
   4492 
   4493 	KASSERT(mutex_owned(&adapter->core_mtx));
   4494 
   4495 	/* Check for pluggable optics */
   4496 	if (adapter->sfp_probe)
   4497 		if (!ixgbe_sfp_probe(adapter))
   4498 			goto out; /* Nothing to do */
   4499 
   4500 	ixgbe_update_link_status(adapter);
   4501 	ixgbe_update_stats_counters(adapter);
   4502 
   4503 	/* Update some event counters */
   4504 	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
   4505 	que = adapter->queues;
   4506 	for (i = 0; i < adapter->num_queues; i++, que++) {
   4507 		struct tx_ring	*txr = que->txr;
   4508 
   4509 		v0 += txr->q_efbig_tx_dma_setup;
   4510 		v1 += txr->q_mbuf_defrag_failed;
   4511 		v2 += txr->q_efbig2_tx_dma_setup;
   4512 		v3 += txr->q_einval_tx_dma_setup;
   4513 		v4 += txr->q_other_tx_dma_setup;
   4514 		v5 += txr->q_eagain_tx_dma_setup;
   4515 		v6 += txr->q_enomem_tx_dma_setup;
   4516 		v7 += txr->q_tso_err;
   4517 	}
   4518 	adapter->efbig_tx_dma_setup.ev_count = v0;
   4519 	adapter->mbuf_defrag_failed.ev_count = v1;
   4520 	adapter->efbig2_tx_dma_setup.ev_count = v2;
   4521 	adapter->einval_tx_dma_setup.ev_count = v3;
   4522 	adapter->other_tx_dma_setup.ev_count = v4;
   4523 	adapter->eagain_tx_dma_setup.ev_count = v5;
   4524 	adapter->enomem_tx_dma_setup.ev_count = v6;
   4525 	adapter->tso_err.ev_count = v7;
   4526 
   4527 	/*
   4528 	 * Check the TX queues status
   4529 	 *	- mark hung queues so we don't schedule on them
   4530 	 *	- watchdog only if all queues show hung
   4531 	 */
   4532 	que = adapter->queues;
   4533 	for (i = 0; i < adapter->num_queues; i++, que++) {
   4534 		/* Keep track of queues with work for soft irq */
   4535 		if (que->txr->busy)
   4536 			queues |= 1ULL << que->me;
   4537 		/*
   4538 		 * Each time txeof runs without cleaning, but there
   4539 		 * are uncleaned descriptors it increments busy. If
   4540 		 * we get to the MAX we declare it hung.
   4541 		 */
   4542 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4543 			++hung;
   4544 			/* Mark the queue as inactive */
   4545 			adapter->active_queues &= ~(1ULL << que->me);
   4546 			continue;
   4547 		} else {
   4548 			/* Check if we've come back from hung */
   4549 			if ((adapter->active_queues & (1ULL << que->me)) == 0)
   4550 				adapter->active_queues |= 1ULL << que->me;
   4551 		}
   4552 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4553 			device_printf(dev,
   4554 			    "Warning queue %d appears to be hung!\n", i);
   4555 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4556 			++hung;
   4557 		}
   4558 	}
   4559 
   4560 	/* Only truely watchdog if all queues show hung */
   4561 	if (hung == adapter->num_queues)
   4562 		goto watchdog;
   4563 #if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
   4564 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4565 		que = adapter->queues;
   4566 		for (i = 0; i < adapter->num_queues; i++, que++) {
   4567 			mutex_enter(&que->dc_mtx);
   4568 			if (que->disabled_count == 0)
   4569 				ixgbe_rearm_queues(adapter,
   4570 				    queues & ((u64)1 << i));
   4571 			mutex_exit(&que->dc_mtx);
   4572 		}
   4573 	}
   4574 #endif
   4575 
   4576 out:
   4577 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4578 	return;
   4579 
   4580 watchdog:
   4581 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4582 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4583 	adapter->watchdog_events.ev_count++;
   4584 	ixgbe_init_locked(adapter);
   4585 } /* ixgbe_local_timer */
   4586 
   4587 /************************************************************************
   4588  * ixgbe_recovery_mode_timer - Recovery mode timer routine
   4589  ************************************************************************/
   4590 static void
   4591 ixgbe_recovery_mode_timer(void *arg)
   4592 {
   4593 	struct adapter *adapter = arg;
   4594 	struct ixgbe_hw *hw = &adapter->hw;
   4595 
   4596 	IXGBE_CORE_LOCK(adapter);
   4597 	if (ixgbe_fw_recovery_mode(hw)) {
   4598 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
   4599 			/* Firmware error detected, entering recovery mode */
   4600 			device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
   4601 
   4602 			if (hw->adapter_stopped == FALSE)
   4603 				ixgbe_stop(adapter);
   4604 		}
   4605 	} else
   4606 		atomic_cas_uint(&adapter->recovery_mode, 1, 0);
   4607 
   4608 	callout_reset(&adapter->recovery_mode_timer, hz,
   4609 	    ixgbe_recovery_mode_timer, adapter);
   4610 	IXGBE_CORE_UNLOCK(adapter);
   4611 } /* ixgbe_recovery_mode_timer */
   4612 
   4613 /************************************************************************
   4614  * ixgbe_sfp_probe
   4615  *
   4616  *   Determine if a port had optics inserted.
   4617  ************************************************************************/
   4618 static bool
   4619 ixgbe_sfp_probe(struct adapter *adapter)
   4620 {
   4621 	struct ixgbe_hw	*hw = &adapter->hw;
   4622 	device_t	dev = adapter->dev;
   4623 	bool		result = FALSE;
   4624 
   4625 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4626 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4627 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4628 		if (ret)
   4629 			goto out;
   4630 		ret = hw->phy.ops.reset(hw);
   4631 		adapter->sfp_probe = FALSE;
   4632 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4633 			device_printf(dev,"Unsupported SFP+ module detected!");
   4634 			device_printf(dev,
   4635 			    "Reload driver with supported module.\n");
   4636 			goto out;
   4637 		} else
   4638 			device_printf(dev, "SFP+ module detected!\n");
   4639 		/* We now have supported optics */
   4640 		result = TRUE;
   4641 	}
   4642 out:
   4643 
   4644 	return (result);
   4645 } /* ixgbe_sfp_probe */
   4646 
   4647 /************************************************************************
   4648  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4649  ************************************************************************/
   4650 static void
   4651 ixgbe_handle_mod(void *context)
   4652 {
   4653 	struct adapter	*adapter = context;
   4654 	struct ixgbe_hw *hw = &adapter->hw;
   4655 	device_t	dev = adapter->dev;
   4656 	u32		err, cage_full = 0;
   4657 
   4658 	++adapter->mod_sicount.ev_count;
   4659 	if (adapter->hw.need_crosstalk_fix) {
   4660 		switch (hw->mac.type) {
   4661 		case ixgbe_mac_82599EB:
   4662 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4663 			    IXGBE_ESDP_SDP2;
   4664 			break;
   4665 		case ixgbe_mac_X550EM_x:
   4666 		case ixgbe_mac_X550EM_a:
   4667 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4668 			    IXGBE_ESDP_SDP0;
   4669 			break;
   4670 		default:
   4671 			break;
   4672 		}
   4673 
   4674 		if (!cage_full)
   4675 			return;
   4676 	}
   4677 
   4678 	err = hw->phy.ops.identify_sfp(hw);
   4679 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4680 		device_printf(dev,
   4681 		    "Unsupported SFP+ module type was detected.\n");
   4682 		return;
   4683 	}
   4684 
   4685 	if (hw->mac.type == ixgbe_mac_82598EB)
   4686 		err = hw->phy.ops.reset(hw);
   4687 	else
   4688 		err = hw->mac.ops.setup_sfp(hw);
   4689 
   4690 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4691 		device_printf(dev,
   4692 		    "Setup failure - unsupported SFP+ module type.\n");
   4693 		return;
   4694 	}
   4695 	softint_schedule(adapter->msf_si);
   4696 } /* ixgbe_handle_mod */
   4697 
   4698 
   4699 /************************************************************************
   4700  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4701  ************************************************************************/
   4702 static void
   4703 ixgbe_handle_msf(void *context)
   4704 {
   4705 	struct adapter	*adapter = context;
   4706 	struct ixgbe_hw *hw = &adapter->hw;
   4707 	u32		autoneg;
   4708 	bool		negotiate;
   4709 
   4710 	IXGBE_CORE_LOCK(adapter);
   4711 	++adapter->msf_sicount.ev_count;
   4712 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4713 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4714 
   4715 	autoneg = hw->phy.autoneg_advertised;
   4716 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4717 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4718 	else
   4719 		negotiate = 0;
   4720 	if (hw->mac.ops.setup_link)
   4721 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4722 
   4723 	/* Adjust media types shown in ifconfig */
   4724 	ifmedia_removeall(&adapter->media);
   4725 	ixgbe_add_media_types(adapter);
   4726 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4727 	IXGBE_CORE_UNLOCK(adapter);
   4728 } /* ixgbe_handle_msf */
   4729 
   4730 /************************************************************************
   4731  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4732  ************************************************************************/
   4733 static void
   4734 ixgbe_handle_phy(void *context)
   4735 {
   4736 	struct adapter	*adapter = context;
   4737 	struct ixgbe_hw *hw = &adapter->hw;
   4738 	int error;
   4739 
   4740 	++adapter->phy_sicount.ev_count;
   4741 	error = hw->phy.ops.handle_lasi(hw);
   4742 	if (error == IXGBE_ERR_OVERTEMP)
   4743 		device_printf(adapter->dev,
   4744 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4745 		    " PHY will downshift to lower power state!\n");
   4746 	else if (error)
   4747 		device_printf(adapter->dev,
   4748 		    "Error handling LASI interrupt: %d\n", error);
   4749 } /* ixgbe_handle_phy */
   4750 
   4751 static void
   4752 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4753 {
   4754 	struct adapter *adapter = ifp->if_softc;
   4755 
   4756 	IXGBE_CORE_LOCK(adapter);
   4757 	ixgbe_stop(adapter);
   4758 	IXGBE_CORE_UNLOCK(adapter);
   4759 }
   4760 
   4761 /************************************************************************
   4762  * ixgbe_stop - Stop the hardware
   4763  *
   4764  *   Disables all traffic on the adapter by issuing a
   4765  *   global reset on the MAC and deallocates TX/RX buffers.
   4766  ************************************************************************/
   4767 static void
   4768 ixgbe_stop(void *arg)
   4769 {
   4770 	struct ifnet	*ifp;
   4771 	struct adapter	*adapter = arg;
   4772 	struct ixgbe_hw *hw = &adapter->hw;
   4773 
   4774 	ifp = adapter->ifp;
   4775 
   4776 	KASSERT(mutex_owned(&adapter->core_mtx));
   4777 
   4778 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4779 	ixgbe_disable_intr(adapter);
   4780 	callout_stop(&adapter->timer);
   4781 
   4782 	/* Let the stack know...*/
   4783 	ifp->if_flags &= ~IFF_RUNNING;
   4784 
   4785 	ixgbe_reset_hw(hw);
   4786 	hw->adapter_stopped = FALSE;
   4787 	ixgbe_stop_adapter(hw);
   4788 	if (hw->mac.type == ixgbe_mac_82599EB)
   4789 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4790 	/* Turn off the laser - noop with no optics */
   4791 	ixgbe_disable_tx_laser(hw);
   4792 
   4793 	/* Update the stack */
   4794 	adapter->link_up = FALSE;
   4795 	ixgbe_update_link_status(adapter);
   4796 
   4797 	/* reprogram the RAR[0] in case user changed it. */
   4798 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4799 
   4800 	return;
   4801 } /* ixgbe_stop */
   4802 
   4803 /************************************************************************
   4804  * ixgbe_update_link_status - Update OS on link state
   4805  *
   4806  * Note: Only updates the OS on the cached link state.
   4807  *	 The real check of the hardware only happens with
   4808  *	 a link interrupt.
   4809  ************************************************************************/
   4810 static void
   4811 ixgbe_update_link_status(struct adapter *adapter)
   4812 {
   4813 	struct ifnet	*ifp = adapter->ifp;
   4814 	device_t	dev = adapter->dev;
   4815 	struct ixgbe_hw *hw = &adapter->hw;
   4816 
   4817 	KASSERT(mutex_owned(&adapter->core_mtx));
   4818 
   4819 	if (adapter->link_up) {
   4820 		if (adapter->link_active != LINK_STATE_UP) {
   4821 			/*
   4822 			 * To eliminate influence of the previous state
   4823 			 * in the same way as ixgbe_init_locked().
   4824 			 */
   4825 			struct ix_queue	*que = adapter->queues;
   4826 			for (int i = 0; i < adapter->num_queues; i++, que++)
   4827 				que->eitr_setting = 0;
   4828 
   4829 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4830 				/*
   4831 				 *  Discard count for both MAC Local Fault and
   4832 				 * Remote Fault because those registers are
   4833 				 * valid only when the link speed is up and
   4834 				 * 10Gbps.
   4835 				 */
   4836 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4837 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4838 			}
   4839 
   4840 			if (bootverbose) {
   4841 				const char *bpsmsg;
   4842 
   4843 				switch (adapter->link_speed) {
   4844 				case IXGBE_LINK_SPEED_10GB_FULL:
   4845 					bpsmsg = "10 Gbps";
   4846 					break;
   4847 				case IXGBE_LINK_SPEED_5GB_FULL:
   4848 					bpsmsg = "5 Gbps";
   4849 					break;
   4850 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4851 					bpsmsg = "2.5 Gbps";
   4852 					break;
   4853 				case IXGBE_LINK_SPEED_1GB_FULL:
   4854 					bpsmsg = "1 Gbps";
   4855 					break;
   4856 				case IXGBE_LINK_SPEED_100_FULL:
   4857 					bpsmsg = "100 Mbps";
   4858 					break;
   4859 				case IXGBE_LINK_SPEED_10_FULL:
   4860 					bpsmsg = "10 Mbps";
   4861 					break;
   4862 				default:
   4863 					bpsmsg = "unknown speed";
   4864 					break;
   4865 				}
   4866 				device_printf(dev, "Link is up %s %s \n",
   4867 				    bpsmsg, "Full Duplex");
   4868 			}
   4869 			adapter->link_active = LINK_STATE_UP;
   4870 			/* Update any Flow Control changes */
   4871 			ixgbe_fc_enable(&adapter->hw);
   4872 			/* Update DMA coalescing config */
   4873 			ixgbe_config_dmac(adapter);
   4874 			if_link_state_change(ifp, LINK_STATE_UP);
   4875 
   4876 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4877 				ixgbe_ping_all_vfs(adapter);
   4878 		}
   4879 	} else {
   4880 		/*
   4881 		 * Do it when link active changes to DOWN. i.e.
   4882 		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
   4883 		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
   4884 		 */
   4885 		if (adapter->link_active != LINK_STATE_DOWN) {
   4886 			if (bootverbose)
   4887 				device_printf(dev, "Link is Down\n");
   4888 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4889 			adapter->link_active = LINK_STATE_DOWN;
   4890 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4891 				ixgbe_ping_all_vfs(adapter);
   4892 			ixgbe_drain_all(adapter);
   4893 		}
   4894 	}
   4895 } /* ixgbe_update_link_status */
   4896 
   4897 /************************************************************************
   4898  * ixgbe_config_dmac - Configure DMA Coalescing
   4899  ************************************************************************/
   4900 static void
   4901 ixgbe_config_dmac(struct adapter *adapter)
   4902 {
   4903 	struct ixgbe_hw *hw = &adapter->hw;
   4904 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4905 
   4906 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4907 		return;
   4908 
   4909 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4910 	    dcfg->link_speed ^ adapter->link_speed) {
   4911 		dcfg->watchdog_timer = adapter->dmac;
   4912 		dcfg->fcoe_en = false;
   4913 		dcfg->link_speed = adapter->link_speed;
   4914 		dcfg->num_tcs = 1;
   4915 
   4916 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4917 		    dcfg->watchdog_timer, dcfg->link_speed);
   4918 
   4919 		hw->mac.ops.dmac_config(hw);
   4920 	}
   4921 } /* ixgbe_config_dmac */
   4922 
   4923 /************************************************************************
   4924  * ixgbe_enable_intr
   4925  ************************************************************************/
   4926 static void
   4927 ixgbe_enable_intr(struct adapter *adapter)
   4928 {
   4929 	struct ixgbe_hw	*hw = &adapter->hw;
   4930 	struct ix_queue	*que = adapter->queues;
   4931 	u32		mask, fwsm;
   4932 
   4933 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4934 
   4935 	switch (adapter->hw.mac.type) {
   4936 	case ixgbe_mac_82599EB:
   4937 		mask |= IXGBE_EIMS_ECC;
   4938 		/* Temperature sensor on some adapters */
   4939 		mask |= IXGBE_EIMS_GPI_SDP0;
   4940 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4941 		mask |= IXGBE_EIMS_GPI_SDP1;
   4942 		mask |= IXGBE_EIMS_GPI_SDP2;
   4943 		break;
   4944 	case ixgbe_mac_X540:
   4945 		/* Detect if Thermal Sensor is enabled */
   4946 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4947 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4948 			mask |= IXGBE_EIMS_TS;
   4949 		mask |= IXGBE_EIMS_ECC;
   4950 		break;
   4951 	case ixgbe_mac_X550:
   4952 		/* MAC thermal sensor is automatically enabled */
   4953 		mask |= IXGBE_EIMS_TS;
   4954 		mask |= IXGBE_EIMS_ECC;
   4955 		break;
   4956 	case ixgbe_mac_X550EM_x:
   4957 	case ixgbe_mac_X550EM_a:
   4958 		/* Some devices use SDP0 for important information */
   4959 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4960 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4961 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4962 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4963 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4964 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4965 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4966 		mask |= IXGBE_EIMS_ECC;
   4967 		break;
   4968 	default:
   4969 		break;
   4970 	}
   4971 
   4972 	/* Enable Fan Failure detection */
   4973 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4974 		mask |= IXGBE_EIMS_GPI_SDP1;
   4975 	/* Enable SR-IOV */
   4976 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4977 		mask |= IXGBE_EIMS_MAILBOX;
   4978 	/* Enable Flow Director */
   4979 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4980 		mask |= IXGBE_EIMS_FLOW_DIR;
   4981 
   4982 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4983 
   4984 	/* With MSI-X we use auto clear */
   4985 	if (adapter->msix_mem) {
   4986 		mask = IXGBE_EIMS_ENABLE_MASK;
   4987 		/* Don't autoclear Link */
   4988 		mask &= ~IXGBE_EIMS_OTHER;
   4989 		mask &= ~IXGBE_EIMS_LSC;
   4990 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4991 			mask &= ~IXGBE_EIMS_MAILBOX;
   4992 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4993 	}
   4994 
   4995 	/*
   4996 	 * Now enable all queues, this is done separately to
   4997 	 * allow for handling the extended (beyond 32) MSI-X
   4998 	 * vectors that can be used by 82599
   4999 	 */
   5000 	for (int i = 0; i < adapter->num_queues; i++, que++)
   5001 		ixgbe_enable_queue(adapter, que->msix);
   5002 
   5003 	IXGBE_WRITE_FLUSH(hw);
   5004 
   5005 } /* ixgbe_enable_intr */
   5006 
   5007 /************************************************************************
   5008  * ixgbe_disable_intr_internal
   5009  ************************************************************************/
   5010 static void
   5011 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
   5012 {
   5013 	struct ix_queue	*que = adapter->queues;
   5014 
   5015 	/* disable interrupts other than queues */
   5016 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
   5017 
   5018 	if (adapter->msix_mem)
   5019 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   5020 
   5021 	for (int i = 0; i < adapter->num_queues; i++, que++)
   5022 		ixgbe_disable_queue_internal(adapter, que->msix, nestok);
   5023 
   5024 	IXGBE_WRITE_FLUSH(&adapter->hw);
   5025 
   5026 } /* ixgbe_do_disable_intr_internal */
   5027 
   5028 /************************************************************************
   5029  * ixgbe_disable_intr
   5030  ************************************************************************/
   5031 static void
   5032 ixgbe_disable_intr(struct adapter *adapter)
   5033 {
   5034 
   5035 	ixgbe_disable_intr_internal(adapter, true);
   5036 } /* ixgbe_disable_intr */
   5037 
   5038 /************************************************************************
   5039  * ixgbe_ensure_disabled_intr
   5040  ************************************************************************/
   5041 void
   5042 ixgbe_ensure_disabled_intr(struct adapter *adapter)
   5043 {
   5044 
   5045 	ixgbe_disable_intr_internal(adapter, false);
   5046 } /* ixgbe_ensure_disabled_intr */
   5047 
   5048 /************************************************************************
   5049  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   5050  ************************************************************************/
   5051 static int
   5052 ixgbe_legacy_irq(void *arg)
   5053 {
   5054 	struct ix_queue *que = arg;
   5055 	struct adapter	*adapter = que->adapter;
   5056 	struct ixgbe_hw	*hw = &adapter->hw;
   5057 	struct ifnet	*ifp = adapter->ifp;
   5058 	struct		tx_ring *txr = adapter->tx_rings;
   5059 	bool		more = false;
   5060 	u32		eicr, eicr_mask;
   5061 
   5062 	/* Silicon errata #26 on 82598 */
   5063 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   5064 
   5065 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   5066 
   5067 	adapter->stats.pf.legint.ev_count++;
   5068 	++que->irqs.ev_count;
   5069 	if (eicr == 0) {
   5070 		adapter->stats.pf.intzero.ev_count++;
   5071 		if ((ifp->if_flags & IFF_UP) != 0)
   5072 			ixgbe_enable_intr(adapter);
   5073 		return 0;
   5074 	}
   5075 
   5076 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   5077 		/*
   5078 		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
   5079 		 */
   5080 		que->txrx_use_workqueue = adapter->txrx_use_workqueue;
   5081 
   5082 #ifdef __NetBSD__
   5083 		/* Don't run ixgbe_rxeof in interrupt context */
   5084 		more = true;
   5085 #else
   5086 		more = ixgbe_rxeof(que);
   5087 #endif
   5088 
   5089 		IXGBE_TX_LOCK(txr);
   5090 		ixgbe_txeof(txr);
   5091 #ifdef notyet
   5092 		if (!ixgbe_ring_empty(ifp, txr->br))
   5093 			ixgbe_start_locked(ifp, txr);
   5094 #endif
   5095 		IXGBE_TX_UNLOCK(txr);
   5096 	}
   5097 
   5098 	/* Check for fan failure */
   5099 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   5100 		ixgbe_check_fan_failure(adapter, eicr, true);
   5101 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   5102 	}
   5103 
   5104 	/* Link status change */
   5105 	if (eicr & IXGBE_EICR_LSC)
   5106 		softint_schedule(adapter->link_si);
   5107 
   5108 	if (ixgbe_is_sfp(hw)) {
   5109 		/* Pluggable optics-related interrupt */
   5110 		if (hw->mac.type >= ixgbe_mac_X540)
   5111 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   5112 		else
   5113 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   5114 
   5115 		if (eicr & eicr_mask) {
   5116 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   5117 			softint_schedule(adapter->mod_si);
   5118 		}
   5119 
   5120 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   5121 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   5122 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   5123 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   5124 			softint_schedule(adapter->msf_si);
   5125 		}
   5126 	}
   5127 
   5128 	/* External PHY interrupt */
   5129 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   5130 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   5131 		softint_schedule(adapter->phy_si);
   5132 
   5133 	if (more) {
   5134 		que->req.ev_count++;
   5135 		ixgbe_sched_handle_que(adapter, que);
   5136 	} else
   5137 		ixgbe_enable_intr(adapter);
   5138 
   5139 	return 1;
   5140 } /* ixgbe_legacy_irq */
   5141 
   5142 /************************************************************************
   5143  * ixgbe_free_pciintr_resources
   5144  ************************************************************************/
   5145 static void
   5146 ixgbe_free_pciintr_resources(struct adapter *adapter)
   5147 {
   5148 	struct ix_queue *que = adapter->queues;
   5149 	int		rid;
   5150 
   5151 	/*
   5152 	 * Release all msix queue resources:
   5153 	 */
   5154 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   5155 		if (que->res != NULL) {
   5156 			pci_intr_disestablish(adapter->osdep.pc,
   5157 			    adapter->osdep.ihs[i]);
   5158 			adapter->osdep.ihs[i] = NULL;
   5159 		}
   5160 	}
   5161 
   5162 	/* Clean the Legacy or Link interrupt last */
   5163 	if (adapter->vector) /* we are doing MSIX */
   5164 		rid = adapter->vector;
   5165 	else
   5166 		rid = 0;
   5167 
   5168 	if (adapter->osdep.ihs[rid] != NULL) {
   5169 		pci_intr_disestablish(adapter->osdep.pc,
   5170 		    adapter->osdep.ihs[rid]);
   5171 		adapter->osdep.ihs[rid] = NULL;
   5172 	}
   5173 
   5174 	if (adapter->osdep.intrs != NULL) {
   5175 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   5176 		    adapter->osdep.nintrs);
   5177 		adapter->osdep.intrs = NULL;
   5178 	}
   5179 } /* ixgbe_free_pciintr_resources */
   5180 
   5181 /************************************************************************
   5182  * ixgbe_free_pci_resources
   5183  ************************************************************************/
   5184 static void
   5185 ixgbe_free_pci_resources(struct adapter *adapter)
   5186 {
   5187 
   5188 	ixgbe_free_pciintr_resources(adapter);
   5189 
   5190 	if (adapter->osdep.mem_size != 0) {
   5191 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   5192 		    adapter->osdep.mem_bus_space_handle,
   5193 		    adapter->osdep.mem_size);
   5194 	}
   5195 
   5196 } /* ixgbe_free_pci_resources */
   5197 
   5198 /************************************************************************
   5199  * ixgbe_set_sysctl_value
   5200  ************************************************************************/
   5201 static void
   5202 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   5203     const char *description, int *limit, int value)
   5204 {
   5205 	device_t dev =	adapter->dev;
   5206 	struct sysctllog **log;
   5207 	const struct sysctlnode *rnode, *cnode;
   5208 
   5209 	/*
   5210 	 * It's not required to check recovery mode because this function never
   5211 	 * touches hardware.
   5212 	 */
   5213 
   5214 	log = &adapter->sysctllog;
   5215 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5216 		aprint_error_dev(dev, "could not create sysctl root\n");
   5217 		return;
   5218 	}
   5219 	if (sysctl_createv(log, 0, &rnode, &cnode,
   5220 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   5221 	    name, SYSCTL_DESCR(description),
   5222 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   5223 		aprint_error_dev(dev, "could not create sysctl\n");
   5224 	*limit = value;
   5225 } /* ixgbe_set_sysctl_value */
   5226 
   5227 /************************************************************************
   5228  * ixgbe_sysctl_flowcntl
   5229  *
   5230  *   SYSCTL wrapper around setting Flow Control
   5231  ************************************************************************/
   5232 static int
   5233 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   5234 {
   5235 	struct sysctlnode node = *rnode;
   5236 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5237 	int error, fc;
   5238 
   5239 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5240 		return (EPERM);
   5241 
   5242 	fc = adapter->hw.fc.current_mode;
   5243 	node.sysctl_data = &fc;
   5244 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5245 	if (error != 0 || newp == NULL)
   5246 		return error;
   5247 
   5248 	/* Don't bother if it's not changed */
   5249 	if (fc == adapter->hw.fc.current_mode)
   5250 		return (0);
   5251 
   5252 	return ixgbe_set_flowcntl(adapter, fc);
   5253 } /* ixgbe_sysctl_flowcntl */
   5254 
   5255 /************************************************************************
   5256  * ixgbe_set_flowcntl - Set flow control
   5257  *
   5258  *   Flow control values:
   5259  *     0 - off
   5260  *     1 - rx pause
   5261  *     2 - tx pause
   5262  *     3 - full
   5263  ************************************************************************/
   5264 static int
   5265 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   5266 {
   5267 	switch (fc) {
   5268 		case ixgbe_fc_rx_pause:
   5269 		case ixgbe_fc_tx_pause:
   5270 		case ixgbe_fc_full:
   5271 			adapter->hw.fc.requested_mode = fc;
   5272 			if (adapter->num_queues > 1)
   5273 				ixgbe_disable_rx_drop(adapter);
   5274 			break;
   5275 		case ixgbe_fc_none:
   5276 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5277 			if (adapter->num_queues > 1)
   5278 				ixgbe_enable_rx_drop(adapter);
   5279 			break;
   5280 		default:
   5281 			return (EINVAL);
   5282 	}
   5283 
   5284 #if 0 /* XXX NetBSD */
   5285 	/* Don't autoneg if forcing a value */
   5286 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   5287 #endif
   5288 	ixgbe_fc_enable(&adapter->hw);
   5289 
   5290 	return (0);
   5291 } /* ixgbe_set_flowcntl */
   5292 
   5293 /************************************************************************
   5294  * ixgbe_enable_rx_drop
   5295  *
   5296  *   Enable the hardware to drop packets when the buffer is
   5297  *   full. This is useful with multiqueue, so that no single
   5298  *   queue being full stalls the entire RX engine. We only
   5299  *   enable this when Multiqueue is enabled AND Flow Control
   5300  *   is disabled.
   5301  ************************************************************************/
   5302 static void
   5303 ixgbe_enable_rx_drop(struct adapter *adapter)
   5304 {
   5305 	struct ixgbe_hw *hw = &adapter->hw;
   5306 	struct rx_ring	*rxr;
   5307 	u32		srrctl;
   5308 
   5309 	for (int i = 0; i < adapter->num_queues; i++) {
   5310 		rxr = &adapter->rx_rings[i];
   5311 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5312 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   5313 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5314 	}
   5315 
   5316 	/* enable drop for each vf */
   5317 	for (int i = 0; i < adapter->num_vfs; i++) {
   5318 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5319 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   5320 		    IXGBE_QDE_ENABLE));
   5321 	}
   5322 } /* ixgbe_enable_rx_drop */
   5323 
   5324 /************************************************************************
   5325  * ixgbe_disable_rx_drop
   5326  ************************************************************************/
   5327 static void
   5328 ixgbe_disable_rx_drop(struct adapter *adapter)
   5329 {
   5330 	struct ixgbe_hw *hw = &adapter->hw;
   5331 	struct rx_ring	*rxr;
   5332 	u32		srrctl;
   5333 
   5334 	for (int i = 0; i < adapter->num_queues; i++) {
   5335 		rxr = &adapter->rx_rings[i];
   5336 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5337 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   5338 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5339 	}
   5340 
   5341 	/* disable drop for each vf */
   5342 	for (int i = 0; i < adapter->num_vfs; i++) {
   5343 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5344 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   5345 	}
   5346 } /* ixgbe_disable_rx_drop */
   5347 
   5348 /************************************************************************
   5349  * ixgbe_sysctl_advertise
   5350  *
   5351  *   SYSCTL wrapper around setting advertised speed
   5352  ************************************************************************/
   5353 static int
   5354 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   5355 {
   5356 	struct sysctlnode node = *rnode;
   5357 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5358 	int	       error = 0, advertise;
   5359 
   5360 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5361 		return (EPERM);
   5362 
   5363 	advertise = adapter->advertise;
   5364 	node.sysctl_data = &advertise;
   5365 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5366 	if (error != 0 || newp == NULL)
   5367 		return error;
   5368 
   5369 	return ixgbe_set_advertise(adapter, advertise);
   5370 } /* ixgbe_sysctl_advertise */
   5371 
   5372 /************************************************************************
   5373  * ixgbe_set_advertise - Control advertised link speed
   5374  *
   5375  *   Flags:
   5376  *     0x00 - Default (all capable link speed)
   5377  *     0x01 - advertise 100 Mb
   5378  *     0x02 - advertise 1G
   5379  *     0x04 - advertise 10G
   5380  *     0x08 - advertise 10 Mb
   5381  *     0x10 - advertise 2.5G
   5382  *     0x20 - advertise 5G
   5383  ************************************************************************/
   5384 static int
   5385 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5386 {
   5387 	device_t	 dev;
   5388 	struct ixgbe_hw	 *hw;
   5389 	ixgbe_link_speed speed = 0;
   5390 	ixgbe_link_speed link_caps = 0;
   5391 	s32		 err = IXGBE_NOT_IMPLEMENTED;
   5392 	bool		 negotiate = FALSE;
   5393 
   5394 	/* Checks to validate new value */
   5395 	if (adapter->advertise == advertise) /* no change */
   5396 		return (0);
   5397 
   5398 	dev = adapter->dev;
   5399 	hw = &adapter->hw;
   5400 
   5401 	/* No speed changes for backplane media */
   5402 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5403 		return (ENODEV);
   5404 
   5405 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5406 	    (hw->phy.multispeed_fiber))) {
   5407 		device_printf(dev,
   5408 		    "Advertised speed can only be set on copper or "
   5409 		    "multispeed fiber media types.\n");
   5410 		return (EINVAL);
   5411 	}
   5412 
   5413 	if (advertise < 0x0 || advertise > 0x2f) {
   5414 		device_printf(dev,
   5415 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5416 		return (EINVAL);
   5417 	}
   5418 
   5419 	if (hw->mac.ops.get_link_capabilities) {
   5420 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5421 		    &negotiate);
   5422 		if (err != IXGBE_SUCCESS) {
   5423 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5424 			return (ENODEV);
   5425 		}
   5426 	}
   5427 
   5428 	/* Set new value and report new advertised mode */
   5429 	if (advertise & 0x1) {
   5430 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5431 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5432 			return (EINVAL);
   5433 		}
   5434 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5435 	}
   5436 	if (advertise & 0x2) {
   5437 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5438 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5439 			return (EINVAL);
   5440 		}
   5441 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5442 	}
   5443 	if (advertise & 0x4) {
   5444 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5445 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5446 			return (EINVAL);
   5447 		}
   5448 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5449 	}
   5450 	if (advertise & 0x8) {
   5451 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5452 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5453 			return (EINVAL);
   5454 		}
   5455 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5456 	}
   5457 	if (advertise & 0x10) {
   5458 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5459 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5460 			return (EINVAL);
   5461 		}
   5462 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5463 	}
   5464 	if (advertise & 0x20) {
   5465 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5466 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5467 			return (EINVAL);
   5468 		}
   5469 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5470 	}
   5471 	if (advertise == 0)
   5472 		speed = link_caps; /* All capable link speed */
   5473 
   5474 	hw->mac.autotry_restart = TRUE;
   5475 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5476 	adapter->advertise = advertise;
   5477 
   5478 	return (0);
   5479 } /* ixgbe_set_advertise */
   5480 
   5481 /************************************************************************
   5482  * ixgbe_get_advertise - Get current advertised speed settings
   5483  *
   5484  *   Formatted for sysctl usage.
   5485  *   Flags:
   5486  *     0x01 - advertise 100 Mb
   5487  *     0x02 - advertise 1G
   5488  *     0x04 - advertise 10G
   5489  *     0x08 - advertise 10 Mb (yes, Mb)
   5490  *     0x10 - advertise 2.5G
   5491  *     0x20 - advertise 5G
   5492  ************************************************************************/
   5493 static int
   5494 ixgbe_get_advertise(struct adapter *adapter)
   5495 {
   5496 	struct ixgbe_hw	 *hw = &adapter->hw;
   5497 	int		 speed;
   5498 	ixgbe_link_speed link_caps = 0;
   5499 	s32		 err;
   5500 	bool		 negotiate = FALSE;
   5501 
   5502 	/*
   5503 	 * Advertised speed means nothing unless it's copper or
   5504 	 * multi-speed fiber
   5505 	 */
   5506 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5507 	    !(hw->phy.multispeed_fiber))
   5508 		return (0);
   5509 
   5510 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5511 	if (err != IXGBE_SUCCESS)
   5512 		return (0);
   5513 
   5514 	speed =
   5515 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5516 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5517 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5518 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5519 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5520 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5521 
   5522 	return speed;
   5523 } /* ixgbe_get_advertise */
   5524 
   5525 /************************************************************************
   5526  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5527  *
   5528  *   Control values:
   5529  *     0/1 - off / on (use default value of 1000)
   5530  *
   5531  *     Legal timer values are:
   5532  *     50,100,250,500,1000,2000,5000,10000
   5533  *
   5534  *     Turning off interrupt moderation will also turn this off.
   5535  ************************************************************************/
   5536 static int
   5537 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5538 {
   5539 	struct sysctlnode node = *rnode;
   5540 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5541 	struct ifnet   *ifp = adapter->ifp;
   5542 	int	       error;
   5543 	int	       newval;
   5544 
   5545 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5546 		return (EPERM);
   5547 
   5548 	newval = adapter->dmac;
   5549 	node.sysctl_data = &newval;
   5550 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5551 	if ((error) || (newp == NULL))
   5552 		return (error);
   5553 
   5554 	switch (newval) {
   5555 	case 0:
   5556 		/* Disabled */
   5557 		adapter->dmac = 0;
   5558 		break;
   5559 	case 1:
   5560 		/* Enable and use default */
   5561 		adapter->dmac = 1000;
   5562 		break;
   5563 	case 50:
   5564 	case 100:
   5565 	case 250:
   5566 	case 500:
   5567 	case 1000:
   5568 	case 2000:
   5569 	case 5000:
   5570 	case 10000:
   5571 		/* Legal values - allow */
   5572 		adapter->dmac = newval;
   5573 		break;
   5574 	default:
   5575 		/* Do nothing, illegal value */
   5576 		return (EINVAL);
   5577 	}
   5578 
   5579 	/* Re-initialize hardware if it's already running */
   5580 	if (ifp->if_flags & IFF_RUNNING)
   5581 		ifp->if_init(ifp);
   5582 
   5583 	return (0);
   5584 }
   5585 
   5586 #ifdef IXGBE_DEBUG
   5587 /************************************************************************
   5588  * ixgbe_sysctl_power_state
   5589  *
   5590  *   Sysctl to test power states
   5591  *   Values:
   5592  *     0      - set device to D0
   5593  *     3      - set device to D3
   5594  *     (none) - get current device power state
   5595  ************************************************************************/
   5596 static int
   5597 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   5598 {
   5599 #ifdef notyet
   5600 	struct sysctlnode node = *rnode;
   5601 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5602 	device_t       dev =  adapter->dev;
   5603 	int	       curr_ps, new_ps, error = 0;
   5604 
   5605 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5606 		return (EPERM);
   5607 
   5608 	curr_ps = new_ps = pci_get_powerstate(dev);
   5609 
   5610 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5611 	if ((error) || (req->newp == NULL))
   5612 		return (error);
   5613 
   5614 	if (new_ps == curr_ps)
   5615 		return (0);
   5616 
   5617 	if (new_ps == 3 && curr_ps == 0)
   5618 		error = DEVICE_SUSPEND(dev);
   5619 	else if (new_ps == 0 && curr_ps == 3)
   5620 		error = DEVICE_RESUME(dev);
   5621 	else
   5622 		return (EINVAL);
   5623 
   5624 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5625 
   5626 	return (error);
   5627 #else
   5628 	return 0;
   5629 #endif
   5630 } /* ixgbe_sysctl_power_state */
   5631 #endif
   5632 
   5633 /************************************************************************
   5634  * ixgbe_sysctl_wol_enable
   5635  *
   5636  *   Sysctl to enable/disable the WoL capability,
   5637  *   if supported by the adapter.
   5638  *
   5639  *   Values:
   5640  *     0 - disabled
   5641  *     1 - enabled
   5642  ************************************************************************/
   5643 static int
   5644 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5645 {
   5646 	struct sysctlnode node = *rnode;
   5647 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5648 	struct ixgbe_hw *hw = &adapter->hw;
   5649 	bool		new_wol_enabled;
   5650 	int		error = 0;
   5651 
   5652 	/*
   5653 	 * It's not required to check recovery mode because this function never
   5654 	 * touches hardware.
   5655 	 */
   5656 	new_wol_enabled = hw->wol_enabled;
   5657 	node.sysctl_data = &new_wol_enabled;
   5658 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5659 	if ((error) || (newp == NULL))
   5660 		return (error);
   5661 	if (new_wol_enabled == hw->wol_enabled)
   5662 		return (0);
   5663 
   5664 	if (new_wol_enabled && !adapter->wol_support)
   5665 		return (ENODEV);
   5666 	else
   5667 		hw->wol_enabled = new_wol_enabled;
   5668 
   5669 	return (0);
   5670 } /* ixgbe_sysctl_wol_enable */
   5671 
   5672 /************************************************************************
   5673  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5674  *
   5675  *   Sysctl to enable/disable the types of packets that the
   5676  *   adapter will wake up on upon receipt.
   5677  *   Flags:
   5678  *     0x1  - Link Status Change
   5679  *     0x2  - Magic Packet
   5680  *     0x4  - Direct Exact
   5681  *     0x8  - Directed Multicast
   5682  *     0x10 - Broadcast
   5683  *     0x20 - ARP/IPv4 Request Packet
   5684  *     0x40 - Direct IPv4 Packet
   5685  *     0x80 - Direct IPv6 Packet
   5686  *
   5687  *   Settings not listed above will cause the sysctl to return an error.
   5688  ************************************************************************/
   5689 static int
   5690 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5691 {
   5692 	struct sysctlnode node = *rnode;
   5693 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5694 	int error = 0;
   5695 	u32 new_wufc;
   5696 
   5697 	/*
   5698 	 * It's not required to check recovery mode because this function never
   5699 	 * touches hardware.
   5700 	 */
   5701 	new_wufc = adapter->wufc;
   5702 	node.sysctl_data = &new_wufc;
   5703 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5704 	if ((error) || (newp == NULL))
   5705 		return (error);
   5706 	if (new_wufc == adapter->wufc)
   5707 		return (0);
   5708 
   5709 	if (new_wufc & 0xffffff00)
   5710 		return (EINVAL);
   5711 
   5712 	new_wufc &= 0xff;
   5713 	new_wufc |= (0xffffff & adapter->wufc);
   5714 	adapter->wufc = new_wufc;
   5715 
   5716 	return (0);
   5717 } /* ixgbe_sysctl_wufc */
   5718 
   5719 #ifdef IXGBE_DEBUG
   5720 /************************************************************************
   5721  * ixgbe_sysctl_print_rss_config
   5722  ************************************************************************/
   5723 static int
   5724 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5725 {
   5726 #ifdef notyet
   5727 	struct sysctlnode node = *rnode;
   5728 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5729 	struct ixgbe_hw *hw = &adapter->hw;
   5730 	device_t	dev = adapter->dev;
   5731 	struct sbuf	*buf;
   5732 	int		error = 0, reta_size;
   5733 	u32		reg;
   5734 
   5735 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5736 		return (EPERM);
   5737 
   5738 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5739 	if (!buf) {
   5740 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5741 		return (ENOMEM);
   5742 	}
   5743 
   5744 	// TODO: use sbufs to make a string to print out
   5745 	/* Set multiplier for RETA setup and table size based on MAC */
   5746 	switch (adapter->hw.mac.type) {
   5747 	case ixgbe_mac_X550:
   5748 	case ixgbe_mac_X550EM_x:
   5749 	case ixgbe_mac_X550EM_a:
   5750 		reta_size = 128;
   5751 		break;
   5752 	default:
   5753 		reta_size = 32;
   5754 		break;
   5755 	}
   5756 
   5757 	/* Print out the redirection table */
   5758 	sbuf_cat(buf, "\n");
   5759 	for (int i = 0; i < reta_size; i++) {
   5760 		if (i < 32) {
   5761 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5762 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5763 		} else {
   5764 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5765 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5766 		}
   5767 	}
   5768 
   5769 	// TODO: print more config
   5770 
   5771 	error = sbuf_finish(buf);
   5772 	if (error)
   5773 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5774 
   5775 	sbuf_delete(buf);
   5776 #endif
   5777 	return (0);
   5778 } /* ixgbe_sysctl_print_rss_config */
   5779 #endif /* IXGBE_DEBUG */
   5780 
   5781 /************************************************************************
   5782  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5783  *
   5784  *   For X552/X557-AT devices using an external PHY
   5785  ************************************************************************/
   5786 static int
   5787 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5788 {
   5789 	struct sysctlnode node = *rnode;
   5790 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5791 	struct ixgbe_hw *hw = &adapter->hw;
   5792 	int val;
   5793 	u16 reg;
   5794 	int		error;
   5795 
   5796 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5797 		return (EPERM);
   5798 
   5799 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5800 		device_printf(adapter->dev,
   5801 		    "Device has no supported external thermal sensor.\n");
   5802 		return (ENODEV);
   5803 	}
   5804 
   5805 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5806 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5807 		device_printf(adapter->dev,
   5808 		    "Error reading from PHY's current temperature register\n");
   5809 		return (EAGAIN);
   5810 	}
   5811 
   5812 	node.sysctl_data = &val;
   5813 
   5814 	/* Shift temp for output */
   5815 	val = reg >> 8;
   5816 
   5817 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5818 	if ((error) || (newp == NULL))
   5819 		return (error);
   5820 
   5821 	return (0);
   5822 } /* ixgbe_sysctl_phy_temp */
   5823 
   5824 /************************************************************************
   5825  * ixgbe_sysctl_phy_overtemp_occurred
   5826  *
   5827  *   Reports (directly from the PHY) whether the current PHY
   5828  *   temperature is over the overtemp threshold.
   5829  ************************************************************************/
   5830 static int
   5831 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5832 {
   5833 	struct sysctlnode node = *rnode;
   5834 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5835 	struct ixgbe_hw *hw = &adapter->hw;
   5836 	int val, error;
   5837 	u16 reg;
   5838 
   5839 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5840 		return (EPERM);
   5841 
   5842 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5843 		device_printf(adapter->dev,
   5844 		    "Device has no supported external thermal sensor.\n");
   5845 		return (ENODEV);
   5846 	}
   5847 
   5848 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5849 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5850 		device_printf(adapter->dev,
   5851 		    "Error reading from PHY's temperature status register\n");
   5852 		return (EAGAIN);
   5853 	}
   5854 
   5855 	node.sysctl_data = &val;
   5856 
   5857 	/* Get occurrence bit */
   5858 	val = !!(reg & 0x4000);
   5859 
   5860 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5861 	if ((error) || (newp == NULL))
   5862 		return (error);
   5863 
   5864 	return (0);
   5865 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5866 
   5867 /************************************************************************
   5868  * ixgbe_sysctl_eee_state
   5869  *
   5870  *   Sysctl to set EEE power saving feature
   5871  *   Values:
   5872  *     0      - disable EEE
   5873  *     1      - enable EEE
   5874  *     (none) - get current device EEE state
   5875  ************************************************************************/
   5876 static int
   5877 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5878 {
   5879 	struct sysctlnode node = *rnode;
   5880 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5881 	struct ifnet   *ifp = adapter->ifp;
   5882 	device_t       dev = adapter->dev;
   5883 	int	       curr_eee, new_eee, error = 0;
   5884 	s32	       retval;
   5885 
   5886 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5887 		return (EPERM);
   5888 
   5889 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5890 	node.sysctl_data = &new_eee;
   5891 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5892 	if ((error) || (newp == NULL))
   5893 		return (error);
   5894 
   5895 	/* Nothing to do */
   5896 	if (new_eee == curr_eee)
   5897 		return (0);
   5898 
   5899 	/* Not supported */
   5900 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5901 		return (EINVAL);
   5902 
   5903 	/* Bounds checking */
   5904 	if ((new_eee < 0) || (new_eee > 1))
   5905 		return (EINVAL);
   5906 
   5907 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5908 	if (retval) {
   5909 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5910 		return (EINVAL);
   5911 	}
   5912 
   5913 	/* Restart auto-neg */
   5914 	ifp->if_init(ifp);
   5915 
   5916 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5917 
   5918 	/* Cache new value */
   5919 	if (new_eee)
   5920 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5921 	else
   5922 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5923 
   5924 	return (error);
   5925 } /* ixgbe_sysctl_eee_state */
   5926 
   5927 #define PRINTQS(adapter, regname)					\
   5928 	do {								\
   5929 		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
   5930 		int _i;							\
   5931 									\
   5932 		printf("%s: %s", device_xname((adapter)->dev), #regname); \
   5933 		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
   5934 			printf((_i == 0) ? "\t" : " ");			\
   5935 			printf("%08x", IXGBE_READ_REG(_hw,		\
   5936 				IXGBE_##regname(_i)));			\
   5937 		}							\
   5938 		printf("\n");						\
   5939 	} while (0)
   5940 
   5941 /************************************************************************
   5942  * ixgbe_print_debug_info
   5943  *
   5944  *   Called only when em_display_debug_stats is enabled.
   5945  *   Provides a way to take a look at important statistics
   5946  *   maintained by the driver and hardware.
   5947  ************************************************************************/
   5948 static void
   5949 ixgbe_print_debug_info(struct adapter *adapter)
   5950 {
   5951 	device_t	dev = adapter->dev;
   5952 	struct ixgbe_hw *hw = &adapter->hw;
   5953 	int table_size;
   5954 	int i;
   5955 
   5956 	switch (adapter->hw.mac.type) {
   5957 	case ixgbe_mac_X550:
   5958 	case ixgbe_mac_X550EM_x:
   5959 	case ixgbe_mac_X550EM_a:
   5960 		table_size = 128;
   5961 		break;
   5962 	default:
   5963 		table_size = 32;
   5964 		break;
   5965 	}
   5966 
   5967 	device_printf(dev, "[E]RETA:\n");
   5968 	for (i = 0; i < table_size; i++) {
   5969 		if (i < 32)
   5970 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5971 				IXGBE_RETA(i)));
   5972 		else
   5973 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5974 				IXGBE_ERETA(i - 32)));
   5975 	}
   5976 
   5977 	device_printf(dev, "queue:");
   5978 	for (i = 0; i < adapter->num_queues; i++) {
   5979 		printf((i == 0) ? "\t" : " ");
   5980 		printf("%8d", i);
   5981 	}
   5982 	printf("\n");
   5983 	PRINTQS(adapter, RDBAL);
   5984 	PRINTQS(adapter, RDBAH);
   5985 	PRINTQS(adapter, RDLEN);
   5986 	PRINTQS(adapter, SRRCTL);
   5987 	PRINTQS(adapter, RDH);
   5988 	PRINTQS(adapter, RDT);
   5989 	PRINTQS(adapter, RXDCTL);
   5990 
   5991 	device_printf(dev, "RQSMR:");
   5992 	for (i = 0; i < adapter->num_queues / 4; i++) {
   5993 		printf((i == 0) ? "\t" : " ");
   5994 		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
   5995 	}
   5996 	printf("\n");
   5997 
   5998 	device_printf(dev, "disabled_count:");
   5999 	for (i = 0; i < adapter->num_queues; i++) {
   6000 		printf((i == 0) ? "\t" : " ");
   6001 		printf("%8d", adapter->queues[i].disabled_count);
   6002 	}
   6003 	printf("\n");
   6004 
   6005 	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
   6006 	if (hw->mac.type != ixgbe_mac_82598EB) {
   6007 		device_printf(dev, "EIMS_EX(0):\t%08x\n",
   6008 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
   6009 		device_printf(dev, "EIMS_EX(1):\t%08x\n",
   6010 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
   6011 	}
   6012 } /* ixgbe_print_debug_info */
   6013 
   6014 /************************************************************************
   6015  * ixgbe_sysctl_debug
   6016  ************************************************************************/
   6017 static int
   6018 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
   6019 {
   6020 	struct sysctlnode node = *rnode;
   6021 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   6022 	int	       error, result = 0;
   6023 
   6024 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   6025 		return (EPERM);
   6026 
   6027 	node.sysctl_data = &result;
   6028 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   6029 
   6030 	if (error || newp == NULL)
   6031 		return error;
   6032 
   6033 	if (result == 1)
   6034 		ixgbe_print_debug_info(adapter);
   6035 
   6036 	return 0;
   6037 } /* ixgbe_sysctl_debug */
   6038 
   6039 /************************************************************************
   6040  * ixgbe_init_device_features
   6041  ************************************************************************/
   6042 static void
   6043 ixgbe_init_device_features(struct adapter *adapter)
   6044 {
   6045 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   6046 			  | IXGBE_FEATURE_RSS
   6047 			  | IXGBE_FEATURE_MSI
   6048 			  | IXGBE_FEATURE_MSIX
   6049 			  | IXGBE_FEATURE_LEGACY_IRQ
   6050 			  | IXGBE_FEATURE_LEGACY_TX;
   6051 
   6052 	/* Set capabilities first... */
   6053 	switch (adapter->hw.mac.type) {
   6054 	case ixgbe_mac_82598EB:
   6055 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   6056 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   6057 		break;
   6058 	case ixgbe_mac_X540:
   6059 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6060 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6061 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   6062 		    (adapter->hw.bus.func == 0))
   6063 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6064 		break;
   6065 	case ixgbe_mac_X550:
   6066 		/*
   6067 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6068 		 * NVM Image version.
   6069 		 */
   6070 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6071 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6072 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6073 		break;
   6074 	case ixgbe_mac_X550EM_x:
   6075 		/*
   6076 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6077 		 * NVM Image version.
   6078 		 */
   6079 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6080 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6081 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   6082 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6083 		break;
   6084 	case ixgbe_mac_X550EM_a:
   6085 		/*
   6086 		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
   6087 		 * NVM Image version.
   6088 		 */
   6089 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6090 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6091 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6092 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   6093 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   6094 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   6095 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   6096 		}
   6097 		break;
   6098 	case ixgbe_mac_82599EB:
   6099 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   6100 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   6101 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   6102 		    (adapter->hw.bus.func == 0))
   6103 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   6104 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   6105 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   6106 		break;
   6107 	default:
   6108 		break;
   6109 	}
   6110 
   6111 	/* Enabled by default... */
   6112 	/* Fan failure detection */
   6113 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   6114 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   6115 	/* Netmap */
   6116 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   6117 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   6118 	/* EEE */
   6119 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   6120 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   6121 	/* Thermal Sensor */
   6122 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   6123 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   6124 	/*
   6125 	 * Recovery mode:
   6126 	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
   6127 	 * NVM Image version.
   6128 	 */
   6129 
   6130 	/* Enabled via global sysctl... */
   6131 	/* Flow Director */
   6132 	if (ixgbe_enable_fdir) {
   6133 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   6134 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   6135 		else
   6136 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   6137 	}
   6138 	/* Legacy (single queue) transmit */
   6139 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   6140 	    ixgbe_enable_legacy_tx)
   6141 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   6142 	/*
   6143 	 * Message Signal Interrupts - Extended (MSI-X)
   6144 	 * Normal MSI is only enabled if MSI-X calls fail.
   6145 	 */
   6146 	if (!ixgbe_enable_msix)
   6147 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   6148 	/* Receive-Side Scaling (RSS) */
   6149 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   6150 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   6151 
   6152 	/* Disable features with unmet dependencies... */
   6153 	/* No MSI-X */
   6154 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   6155 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6156 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6157 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   6158 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   6159 	}
   6160 } /* ixgbe_init_device_features */
   6161 
   6162 /************************************************************************
   6163  * ixgbe_probe - Device identification routine
   6164  *
   6165  *   Determines if the driver should be loaded on
   6166  *   adapter based on its PCI vendor/device ID.
   6167  *
   6168  *   return BUS_PROBE_DEFAULT on success, positive on failure
   6169  ************************************************************************/
   6170 static int
   6171 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   6172 {
   6173 	const struct pci_attach_args *pa = aux;
   6174 
   6175 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   6176 }
   6177 
   6178 static const ixgbe_vendor_info_t *
   6179 ixgbe_lookup(const struct pci_attach_args *pa)
   6180 {
   6181 	const ixgbe_vendor_info_t *ent;
   6182 	pcireg_t subid;
   6183 
   6184 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   6185 
   6186 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   6187 		return NULL;
   6188 
   6189 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   6190 
   6191 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   6192 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   6193 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   6194 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   6195 			(ent->subvendor_id == 0)) &&
   6196 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   6197 			(ent->subdevice_id == 0))) {
   6198 			return ent;
   6199 		}
   6200 	}
   6201 	return NULL;
   6202 }
   6203 
   6204 static int
   6205 ixgbe_ifflags_cb(struct ethercom *ec)
   6206 {
   6207 	struct ifnet *ifp = &ec->ec_if;
   6208 	struct adapter *adapter = ifp->if_softc;
   6209 	u_short change;
   6210 	int rv = 0;
   6211 
   6212 	IXGBE_CORE_LOCK(adapter);
   6213 
   6214 	change = ifp->if_flags ^ adapter->if_flags;
   6215 	if (change != 0)
   6216 		adapter->if_flags = ifp->if_flags;
   6217 
   6218 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   6219 		rv = ENETRESET;
   6220 		goto out;
   6221 	} else if ((change & IFF_PROMISC) != 0)
   6222 		ixgbe_set_promisc(adapter);
   6223 
   6224 	/* Check for ec_capenable. */
   6225 	change = ec->ec_capenable ^ adapter->ec_capenable;
   6226 	adapter->ec_capenable = ec->ec_capenable;
   6227 	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
   6228 	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
   6229 		rv = ENETRESET;
   6230 		goto out;
   6231 	}
   6232 
   6233 	/*
   6234 	 * Special handling is not required for ETHERCAP_VLAN_MTU.
   6235 	 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
   6236 	 */
   6237 
   6238 	/* Set up VLAN support and filter */
   6239 	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
   6240 		ixgbe_setup_vlan_hw_support(adapter);
   6241 
   6242 out:
   6243 	IXGBE_CORE_UNLOCK(adapter);
   6244 
   6245 	return rv;
   6246 }
   6247 
   6248 /************************************************************************
   6249  * ixgbe_ioctl - Ioctl entry point
   6250  *
   6251  *   Called when the user wants to configure the interface.
   6252  *
   6253  *   return 0 on success, positive on failure
   6254  ************************************************************************/
   6255 static int
   6256 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   6257 {
   6258 	struct adapter	*adapter = ifp->if_softc;
   6259 	struct ixgbe_hw *hw = &adapter->hw;
   6260 	struct ifcapreq *ifcr = data;
   6261 	struct ifreq	*ifr = data;
   6262 	int		error = 0;
   6263 	int l4csum_en;
   6264 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   6265 	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   6266 
   6267 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   6268 		return (EPERM);
   6269 
   6270 	switch (command) {
   6271 	case SIOCSIFFLAGS:
   6272 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   6273 		break;
   6274 	case SIOCADDMULTI:
   6275 	case SIOCDELMULTI:
   6276 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   6277 		break;
   6278 	case SIOCSIFMEDIA:
   6279 	case SIOCGIFMEDIA:
   6280 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   6281 		break;
   6282 	case SIOCSIFCAP:
   6283 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   6284 		break;
   6285 	case SIOCSIFMTU:
   6286 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   6287 		break;
   6288 #ifdef __NetBSD__
   6289 	case SIOCINITIFADDR:
   6290 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   6291 		break;
   6292 	case SIOCGIFFLAGS:
   6293 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   6294 		break;
   6295 	case SIOCGIFAFLAG_IN:
   6296 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   6297 		break;
   6298 	case SIOCGIFADDR:
   6299 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   6300 		break;
   6301 	case SIOCGIFMTU:
   6302 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   6303 		break;
   6304 	case SIOCGIFCAP:
   6305 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   6306 		break;
   6307 	case SIOCGETHERCAP:
   6308 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   6309 		break;
   6310 	case SIOCGLIFADDR:
   6311 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   6312 		break;
   6313 	case SIOCZIFDATA:
   6314 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   6315 		hw->mac.ops.clear_hw_cntrs(hw);
   6316 		ixgbe_clear_evcnt(adapter);
   6317 		break;
   6318 	case SIOCAIFADDR:
   6319 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   6320 		break;
   6321 #endif
   6322 	default:
   6323 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   6324 		break;
   6325 	}
   6326 
   6327 	switch (command) {
   6328 	case SIOCGI2C:
   6329 	{
   6330 		struct ixgbe_i2c_req	i2c;
   6331 
   6332 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   6333 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   6334 		if (error != 0)
   6335 			break;
   6336 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   6337 			error = EINVAL;
   6338 			break;
   6339 		}
   6340 		if (i2c.len > sizeof(i2c.data)) {
   6341 			error = EINVAL;
   6342 			break;
   6343 		}
   6344 
   6345 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   6346 		    i2c.dev_addr, i2c.data);
   6347 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   6348 		break;
   6349 	}
   6350 	case SIOCSIFCAP:
   6351 		/* Layer-4 Rx checksum offload has to be turned on and
   6352 		 * off as a unit.
   6353 		 */
   6354 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   6355 		if (l4csum_en != l4csum && l4csum_en != 0)
   6356 			return EINVAL;
   6357 		/*FALLTHROUGH*/
   6358 	case SIOCADDMULTI:
   6359 	case SIOCDELMULTI:
   6360 	case SIOCSIFFLAGS:
   6361 	case SIOCSIFMTU:
   6362 	default:
   6363 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   6364 			return error;
   6365 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   6366 			;
   6367 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   6368 			IXGBE_CORE_LOCK(adapter);
   6369 			if ((ifp->if_flags & IFF_RUNNING) != 0)
   6370 				ixgbe_init_locked(adapter);
   6371 			ixgbe_recalculate_max_frame(adapter);
   6372 			IXGBE_CORE_UNLOCK(adapter);
   6373 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   6374 			/*
   6375 			 * Multicast list has changed; set the hardware filter
   6376 			 * accordingly.
   6377 			 */
   6378 			IXGBE_CORE_LOCK(adapter);
   6379 			ixgbe_disable_intr(adapter);
   6380 			ixgbe_set_multi(adapter);
   6381 			ixgbe_enable_intr(adapter);
   6382 			IXGBE_CORE_UNLOCK(adapter);
   6383 		}
   6384 		return 0;
   6385 	}
   6386 
   6387 	return error;
   6388 } /* ixgbe_ioctl */
   6389 
   6390 /************************************************************************
   6391  * ixgbe_check_fan_failure
   6392  ************************************************************************/
   6393 static void
   6394 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   6395 {
   6396 	u32 mask;
   6397 
   6398 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   6399 	    IXGBE_ESDP_SDP1;
   6400 
   6401 	if (reg & mask)
   6402 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   6403 } /* ixgbe_check_fan_failure */
   6404 
   6405 /************************************************************************
   6406  * ixgbe_handle_que
   6407  ************************************************************************/
   6408 static void
   6409 ixgbe_handle_que(void *context)
   6410 {
   6411 	struct ix_queue *que = context;
   6412 	struct adapter	*adapter = que->adapter;
   6413 	struct tx_ring	*txr = que->txr;
   6414 	struct ifnet	*ifp = adapter->ifp;
   6415 	bool		more = false;
   6416 
   6417 	que->handleq.ev_count++;
   6418 
   6419 	if (ifp->if_flags & IFF_RUNNING) {
   6420 		more = ixgbe_rxeof(que);
   6421 		IXGBE_TX_LOCK(txr);
   6422 		more |= ixgbe_txeof(txr);
   6423 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6424 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   6425 				ixgbe_mq_start_locked(ifp, txr);
   6426 		/* Only for queue 0 */
   6427 		/* NetBSD still needs this for CBQ */
   6428 		if ((&adapter->queues[0] == que)
   6429 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   6430 			ixgbe_legacy_start_locked(ifp, txr);
   6431 		IXGBE_TX_UNLOCK(txr);
   6432 	}
   6433 
   6434 	if (more) {
   6435 		que->req.ev_count++;
   6436 		ixgbe_sched_handle_que(adapter, que);
   6437 	} else if (que->res != NULL) {
   6438 		/* Re-enable this interrupt */
   6439 		ixgbe_enable_queue(adapter, que->msix);
   6440 	} else
   6441 		ixgbe_enable_intr(adapter);
   6442 
   6443 	return;
   6444 } /* ixgbe_handle_que */
   6445 
   6446 /************************************************************************
   6447  * ixgbe_handle_que_work
   6448  ************************************************************************/
   6449 static void
   6450 ixgbe_handle_que_work(struct work *wk, void *context)
   6451 {
   6452 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   6453 
   6454 	/*
   6455 	 * "enqueued flag" is not required here.
   6456 	 * See ixgbe_msix_que().
   6457 	 */
   6458 	ixgbe_handle_que(que);
   6459 }
   6460 
   6461 /************************************************************************
   6462  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   6463  ************************************************************************/
   6464 static int
   6465 ixgbe_allocate_legacy(struct adapter *adapter,
   6466     const struct pci_attach_args *pa)
   6467 {
   6468 	device_t	dev = adapter->dev;
   6469 	struct ix_queue *que = adapter->queues;
   6470 	struct tx_ring	*txr = adapter->tx_rings;
   6471 	int		counts[PCI_INTR_TYPE_SIZE];
   6472 	pci_intr_type_t intr_type, max_type;
   6473 	char		intrbuf[PCI_INTRSTR_LEN];
   6474 	char		wqname[MAXCOMLEN];
   6475 	const char	*intrstr = NULL;
   6476 	int defertx_error = 0, error;
   6477 
   6478 	/* We allocate a single interrupt resource */
   6479 	max_type = PCI_INTR_TYPE_MSI;
   6480 	counts[PCI_INTR_TYPE_MSIX] = 0;
   6481 	counts[PCI_INTR_TYPE_MSI] =
   6482 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   6483 	/* Check not feat_en but feat_cap to fallback to INTx */
   6484 	counts[PCI_INTR_TYPE_INTX] =
   6485 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   6486 
   6487 alloc_retry:
   6488 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   6489 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   6490 		return ENXIO;
   6491 	}
   6492 	adapter->osdep.nintrs = 1;
   6493 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   6494 	    intrbuf, sizeof(intrbuf));
   6495 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   6496 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   6497 	    device_xname(dev));
   6498 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   6499 	if (adapter->osdep.ihs[0] == NULL) {
   6500 		aprint_error_dev(dev,"unable to establish %s\n",
   6501 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6502 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6503 		adapter->osdep.intrs = NULL;
   6504 		switch (intr_type) {
   6505 		case PCI_INTR_TYPE_MSI:
   6506 			/* The next try is for INTx: Disable MSI */
   6507 			max_type = PCI_INTR_TYPE_INTX;
   6508 			counts[PCI_INTR_TYPE_INTX] = 1;
   6509 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6510 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   6511 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6512 				goto alloc_retry;
   6513 			} else
   6514 				break;
   6515 		case PCI_INTR_TYPE_INTX:
   6516 		default:
   6517 			/* See below */
   6518 			break;
   6519 		}
   6520 	}
   6521 	if (intr_type == PCI_INTR_TYPE_INTX) {
   6522 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6523 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6524 	}
   6525 	if (adapter->osdep.ihs[0] == NULL) {
   6526 		aprint_error_dev(dev,
   6527 		    "couldn't establish interrupt%s%s\n",
   6528 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   6529 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6530 		adapter->osdep.intrs = NULL;
   6531 		return ENXIO;
   6532 	}
   6533 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   6534 	/*
   6535 	 * Try allocating a fast interrupt and the associated deferred
   6536 	 * processing contexts.
   6537 	 */
   6538 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6539 		txr->txr_si =
   6540 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6541 			ixgbe_deferred_mq_start, txr);
   6542 
   6543 		snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6544 		defertx_error = workqueue_create(&adapter->txr_wq, wqname,
   6545 		    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI,
   6546 		    IPL_NET, IXGBE_WORKQUEUE_FLAGS);
   6547 		adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6548 	}
   6549 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6550 	    ixgbe_handle_que, que);
   6551 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6552 	error = workqueue_create(&adapter->que_wq, wqname,
   6553 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6554 	    IXGBE_WORKQUEUE_FLAGS);
   6555 
   6556 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   6557 		&& ((txr->txr_si == NULL) || defertx_error != 0))
   6558 	    || (que->que_si == NULL) || error != 0) {
   6559 		aprint_error_dev(dev,
   6560 		    "could not establish software interrupts\n");
   6561 
   6562 		return ENXIO;
   6563 	}
   6564 	/* For simplicity in the handlers */
   6565 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   6566 
   6567 	return (0);
   6568 } /* ixgbe_allocate_legacy */
   6569 
   6570 /************************************************************************
   6571  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6572  ************************************************************************/
   6573 static int
   6574 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6575 {
   6576 	device_t	dev = adapter->dev;
   6577 	struct		ix_queue *que = adapter->queues;
   6578 	struct		tx_ring *txr = adapter->tx_rings;
   6579 	pci_chipset_tag_t pc;
   6580 	char		intrbuf[PCI_INTRSTR_LEN];
   6581 	char		intr_xname[32];
   6582 	char		wqname[MAXCOMLEN];
   6583 	const char	*intrstr = NULL;
   6584 	int		error, vector = 0;
   6585 	int		cpu_id = 0;
   6586 	kcpuset_t	*affinity;
   6587 #ifdef RSS
   6588 	unsigned int	rss_buckets = 0;
   6589 	kcpuset_t	cpu_mask;
   6590 #endif
   6591 
   6592 	pc = adapter->osdep.pc;
   6593 #ifdef	RSS
   6594 	/*
   6595 	 * If we're doing RSS, the number of queues needs to
   6596 	 * match the number of RSS buckets that are configured.
   6597 	 *
   6598 	 * + If there's more queues than RSS buckets, we'll end
   6599 	 *   up with queues that get no traffic.
   6600 	 *
   6601 	 * + If there's more RSS buckets than queues, we'll end
   6602 	 *   up having multiple RSS buckets map to the same queue,
   6603 	 *   so there'll be some contention.
   6604 	 */
   6605 	rss_buckets = rss_getnumbuckets();
   6606 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6607 	    (adapter->num_queues != rss_buckets)) {
   6608 		device_printf(dev,
   6609 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6610 		    "; performance will be impacted.\n",
   6611 		    __func__, adapter->num_queues, rss_buckets);
   6612 	}
   6613 #endif
   6614 
   6615 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6616 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6617 	    adapter->osdep.nintrs) != 0) {
   6618 		aprint_error_dev(dev,
   6619 		    "failed to allocate MSI-X interrupt\n");
   6620 		return (ENXIO);
   6621 	}
   6622 
   6623 	kcpuset_create(&affinity, false);
   6624 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6625 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6626 		    device_xname(dev), i);
   6627 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6628 		    sizeof(intrbuf));
   6629 #ifdef IXGBE_MPSAFE
   6630 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6631 		    true);
   6632 #endif
   6633 		/* Set the handler function */
   6634 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6635 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6636 		    intr_xname);
   6637 		if (que->res == NULL) {
   6638 			aprint_error_dev(dev,
   6639 			    "Failed to register QUE handler\n");
   6640 			error = ENXIO;
   6641 			goto err_out;
   6642 		}
   6643 		que->msix = vector;
   6644 		adapter->active_queues |= 1ULL << que->msix;
   6645 
   6646 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6647 #ifdef	RSS
   6648 			/*
   6649 			 * The queue ID is used as the RSS layer bucket ID.
   6650 			 * We look up the queue ID -> RSS CPU ID and select
   6651 			 * that.
   6652 			 */
   6653 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6654 			CPU_SETOF(cpu_id, &cpu_mask);
   6655 #endif
   6656 		} else {
   6657 			/*
   6658 			 * Bind the MSI-X vector, and thus the
   6659 			 * rings to the corresponding CPU.
   6660 			 *
   6661 			 * This just happens to match the default RSS
   6662 			 * round-robin bucket -> queue -> CPU allocation.
   6663 			 */
   6664 			if (adapter->num_queues > 1)
   6665 				cpu_id = i;
   6666 		}
   6667 		/* Round-robin affinity */
   6668 		kcpuset_zero(affinity);
   6669 		kcpuset_set(affinity, cpu_id % ncpu);
   6670 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6671 		    NULL);
   6672 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6673 		    intrstr);
   6674 		if (error == 0) {
   6675 #if 1 /* def IXGBE_DEBUG */
   6676 #ifdef	RSS
   6677 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6678 			    cpu_id % ncpu);
   6679 #else
   6680 			aprint_normal(", bound queue %d to cpu %d", i,
   6681 			    cpu_id % ncpu);
   6682 #endif
   6683 #endif /* IXGBE_DEBUG */
   6684 		}
   6685 		aprint_normal("\n");
   6686 
   6687 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6688 			txr->txr_si = softint_establish(
   6689 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6690 				ixgbe_deferred_mq_start, txr);
   6691 			if (txr->txr_si == NULL) {
   6692 				aprint_error_dev(dev,
   6693 				    "couldn't establish software interrupt\n");
   6694 				error = ENXIO;
   6695 				goto err_out;
   6696 			}
   6697 		}
   6698 		que->que_si
   6699 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6700 			ixgbe_handle_que, que);
   6701 		if (que->que_si == NULL) {
   6702 			aprint_error_dev(dev,
   6703 			    "couldn't establish software interrupt\n");
   6704 			error = ENXIO;
   6705 			goto err_out;
   6706 		}
   6707 	}
   6708 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6709 	error = workqueue_create(&adapter->txr_wq, wqname,
   6710 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6711 	    IXGBE_WORKQUEUE_FLAGS);
   6712 	if (error) {
   6713 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6714 		goto err_out;
   6715 	}
   6716 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6717 
   6718 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6719 	error = workqueue_create(&adapter->que_wq, wqname,
   6720 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6721 	    IXGBE_WORKQUEUE_FLAGS);
   6722 	if (error) {
   6723 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6724 		goto err_out;
   6725 	}
   6726 
   6727 	/* and Link */
   6728 	cpu_id++;
   6729 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6730 	adapter->vector = vector;
   6731 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6732 	    sizeof(intrbuf));
   6733 #ifdef IXGBE_MPSAFE
   6734 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6735 	    true);
   6736 #endif
   6737 	/* Set the link handler function */
   6738 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6739 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6740 	    intr_xname);
   6741 	if (adapter->osdep.ihs[vector] == NULL) {
   6742 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6743 		error = ENXIO;
   6744 		goto err_out;
   6745 	}
   6746 	/* Round-robin affinity */
   6747 	kcpuset_zero(affinity);
   6748 	kcpuset_set(affinity, cpu_id % ncpu);
   6749 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6750 	    NULL);
   6751 
   6752 	aprint_normal_dev(dev,
   6753 	    "for link, interrupting at %s", intrstr);
   6754 	if (error == 0)
   6755 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6756 	else
   6757 		aprint_normal("\n");
   6758 
   6759 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6760 		adapter->mbx_si =
   6761 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6762 			ixgbe_handle_mbx, adapter);
   6763 		if (adapter->mbx_si == NULL) {
   6764 			aprint_error_dev(dev,
   6765 			    "could not establish software interrupts\n");
   6766 
   6767 			error = ENXIO;
   6768 			goto err_out;
   6769 		}
   6770 	}
   6771 
   6772 	kcpuset_destroy(affinity);
   6773 	aprint_normal_dev(dev,
   6774 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6775 
   6776 	return (0);
   6777 
   6778 err_out:
   6779 	kcpuset_destroy(affinity);
   6780 	ixgbe_free_softint(adapter);
   6781 	ixgbe_free_pciintr_resources(adapter);
   6782 	return (error);
   6783 } /* ixgbe_allocate_msix */
   6784 
   6785 /************************************************************************
   6786  * ixgbe_configure_interrupts
   6787  *
   6788  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6789  *   This will also depend on user settings.
   6790  ************************************************************************/
   6791 static int
   6792 ixgbe_configure_interrupts(struct adapter *adapter)
   6793 {
   6794 	device_t dev = adapter->dev;
   6795 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6796 	int want, queues, msgs;
   6797 
   6798 	/* Default to 1 queue if MSI-X setup fails */
   6799 	adapter->num_queues = 1;
   6800 
   6801 	/* Override by tuneable */
   6802 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6803 		goto msi;
   6804 
   6805 	/*
   6806 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6807 	 * interrupt slot.
   6808 	 */
   6809 	if (ncpu == 1)
   6810 		goto msi;
   6811 
   6812 	/* First try MSI-X */
   6813 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6814 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6815 	if (msgs < 2)
   6816 		goto msi;
   6817 
   6818 	adapter->msix_mem = (void *)1; /* XXX */
   6819 
   6820 	/* Figure out a reasonable auto config value */
   6821 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6822 
   6823 #ifdef	RSS
   6824 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6825 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6826 		queues = uimin(queues, rss_getnumbuckets());
   6827 #endif
   6828 	if (ixgbe_num_queues > queues) {
   6829 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6830 		ixgbe_num_queues = queues;
   6831 	}
   6832 
   6833 	if (ixgbe_num_queues != 0)
   6834 		queues = ixgbe_num_queues;
   6835 	else
   6836 		queues = uimin(queues,
   6837 		    uimin(mac->max_tx_queues, mac->max_rx_queues));
   6838 
   6839 	/* reflect correct sysctl value */
   6840 	ixgbe_num_queues = queues;
   6841 
   6842 	/*
   6843 	 * Want one vector (RX/TX pair) per queue
   6844 	 * plus an additional for Link.
   6845 	 */
   6846 	want = queues + 1;
   6847 	if (msgs >= want)
   6848 		msgs = want;
   6849 	else {
   6850 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6851 		    "%d vectors but %d queues wanted!\n",
   6852 		    msgs, want);
   6853 		goto msi;
   6854 	}
   6855 	adapter->num_queues = queues;
   6856 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6857 	return (0);
   6858 
   6859 	/*
   6860 	 * MSI-X allocation failed or provided us with
   6861 	 * less vectors than needed. Free MSI-X resources
   6862 	 * and we'll try enabling MSI.
   6863 	 */
   6864 msi:
   6865 	/* Without MSI-X, some features are no longer supported */
   6866 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6867 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6868 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6869 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6870 
   6871 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6872 	adapter->msix_mem = NULL; /* XXX */
   6873 	if (msgs > 1)
   6874 		msgs = 1;
   6875 	if (msgs != 0) {
   6876 		msgs = 1;
   6877 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6878 		return (0);
   6879 	}
   6880 
   6881 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6882 		aprint_error_dev(dev,
   6883 		    "Device does not support legacy interrupts.\n");
   6884 		return 1;
   6885 	}
   6886 
   6887 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6888 
   6889 	return (0);
   6890 } /* ixgbe_configure_interrupts */
   6891 
   6892 
   6893 /************************************************************************
   6894  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6895  *
   6896  *   Done outside of interrupt context since the driver might sleep
   6897  ************************************************************************/
   6898 static void
   6899 ixgbe_handle_link(void *context)
   6900 {
   6901 	struct adapter	*adapter = context;
   6902 	struct ixgbe_hw *hw = &adapter->hw;
   6903 
   6904 	IXGBE_CORE_LOCK(adapter);
   6905 	++adapter->link_sicount.ev_count;
   6906 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6907 	ixgbe_update_link_status(adapter);
   6908 
   6909 	/* Re-enable link interrupts */
   6910 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6911 
   6912 	IXGBE_CORE_UNLOCK(adapter);
   6913 } /* ixgbe_handle_link */
   6914 
   6915 #if 0
   6916 /************************************************************************
   6917  * ixgbe_rearm_queues
   6918  ************************************************************************/
   6919 static __inline void
   6920 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6921 {
   6922 	u32 mask;
   6923 
   6924 	switch (adapter->hw.mac.type) {
   6925 	case ixgbe_mac_82598EB:
   6926 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6927 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6928 		break;
   6929 	case ixgbe_mac_82599EB:
   6930 	case ixgbe_mac_X540:
   6931 	case ixgbe_mac_X550:
   6932 	case ixgbe_mac_X550EM_x:
   6933 	case ixgbe_mac_X550EM_a:
   6934 		mask = (queues & 0xFFFFFFFF);
   6935 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6936 		mask = (queues >> 32);
   6937 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6938 		break;
   6939 	default:
   6940 		break;
   6941 	}
   6942 } /* ixgbe_rearm_queues */
   6943 #endif
   6944