Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe.c revision 1.169
      1 /* $NetBSD: ixgbe.c,v 1.169 2018/12/06 13:25:02 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
     80 /************************************************************************
     81  * Driver version
     82  ************************************************************************/
     83 static const char ixgbe_driver_version[] = "4.0.1-k";
     84 /* XXX NetBSD: + 3.3.6 */
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	/* X550EM_x family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	/* X550EM_a family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass adapters */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
    144 
/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the "String Index" (last) field of the entries in
 *   ixgbe_vendor_info_array above; all current entries use index 0.
 ************************************************************************/
static const char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
    151 
    152 /************************************************************************
    153  * Function prototypes
    154  ************************************************************************/
    155 static int      ixgbe_probe(device_t, cfdata_t, void *);
    156 static void     ixgbe_attach(device_t, device_t, void *);
    157 static int      ixgbe_detach(device_t, int);
    158 #if 0
    159 static int      ixgbe_shutdown(device_t);
    160 #endif
    161 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    162 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    163 static int	ixgbe_ifflags_cb(struct ethercom *);
    164 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    165 static void	ixgbe_ifstop(struct ifnet *, int);
    166 static int	ixgbe_init(struct ifnet *);
    167 static void	ixgbe_init_locked(struct adapter *);
    168 static void     ixgbe_stop(void *);
    169 static void     ixgbe_init_device_features(struct adapter *);
    170 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    171 static void	ixgbe_add_media_types(struct adapter *);
    172 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    173 static int      ixgbe_media_change(struct ifnet *);
    174 static int      ixgbe_allocate_pci_resources(struct adapter *,
    175 		    const struct pci_attach_args *);
    176 static void     ixgbe_free_softint(struct adapter *);
    177 static void	ixgbe_get_slot_info(struct adapter *);
    178 static int      ixgbe_allocate_msix(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_allocate_legacy(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static int      ixgbe_configure_interrupts(struct adapter *);
    183 static void	ixgbe_free_pciintr_resources(struct adapter *);
    184 static void	ixgbe_free_pci_resources(struct adapter *);
    185 static void	ixgbe_local_timer(void *);
    186 static void	ixgbe_local_timer1(void *);
    187 static void     ixgbe_recovery_mode_timer(void *);
    188 static int	ixgbe_setup_interface(device_t, struct adapter *);
    189 static void	ixgbe_config_gpie(struct adapter *);
    190 static void	ixgbe_config_dmac(struct adapter *);
    191 static void	ixgbe_config_delay_values(struct adapter *);
    192 static void	ixgbe_config_link(struct adapter *);
    193 static void	ixgbe_check_wol_support(struct adapter *);
    194 static int	ixgbe_setup_low_power_mode(struct adapter *);
    195 #if 0
    196 static void	ixgbe_rearm_queues(struct adapter *, u64);
    197 #endif
    198 
    199 static void     ixgbe_initialize_transmit_units(struct adapter *);
    200 static void     ixgbe_initialize_receive_units(struct adapter *);
    201 static void	ixgbe_enable_rx_drop(struct adapter *);
    202 static void	ixgbe_disable_rx_drop(struct adapter *);
    203 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    204 
    205 static void     ixgbe_enable_intr(struct adapter *);
    206 static void     ixgbe_disable_intr(struct adapter *);
    207 static void     ixgbe_update_stats_counters(struct adapter *);
    208 static void     ixgbe_set_promisc(struct adapter *);
    209 static void     ixgbe_set_multi(struct adapter *);
    210 static void     ixgbe_update_link_status(struct adapter *);
    211 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    212 static void	ixgbe_configure_ivars(struct adapter *);
    213 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    214 static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
    215 
    216 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    217 #if 0
    218 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    219 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    220 #endif
    221 
    222 static void	ixgbe_add_device_sysctls(struct adapter *);
    223 static void     ixgbe_add_hw_stats(struct adapter *);
    224 static void	ixgbe_clear_evcnt(struct adapter *);
    225 static int	ixgbe_set_flowcntl(struct adapter *, int);
    226 static int	ixgbe_set_advertise(struct adapter *, int);
    227 static int      ixgbe_get_advertise(struct adapter *);
    228 
    229 /* Sysctl handlers */
    230 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    231 		     const char *, int *, int);
    232 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    234 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    235 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    236 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    237 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    238 #ifdef IXGBE_DEBUG
    239 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    241 #endif
    242 static int      ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    243 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    244 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    245 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    246 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    247 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    248 static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
    249 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    250 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    251 
    252 /* Support for pluggable optic modules */
    253 static bool	ixgbe_sfp_probe(struct adapter *);
    254 
    255 /* Legacy (single vector) interrupt handler */
    256 static int	ixgbe_legacy_irq(void *);
    257 
    258 /* The MSI/MSI-X Interrupt handlers */
    259 static int	ixgbe_msix_que(void *);
    260 static int	ixgbe_msix_link(void *);
    261 
    262 /* Software interrupts for deferred work */
    263 static void	ixgbe_handle_que(void *);
    264 static void	ixgbe_handle_link(void *);
    265 static void	ixgbe_handle_msf(void *);
    266 static void	ixgbe_handle_mod(void *);
    267 static void	ixgbe_handle_phy(void *);
    268 
    269 /* Workqueue handler for deferred work */
    270 static void	ixgbe_handle_que_work(struct work *, void *);
    271 
    272 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    273 
/************************************************************************
 *  NetBSD Device Interface Entry Points
 ************************************************************************/
/*
 * Autoconf attachment: match (probe), attach and detach entry points.
 * DVF_DETACH_SHUTDOWN indicates the detach routine may also be invoked
 * at system shutdown.
 */
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/* FreeBSD module plumbing; dead code retained from upstream for reference */
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
    291 
/*
 * TUNABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/*
 * SYSCTL_INT is defined away to a no-op just below; the FreeBSD sysctl
 * declarations that follow are retained from upstream for reference only.
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

/* Upper bound on interrupts per second per vector */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether packet processing uses workqueue (true) or softint (false) */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on.
 * This only works as a compile option
 * right now as it's used during attach;
 * set this to 'ixgbe_smart_speed_off'
 * to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
/* TUNABLE_INT is likewise a FreeBSD-ism, defined away to a no-op here */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * MP-safety flags for callouts, softints and workqueues, selected at
 * compile time via the NET_MPSAFE kernel option.
 *
 * NOTE(review): "SOFTINFT" misspells "SOFTINT"; the identifier is kept
 * as-is because other code in this driver may reference it -- renaming
 * would need a coordinated change.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    414 
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RSS redirection table (RETA/ERETA), the RSS hash key
 *   registers (RSSRK) and the enabled hash types (MRQC) so received
 *   flows are spread across the configured RX queues.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id, table_size, index_mult;
	int             i, j;
	u32             rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 512-entry table (RETA plus ERETA) */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the RX queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 *
		 * Four 8-bit entries are packed per 32-bit register, so
		 * one register is written out every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries 0-127 live in RETA, 128-511 in ERETA */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the configured hash types into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
    531 
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
/* Round-up amount so rx_mbuf_sz maps onto SRRCTL's BSIZEPKT units */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	int             i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/*
		 * 82598 only: DPF (discard pause frames) and PMCF (pass
		 * MAC control frames).
		 */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* RX buffer size, rounded up, in SRRCTL BSIZEPKT units */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		/* Map queue i's statistics into byte 'regshift' of RQSMR */
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Set packet-split receive types (non-82598 parts) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
    661 
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		u32 tqsmreg, reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		int j = txr->me;	/* hardware queue index */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/*
		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
		 * Register location is different between 82598 and others.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			tqsmreg = IXGBE_TQSMR(regnum);
		else
			tqsmreg = IXGBE_TQSM(regnum);
		reg = IXGBE_READ_REG(hw, tqsmreg);
		/* Map queue i's statistics into byte 'regshift' */
		reg &= ~(0x000000ff << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, tqsmreg, reg);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Ring starts out with descriptor space available */
		txr->txr_no_space = false;

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Non-82598 parts require the global TX DMA engine enable */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
    753 
    754 /************************************************************************
    755  * ixgbe_attach - Device initialization routine
    756  *
    757  *   Called when the driver is being loaded.
    758  *   Identifies the type of hardware, allocates all resources
    759  *   and initializes the hardware.
    760  *
    761  *   return 0 on success, positive on failure
    762  ************************************************************************/
    763 static void
    764 ixgbe_attach(device_t parent, device_t dev, void *aux)
    765 {
    766 	struct adapter  *adapter;
    767 	struct ixgbe_hw *hw;
    768 	int             error = -1;
    769 	u32		ctrl_ext;
    770 	u16		high, low, nvmreg;
    771 	pcireg_t	id, subid;
    772 	const ixgbe_vendor_info_t *ent;
    773 	struct pci_attach_args *pa = aux;
    774 	const char *str;
    775 	char buf[256];
    776 
    777 	INIT_DEBUGOUT("ixgbe_attach: begin");
    778 
    779 	/* Allocate, clear, and link in our adapter structure */
    780 	adapter = device_private(dev);
    781 	adapter->hw.back = adapter;
    782 	adapter->dev = dev;
    783 	hw = &adapter->hw;
    784 	adapter->osdep.pc = pa->pa_pc;
    785 	adapter->osdep.tag = pa->pa_tag;
    786 	if (pci_dma64_available(pa))
    787 		adapter->osdep.dmat = pa->pa_dmat64;
    788 	else
    789 		adapter->osdep.dmat = pa->pa_dmat;
    790 	adapter->osdep.attached = false;
    791 
    792 	ent = ixgbe_lookup(pa);
    793 
    794 	KASSERT(ent != NULL);
    795 
    796 	aprint_normal(": %s, Version - %s\n",
    797 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    798 
    799 	/* Core Lock Init*/
    800 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    801 
    802 	/* Set up the timer callout */
    803 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    804 
    805 	/* Determine hardware revision */
    806 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    807 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    808 
    809 	hw->vendor_id = PCI_VENDOR(id);
    810 	hw->device_id = PCI_PRODUCT(id);
    811 	hw->revision_id =
    812 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    813 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    814 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    815 
    816 	/*
    817 	 * Make sure BUSMASTER is set
    818 	 */
    819 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    820 
    821 	/* Do base PCI setup - map BAR0 */
    822 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    823 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    824 		error = ENXIO;
    825 		goto err_out;
    826 	}
    827 
    828 	/* let hardware know driver is loaded */
    829 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    830 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    831 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    832 
    833 	/*
    834 	 * Initialize the shared code
    835 	 */
    836 	if (ixgbe_init_shared_code(hw) != 0) {
    837 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    838 		error = ENXIO;
    839 		goto err_out;
    840 	}
    841 
    842 	switch (hw->mac.type) {
    843 	case ixgbe_mac_82598EB:
    844 		str = "82598EB";
    845 		break;
    846 	case ixgbe_mac_82599EB:
    847 		str = "82599EB";
    848 		break;
    849 	case ixgbe_mac_X540:
    850 		str = "X540";
    851 		break;
    852 	case ixgbe_mac_X550:
    853 		str = "X550";
    854 		break;
    855 	case ixgbe_mac_X550EM_x:
    856 		str = "X550EM";
    857 		break;
    858 	case ixgbe_mac_X550EM_a:
    859 		str = "X550EM A";
    860 		break;
    861 	default:
    862 		str = "Unknown";
    863 		break;
    864 	}
    865 	aprint_normal_dev(dev, "device %s\n", str);
    866 
    867 	if (hw->mbx.ops.init_params)
    868 		hw->mbx.ops.init_params(hw);
    869 
    870 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    871 
    872 	/* Pick up the 82599 settings */
    873 	if (hw->mac.type != ixgbe_mac_82598EB) {
    874 		hw->phy.smart_speed = ixgbe_smart_speed;
    875 		adapter->num_segs = IXGBE_82599_SCATTER;
    876 	} else
    877 		adapter->num_segs = IXGBE_82598_SCATTER;
    878 
    879 	hw->mac.ops.set_lan_id(hw);
    880 	ixgbe_init_device_features(adapter);
    881 
    882 	if (ixgbe_configure_interrupts(adapter)) {
    883 		error = ENXIO;
    884 		goto err_out;
    885 	}
    886 
    887 	/* Allocate multicast array memory. */
    888 	adapter->mta = malloc(sizeof(*adapter->mta) *
    889 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    890 	if (adapter->mta == NULL) {
    891 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    892 		error = ENOMEM;
    893 		goto err_out;
    894 	}
    895 
    896 	/* Enable WoL (if supported) */
    897 	ixgbe_check_wol_support(adapter);
    898 
    899 	/* Verify adapter fan is still functional (if applicable) */
    900 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    901 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    902 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    903 	}
    904 
    905 	/* Ensure SW/FW semaphore is free */
    906 	ixgbe_init_swfw_semaphore(hw);
    907 
    908 	/* Enable EEE power saving */
    909 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    910 		hw->mac.ops.setup_eee(hw, TRUE);
    911 
    912 	/* Set an initial default flow control value */
    913 	hw->fc.requested_mode = ixgbe_flow_control;
    914 
    915 	/* Sysctls for limiting the amount of work done in the taskqueues */
    916 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    917 	    "max number of rx packets to process",
    918 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    919 
    920 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    921 	    "max number of tx packets to process",
    922 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    923 
    924 	/* Do descriptor calc and sanity checks */
    925 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    926 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    927 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    928 		adapter->num_tx_desc = DEFAULT_TXD;
    929 	} else
    930 		adapter->num_tx_desc = ixgbe_txd;
    931 
    932 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    933 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    934 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    935 		adapter->num_rx_desc = DEFAULT_RXD;
    936 	} else
    937 		adapter->num_rx_desc = ixgbe_rxd;
    938 
    939 	/* Allocate our TX/RX Queues */
    940 	if (ixgbe_allocate_queues(adapter)) {
    941 		error = ENOMEM;
    942 		goto err_out;
    943 	}
    944 
    945 	hw->phy.reset_if_overtemp = TRUE;
    946 	error = ixgbe_reset_hw(hw);
    947 	hw->phy.reset_if_overtemp = FALSE;
    948 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    949 		/*
    950 		 * No optics in this port, set up
    951 		 * so the timer routine will probe
    952 		 * for later insertion.
    953 		 */
    954 		adapter->sfp_probe = TRUE;
    955 		error = IXGBE_SUCCESS;
    956 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    957 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    958 		error = EIO;
    959 		goto err_late;
    960 	} else if (error) {
    961 		aprint_error_dev(dev, "Hardware initialization failed\n");
    962 		error = EIO;
    963 		goto err_late;
    964 	}
    965 
    966 	/* Make sure we have a good EEPROM before we read from it */
    967 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    968 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    969 		error = EIO;
    970 		goto err_late;
    971 	}
    972 
    973 	aprint_normal("%s:", device_xname(dev));
    974 	/* NVM Image Version */
    975 	high = low = 0;
    976 	switch (hw->mac.type) {
    977 	case ixgbe_mac_X540:
    978 	case ixgbe_mac_X550EM_a:
    979 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    980 		if (nvmreg == 0xffff)
    981 			break;
    982 		high = (nvmreg >> 12) & 0x0f;
    983 		low = (nvmreg >> 4) & 0xff;
    984 		id = nvmreg & 0x0f;
    985 		aprint_normal(" NVM Image Version %u.", high);
    986 		if (hw->mac.type == ixgbe_mac_X540)
    987 			str = "%x";
    988 		else
    989 			str = "%02x";
    990 		aprint_normal(str, low);
    991 		aprint_normal(" ID 0x%x,", id);
    992 		break;
    993 	case ixgbe_mac_X550EM_x:
    994 	case ixgbe_mac_X550:
    995 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    996 		if (nvmreg == 0xffff)
    997 			break;
    998 		high = (nvmreg >> 12) & 0x0f;
    999 		low = nvmreg & 0xff;
   1000 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
   1001 		break;
   1002 	default:
   1003 		break;
   1004 	}
   1005 	hw->eeprom.nvm_image_ver_high = high;
   1006 	hw->eeprom.nvm_image_ver_low = low;
   1007 
   1008 	/* PHY firmware revision */
   1009 	switch (hw->mac.type) {
   1010 	case ixgbe_mac_X540:
   1011 	case ixgbe_mac_X550:
   1012 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1013 		if (nvmreg == 0xffff)
   1014 			break;
   1015 		high = (nvmreg >> 12) & 0x0f;
   1016 		low = (nvmreg >> 4) & 0xff;
   1017 		id = nvmreg & 0x000f;
   1018 		aprint_normal(" PHY FW Revision %u.", high);
   1019 		if (hw->mac.type == ixgbe_mac_X540)
   1020 			str = "%x";
   1021 		else
   1022 			str = "%02x";
   1023 		aprint_normal(str, low);
   1024 		aprint_normal(" ID 0x%x,", id);
   1025 		break;
   1026 	default:
   1027 		break;
   1028 	}
   1029 
   1030 	/* NVM Map version & OEM NVM Image version */
   1031 	switch (hw->mac.type) {
   1032 	case ixgbe_mac_X550:
   1033 	case ixgbe_mac_X550EM_x:
   1034 	case ixgbe_mac_X550EM_a:
   1035 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1036 		if (nvmreg != 0xffff) {
   1037 			high = (nvmreg >> 12) & 0x0f;
   1038 			low = nvmreg & 0x00ff;
   1039 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1040 		}
   1041 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1042 		if (nvmreg != 0xffff) {
   1043 			high = (nvmreg >> 12) & 0x0f;
   1044 			low = nvmreg & 0x00ff;
   1045 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1046 			    low);
   1047 		}
   1048 		break;
   1049 	default:
   1050 		break;
   1051 	}
   1052 
   1053 	/* Print the ETrackID */
   1054 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1055 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1056 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1057 
   1058 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1059 		error = ixgbe_allocate_msix(adapter, pa);
   1060 		if (error) {
   1061 			/* Free allocated queue structures first */
   1062 			ixgbe_free_transmit_structures(adapter);
   1063 			ixgbe_free_receive_structures(adapter);
   1064 			free(adapter->queues, M_DEVBUF);
   1065 
   1066 			/* Fallback to legacy interrupt */
   1067 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1068 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1069 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1070 			adapter->num_queues = 1;
   1071 
   1072 			/* Allocate our TX/RX Queues again */
   1073 			if (ixgbe_allocate_queues(adapter)) {
   1074 				error = ENOMEM;
   1075 				goto err_out;
   1076 			}
   1077 		}
   1078 	}
   1079 	/* Recovery mode */
   1080 	switch (adapter->hw.mac.type) {
   1081 	case ixgbe_mac_X550:
   1082 	case ixgbe_mac_X550EM_x:
   1083 	case ixgbe_mac_X550EM_a:
   1084 		/* >= 2.00 */
   1085 		if (hw->eeprom.nvm_image_ver_high >= 2) {
   1086 			adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
   1087 			adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
   1088 		}
   1089 		break;
   1090 	default:
   1091 		break;
   1092 	}
   1093 
   1094 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1095 		error = ixgbe_allocate_legacy(adapter, pa);
   1096 	if (error)
   1097 		goto err_late;
   1098 
   1099 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1100 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1101 	    ixgbe_handle_link, adapter);
   1102 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1103 	    ixgbe_handle_mod, adapter);
   1104 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1105 	    ixgbe_handle_msf, adapter);
   1106 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1107 	    ixgbe_handle_phy, adapter);
   1108 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1109 		adapter->fdir_si =
   1110 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1111 			ixgbe_reinit_fdir, adapter);
   1112 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1113 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1114 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1115 		&& (adapter->fdir_si == NULL))) {
   1116 		aprint_error_dev(dev,
   1117 		    "could not establish software interrupts ()\n");
   1118 		goto err_out;
   1119 	}
   1120 
   1121 	error = ixgbe_start_hw(hw);
   1122 	switch (error) {
   1123 	case IXGBE_ERR_EEPROM_VERSION:
   1124 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1125 		    "LOM.  Please be aware there may be issues associated "
   1126 		    "with your hardware.\nIf you are experiencing problems "
   1127 		    "please contact your Intel or hardware representative "
   1128 		    "who provided you with this hardware.\n");
   1129 		break;
   1130 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1131 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1132 		error = EIO;
   1133 		goto err_late;
   1134 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1135 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1136 		/* falls thru */
   1137 	default:
   1138 		break;
   1139 	}
   1140 
   1141 	/* Setup OS specific network interface */
   1142 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1143 		goto err_late;
   1144 
   1145 	/*
   1146 	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
   1147 	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
   1148 	 */
   1149 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1150 		uint16_t id1, id2;
   1151 		int oui, model, rev;
   1152 		const char *descr;
   1153 
   1154 		id1 = hw->phy.id >> 16;
   1155 		id2 = hw->phy.id & 0xffff;
   1156 		oui = MII_OUI(id1, id2);
   1157 		model = MII_MODEL(id2);
   1158 		rev = MII_REV(id2);
   1159 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1160 			aprint_normal_dev(dev,
   1161 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1162 			    descr, oui, model, rev);
   1163 		else
   1164 			aprint_normal_dev(dev,
   1165 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1166 			    oui, model, rev);
   1167 	}
   1168 
   1169 	/* Enable the optics for 82599 SFP+ fiber */
   1170 	ixgbe_enable_tx_laser(hw);
   1171 
   1172 	/* Enable power to the phy. */
   1173 	ixgbe_set_phy_power(hw, TRUE);
   1174 
   1175 	/* Initialize statistics */
   1176 	ixgbe_update_stats_counters(adapter);
   1177 
   1178 	/* Check PCIE slot type/speed/width */
   1179 	ixgbe_get_slot_info(adapter);
   1180 
   1181 	/*
   1182 	 * Do time init and sysctl init here, but
   1183 	 * only on the first port of a bypass adapter.
   1184 	 */
   1185 	ixgbe_bypass_init(adapter);
   1186 
   1187 	/* Set an initial dmac value */
   1188 	adapter->dmac = 0;
   1189 	/* Set initial advertised speeds (if applicable) */
   1190 	adapter->advertise = ixgbe_get_advertise(adapter);
   1191 
   1192 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1193 		ixgbe_define_iov_schemas(dev, &error);
   1194 
   1195 	/* Add sysctls */
   1196 	ixgbe_add_device_sysctls(adapter);
   1197 	ixgbe_add_hw_stats(adapter);
   1198 
   1199 	/* For Netmap */
   1200 	adapter->init_locked = ixgbe_init_locked;
   1201 	adapter->stop_locked = ixgbe_stop;
   1202 
   1203 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1204 		ixgbe_netmap_attach(adapter);
   1205 
   1206 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1207 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1208 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1209 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1210 
   1211 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1212 		pmf_class_network_register(dev, adapter->ifp);
   1213 	else
   1214 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1215 
   1216 	/* Init recovery mode timer and state variable */
   1217 	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
   1218 		adapter->recovery_mode = 0;
   1219 
   1220 		/* Set up the timer callout */
   1221 		callout_init(&adapter->recovery_mode_timer,
   1222 		    IXGBE_CALLOUT_FLAGS);
   1223 
   1224 		/* Start the task */
   1225 		callout_reset(&adapter->recovery_mode_timer, hz,
   1226 		    ixgbe_recovery_mode_timer, adapter);
   1227 	}
   1228 
   1229 	INIT_DEBUGOUT("ixgbe_attach: end");
   1230 	adapter->osdep.attached = true;
   1231 
   1232 	return;
   1233 
   1234 err_late:
   1235 	ixgbe_free_transmit_structures(adapter);
   1236 	ixgbe_free_receive_structures(adapter);
   1237 	free(adapter->queues, M_DEVBUF);
   1238 err_out:
   1239 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1240 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1241 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1242 	ixgbe_free_softint(adapter);
   1243 	ixgbe_free_pci_resources(adapter);
   1244 	if (adapter->mta != NULL)
   1245 		free(adapter->mta, M_DEVBUF);
   1246 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1247 
   1248 	return;
   1249 } /* ixgbe_attach */
   1250 
   1251 /************************************************************************
   1252  * ixgbe_check_wol_support
   1253  *
   1254  *   Checks whether the adapter's ports are capable of
   1255  *   Wake On LAN by reading the adapter's NVM.
   1256  *
   1257  *   Sets each port's hw->wol_enabled value depending
   1258  *   on the value read here.
   1259  ************************************************************************/
   1260 static void
   1261 ixgbe_check_wol_support(struct adapter *adapter)
   1262 {
   1263 	struct ixgbe_hw *hw = &adapter->hw;
   1264 	u16             dev_caps = 0;
   1265 
   1266 	/* Find out WoL support for port */
   1267 	adapter->wol_support = hw->wol_enabled = 0;
   1268 	ixgbe_get_device_caps(hw, &dev_caps);
   1269 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1270 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1271 	     hw->bus.func == 0))
   1272 		adapter->wol_support = hw->wol_enabled = 1;
   1273 
   1274 	/* Save initial wake up filter configuration */
   1275 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1276 
   1277 	return;
   1278 } /* ixgbe_check_wol_support */
   1279 
   1280 /************************************************************************
   1281  * ixgbe_setup_interface
   1282  *
   1283  *   Setup networking device structure and register an interface.
   1284  ************************************************************************/
   1285 static int
   1286 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1287 {
   1288 	struct ethercom *ec = &adapter->osdep.ec;
   1289 	struct ifnet   *ifp;
   1290 	int rv;
   1291 
   1292 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1293 
   1294 	ifp = adapter->ifp = &ec->ec_if;
   1295 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1296 	ifp->if_baudrate = IF_Gbps(10);
   1297 	ifp->if_init = ixgbe_init;
   1298 	ifp->if_stop = ixgbe_ifstop;
   1299 	ifp->if_softc = adapter;
   1300 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1301 #ifdef IXGBE_MPSAFE
   1302 	ifp->if_extflags = IFEF_MPSAFE;
   1303 #endif
   1304 	ifp->if_ioctl = ixgbe_ioctl;
   1305 #if __FreeBSD_version >= 1100045
   1306 	/* TSO parameters */
   1307 	ifp->if_hw_tsomax = 65518;
   1308 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1309 	ifp->if_hw_tsomaxsegsize = 2048;
   1310 #endif
   1311 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1312 #if 0
   1313 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1314 #endif
   1315 	} else {
   1316 		ifp->if_transmit = ixgbe_mq_start;
   1317 #if 0
   1318 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1319 #endif
   1320 	}
   1321 	ifp->if_start = ixgbe_legacy_start;
   1322 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1323 	IFQ_SET_READY(&ifp->if_snd);
   1324 
   1325 	rv = if_initialize(ifp);
   1326 	if (rv != 0) {
   1327 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1328 		return rv;
   1329 	}
   1330 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1331 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1332 	/*
   1333 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1334 	 * used.
   1335 	 */
   1336 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1337 
   1338 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1339 
   1340 	/*
   1341 	 * Tell the upper layer(s) we support long frames.
   1342 	 */
   1343 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1344 
   1345 	/* Set capability flags */
   1346 	ifp->if_capabilities |= IFCAP_RXCSUM
   1347 			     |  IFCAP_TXCSUM
   1348 			     |  IFCAP_TSOv4
   1349 			     |  IFCAP_TSOv6;
   1350 	ifp->if_capenable = 0;
   1351 
   1352 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1353 	    		    |  ETHERCAP_VLAN_HWCSUM
   1354 	    		    |  ETHERCAP_JUMBO_MTU
   1355 	    		    |  ETHERCAP_VLAN_MTU;
   1356 
   1357 	/* Enable the above capabilities by default */
   1358 	ec->ec_capenable = ec->ec_capabilities;
   1359 
   1360 	/*
   1361 	 * Don't turn this on by default, if vlans are
   1362 	 * created on another pseudo device (eg. lagg)
   1363 	 * then vlan events are not passed thru, breaking
   1364 	 * operation, but with HW FILTER off it works. If
   1365 	 * using vlans directly on the ixgbe driver you can
   1366 	 * enable this and get full hardware tag filtering.
   1367 	 */
   1368 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1369 
   1370 	/*
   1371 	 * Specify the media types supported by this adapter and register
   1372 	 * callbacks to update media and link information
   1373 	 */
   1374 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1375 	    ixgbe_media_status);
   1376 
   1377 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1378 	ixgbe_add_media_types(adapter);
   1379 
   1380 	/* Set autoselect media by default */
   1381 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1382 
   1383 	if_register(ifp);
   1384 
   1385 	return (0);
   1386 } /* ixgbe_setup_interface */
   1387 
   1388 /************************************************************************
   1389  * ixgbe_add_media_types
   1390  ************************************************************************/
   1391 static void
   1392 ixgbe_add_media_types(struct adapter *adapter)
   1393 {
   1394 	struct ixgbe_hw *hw = &adapter->hw;
   1395 	device_t        dev = adapter->dev;
   1396 	u64             layer;
   1397 
   1398 	layer = adapter->phy_layer;
   1399 
   1400 #define	ADD(mm, dd)							\
   1401 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1402 
   1403 	ADD(IFM_NONE, 0);
   1404 
   1405 	/* Media types with matching NetBSD media defines */
   1406 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1407 		ADD(IFM_10G_T | IFM_FDX, 0);
   1408 	}
   1409 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1410 		ADD(IFM_1000_T | IFM_FDX, 0);
   1411 	}
   1412 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1413 		ADD(IFM_100_TX | IFM_FDX, 0);
   1414 	}
   1415 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1416 		ADD(IFM_10_T | IFM_FDX, 0);
   1417 	}
   1418 
   1419 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1420 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1421 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1422 	}
   1423 
   1424 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1425 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1426 		if (hw->phy.multispeed_fiber) {
   1427 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1428 		}
   1429 	}
   1430 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1431 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1432 		if (hw->phy.multispeed_fiber) {
   1433 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1434 		}
   1435 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1436 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1437 	}
   1438 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1439 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1440 	}
   1441 
   1442 #ifdef IFM_ETH_XTYPE
   1443 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1444 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1445 	}
   1446 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1447 		ADD(AIFM_10G_KX4 | IFM_FDX, 0);
   1448 	}
   1449 #else
   1450 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1451 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1452 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1453 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1454 	}
   1455 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1456 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1457 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1458 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1459 	}
   1460 #endif
   1461 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1462 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1463 	}
   1464 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1465 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1466 	}
   1467 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1468 		ADD(IFM_2500_T | IFM_FDX, 0);
   1469 	}
   1470 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1471 		ADD(IFM_5000_T | IFM_FDX, 0);
   1472 	}
   1473 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1474 		device_printf(dev, "Media supported: 1000baseBX\n");
   1475 	/* XXX no ifmedia_set? */
   1476 
   1477 	ADD(IFM_AUTO, 0);
   1478 
   1479 #undef ADD
   1480 } /* ixgbe_add_media_types */
   1481 
   1482 /************************************************************************
   1483  * ixgbe_is_sfp
   1484  ************************************************************************/
   1485 static inline bool
   1486 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1487 {
   1488 	switch (hw->mac.type) {
   1489 	case ixgbe_mac_82598EB:
   1490 		if (hw->phy.type == ixgbe_phy_nl)
   1491 			return (TRUE);
   1492 		return (FALSE);
   1493 	case ixgbe_mac_82599EB:
   1494 		switch (hw->mac.ops.get_media_type(hw)) {
   1495 		case ixgbe_media_type_fiber:
   1496 		case ixgbe_media_type_fiber_qsfp:
   1497 			return (TRUE);
   1498 		default:
   1499 			return (FALSE);
   1500 		}
   1501 	case ixgbe_mac_X550EM_x:
   1502 	case ixgbe_mac_X550EM_a:
   1503 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1504 			return (TRUE);
   1505 		return (FALSE);
   1506 	default:
   1507 		return (FALSE);
   1508 	}
   1509 } /* ixgbe_is_sfp */
   1510 
   1511 /************************************************************************
   1512  * ixgbe_config_link
   1513  ************************************************************************/
   1514 static void
   1515 ixgbe_config_link(struct adapter *adapter)
   1516 {
   1517 	struct ixgbe_hw *hw = &adapter->hw;
   1518 	u32             autoneg, err = 0;
   1519 	bool            sfp, negotiate = false;
   1520 
   1521 	sfp = ixgbe_is_sfp(hw);
   1522 
   1523 	if (sfp) {
   1524 		if (hw->phy.multispeed_fiber) {
   1525 			ixgbe_enable_tx_laser(hw);
   1526 			kpreempt_disable();
   1527 			softint_schedule(adapter->msf_si);
   1528 			kpreempt_enable();
   1529 		}
   1530 		kpreempt_disable();
   1531 		softint_schedule(adapter->mod_si);
   1532 		kpreempt_enable();
   1533 	} else {
   1534 		struct ifmedia  *ifm = &adapter->media;
   1535 
   1536 		if (hw->mac.ops.check_link)
   1537 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1538 			    &adapter->link_up, FALSE);
   1539 		if (err)
   1540 			return;
   1541 
   1542 		/*
   1543 		 * Check if it's the first call. If it's the first call,
   1544 		 * get value for auto negotiation.
   1545 		 */
   1546 		autoneg = hw->phy.autoneg_advertised;
   1547 		if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
   1548 		    && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
   1549                 	err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1550 			    &negotiate);
   1551 		if (err)
   1552 			return;
   1553 		if (hw->mac.ops.setup_link)
   1554                 	err = hw->mac.ops.setup_link(hw, autoneg,
   1555 			    adapter->link_up);
   1556 	}
   1557 
   1558 } /* ixgbe_config_link */
   1559 
   1560 /************************************************************************
   1561  * ixgbe_update_stats_counters - Update board statistics counters.
   1562  ************************************************************************/
   1563 static void
   1564 ixgbe_update_stats_counters(struct adapter *adapter)
   1565 {
   1566 	struct ifnet          *ifp = adapter->ifp;
   1567 	struct ixgbe_hw       *hw = &adapter->hw;
   1568 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1569 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1570 	u64                   total_missed_rx = 0;
   1571 	uint64_t              crcerrs, rlec;
   1572 	int		      i, j;
   1573 
   1574 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1575 	stats->crcerrs.ev_count += crcerrs;
   1576 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1577 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1578 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1579 	if (hw->mac.type == ixgbe_mac_X550)
   1580 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1581 
   1582 	/* 16 registers */
   1583 	for (i = 0; i < __arraycount(stats->qprc); i++) {
   1584 		j = i % adapter->num_queues;
   1585 
   1586 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1587 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1588 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1589 			stats->qprdc[j].ev_count
   1590 			    += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1591 		}
   1592 	}
   1593 
   1594 	/* 8 registers */
   1595 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
   1596 		uint32_t mp;
   1597 
   1598 		/* MPC */
   1599 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1600 		/* global total per queue */
   1601 		stats->mpc[i].ev_count += mp;
   1602 		/* running comprehensive total for stats display */
   1603 		total_missed_rx += mp;
   1604 
   1605 		if (hw->mac.type == ixgbe_mac_82598EB)
   1606 			stats->rnbc[i].ev_count
   1607 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1608 
   1609 		stats->pxontxc[i].ev_count
   1610 		    += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
   1611 		stats->pxofftxc[i].ev_count
   1612 		    += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
   1613 		if (hw->mac.type >= ixgbe_mac_82599EB) {
   1614 			stats->pxonrxc[i].ev_count
   1615 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
   1616 			stats->pxoffrxc[i].ev_count
   1617 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
   1618 			stats->pxon2offc[i].ev_count
   1619 			    += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
   1620 		} else {
   1621 			stats->pxonrxc[i].ev_count
   1622 			    += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
   1623 			stats->pxoffrxc[i].ev_count
   1624 			    += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
   1625 		}
   1626 	}
   1627 	stats->mpctotal.ev_count += total_missed_rx;
   1628 
   1629 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1630 	if ((adapter->link_active == TRUE)
   1631 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1632 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1633 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1634 	}
   1635 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1636 	stats->rlec.ev_count += rlec;
   1637 
   1638 	/* Hardware workaround, gprc counts missed packets */
   1639 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1640 
   1641 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1642 	stats->lxontxc.ev_count += lxon;
   1643 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1644 	stats->lxofftxc.ev_count += lxoff;
   1645 	total = lxon + lxoff;
   1646 
   1647 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1648 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1649 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1650 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1651 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1652 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1653 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1654 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1655 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1656 	} else {
   1657 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1658 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1659 		/* 82598 only has a counter in the high register */
   1660 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1661 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1662 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1663 	}
   1664 
   1665 	/*
   1666 	 * Workaround: mprc hardware is incorrectly counting
   1667 	 * broadcasts, so for now we subtract those.
   1668 	 */
   1669 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1670 	stats->bprc.ev_count += bprc;
   1671 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1672 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1673 
   1674 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1675 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1676 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1677 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1678 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1679 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1680 
   1681 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1682 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1683 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1684 
   1685 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1686 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1687 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1688 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1689 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1690 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1691 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1692 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1693 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1694 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1695 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1696 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1697 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1698 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1699 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1700 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1701 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1702 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1703 	/* Only read FCOE on 82599 */
   1704 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1705 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1706 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1707 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1708 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1709 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1710 	}
   1711 
   1712 	/* Fill out the OS statistics structure */
   1713 	/*
   1714 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1715 	 * adapter->stats counters. It's required to make ifconfig -z
   1716 	 * (SOICZIFDATA) work.
   1717 	 */
   1718 	ifp->if_collisions = 0;
   1719 
   1720 	/* Rx Errors */
   1721 	ifp->if_iqdrops += total_missed_rx;
   1722 	ifp->if_ierrors += crcerrs + rlec;
   1723 } /* ixgbe_update_stats_counters */
   1724 
   1725 /************************************************************************
   1726  * ixgbe_add_hw_stats
   1727  *
   1728  *   Add sysctl variables, one per statistic, to the system.
   1729  ************************************************************************/
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	const char *xname = device_xname(dev);
	int i;

	/*
	 * Driver Statistics.
	 * The evnamebuf/namebuf strings passed to evcnt_attach_dynamic()
	 * must stay valid for the lifetime of the event counter, which is
	 * why they live in the adapter/stats structures rather than on the
	 * stack.
	 */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");
	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
	    NULL, xname, "Link softint");
	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
	    NULL, xname, "module softint");
	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
	    NULL, xname, "multimode softint");
	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
	    NULL, xname, "external PHY softint");

	/* Max number of traffic class is 8 */
	KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
	/* Per-traffic-class counters, one sub-name per TC */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		snprintf(adapter->tcs[i].evnamebuf,
		    sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
		    xname, i);
		if (i < __arraycount(stats->mpc)) {
			evcnt_attach_dynamic(&stats->mpc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "RX Missed Packet Count");
			/* RNBC only exists on 82598 */
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_attach_dynamic(&stats->rnbc[i],
				    EVCNT_TYPE_MISC, NULL,
				    adapter->tcs[i].evnamebuf,
				    "Receive No Buffers");
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_attach_dynamic(&stats->pxontxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "pxontxc");
			evcnt_attach_dynamic(&stats->pxonrxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "pxonrxc");
			evcnt_attach_dynamic(&stats->pxofftxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "pxofftxc");
			evcnt_attach_dynamic(&stats->pxoffrxc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
			    "pxoffrxc");
			/* XON-to-XOFF count only exists on 82599 and newer */
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_attach_dynamic(&stats->pxon2offc[i],
				    EVCNT_TYPE_MISC, NULL,
				    adapter->tcs[i].evnamebuf,
			    "pxon2offc");
		}
	}

	/* Per-queue sysctl nodes and event counters */
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixgbe_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
			ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue HW counter registers (fewer than num_queues on
		 * some configurations, hence the bounds check) */
		if (i < __arraycount(stats->qprc)) {
			evcnt_attach_dynamic(&stats->qprc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qprc");
			evcnt_attach_dynamic(&stats->qptc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qptc");
			evcnt_attach_dynamic(&stats->qbrc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qbrc");
			evcnt_attach_dynamic(&stats->qbtc[i],
			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
			    "qbtc");
			/* QPRDC only exists on 82599 and newer */
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_attach_dynamic(&stats->qprdc[i],
				    EVCNT_TYPE_MISC, NULL,
				    adapter->queues[i].evnamebuf, "qprdc");
		}

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
				CTLFLAG_RD, &lro->lro_queued, 0,
				"LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
				CTLFLAG_RD, &lro->lro_flushed, 0,
				"LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");
	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Interrupt conditions zero");
	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Legacy interrupts");

	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "CRC Errors");
	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Illegal Byte Errors");
	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Byte Errors");
	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Short Packets Discarded");
	/* MBSDC exists on X550 and newer only */
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
		    stats->namebuf, "Bad SFD");
	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Packets Missed");
	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Local Faults");
	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "MAC Remote Faults");
	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Receive Length Errors");
	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XON Transmitted");
	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XON Received");
	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XOFF Transmitted");
	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Link XOFF Received");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Octets Received");
	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Octets Received");
	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Packets Received");
	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Packets Received");
	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Broadcast Packets Received");
	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "64 byte frames received ");
	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "65-127 byte frames received");
	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "128-255 byte frames received");
	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "256-511 byte frames received");
	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "512-1023 byte frames received");
	/* NOTE(review): label says "1023-1522" where the tx counterpart
	 * says "1024-1522" — looks like a typo in the label text */
	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "1023-1522 byte frames received");
	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Receive Undersized");
	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Fragmented Packets Received ");
	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Oversized Packets Received");
	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Received Jabber");
	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Management Packets Received");
	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Management Packets Dropped");
	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Checksum Errors");

	/* Packet Transmission Stats */
	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Octets Transmitted");
	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Total Packets Transmitted");
	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Broadcast Packets Transmitted");
	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Multicast Packets Transmitted");
	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "Management Packets Transmitted");
	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "64 byte frames transmitted ");
	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "65-127 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "128-255 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "256-511 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "512-1023 byte frames transmitted");
	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "1024-1522 byte frames transmitted");
} /* ixgbe_add_hw_stats */
   2049 
/************************************************************************
 * ixgbe_clear_evcnt - Reset all driver/hardware event counters to zero.
 *
 *   Used to implement "ifconfig -z" (SIOCZIFDATA).  The set of counters
 *   cleared here must mirror the set attached in ixgbe_add_hw_stats().
 ************************************************************************/
static void
ixgbe_clear_evcnt(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	int i;

	/* Driver statistics */
	adapter->efbig_tx_dma_setup.ev_count = 0;
	adapter->mbuf_defrag_failed.ev_count = 0;
	adapter->efbig2_tx_dma_setup.ev_count = 0;
	adapter->einval_tx_dma_setup.ev_count = 0;
	adapter->other_tx_dma_setup.ev_count = 0;
	adapter->eagain_tx_dma_setup.ev_count = 0;
	adapter->enomem_tx_dma_setup.ev_count = 0;
	adapter->tso_err.ev_count = 0;
	adapter->watchdog_events.ev_count = 0;
	adapter->link_irq.ev_count = 0;
	adapter->link_sicount.ev_count = 0;
	adapter->mod_sicount.ev_count = 0;
	adapter->msf_sicount.ev_count = 0;
	adapter->phy_sicount.ev_count = 0;

	/* Per-traffic-class counters; bounds checks match the attach side */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if (i < __arraycount(stats->mpc)) {
			stats->mpc[i].ev_count = 0;
			if (hw->mac.type == ixgbe_mac_82598EB)
				stats->rnbc[i].ev_count = 0;
		}
		if (i < __arraycount(stats->pxontxc)) {
			stats->pxontxc[i].ev_count = 0;
			stats->pxonrxc[i].ev_count = 0;
			stats->pxofftxc[i].ev_count = 0;
			stats->pxoffrxc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->pxon2offc[i].ev_count = 0;
		}
	}

	/* Per-queue counters, both evcnt and plain per-ring tallies */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		adapter->queues[i].irqs.ev_count = 0;
		adapter->queues[i].handleq.ev_count = 0;
		adapter->queues[i].req.ev_count = 0;
		txr->no_desc_avail.ev_count = 0;
		txr->total_packets.ev_count = 0;
		txr->tso_tx.ev_count = 0;
#ifndef IXGBE_LEGACY_TX
		txr->pcq_drops.ev_count = 0;
#endif
		txr->q_efbig_tx_dma_setup = 0;
		txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			stats->qprc[i].ev_count = 0;
			stats->qptc[i].ev_count = 0;
			stats->qbrc[i].ev_count = 0;
			stats->qbtc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->qprdc[i].ev_count = 0;
		}

		rxr->rx_packets.ev_count = 0;
		rxr->rx_bytes.ev_count = 0;
		rxr->rx_copies.ev_count = 0;
		rxr->no_jmbuf.ev_count = 0;
		rxr->rx_discarded.ev_count = 0;
	}
	/* MAC statistics */
	stats->ipcs.ev_count = 0;
	stats->l4cs.ev_count = 0;
	stats->ipcs_bad.ev_count = 0;
	stats->l4cs_bad.ev_count = 0;
	stats->intzero.ev_count = 0;
	stats->legint.ev_count = 0;
	stats->crcerrs.ev_count = 0;
	stats->illerrc.ev_count = 0;
	stats->errbc.ev_count = 0;
	stats->mspdc.ev_count = 0;
	/* Cleared unconditionally; harmless on pre-X550 where it's unused */
	stats->mbsdc.ev_count = 0;
	stats->mpctotal.ev_count = 0;
	stats->mlfc.ev_count = 0;
	stats->mrfc.ev_count = 0;
	stats->rlec.ev_count = 0;
	stats->lxontxc.ev_count = 0;
	stats->lxonrxc.ev_count = 0;
	stats->lxofftxc.ev_count = 0;
	stats->lxoffrxc.ev_count = 0;

	/* Packet Reception Stats */
	stats->tor.ev_count = 0;
	stats->gorc.ev_count = 0;
	stats->tpr.ev_count = 0;
	stats->gprc.ev_count = 0;
	stats->mprc.ev_count = 0;
	stats->bprc.ev_count = 0;
	stats->prc64.ev_count = 0;
	stats->prc127.ev_count = 0;
	stats->prc255.ev_count = 0;
	stats->prc511.ev_count = 0;
	stats->prc1023.ev_count = 0;
	stats->prc1522.ev_count = 0;
	stats->ruc.ev_count = 0;
	stats->rfc.ev_count = 0;
	stats->roc.ev_count = 0;
	stats->rjc.ev_count = 0;
	stats->mngprc.ev_count = 0;
	stats->mngpdc.ev_count = 0;
	stats->xec.ev_count = 0;

	/* Packet Transmission Stats */
	stats->gotc.ev_count = 0;
	stats->tpt.ev_count = 0;
	stats->gptc.ev_count = 0;
	stats->bptc.ev_count = 0;
	stats->mptc.ev_count = 0;
	stats->mngptc.ev_count = 0;
	stats->ptc64.ev_count = 0;
	stats->ptc127.ev_count = 0;
	stats->ptc255.ev_count = 0;
	stats->ptc511.ev_count = 0;
	stats->ptc1023.ev_count = 0;
	stats->ptc1522.ev_count = 0;
}
   2180 
   2181 /************************************************************************
   2182  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2183  *
   2184  *   Retrieves the TDH value from the hardware
   2185  ************************************************************************/
   2186 static int
   2187 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2188 {
   2189 	struct sysctlnode node = *rnode;
   2190 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2191 	struct adapter *adapter;
   2192 	uint32_t val;
   2193 
   2194 	if (!txr)
   2195 		return (0);
   2196 
   2197 	adapter = txr->adapter;
   2198 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2199 		return (EPERM);
   2200 
   2201 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
   2202 	node.sysctl_data = &val;
   2203 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2204 } /* ixgbe_sysctl_tdh_handler */
   2205 
   2206 /************************************************************************
   2207  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2208  *
   2209  *   Retrieves the TDT value from the hardware
   2210  ************************************************************************/
   2211 static int
   2212 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2213 {
   2214 	struct sysctlnode node = *rnode;
   2215 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2216 	struct adapter *adapter;
   2217 	uint32_t val;
   2218 
   2219 	if (!txr)
   2220 		return (0);
   2221 
   2222 	adapter = txr->adapter;
   2223 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2224 		return (EPERM);
   2225 
   2226 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
   2227 	node.sysctl_data = &val;
   2228 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2229 } /* ixgbe_sysctl_tdt_handler */
   2230 
   2231 /************************************************************************
   2232  * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
   2233  * handler function
   2234  *
   2235  *   Retrieves the next_to_check value
   2236  ************************************************************************/
   2237 static int
   2238 ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2239 {
   2240 	struct sysctlnode node = *rnode;
   2241 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2242 	struct adapter *adapter;
   2243 	uint32_t val;
   2244 
   2245 	if (!rxr)
   2246 		return (0);
   2247 
   2248 	adapter = rxr->adapter;
   2249 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2250 		return (EPERM);
   2251 
   2252 	val = rxr->next_to_check;
   2253 	node.sysctl_data = &val;
   2254 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2255 } /* ixgbe_sysctl_next_to_check_handler */
   2256 
   2257 /************************************************************************
   2258  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2259  *
   2260  *   Retrieves the RDH value from the hardware
   2261  ************************************************************************/
   2262 static int
   2263 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2264 {
   2265 	struct sysctlnode node = *rnode;
   2266 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2267 	struct adapter *adapter;
   2268 	uint32_t val;
   2269 
   2270 	if (!rxr)
   2271 		return (0);
   2272 
   2273 	adapter = rxr->adapter;
   2274 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2275 		return (EPERM);
   2276 
   2277 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
   2278 	node.sysctl_data = &val;
   2279 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2280 } /* ixgbe_sysctl_rdh_handler */
   2281 
   2282 /************************************************************************
   2283  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2284  *
   2285  *   Retrieves the RDT value from the hardware
   2286  ************************************************************************/
   2287 static int
   2288 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2289 {
   2290 	struct sysctlnode node = *rnode;
   2291 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2292 	struct adapter *adapter;
   2293 	uint32_t val;
   2294 
   2295 	if (!rxr)
   2296 		return (0);
   2297 
   2298 	adapter = rxr->adapter;
   2299 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   2300 		return (EPERM);
   2301 
   2302 	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
   2303 	node.sysctl_data = &val;
   2304 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2305 } /* ixgbe_sysctl_rdt_handler */
   2306 
   2307 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2308 /************************************************************************
   2309  * ixgbe_register_vlan
   2310  *
   2311  *   Run via vlan config EVENT, it enables us to use the
   2312  *   HW Filter table since we can get the vlan id. This
   2313  *   just creates the entry in the soft version of the
   2314  *   VFTA, init will repopulate the real table.
   2315  ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* The VFTA is a bit array of 32-bit words: word = vtag >> 5, bit = vtag & 0x1f. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated soft VFTA into the hardware. */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
   2335 
   2336 /************************************************************************
   2337  * ixgbe_unregister_vlan
   2338  *
   2339  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2340  ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the tag's bit in the soft VFTA (word = vtag >> 5, bit = vtag & 0x1f). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
   2361 #endif
   2362 
   2363 static void
   2364 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2365 {
   2366 	struct ethercom *ec = &adapter->osdep.ec;
   2367 	struct ixgbe_hw *hw = &adapter->hw;
   2368 	struct rx_ring	*rxr;
   2369 	int             i;
   2370 	u32		ctrl;
   2371 
   2372 
   2373 	/*
   2374 	 * We get here thru init_locked, meaning
   2375 	 * a soft reset, this has already cleared
   2376 	 * the VFTA and other state, so if there
   2377 	 * have been no vlan's registered do nothing.
   2378 	 */
   2379 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2380 		return;
   2381 
   2382 	/* Setup the queues for vlans */
   2383 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2384 		for (i = 0; i < adapter->num_queues; i++) {
   2385 			rxr = &adapter->rx_rings[i];
   2386 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2387 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2388 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2389 				ctrl |= IXGBE_RXDCTL_VME;
   2390 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2391 			}
   2392 			rxr->vtag_strip = TRUE;
   2393 		}
   2394 	}
   2395 
   2396 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2397 		return;
   2398 	/*
   2399 	 * A soft reset zero's out the VFTA, so
   2400 	 * we need to repopulate it now.
   2401 	 */
   2402 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2403 		if (adapter->shadow_vfta[i] != 0)
   2404 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2405 			    adapter->shadow_vfta[i]);
   2406 
   2407 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2408 	/* Enable the Filter Table if enabled */
   2409 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2410 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2411 		ctrl |= IXGBE_VLNCTRL_VFE;
   2412 	}
   2413 	if (hw->mac.type == ixgbe_mac_82598EB)
   2414 		ctrl |= IXGBE_VLNCTRL_VME;
   2415 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2416 } /* ixgbe_setup_vlan_hw_support */
   2417 
   2418 /************************************************************************
   2419  * ixgbe_get_slot_info
   2420  *
   2421  *   Get the width and transaction speed of
   2422  *   the slot this adapter is plugged into.
   2423  ************************************************************************/
   2424 static void
   2425 ixgbe_get_slot_info(struct adapter *adapter)
   2426 {
   2427 	device_t		dev = adapter->dev;
   2428 	struct ixgbe_hw		*hw = &adapter->hw;
   2429 	u32                   offset;
   2430 	u16			link;
   2431 	int                   bus_info_valid = TRUE;
   2432 
   2433 	/* Some devices are behind an internal bridge */
   2434 	switch (hw->device_id) {
   2435 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2436 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2437 		goto get_parent_info;
   2438 	default:
   2439 		break;
   2440 	}
   2441 
   2442 	ixgbe_get_bus_info(hw);
   2443 
   2444 	/*
   2445 	 * Some devices don't use PCI-E, but there is no need
   2446 	 * to display "Unknown" for bus speed and width.
   2447 	 */
   2448 	switch (hw->mac.type) {
   2449 	case ixgbe_mac_X550EM_x:
   2450 	case ixgbe_mac_X550EM_a:
   2451 		return;
   2452 	default:
   2453 		goto display;
   2454 	}
   2455 
   2456 get_parent_info:
   2457 	/*
   2458 	 * For the Quad port adapter we need to parse back
   2459 	 * up the PCI tree to find the speed of the expansion
   2460 	 * slot into which this adapter is plugged. A bit more work.
   2461 	 */
   2462 	dev = device_parent(device_parent(dev));
   2463 #if 0
   2464 #ifdef IXGBE_DEBUG
   2465 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2466 	    pci_get_slot(dev), pci_get_function(dev));
   2467 #endif
   2468 	dev = device_parent(device_parent(dev));
   2469 #ifdef IXGBE_DEBUG
   2470 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2471 	    pci_get_slot(dev), pci_get_function(dev));
   2472 #endif
   2473 #endif
   2474 	/* Now get the PCI Express Capabilities offset */
   2475 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2476 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2477 		/*
   2478 		 * Hmm...can't get PCI-Express capabilities.
   2479 		 * Falling back to default method.
   2480 		 */
   2481 		bus_info_valid = FALSE;
   2482 		ixgbe_get_bus_info(hw);
   2483 		goto display;
   2484 	}
   2485 	/* ...and read the Link Status Register */
   2486 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2487 	    offset + PCIE_LCSR) >> 16;
   2488 	ixgbe_set_pci_config_data_generic(hw, link);
   2489 
   2490 display:
   2491 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2492 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2493 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2494 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2495 	     "Unknown"),
   2496 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2497 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2498 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2499 	     "Unknown"));
   2500 
   2501 	if (bus_info_valid) {
   2502 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2503 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2504 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2505 			device_printf(dev, "PCI-Express bandwidth available"
   2506 			    " for this card\n     is not sufficient for"
   2507 			    " optimal performance.\n");
   2508 			device_printf(dev, "For optimal performance a x8 "
   2509 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2510 		}
   2511 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2512 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2513 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2514 			device_printf(dev, "PCI-Express bandwidth available"
   2515 			    " for this card\n     is not sufficient for"
   2516 			    " optimal performance.\n");
   2517 			device_printf(dev, "For optimal performance a x8 "
   2518 			    "PCIE Gen3 slot is required.\n");
   2519 		}
   2520 	} else
   2521 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2522 
   2523 	return;
   2524 } /* ixgbe_get_slot_info */
   2525 
   2526 /************************************************************************
   2527  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2528  ************************************************************************/
   2529 static inline void
   2530 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2531 {
   2532 	struct ixgbe_hw *hw = &adapter->hw;
   2533 	struct ix_queue *que = &adapter->queues[vector];
   2534 	u64             queue = (u64)(1ULL << vector);
   2535 	u32             mask;
   2536 
   2537 	mutex_enter(&que->dc_mtx);
   2538 	if (que->disabled_count > 0 && --que->disabled_count > 0)
   2539 		goto out;
   2540 
   2541 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2542 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2543 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2544 	} else {
   2545 		mask = (queue & 0xFFFFFFFF);
   2546 		if (mask)
   2547 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2548 		mask = (queue >> 32);
   2549 		if (mask)
   2550 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2551 	}
   2552 out:
   2553 	mutex_exit(&que->dc_mtx);
   2554 } /* ixgbe_enable_queue */
   2555 
   2556 /************************************************************************
   2557  * ixgbe_disable_queue_internal
   2558  ************************************************************************/
   2559 static inline void
   2560 ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
   2561 {
   2562 	struct ixgbe_hw *hw = &adapter->hw;
   2563 	struct ix_queue *que = &adapter->queues[vector];
   2564 	u64             queue = (u64)(1ULL << vector);
   2565 	u32             mask;
   2566 
   2567 	mutex_enter(&que->dc_mtx);
   2568 
   2569 	if (que->disabled_count > 0) {
   2570 		if (nestok)
   2571 			que->disabled_count++;
   2572 		goto out;
   2573 	}
   2574 	que->disabled_count++;
   2575 
   2576 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2577 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2578 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2579 	} else {
   2580 		mask = (queue & 0xFFFFFFFF);
   2581 		if (mask)
   2582 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2583 		mask = (queue >> 32);
   2584 		if (mask)
   2585 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2586 	}
   2587 out:
   2588 	mutex_exit(&que->dc_mtx);
   2589 } /* ixgbe_disable_queue_internal */
   2590 
   2591 /************************************************************************
   2592  * ixgbe_disable_queue
   2593  ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	/*
	 * nestok = true: nested disables are counted and must each be
	 * balanced by a matching ixgbe_enable_queue() call.
	 */
	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
   2600 
   2601 /************************************************************************
   2602  * ixgbe_sched_handle_que - schedule deferred packet processing
   2603  ************************************************************************/
   2604 static inline void
   2605 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
   2606 {
   2607 
   2608 	if(que->txrx_use_workqueue) {
   2609 		/*
   2610 		 * adapter->que_wq is bound to each CPU instead of
   2611 		 * each NIC queue to reduce workqueue kthread. As we
   2612 		 * should consider about interrupt affinity in this
   2613 		 * function, the workqueue kthread must be WQ_PERCPU.
   2614 		 * If create WQ_PERCPU workqueue kthread for each NIC
   2615 		 * queue, that number of created workqueue kthread is
   2616 		 * (number of used NIC queue) * (number of CPUs) =
   2617 		 * (number of CPUs) ^ 2 most often.
   2618 		 *
   2619 		 * The same NIC queue's interrupts are avoided by
   2620 		 * masking the queue's interrupt. And different
   2621 		 * NIC queue's interrupts use different struct work
   2622 		 * (que->wq_cookie). So, "enqueued flag" to avoid
   2623 		 * twice workqueue_enqueue() is not required .
   2624 		 */
   2625 		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
   2626 	} else {
   2627 		softint_schedule(que->que_si);
   2628 	}
   2629 }
   2630 
   2631 /************************************************************************
   2632  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2633  ************************************************************************/
   2634 static int
   2635 ixgbe_msix_que(void *arg)
   2636 {
   2637 	struct ix_queue	*que = arg;
   2638 	struct adapter  *adapter = que->adapter;
   2639 	struct ifnet    *ifp = adapter->ifp;
   2640 	struct tx_ring	*txr = que->txr;
   2641 	struct rx_ring	*rxr = que->rxr;
   2642 	bool		more;
   2643 	u32		newitr = 0;
   2644 
   2645 	/* Protect against spurious interrupts */
   2646 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2647 		return 0;
   2648 
   2649 	ixgbe_disable_queue(adapter, que->msix);
   2650 	++que->irqs.ev_count;
   2651 
   2652 	/*
   2653 	 * Don't change "que->txrx_use_workqueue" from this point to avoid
   2654 	 * flip-flopping softint/workqueue mode in one deferred processing.
   2655 	 */
   2656 	que->txrx_use_workqueue = adapter->txrx_use_workqueue;
   2657 
   2658 #ifdef __NetBSD__
   2659 	/* Don't run ixgbe_rxeof in interrupt context */
   2660 	more = true;
   2661 #else
   2662 	more = ixgbe_rxeof(que);
   2663 #endif
   2664 
   2665 	IXGBE_TX_LOCK(txr);
   2666 	ixgbe_txeof(txr);
   2667 	IXGBE_TX_UNLOCK(txr);
   2668 
   2669 	/* Do AIM now? */
   2670 
   2671 	if (adapter->enable_aim == false)
   2672 		goto no_calc;
   2673 	/*
   2674 	 * Do Adaptive Interrupt Moderation:
   2675 	 *  - Write out last calculated setting
   2676 	 *  - Calculate based on average size over
   2677 	 *    the last interval.
   2678 	 */
   2679 	if (que->eitr_setting)
   2680 		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
   2681 
   2682 	que->eitr_setting = 0;
   2683 
   2684 	/* Idle, do nothing */
   2685         if ((txr->bytes == 0) && (rxr->bytes == 0))
   2686                 goto no_calc;
   2687 
   2688 	if ((txr->bytes) && (txr->packets))
   2689 		newitr = txr->bytes/txr->packets;
   2690 	if ((rxr->bytes) && (rxr->packets))
   2691 		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
   2692 	newitr += 24; /* account for hardware frame, crc */
   2693 
   2694 	/* set an upper boundary */
   2695 	newitr = uimin(newitr, 3000);
   2696 
   2697 	/* Be nice to the mid range */
   2698 	if ((newitr > 300) && (newitr < 1200))
   2699 		newitr = (newitr / 3);
   2700 	else
   2701 		newitr = (newitr / 2);
   2702 
   2703 	/*
   2704 	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
   2705 	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
   2706 	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
   2707 	 * on 1G and higher.
   2708 	 */
   2709 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2710 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2711 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
   2712 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
   2713 	}
   2714 
   2715         /* save for next interrupt */
   2716         que->eitr_setting = newitr;
   2717 
   2718 	/* Reset state */
   2719 	txr->bytes = 0;
   2720 	txr->packets = 0;
   2721 	rxr->bytes = 0;
   2722 	rxr->packets = 0;
   2723 
   2724 no_calc:
   2725 	if (more)
   2726 		ixgbe_sched_handle_que(adapter, que);
   2727 	else
   2728 		ixgbe_enable_queue(adapter, que->msix);
   2729 
   2730 	return 1;
   2731 } /* ixgbe_msix_que */
   2732 
   2733 /************************************************************************
   2734  * ixgbe_media_status - Media Ioctl callback
   2735  *
   2736  *   Called whenever the user queries the status of
   2737  *   the interface using ifconfig.
   2738  ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting it. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report IFM_NONE and stop. */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Map (physical layer, negotiated link speed) to an ifmedia subtype. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
   2888 
   2889 /************************************************************************
   2890  * ixgbe_media_change - Media Ioctl callback
   2891  *
   2892  *   Called when the user changes speed/duplex using
   2893  *   media/mediopt option with ifconfig.
   2894  ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter   *adapter = ifp->if_softc;
	struct ifmedia   *ifm = &adapter->media;
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be reconfigured from userland. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the PHY supports. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		/* Leave speed at 0; link will be taken down. */
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	/* NOTE(review): setup_link()'s return value is not checked here. */
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the selection in adapter->advertise as a bitmap:
	 *   1<<0 = 100M, 1<<1 = 1G, 1<<2 = 10G,
	 *   1<<3 = 10M,  1<<4 = 2.5G, 1<<5 = 5G
	 * (0 means autoselect.)
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
   2997 
   2998 /************************************************************************
   2999  * ixgbe_set_promisc
   3000  ************************************************************************/
   3001 static void
   3002 ixgbe_set_promisc(struct adapter *adapter)
   3003 {
   3004 	struct ifnet *ifp = adapter->ifp;
   3005 	int          mcnt = 0;
   3006 	u32          rctl;
   3007 	struct ether_multi *enm;
   3008 	struct ether_multistep step;
   3009 	struct ethercom *ec = &adapter->osdep.ec;
   3010 
   3011 	KASSERT(mutex_owned(&adapter->core_mtx));
   3012 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3013 	rctl &= (~IXGBE_FCTRL_UPE);
   3014 	if (ifp->if_flags & IFF_ALLMULTI)
   3015 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   3016 	else {
   3017 		ETHER_LOCK(ec);
   3018 		ETHER_FIRST_MULTI(step, ec, enm);
   3019 		while (enm != NULL) {
   3020 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   3021 				break;
   3022 			mcnt++;
   3023 			ETHER_NEXT_MULTI(step, enm);
   3024 		}
   3025 		ETHER_UNLOCK(ec);
   3026 	}
   3027 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   3028 		rctl &= (~IXGBE_FCTRL_MPE);
   3029 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3030 
   3031 	if (ifp->if_flags & IFF_PROMISC) {
   3032 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3033 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3034 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   3035 		rctl |= IXGBE_FCTRL_MPE;
   3036 		rctl &= ~IXGBE_FCTRL_UPE;
   3037 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   3038 	}
   3039 } /* ixgbe_set_promisc */
   3040 
   3041 /************************************************************************
   3042  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   3043  ************************************************************************/
   3044 static int
   3045 ixgbe_msix_link(void *arg)
   3046 {
   3047 	struct adapter	*adapter = arg;
   3048 	struct ixgbe_hw *hw = &adapter->hw;
   3049 	u32		eicr, eicr_mask;
   3050 	s32             retval;
   3051 
   3052 	++adapter->link_irq.ev_count;
   3053 
   3054 	/* Pause other interrupts */
   3055 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   3056 
   3057 	/* First get the cause */
   3058 	/*
   3059 	 * The specifications of 82598, 82599, X540 and X550 say EICS register
   3060 	 * is write only. However, Linux says it is a workaround for silicon
   3061 	 * errata to read EICS instead of EICR to get interrupt cause. It seems
   3062 	 * there is a problem about read clear mechanism for EICR register.
   3063 	 */
   3064 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   3065 	/* Be sure the queue bits are not cleared */
   3066 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   3067 	/* Clear interrupt with write */
   3068 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   3069 
   3070 	/* Link status change */
   3071 	if (eicr & IXGBE_EICR_LSC) {
   3072 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   3073 		softint_schedule(adapter->link_si);
   3074 	}
   3075 
   3076 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   3077 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   3078 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   3079 			/* This is probably overkill :) */
   3080 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   3081 				return 1;
   3082 			/* Disable the interrupt */
   3083 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   3084 			softint_schedule(adapter->fdir_si);
   3085 		}
   3086 
   3087 		if (eicr & IXGBE_EICR_ECC) {
   3088 			device_printf(adapter->dev,
   3089 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   3090 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   3091 		}
   3092 
   3093 		/* Check for over temp condition */
   3094 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   3095 			switch (adapter->hw.mac.type) {
   3096 			case ixgbe_mac_X550EM_a:
   3097 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   3098 					break;
   3099 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   3100 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   3101 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   3102 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   3103 				retval = hw->phy.ops.check_overtemp(hw);
   3104 				if (retval != IXGBE_ERR_OVERTEMP)
   3105 					break;
   3106 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   3107 				device_printf(adapter->dev, "System shutdown required!\n");
   3108 				break;
   3109 			default:
   3110 				if (!(eicr & IXGBE_EICR_TS))
   3111 					break;
   3112 				retval = hw->phy.ops.check_overtemp(hw);
   3113 				if (retval != IXGBE_ERR_OVERTEMP)
   3114 					break;
   3115 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   3116 				device_printf(adapter->dev, "System shutdown required!\n");
   3117 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   3118 				break;
   3119 			}
   3120 		}
   3121 
   3122 		/* Check for VF message */
   3123 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   3124 		    (eicr & IXGBE_EICR_MAILBOX))
   3125 			softint_schedule(adapter->mbx_si);
   3126 	}
   3127 
   3128 	if (ixgbe_is_sfp(hw)) {
   3129 		/* Pluggable optics-related interrupt */
   3130 		if (hw->mac.type >= ixgbe_mac_X540)
   3131 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   3132 		else
   3133 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   3134 
   3135 		if (eicr & eicr_mask) {
   3136 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   3137 			softint_schedule(adapter->mod_si);
   3138 		}
   3139 
   3140 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   3141 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   3142 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   3143 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   3144 			softint_schedule(adapter->msf_si);
   3145 		}
   3146 	}
   3147 
   3148 	/* Check for fan failure */
   3149 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   3150 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   3151 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   3152 	}
   3153 
   3154 	/* External PHY interrupt */
   3155 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   3156 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   3157 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   3158 		softint_schedule(adapter->phy_si);
   3159  	}
   3160 
   3161 	/* Re-enable other interrupts */
   3162 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   3163 	return 1;
   3164 } /* ixgbe_msix_link */
   3165 
   3166 static void
   3167 ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   3168 {
   3169 
   3170         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3171                 itr |= itr << 16;
   3172         else
   3173                 itr |= IXGBE_EITR_CNT_WDIS;
   3174 
   3175 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
   3176 }
   3177 
   3178 
   3179 /************************************************************************
   3180  * ixgbe_sysctl_interrupt_rate_handler
   3181  ************************************************************************/
   3182 static int
   3183 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   3184 {
   3185 	struct sysctlnode node = *rnode;
   3186 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   3187 	struct adapter  *adapter;
   3188 	uint32_t reg, usec, rate;
   3189 	int error;
   3190 
   3191 	if (que == NULL)
   3192 		return 0;
   3193 
   3194 	adapter = que->adapter;
   3195 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   3196 		return (EPERM);
   3197 
   3198 	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
   3199 	usec = ((reg & 0x0FF8) >> 3);
   3200 	if (usec > 0)
   3201 		rate = 500000 / usec;
   3202 	else
   3203 		rate = 0;
   3204 	node.sysctl_data = &rate;
   3205 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3206 	if (error || newp == NULL)
   3207 		return error;
   3208 	reg &= ~0xfff; /* default, no limitation */
   3209 	if (rate > 0 && rate < 500000) {
   3210 		if (rate < 1000)
   3211 			rate = 1000;
   3212 		reg |= ((4000000/rate) & 0xff8);
   3213 		/*
   3214 		 * When RSC is used, ITR interval must be larger than
   3215 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   3216 		 * The minimum value is always greater than 2us on 100M
   3217 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   3218 		 */
   3219 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3220 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3221 			if ((adapter->num_queues > 1)
   3222 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3223 				return EINVAL;
   3224 		}
   3225 		ixgbe_max_interrupt_rate = rate;
   3226 	} else
   3227 		ixgbe_max_interrupt_rate = 0;
   3228 	ixgbe_eitr_write(adapter, que->msix, reg);
   3229 
   3230 	return (0);
   3231 } /* ixgbe_sysctl_interrupt_rate_handler */
   3232 
   3233 const struct sysctlnode *
   3234 ixgbe_sysctl_instance(struct adapter *adapter)
   3235 {
   3236 	const char *dvname;
   3237 	struct sysctllog **log;
   3238 	int rc;
   3239 	const struct sysctlnode *rnode;
   3240 
   3241 	if (adapter->sysctltop != NULL)
   3242 		return adapter->sysctltop;
   3243 
   3244 	log = &adapter->sysctllog;
   3245 	dvname = device_xname(adapter->dev);
   3246 
   3247 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3248 	    0, CTLTYPE_NODE, dvname,
   3249 	    SYSCTL_DESCR("ixgbe information and settings"),
   3250 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3251 		goto err;
   3252 
   3253 	return rnode;
   3254 err:
   3255 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3256 	return NULL;
   3257 }
   3258 
   3259 /************************************************************************
   3260  * ixgbe_add_device_sysctls
   3261  ************************************************************************/
   3262 static void
   3263 ixgbe_add_device_sysctls(struct adapter *adapter)
   3264 {
   3265 	device_t               dev = adapter->dev;
   3266 	struct ixgbe_hw        *hw = &adapter->hw;
   3267 	struct sysctllog **log;
   3268 	const struct sysctlnode *rnode, *cnode;
   3269 
   3270 	log = &adapter->sysctllog;
   3271 
   3272 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3273 		aprint_error_dev(dev, "could not create sysctl root\n");
   3274 		return;
   3275 	}
   3276 
   3277 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3278 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3279 	    "debug", SYSCTL_DESCR("Debug Info"),
   3280 	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   3281 		aprint_error_dev(dev, "could not create sysctl\n");
   3282 
   3283 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3284 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3285 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3286 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3287 		aprint_error_dev(dev, "could not create sysctl\n");
   3288 
   3289 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3290 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3291 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3292 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3293 		aprint_error_dev(dev, "could not create sysctl\n");
   3294 
   3295 	/* Sysctls for all devices */
   3296 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3297 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3298 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3299 	    CTL_EOL) != 0)
   3300 		aprint_error_dev(dev, "could not create sysctl\n");
   3301 
   3302 	adapter->enable_aim = ixgbe_enable_aim;
   3303 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3304 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3305 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3306 		aprint_error_dev(dev, "could not create sysctl\n");
   3307 
   3308 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3309 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3310 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3311 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3312 	    CTL_EOL) != 0)
   3313 		aprint_error_dev(dev, "could not create sysctl\n");
   3314 
   3315 	/*
   3316 	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
   3317 	 * it causesflip-flopping softint/workqueue mode in one deferred
   3318 	 * processing. Therefore, preempt_disable()/preempt_enable() are
   3319 	 * required in ixgbe_sched_handle_que() to avoid
   3320 	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
   3321 	 * I think changing "que->txrx_use_workqueue" in interrupt handler
   3322 	 * is lighter than doing preempt_disable()/preempt_enable() in every
   3323 	 * ixgbe_sched_handle_que().
   3324 	 */
   3325 	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
   3326 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3327 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   3328 	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   3329 		aprint_error_dev(dev, "could not create sysctl\n");
   3330 
   3331 #ifdef IXGBE_DEBUG
   3332 	/* testing sysctls (for all devices) */
   3333 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3334 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3335 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3336 	    CTL_EOL) != 0)
   3337 		aprint_error_dev(dev, "could not create sysctl\n");
   3338 
   3339 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3340 	    CTLTYPE_STRING, "print_rss_config",
   3341 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3342 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3343 	    CTL_EOL) != 0)
   3344 		aprint_error_dev(dev, "could not create sysctl\n");
   3345 #endif
   3346 	/* for X550 series devices */
   3347 	if (hw->mac.type >= ixgbe_mac_X550)
   3348 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3349 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3350 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3351 		    CTL_EOL) != 0)
   3352 			aprint_error_dev(dev, "could not create sysctl\n");
   3353 
   3354 	/* for WoL-capable devices */
   3355 	if (adapter->wol_support) {
   3356 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3357 		    CTLTYPE_BOOL, "wol_enable",
   3358 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3359 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3360 		    CTL_EOL) != 0)
   3361 			aprint_error_dev(dev, "could not create sysctl\n");
   3362 
   3363 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3364 		    CTLTYPE_INT, "wufc",
   3365 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3366 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3367 		    CTL_EOL) != 0)
   3368 			aprint_error_dev(dev, "could not create sysctl\n");
   3369 	}
   3370 
   3371 	/* for X552/X557-AT devices */
   3372 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3373 		const struct sysctlnode *phy_node;
   3374 
   3375 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3376 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3377 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3378 			aprint_error_dev(dev, "could not create sysctl\n");
   3379 			return;
   3380 		}
   3381 
   3382 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3383 		    CTLTYPE_INT, "temp",
   3384 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3385 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3386 		    CTL_EOL) != 0)
   3387 			aprint_error_dev(dev, "could not create sysctl\n");
   3388 
   3389 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3390 		    CTLTYPE_INT, "overtemp_occurred",
   3391 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3392 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3393 		    CTL_CREATE, CTL_EOL) != 0)
   3394 			aprint_error_dev(dev, "could not create sysctl\n");
   3395 	}
   3396 
   3397 	if ((hw->mac.type == ixgbe_mac_X550EM_a)
   3398 	    && (hw->phy.type == ixgbe_phy_fw))
   3399 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3400 		    CTLTYPE_BOOL, "force_10_100_autonego",
   3401 		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
   3402 		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
   3403 		    CTL_CREATE, CTL_EOL) != 0)
   3404 			aprint_error_dev(dev, "could not create sysctl\n");
   3405 
   3406 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3407 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3408 		    CTLTYPE_INT, "eee_state",
   3409 		    SYSCTL_DESCR("EEE Power Save State"),
   3410 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3411 		    CTL_EOL) != 0)
   3412 			aprint_error_dev(dev, "could not create sysctl\n");
   3413 	}
   3414 } /* ixgbe_add_device_sysctls */
   3415 
   3416 /************************************************************************
   3417  * ixgbe_allocate_pci_resources
   3418  ************************************************************************/
   3419 static int
   3420 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3421     const struct pci_attach_args *pa)
   3422 {
   3423 	pcireg_t	memtype;
   3424 	device_t dev = adapter->dev;
   3425 	bus_addr_t addr;
   3426 	int flags;
   3427 
   3428 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3429 	switch (memtype) {
   3430 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3431 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3432 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3433 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3434 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3435 			goto map_err;
   3436 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3437 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3438 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3439 		}
   3440 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3441 		     adapter->osdep.mem_size, flags,
   3442 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3443 map_err:
   3444 			adapter->osdep.mem_size = 0;
   3445 			aprint_error_dev(dev, "unable to map BAR0\n");
   3446 			return ENXIO;
   3447 		}
   3448 		break;
   3449 	default:
   3450 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3451 		return ENXIO;
   3452 	}
   3453 
   3454 	return (0);
   3455 } /* ixgbe_allocate_pci_resources */
   3456 
/*
 * ixgbe_free_softint - tear down all software interrupts and workqueues
 *
 *   Disestablishes the per-queue TX/RX softints, destroys the deferred
 *   TX/RX workqueues, then drains the single-instance softints (link,
 *   module, MSF, PHY, FDIR, mailbox).  Pointers are NULLed after
 *   disestablish so a repeated call is harmless.
 */
static void
ixgbe_free_softint(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		/* Per-TX-ring softint only exists in the multiqueue TX path. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Link queue */
	if (adapter->link_si != NULL) {
		softint_disestablish(adapter->link_si);
		adapter->link_si = NULL;
	}
	if (adapter->mod_si != NULL) {
		softint_disestablish(adapter->mod_si);
		adapter->mod_si = NULL;
	}
	if (adapter->msf_si != NULL) {
		softint_disestablish(adapter->msf_si);
		adapter->msf_si = NULL;
	}
	if (adapter->phy_si != NULL) {
		softint_disestablish(adapter->phy_si);
		adapter->phy_si = NULL;
	}
	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
		if (adapter->fdir_si != NULL) {
			softint_disestablish(adapter->fdir_si);
			adapter->fdir_si = NULL;
		}
	}
	/*
	 * NOTE(review): FDIR is gated on feat_en but SRIOV on feat_cap;
	 * presumably mbx_si is established whenever the capability exists
	 * — confirm against the attach path.
	 */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		if (adapter->mbx_si != NULL) {
			softint_disestablish(adapter->mbx_si);
			adapter->mbx_si = NULL;
		}
	}
} /* ixgbe_free_softint */
   3509 
   3510 /************************************************************************
   3511  * ixgbe_detach - Device removal routine
   3512  *
   3513  *   Called when the driver is being removed.
   3514  *   Stops the adapter and deallocates all the resources
   3515  *   that were allocated for driver operation.
   3516  *
   3517  *   return 0 on success, positive on failure
   3518  ************************************************************************/
   3519 static int
   3520 ixgbe_detach(device_t dev, int flags)
   3521 {
   3522 	struct adapter *adapter = device_private(dev);
   3523 	struct rx_ring *rxr = adapter->rx_rings;
   3524 	struct tx_ring *txr = adapter->tx_rings;
   3525 	struct ixgbe_hw *hw = &adapter->hw;
   3526 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3527 	u32	ctrl_ext;
   3528 	int i;
   3529 
   3530 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3531 	if (adapter->osdep.attached == false)
   3532 		return 0;
   3533 
   3534 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3535 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3536 		return (EBUSY);
   3537 	}
   3538 
   3539 	/* Stop the interface. Callouts are stopped in it. */
   3540 	ixgbe_ifstop(adapter->ifp, 1);
   3541 #if NVLAN > 0
   3542 	/* Make sure VLANs are not using driver */
   3543 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3544 		;	/* nothing to do: no VLANs */
   3545 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3546 		vlan_ifdetach(adapter->ifp);
   3547 	else {
   3548 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3549 		return (EBUSY);
   3550 	}
   3551 #endif
   3552 
   3553 	pmf_device_deregister(dev);
   3554 
   3555 	ether_ifdetach(adapter->ifp);
   3556 	/* Stop the adapter */
   3557 	IXGBE_CORE_LOCK(adapter);
   3558 	ixgbe_setup_low_power_mode(adapter);
   3559 	IXGBE_CORE_UNLOCK(adapter);
   3560 
   3561 	ixgbe_free_softint(adapter);
   3562 
   3563 	/* let hardware know driver is unloading */
   3564 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3565 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3566 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3567 
   3568 	callout_halt(&adapter->timer, NULL);
   3569 	callout_halt(&adapter->recovery_mode_timer, NULL);
   3570 
   3571 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3572 		netmap_detach(adapter->ifp);
   3573 
   3574 	ixgbe_free_pci_resources(adapter);
   3575 #if 0	/* XXX the NetBSD port is probably missing something here */
   3576 	bus_generic_detach(dev);
   3577 #endif
   3578 	if_detach(adapter->ifp);
   3579 	if_percpuq_destroy(adapter->ipq);
   3580 
   3581 	sysctl_teardown(&adapter->sysctllog);
   3582 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3583 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3584 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3585 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3586 	evcnt_detach(&adapter->other_tx_dma_setup);
   3587 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3588 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3589 	evcnt_detach(&adapter->watchdog_events);
   3590 	evcnt_detach(&adapter->tso_err);
   3591 	evcnt_detach(&adapter->link_irq);
   3592 	evcnt_detach(&adapter->link_sicount);
   3593 	evcnt_detach(&adapter->mod_sicount);
   3594 	evcnt_detach(&adapter->msf_sicount);
   3595 	evcnt_detach(&adapter->phy_sicount);
   3596 
   3597 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
   3598 		if (i < __arraycount(stats->mpc)) {
   3599 			evcnt_detach(&stats->mpc[i]);
   3600 			if (hw->mac.type == ixgbe_mac_82598EB)
   3601 				evcnt_detach(&stats->rnbc[i]);
   3602 		}
   3603 		if (i < __arraycount(stats->pxontxc)) {
   3604 			evcnt_detach(&stats->pxontxc[i]);
   3605 			evcnt_detach(&stats->pxonrxc[i]);
   3606 			evcnt_detach(&stats->pxofftxc[i]);
   3607 			evcnt_detach(&stats->pxoffrxc[i]);
   3608 			if (hw->mac.type >= ixgbe_mac_82599EB)
   3609 				evcnt_detach(&stats->pxon2offc[i]);
   3610 		}
   3611 	}
   3612 
   3613 	txr = adapter->tx_rings;
   3614 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3615 		evcnt_detach(&adapter->queues[i].irqs);
   3616 		evcnt_detach(&adapter->queues[i].handleq);
   3617 		evcnt_detach(&adapter->queues[i].req);
   3618 		evcnt_detach(&txr->no_desc_avail);
   3619 		evcnt_detach(&txr->total_packets);
   3620 		evcnt_detach(&txr->tso_tx);
   3621 #ifndef IXGBE_LEGACY_TX
   3622 		evcnt_detach(&txr->pcq_drops);
   3623 #endif
   3624 
   3625 		if (i < __arraycount(stats->qprc)) {
   3626 			evcnt_detach(&stats->qprc[i]);
   3627 			evcnt_detach(&stats->qptc[i]);
   3628 			evcnt_detach(&stats->qbrc[i]);
   3629 			evcnt_detach(&stats->qbtc[i]);
   3630 			if (hw->mac.type >= ixgbe_mac_82599EB)
   3631 				evcnt_detach(&stats->qprdc[i]);
   3632 		}
   3633 
   3634 		evcnt_detach(&rxr->rx_packets);
   3635 		evcnt_detach(&rxr->rx_bytes);
   3636 		evcnt_detach(&rxr->rx_copies);
   3637 		evcnt_detach(&rxr->no_jmbuf);
   3638 		evcnt_detach(&rxr->rx_discarded);
   3639 	}
   3640 	evcnt_detach(&stats->ipcs);
   3641 	evcnt_detach(&stats->l4cs);
   3642 	evcnt_detach(&stats->ipcs_bad);
   3643 	evcnt_detach(&stats->l4cs_bad);
   3644 	evcnt_detach(&stats->intzero);
   3645 	evcnt_detach(&stats->legint);
   3646 	evcnt_detach(&stats->crcerrs);
   3647 	evcnt_detach(&stats->illerrc);
   3648 	evcnt_detach(&stats->errbc);
   3649 	evcnt_detach(&stats->mspdc);
   3650 	if (hw->mac.type >= ixgbe_mac_X550)
   3651 		evcnt_detach(&stats->mbsdc);
   3652 	evcnt_detach(&stats->mpctotal);
   3653 	evcnt_detach(&stats->mlfc);
   3654 	evcnt_detach(&stats->mrfc);
   3655 	evcnt_detach(&stats->rlec);
   3656 	evcnt_detach(&stats->lxontxc);
   3657 	evcnt_detach(&stats->lxonrxc);
   3658 	evcnt_detach(&stats->lxofftxc);
   3659 	evcnt_detach(&stats->lxoffrxc);
   3660 
   3661 	/* Packet Reception Stats */
   3662 	evcnt_detach(&stats->tor);
   3663 	evcnt_detach(&stats->gorc);
   3664 	evcnt_detach(&stats->tpr);
   3665 	evcnt_detach(&stats->gprc);
   3666 	evcnt_detach(&stats->mprc);
   3667 	evcnt_detach(&stats->bprc);
   3668 	evcnt_detach(&stats->prc64);
   3669 	evcnt_detach(&stats->prc127);
   3670 	evcnt_detach(&stats->prc255);
   3671 	evcnt_detach(&stats->prc511);
   3672 	evcnt_detach(&stats->prc1023);
   3673 	evcnt_detach(&stats->prc1522);
   3674 	evcnt_detach(&stats->ruc);
   3675 	evcnt_detach(&stats->rfc);
   3676 	evcnt_detach(&stats->roc);
   3677 	evcnt_detach(&stats->rjc);
   3678 	evcnt_detach(&stats->mngprc);
   3679 	evcnt_detach(&stats->mngpdc);
   3680 	evcnt_detach(&stats->xec);
   3681 
   3682 	/* Packet Transmission Stats */
   3683 	evcnt_detach(&stats->gotc);
   3684 	evcnt_detach(&stats->tpt);
   3685 	evcnt_detach(&stats->gptc);
   3686 	evcnt_detach(&stats->bptc);
   3687 	evcnt_detach(&stats->mptc);
   3688 	evcnt_detach(&stats->mngptc);
   3689 	evcnt_detach(&stats->ptc64);
   3690 	evcnt_detach(&stats->ptc127);
   3691 	evcnt_detach(&stats->ptc255);
   3692 	evcnt_detach(&stats->ptc511);
   3693 	evcnt_detach(&stats->ptc1023);
   3694 	evcnt_detach(&stats->ptc1522);
   3695 
   3696 	ixgbe_free_transmit_structures(adapter);
   3697 	ixgbe_free_receive_structures(adapter);
   3698 	for (i = 0; i < adapter->num_queues; i++) {
   3699 		struct ix_queue * que = &adapter->queues[i];
   3700 		mutex_destroy(&que->dc_mtx);
   3701 	}
   3702 	free(adapter->queues, M_DEVBUF);
   3703 	free(adapter->mta, M_DEVBUF);
   3704 
   3705 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3706 
   3707 	return (0);
   3708 } /* ixgbe_detach */
   3709 
   3710 /************************************************************************
   3711  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3712  *
   3713  *   Prepare the adapter/port for LPLU and/or WoL
   3714  ************************************************************************/
   3715 static int
   3716 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3717 {
   3718 	struct ixgbe_hw *hw = &adapter->hw;
   3719 	device_t        dev = adapter->dev;
   3720 	s32             error = 0;
   3721 
   3722 	KASSERT(mutex_owned(&adapter->core_mtx));
   3723 
   3724 	/* Limit power management flow to X550EM baseT */
   3725 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3726 	    hw->phy.ops.enter_lplu) {
   3727 		/* X550EM baseT adapters need a special LPLU flow */
   3728 		hw->phy.reset_disable = true;
   3729 		ixgbe_stop(adapter);
   3730 		error = hw->phy.ops.enter_lplu(hw);
   3731 		if (error)
   3732 			device_printf(dev,
   3733 			    "Error entering LPLU: %d\n", error);
   3734 		hw->phy.reset_disable = false;
   3735 	} else {
   3736 		/* Just stop for other adapters */
   3737 		ixgbe_stop(adapter);
   3738 	}
   3739 
   3740 	if (!hw->wol_enabled) {
   3741 		ixgbe_set_phy_power(hw, FALSE);
   3742 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3743 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3744 	} else {
   3745 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3746 		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
   3747 		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
   3748 
   3749 		/*
   3750 		 * Clear Wake Up Status register to prevent any previous wakeup
   3751 		 * events from waking us up immediately after we suspend.
   3752 		 */
   3753 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3754 
   3755 		/*
   3756 		 * Program the Wakeup Filter Control register with user filter
   3757 		 * settings
   3758 		 */
   3759 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3760 
   3761 		/* Enable wakeups and power management in Wakeup Control */
   3762 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3763 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3764 
   3765 	}
   3766 
   3767 	return error;
   3768 } /* ixgbe_setup_low_power_mode */
   3769 
   3770 /************************************************************************
   3771  * ixgbe_shutdown - Shutdown entry point
   3772  ************************************************************************/
   3773 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3774 static int
   3775 ixgbe_shutdown(device_t dev)
   3776 {
   3777 	struct adapter *adapter = device_private(dev);
   3778 	int error = 0;
   3779 
   3780 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3781 
   3782 	IXGBE_CORE_LOCK(adapter);
   3783 	error = ixgbe_setup_low_power_mode(adapter);
   3784 	IXGBE_CORE_UNLOCK(adapter);
   3785 
   3786 	return (error);
   3787 } /* ixgbe_shutdown */
   3788 #endif
   3789 
   3790 /************************************************************************
   3791  * ixgbe_suspend
   3792  *
   3793  *   From D0 to D3
   3794  ************************************************************************/
   3795 static bool
   3796 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3797 {
   3798 	struct adapter *adapter = device_private(dev);
   3799 	int            error = 0;
   3800 
   3801 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3802 
   3803 	IXGBE_CORE_LOCK(adapter);
   3804 
   3805 	error = ixgbe_setup_low_power_mode(adapter);
   3806 
   3807 	IXGBE_CORE_UNLOCK(adapter);
   3808 
   3809 	return (error);
   3810 } /* ixgbe_suspend */
   3811 
   3812 /************************************************************************
   3813  * ixgbe_resume
   3814  *
   3815  *   From D3 to D0
   3816  ************************************************************************/
   3817 static bool
   3818 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3819 {
   3820 	struct adapter  *adapter = device_private(dev);
   3821 	struct ifnet    *ifp = adapter->ifp;
   3822 	struct ixgbe_hw *hw = &adapter->hw;
   3823 	u32             wus;
   3824 
   3825 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3826 
   3827 	IXGBE_CORE_LOCK(adapter);
   3828 
   3829 	/* Read & clear WUS register */
   3830 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3831 	if (wus)
   3832 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3833 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3834 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3835 	/* And clear WUFC until next low-power transition */
   3836 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3837 
   3838 	/*
   3839 	 * Required after D3->D0 transition;
   3840 	 * will re-advertise all previous advertised speeds
   3841 	 */
   3842 	if (ifp->if_flags & IFF_UP)
   3843 		ixgbe_init_locked(adapter);
   3844 
   3845 	IXGBE_CORE_UNLOCK(adapter);
   3846 
   3847 	return true;
   3848 } /* ixgbe_resume */
   3849 
   3850 /*
   3851  * Set the various hardware offload abilities.
   3852  *
   3853  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3854  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3855  * mbuf offload flags the driver will understand.
   3856  */
   3857 static void
   3858 ixgbe_set_if_hwassist(struct adapter *adapter)
   3859 {
   3860 	/* XXX */
   3861 }
   3862 
   3863 /************************************************************************
   3864  * ixgbe_init_locked - Init entry point
   3865  *
   3866  *   Used in two ways: It is used by the stack as an init
   3867  *   entry point in network interface structure. It is also
   3868  *   used by the driver as a hw/sw initialization routine to
   3869  *   get to a consistent state.
   3870  *
   3871  *   return 0 on success, positive on failure
   3872  ************************************************************************/
   3873 static void
   3874 ixgbe_init_locked(struct adapter *adapter)
   3875 {
   3876 	struct ifnet   *ifp = adapter->ifp;
   3877 	device_t 	dev = adapter->dev;
   3878 	struct ixgbe_hw *hw = &adapter->hw;
   3879 	struct ix_queue *que;
   3880 	struct tx_ring  *txr;
   3881 	struct rx_ring  *rxr;
   3882 	u32		txdctl, mhadd;
   3883 	u32		rxdctl, rxctrl;
   3884 	u32             ctrl_ext;
   3885 	int             i, j, err;
   3886 
   3887 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3888 
   3889 	KASSERT(mutex_owned(&adapter->core_mtx));
   3890 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3891 
   3892 	hw->adapter_stopped = FALSE;
   3893 	ixgbe_stop_adapter(hw);
   3894         callout_stop(&adapter->timer);
   3895 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
   3896 		que->disabled_count = 0;
   3897 
   3898 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3899 	adapter->max_frame_size =
   3900 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3901 
   3902 	/* Queue indices may change with IOV mode */
   3903 	ixgbe_align_all_queue_indices(adapter);
   3904 
   3905 	/* reprogram the RAR[0] in case user changed it. */
   3906 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3907 
   3908 	/* Get the latest mac address, User can use a LAA */
   3909 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3910 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3911 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3912 	hw->addr_ctrl.rar_used_count = 1;
   3913 
   3914 	/* Set hardware offload abilities from ifnet flags */
   3915 	ixgbe_set_if_hwassist(adapter);
   3916 
   3917 	/* Prepare transmit descriptors and buffers */
   3918 	if (ixgbe_setup_transmit_structures(adapter)) {
   3919 		device_printf(dev, "Could not setup transmit structures\n");
   3920 		ixgbe_stop(adapter);
   3921 		return;
   3922 	}
   3923 
   3924 	ixgbe_init_hw(hw);
   3925 
   3926 	ixgbe_initialize_iov(adapter);
   3927 
   3928 	ixgbe_initialize_transmit_units(adapter);
   3929 
   3930 	/* Setup Multicast table */
   3931 	ixgbe_set_multi(adapter);
   3932 
   3933 	/* Determine the correct mbuf pool, based on frame size */
   3934 	if (adapter->max_frame_size <= MCLBYTES)
   3935 		adapter->rx_mbuf_sz = MCLBYTES;
   3936 	else
   3937 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3938 
   3939 	/* Prepare receive descriptors and buffers */
   3940 	if (ixgbe_setup_receive_structures(adapter)) {
   3941 		device_printf(dev, "Could not setup receive structures\n");
   3942 		ixgbe_stop(adapter);
   3943 		return;
   3944 	}
   3945 
   3946 	/* Configure RX settings */
   3947 	ixgbe_initialize_receive_units(adapter);
   3948 
   3949 	/* Enable SDP & MSI-X interrupts based on adapter */
   3950 	ixgbe_config_gpie(adapter);
   3951 
   3952 	/* Set MTU size */
   3953 	if (ifp->if_mtu > ETHERMTU) {
   3954 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3955 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3956 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3957 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3958 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3959 	}
   3960 
   3961 	/* Now enable all the queues */
   3962 	for (i = 0; i < adapter->num_queues; i++) {
   3963 		txr = &adapter->tx_rings[i];
   3964 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3965 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3966 		/* Set WTHRESH to 8, burst writeback */
   3967 		txdctl |= (8 << 16);
   3968 		/*
   3969 		 * When the internal queue falls below PTHRESH (32),
   3970 		 * start prefetching as long as there are at least
   3971 		 * HTHRESH (1) buffers ready. The values are taken
   3972 		 * from the Intel linux driver 3.8.21.
   3973 		 * Prefetching enables tx line rate even with 1 queue.
   3974 		 */
   3975 		txdctl |= (32 << 0) | (1 << 8);
   3976 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3977 	}
   3978 
   3979 	for (i = 0; i < adapter->num_queues; i++) {
   3980 		rxr = &adapter->rx_rings[i];
   3981 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3982 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3983 			/*
   3984 			 * PTHRESH = 21
   3985 			 * HTHRESH = 4
   3986 			 * WTHRESH = 8
   3987 			 */
   3988 			rxdctl &= ~0x3FFFFF;
   3989 			rxdctl |= 0x080420;
   3990 		}
   3991 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3992 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3993 		for (j = 0; j < 10; j++) {
   3994 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3995 			    IXGBE_RXDCTL_ENABLE)
   3996 				break;
   3997 			else
   3998 				msec_delay(1);
   3999 		}
   4000 		wmb();
   4001 
   4002 		/*
   4003 		 * In netmap mode, we must preserve the buffers made
   4004 		 * available to userspace before the if_init()
   4005 		 * (this is true by default on the TX side, because
   4006 		 * init makes all buffers available to userspace).
   4007 		 *
   4008 		 * netmap_reset() and the device specific routines
   4009 		 * (e.g. ixgbe_setup_receive_rings()) map these
   4010 		 * buffers at the end of the NIC ring, so here we
   4011 		 * must set the RDT (tail) register to make sure
   4012 		 * they are not overwritten.
   4013 		 *
   4014 		 * In this driver the NIC ring starts at RDH = 0,
   4015 		 * RDT points to the last slot available for reception (?),
   4016 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   4017 		 */
   4018 #ifdef DEV_NETMAP
   4019 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   4020 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   4021 			struct netmap_adapter *na = NA(adapter->ifp);
   4022 			struct netmap_kring *kring = &na->rx_rings[i];
   4023 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   4024 
   4025 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   4026 		} else
   4027 #endif /* DEV_NETMAP */
   4028 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   4029 			    adapter->num_rx_desc - 1);
   4030 	}
   4031 
   4032 	/* Enable Receive engine */
   4033 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   4034 	if (hw->mac.type == ixgbe_mac_82598EB)
   4035 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   4036 	rxctrl |= IXGBE_RXCTRL_RXEN;
   4037 	ixgbe_enable_rx_dma(hw, rxctrl);
   4038 
   4039 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4040 
   4041 	/* Set up MSI/MSI-X routing */
   4042 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4043 		ixgbe_configure_ivars(adapter);
   4044 		/* Set up auto-mask */
   4045 		if (hw->mac.type == ixgbe_mac_82598EB)
   4046 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4047 		else {
   4048 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   4049 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   4050 		}
   4051 	} else {  /* Simple settings for Legacy/MSI */
   4052 		ixgbe_set_ivar(adapter, 0, 0, 0);
   4053 		ixgbe_set_ivar(adapter, 0, 0, 1);
   4054 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   4055 	}
   4056 
   4057 	ixgbe_init_fdir(adapter);
   4058 
   4059 	/*
   4060 	 * Check on any SFP devices that
   4061 	 * need to be kick-started
   4062 	 */
   4063 	if (hw->phy.type == ixgbe_phy_none) {
   4064 		err = hw->phy.ops.identify(hw);
   4065 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4066                 	device_printf(dev,
   4067 			    "Unsupported SFP+ module type was detected.\n");
   4068 			return;
   4069         	}
   4070 	}
   4071 
   4072 	/* Set moderation on the Link interrupt */
   4073 	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
   4074 
   4075 	/* Enable power to the phy. */
   4076 	ixgbe_set_phy_power(hw, TRUE);
   4077 
   4078 	/* Config/Enable Link */
   4079 	ixgbe_config_link(adapter);
   4080 
   4081 	/* Hardware Packet Buffer & Flow Control setup */
   4082 	ixgbe_config_delay_values(adapter);
   4083 
   4084 	/* Initialize the FC settings */
   4085 	ixgbe_start_hw(hw);
   4086 
   4087 	/* Set up VLAN support and filter */
   4088 	ixgbe_setup_vlan_hw_support(adapter);
   4089 
   4090 	/* Setup DMA Coalescing */
   4091 	ixgbe_config_dmac(adapter);
   4092 
   4093 	/* And now turn on interrupts */
   4094 	ixgbe_enable_intr(adapter);
   4095 
   4096 	/* Enable the use of the MBX by the VF's */
   4097 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   4098 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   4099 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   4100 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   4101 	}
   4102 
   4103 	/* Update saved flags. See ixgbe_ifflags_cb() */
   4104 	adapter->if_flags = ifp->if_flags;
   4105 
   4106 	/* Now inform the stack we're ready */
   4107 	ifp->if_flags |= IFF_RUNNING;
   4108 
   4109 	return;
   4110 } /* ixgbe_init_locked */
   4111 
   4112 /************************************************************************
   4113  * ixgbe_init
   4114  ************************************************************************/
   4115 static int
   4116 ixgbe_init(struct ifnet *ifp)
   4117 {
   4118 	struct adapter *adapter = ifp->if_softc;
   4119 
   4120 	IXGBE_CORE_LOCK(adapter);
   4121 	ixgbe_init_locked(adapter);
   4122 	IXGBE_CORE_UNLOCK(adapter);
   4123 
   4124 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   4125 } /* ixgbe_init */
   4126 
   4127 /************************************************************************
   4128  * ixgbe_set_ivar
   4129  *
   4130  *   Setup the correct IVAR register for a particular MSI-X interrupt
   4131  *     (yes this is all very magic and confusing :)
   4132  *    - entry is the register array entry
   4133  *    - vector is the MSI-X vector for this queue
   4134  *    - type is RX/TX/MISC
   4135  ************************************************************************/
   4136 static void
   4137 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   4138 {
   4139 	struct ixgbe_hw *hw = &adapter->hw;
   4140 	u32 ivar, index;
   4141 
   4142 	vector |= IXGBE_IVAR_ALLOC_VAL;
   4143 
   4144 	switch (hw->mac.type) {
   4145 	case ixgbe_mac_82598EB:
   4146 		if (type == -1)
   4147 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   4148 		else
   4149 			entry += (type * 64);
   4150 		index = (entry >> 2) & 0x1F;
   4151 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   4152 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   4153 		ivar |= (vector << (8 * (entry & 0x3)));
   4154 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   4155 		break;
   4156 	case ixgbe_mac_82599EB:
   4157 	case ixgbe_mac_X540:
   4158 	case ixgbe_mac_X550:
   4159 	case ixgbe_mac_X550EM_x:
   4160 	case ixgbe_mac_X550EM_a:
   4161 		if (type == -1) { /* MISC IVAR */
   4162 			index = (entry & 1) * 8;
   4163 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   4164 			ivar &= ~(0xFF << index);
   4165 			ivar |= (vector << index);
   4166 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   4167 		} else {	/* RX/TX IVARS */
   4168 			index = (16 * (entry & 1)) + (8 * type);
   4169 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   4170 			ivar &= ~(0xFF << index);
   4171 			ivar |= (vector << index);
   4172 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   4173 		}
   4174 		break;
   4175 	default:
   4176 		break;
   4177 	}
   4178 } /* ixgbe_set_ivar */
   4179 
   4180 /************************************************************************
   4181  * ixgbe_configure_ivars
   4182  ************************************************************************/
   4183 static void
   4184 ixgbe_configure_ivars(struct adapter *adapter)
   4185 {
   4186 	struct ix_queue *que = adapter->queues;
   4187 	u32             newitr;
   4188 
   4189 	if (ixgbe_max_interrupt_rate > 0)
   4190 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4191 	else {
   4192 		/*
   4193 		 * Disable DMA coalescing if interrupt moderation is
   4194 		 * disabled.
   4195 		 */
   4196 		adapter->dmac = 0;
   4197 		newitr = 0;
   4198 	}
   4199 
   4200         for (int i = 0; i < adapter->num_queues; i++, que++) {
   4201 		struct rx_ring *rxr = &adapter->rx_rings[i];
   4202 		struct tx_ring *txr = &adapter->tx_rings[i];
   4203 		/* First the RX queue entry */
   4204                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   4205 		/* ... and the TX */
   4206 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   4207 		/* Set an Initial EITR value */
   4208 		ixgbe_eitr_write(adapter, que->msix, newitr);
   4209 		/*
   4210 		 * To eliminate influence of the previous state.
   4211 		 * At this point, Tx/Rx interrupt handler
   4212 		 * (ixgbe_msix_que()) cannot be called, so  both
   4213 		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
   4214 		 */
   4215 		que->eitr_setting = 0;
   4216 	}
   4217 
   4218 	/* For the Link interrupt */
   4219         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   4220 } /* ixgbe_configure_ivars */
   4221 
   4222 /************************************************************************
   4223  * ixgbe_config_gpie
   4224  ************************************************************************/
   4225 static void
   4226 ixgbe_config_gpie(struct adapter *adapter)
   4227 {
   4228 	struct ixgbe_hw *hw = &adapter->hw;
   4229 	u32             gpie;
   4230 
   4231 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   4232 
   4233 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4234 		/* Enable Enhanced MSI-X mode */
   4235 		gpie |= IXGBE_GPIE_MSIX_MODE
   4236 		     |  IXGBE_GPIE_EIAME
   4237 		     |  IXGBE_GPIE_PBA_SUPPORT
   4238 		     |  IXGBE_GPIE_OCD;
   4239 	}
   4240 
   4241 	/* Fan Failure Interrupt */
   4242 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4243 		gpie |= IXGBE_SDP1_GPIEN;
   4244 
   4245 	/* Thermal Sensor Interrupt */
   4246 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   4247 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4248 
   4249 	/* Link detection */
   4250 	switch (hw->mac.type) {
   4251 	case ixgbe_mac_82599EB:
   4252 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   4253 		break;
   4254 	case ixgbe_mac_X550EM_x:
   4255 	case ixgbe_mac_X550EM_a:
   4256 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4257 		break;
   4258 	default:
   4259 		break;
   4260 	}
   4261 
   4262 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   4263 
   4264 } /* ixgbe_config_gpie */
   4265 
   4266 /************************************************************************
   4267  * ixgbe_config_delay_values
   4268  *
   4269  *   Requires adapter->max_frame_size to be set.
   4270  ************************************************************************/
   4271 static void
   4272 ixgbe_config_delay_values(struct adapter *adapter)
   4273 {
   4274 	struct ixgbe_hw *hw = &adapter->hw;
   4275 	u32             rxpb, frame, size, tmp;
   4276 
   4277 	frame = adapter->max_frame_size;
   4278 
   4279 	/* Calculate High Water */
   4280 	switch (hw->mac.type) {
   4281 	case ixgbe_mac_X540:
   4282 	case ixgbe_mac_X550:
   4283 	case ixgbe_mac_X550EM_x:
   4284 	case ixgbe_mac_X550EM_a:
   4285 		tmp = IXGBE_DV_X540(frame, frame);
   4286 		break;
   4287 	default:
   4288 		tmp = IXGBE_DV(frame, frame);
   4289 		break;
   4290 	}
   4291 	size = IXGBE_BT2KB(tmp);
   4292 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4293 	hw->fc.high_water[0] = rxpb - size;
   4294 
   4295 	/* Now calculate Low Water */
   4296 	switch (hw->mac.type) {
   4297 	case ixgbe_mac_X540:
   4298 	case ixgbe_mac_X550:
   4299 	case ixgbe_mac_X550EM_x:
   4300 	case ixgbe_mac_X550EM_a:
   4301 		tmp = IXGBE_LOW_DV_X540(frame);
   4302 		break;
   4303 	default:
   4304 		tmp = IXGBE_LOW_DV(frame);
   4305 		break;
   4306 	}
   4307 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4308 
   4309 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4310 	hw->fc.send_xon = TRUE;
   4311 } /* ixgbe_config_delay_values */
   4312 
   4313 /************************************************************************
   4314  * ixgbe_set_multi - Multicast Update
   4315  *
   4316  *   Called whenever multicast address list is updated.
   4317  ************************************************************************/
   4318 static void
   4319 ixgbe_set_multi(struct adapter *adapter)
   4320 {
   4321 	struct ixgbe_mc_addr	*mta;
   4322 	struct ifnet		*ifp = adapter->ifp;
   4323 	u8			*update_ptr;
   4324 	int			mcnt = 0;
   4325 	u32			fctrl;
   4326 	struct ethercom		*ec = &adapter->osdep.ec;
   4327 	struct ether_multi	*enm;
   4328 	struct ether_multistep	step;
   4329 
   4330 	KASSERT(mutex_owned(&adapter->core_mtx));
   4331 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4332 
   4333 	mta = adapter->mta;
   4334 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4335 
   4336 	ifp->if_flags &= ~IFF_ALLMULTI;
   4337 	ETHER_LOCK(ec);
   4338 	ETHER_FIRST_MULTI(step, ec, enm);
   4339 	while (enm != NULL) {
   4340 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4341 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4342 			ETHER_ADDR_LEN) != 0)) {
   4343 			ifp->if_flags |= IFF_ALLMULTI;
   4344 			break;
   4345 		}
   4346 		bcopy(enm->enm_addrlo,
   4347 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4348 		mta[mcnt].vmdq = adapter->pool;
   4349 		mcnt++;
   4350 		ETHER_NEXT_MULTI(step, enm);
   4351 	}
   4352 	ETHER_UNLOCK(ec);
   4353 
   4354 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4355 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4356 	if (ifp->if_flags & IFF_PROMISC)
   4357 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4358 	else if (ifp->if_flags & IFF_ALLMULTI) {
   4359 		fctrl |= IXGBE_FCTRL_MPE;
   4360 	}
   4361 
   4362 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4363 
   4364 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4365 		update_ptr = (u8 *)mta;
   4366 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4367 		    ixgbe_mc_array_itr, TRUE);
   4368 	}
   4369 
   4370 } /* ixgbe_set_multi */
   4371 
   4372 /************************************************************************
   4373  * ixgbe_mc_array_itr
   4374  *
   4375  *   An iterator function needed by the multicast shared code.
   4376  *   It feeds the shared code routine the addresses in the
   4377  *   array of ixgbe_set_multi() one by one.
   4378  ************************************************************************/
   4379 static u8 *
   4380 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4381 {
   4382 	struct ixgbe_mc_addr *mta;
   4383 
   4384 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4385 	*vmdq = mta->vmdq;
   4386 
   4387 	*update_ptr = (u8*)(mta + 1);
   4388 
   4389 	return (mta->addr);
   4390 } /* ixgbe_mc_array_itr */
   4391 
   4392 /************************************************************************
   4393  * ixgbe_local_timer - Timer routine
   4394  *
   4395  *   Checks for link status, updates statistics,
   4396  *   and runs the watchdog check.
   4397  ************************************************************************/
   4398 static void
   4399 ixgbe_local_timer(void *arg)
   4400 {
   4401 	struct adapter *adapter = arg;
   4402 
   4403 	IXGBE_CORE_LOCK(adapter);
   4404 	ixgbe_local_timer1(adapter);
   4405 	IXGBE_CORE_UNLOCK(adapter);
   4406 }
   4407 
/*
 * Core of the once-per-second timer: probes optics, refreshes link
 * state and statistics, and runs the TX watchdog.  Reinitializes the
 * adapter if every queue appears hung.  Core lock must be held.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues with pending work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;	/* number of queues declared hung */
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Fold the per-queue TX DMA setup error counters into the
	 * adapter-wide event counters.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring  *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	/* Re-arm for the next tick */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
   4512 
   4513 /************************************************************************
   4514  * ixgbe_recovery_mode_timer - Recovery mode timer routine
   4515  ************************************************************************/
   4516 static void
   4517 ixgbe_recovery_mode_timer(void *arg)
   4518 {
   4519 	struct adapter *adapter = arg;
   4520 	struct ixgbe_hw *hw = &adapter->hw;
   4521 
   4522 	IXGBE_CORE_LOCK(adapter);
   4523 	if (ixgbe_fw_recovery_mode(hw)) {
   4524 		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
   4525 			/* Firmware error detected, entering recovery mode */
   4526 			device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
   4527 
   4528 			if (hw->adapter_stopped == FALSE)
   4529 				ixgbe_stop(adapter);
   4530 		}
   4531 	} else
   4532 		atomic_cas_uint(&adapter->recovery_mode, 1, 0);
   4533 
   4534 	callout_reset(&adapter->recovery_mode_timer, hz,
   4535 	    ixgbe_recovery_mode_timer, adapter);
   4536 	IXGBE_CORE_UNLOCK(adapter);
   4537 } /* ixgbe_recovery_mode_timer */
   4538 
   4539 /************************************************************************
   4540  * ixgbe_sfp_probe
   4541  *
   4542  *   Determine if a port had optics inserted.
   4543  ************************************************************************/
   4544 static bool
   4545 ixgbe_sfp_probe(struct adapter *adapter)
   4546 {
   4547 	struct ixgbe_hw	*hw = &adapter->hw;
   4548 	device_t	dev = adapter->dev;
   4549 	bool		result = FALSE;
   4550 
   4551 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4552 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4553 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4554 		if (ret)
   4555 			goto out;
   4556 		ret = hw->phy.ops.reset(hw);
   4557 		adapter->sfp_probe = FALSE;
   4558 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4559 			device_printf(dev,"Unsupported SFP+ module detected!");
   4560 			device_printf(dev,
   4561 			    "Reload driver with supported module.\n");
   4562                         goto out;
   4563 		} else
   4564 			device_printf(dev, "SFP+ module detected!\n");
   4565 		/* We now have supported optics */
   4566 		result = TRUE;
   4567 	}
   4568 out:
   4569 
   4570 	return (result);
   4571 } /* ixgbe_sfp_probe */
   4572 
   4573 /************************************************************************
   4574  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4575  ************************************************************************/
   4576 static void
   4577 ixgbe_handle_mod(void *context)
   4578 {
   4579 	struct adapter  *adapter = context;
   4580 	struct ixgbe_hw *hw = &adapter->hw;
   4581 	device_t	dev = adapter->dev;
   4582 	u32             err, cage_full = 0;
   4583 
   4584 	++adapter->mod_sicount.ev_count;
   4585 	if (adapter->hw.need_crosstalk_fix) {
   4586 		switch (hw->mac.type) {
   4587 		case ixgbe_mac_82599EB:
   4588 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4589 			    IXGBE_ESDP_SDP2;
   4590 			break;
   4591 		case ixgbe_mac_X550EM_x:
   4592 		case ixgbe_mac_X550EM_a:
   4593 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4594 			    IXGBE_ESDP_SDP0;
   4595 			break;
   4596 		default:
   4597 			break;
   4598 		}
   4599 
   4600 		if (!cage_full)
   4601 			return;
   4602 	}
   4603 
   4604 	err = hw->phy.ops.identify_sfp(hw);
   4605 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4606 		device_printf(dev,
   4607 		    "Unsupported SFP+ module type was detected.\n");
   4608 		return;
   4609 	}
   4610 
   4611 	if (hw->mac.type == ixgbe_mac_82598EB)
   4612 		err = hw->phy.ops.reset(hw);
   4613 	else
   4614 		err = hw->mac.ops.setup_sfp(hw);
   4615 
   4616 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4617 		device_printf(dev,
   4618 		    "Setup failure - unsupported SFP+ module type.\n");
   4619 		return;
   4620 	}
   4621 	softint_schedule(adapter->msf_si);
   4622 } /* ixgbe_handle_mod */
   4623 
   4624 
   4625 /************************************************************************
   4626  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4627  ************************************************************************/
   4628 static void
   4629 ixgbe_handle_msf(void *context)
   4630 {
   4631 	struct adapter  *adapter = context;
   4632 	struct ixgbe_hw *hw = &adapter->hw;
   4633 	u32             autoneg;
   4634 	bool            negotiate;
   4635 
   4636 	IXGBE_CORE_LOCK(adapter);
   4637 	++adapter->msf_sicount.ev_count;
   4638 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4639 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4640 
   4641 	autoneg = hw->phy.autoneg_advertised;
   4642 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4643 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4644 	else
   4645 		negotiate = 0;
   4646 	if (hw->mac.ops.setup_link)
   4647 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4648 
   4649 	/* Adjust media types shown in ifconfig */
   4650 	ifmedia_removeall(&adapter->media);
   4651 	ixgbe_add_media_types(adapter);
   4652 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4653 	IXGBE_CORE_UNLOCK(adapter);
   4654 } /* ixgbe_handle_msf */
   4655 
   4656 /************************************************************************
   4657  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4658  ************************************************************************/
   4659 static void
   4660 ixgbe_handle_phy(void *context)
   4661 {
   4662 	struct adapter  *adapter = context;
   4663 	struct ixgbe_hw *hw = &adapter->hw;
   4664 	int error;
   4665 
   4666 	++adapter->phy_sicount.ev_count;
   4667 	error = hw->phy.ops.handle_lasi(hw);
   4668 	if (error == IXGBE_ERR_OVERTEMP)
   4669 		device_printf(adapter->dev,
   4670 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4671 		    " PHY will downshift to lower power state!\n");
   4672 	else if (error)
   4673 		device_printf(adapter->dev,
   4674 		    "Error handling LASI interrupt: %d\n", error);
   4675 } /* ixgbe_handle_phy */
   4676 
   4677 static void
   4678 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4679 {
   4680 	struct adapter *adapter = ifp->if_softc;
   4681 
   4682 	IXGBE_CORE_LOCK(adapter);
   4683 	ixgbe_stop(adapter);
   4684 	IXGBE_CORE_UNLOCK(adapter);
   4685 }
   4686 
   4687 /************************************************************************
   4688  * ixgbe_stop - Stop the hardware
   4689  *
   4690  *   Disables all traffic on the adapter by issuing a
   4691  *   global reset on the MAC and deallocates TX/RX buffers.
   4692  ************************************************************************/
   4693 static void
   4694 ixgbe_stop(void *arg)
   4695 {
   4696 	struct ifnet    *ifp;
   4697 	struct adapter  *adapter = arg;
   4698 	struct ixgbe_hw *hw = &adapter->hw;
   4699 
   4700 	ifp = adapter->ifp;
   4701 
   4702 	KASSERT(mutex_owned(&adapter->core_mtx));
   4703 
   4704 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4705 	ixgbe_disable_intr(adapter);
   4706 	callout_stop(&adapter->timer);
   4707 
   4708 	/* Let the stack know...*/
   4709 	ifp->if_flags &= ~IFF_RUNNING;
   4710 
   4711 	ixgbe_reset_hw(hw);
   4712 	hw->adapter_stopped = FALSE;
   4713 	ixgbe_stop_adapter(hw);
   4714 	if (hw->mac.type == ixgbe_mac_82599EB)
   4715 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4716 	/* Turn off the laser - noop with no optics */
   4717 	ixgbe_disable_tx_laser(hw);
   4718 
   4719 	/* Update the stack */
   4720 	adapter->link_up = FALSE;
   4721 	ixgbe_update_link_status(adapter);
   4722 
   4723 	/* reprogram the RAR[0] in case user changed it. */
   4724 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4725 
   4726 	return;
   4727 } /* ixgbe_stop */
   4728 
   4729 /************************************************************************
   4730  * ixgbe_update_link_status - Update OS on link state
   4731  *
   4732  * Note: Only updates the OS on the cached link state.
   4733  *       The real check of the hardware only happens with
   4734  *       a link interrupt.
   4735  ************************************************************************/
   4736 static void
   4737 ixgbe_update_link_status(struct adapter *adapter)
   4738 {
   4739 	struct ifnet	*ifp = adapter->ifp;
   4740 	device_t        dev = adapter->dev;
   4741 	struct ixgbe_hw *hw = &adapter->hw;
   4742 
   4743 	KASSERT(mutex_owned(&adapter->core_mtx));
   4744 
   4745 	if (adapter->link_up) {
   4746 		if (adapter->link_active == FALSE) {
   4747 			/*
   4748 			 * To eliminate influence of the previous state
   4749 			 * in the same way as ixgbe_init_locked().
   4750 			 */
   4751 			struct ix_queue	*que = adapter->queues;
   4752 			for (int i = 0; i < adapter->num_queues; i++, que++)
   4753 				que->eitr_setting = 0;
   4754 
   4755 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4756 				/*
   4757 				 *  Discard count for both MAC Local Fault and
   4758 				 * Remote Fault because those registers are
   4759 				 * valid only when the link speed is up and
   4760 				 * 10Gbps.
   4761 				 */
   4762 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4763 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4764 			}
   4765 
   4766 			if (bootverbose) {
   4767 				const char *bpsmsg;
   4768 
   4769 				switch (adapter->link_speed) {
   4770 				case IXGBE_LINK_SPEED_10GB_FULL:
   4771 					bpsmsg = "10 Gbps";
   4772 					break;
   4773 				case IXGBE_LINK_SPEED_5GB_FULL:
   4774 					bpsmsg = "5 Gbps";
   4775 					break;
   4776 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4777 					bpsmsg = "2.5 Gbps";
   4778 					break;
   4779 				case IXGBE_LINK_SPEED_1GB_FULL:
   4780 					bpsmsg = "1 Gbps";
   4781 					break;
   4782 				case IXGBE_LINK_SPEED_100_FULL:
   4783 					bpsmsg = "100 Mbps";
   4784 					break;
   4785 				case IXGBE_LINK_SPEED_10_FULL:
   4786 					bpsmsg = "10 Mbps";
   4787 					break;
   4788 				default:
   4789 					bpsmsg = "unknown speed";
   4790 					break;
   4791 				}
   4792 				device_printf(dev, "Link is up %s %s \n",
   4793 				    bpsmsg, "Full Duplex");
   4794 			}
   4795 			adapter->link_active = TRUE;
   4796 			/* Update any Flow Control changes */
   4797 			ixgbe_fc_enable(&adapter->hw);
   4798 			/* Update DMA coalescing config */
   4799 			ixgbe_config_dmac(adapter);
   4800 			if_link_state_change(ifp, LINK_STATE_UP);
   4801 
   4802 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4803 				ixgbe_ping_all_vfs(adapter);
   4804 		}
   4805 	} else { /* Link down */
   4806 		if (adapter->link_active == TRUE) {
   4807 			if (bootverbose)
   4808 				device_printf(dev, "Link is Down\n");
   4809 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4810 			adapter->link_active = FALSE;
   4811 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4812 				ixgbe_ping_all_vfs(adapter);
   4813 			ixgbe_drain_all(adapter);
   4814 		}
   4815 	}
   4816 } /* ixgbe_update_link_status */
   4817 
   4818 /************************************************************************
   4819  * ixgbe_config_dmac - Configure DMA Coalescing
   4820  ************************************************************************/
   4821 static void
   4822 ixgbe_config_dmac(struct adapter *adapter)
   4823 {
   4824 	struct ixgbe_hw *hw = &adapter->hw;
   4825 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4826 
   4827 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4828 		return;
   4829 
   4830 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4831 	    dcfg->link_speed ^ adapter->link_speed) {
   4832 		dcfg->watchdog_timer = adapter->dmac;
   4833 		dcfg->fcoe_en = false;
   4834 		dcfg->link_speed = adapter->link_speed;
   4835 		dcfg->num_tcs = 1;
   4836 
   4837 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4838 		    dcfg->watchdog_timer, dcfg->link_speed);
   4839 
   4840 		hw->mac.ops.dmac_config(hw);
   4841 	}
   4842 } /* ixgbe_config_dmac */
   4843 
   4844 /************************************************************************
   4845  * ixgbe_enable_intr
   4846  ************************************************************************/
   4847 static void
   4848 ixgbe_enable_intr(struct adapter *adapter)
   4849 {
   4850 	struct ixgbe_hw	*hw = &adapter->hw;
   4851 	struct ix_queue	*que = adapter->queues;
   4852 	u32		mask, fwsm;
   4853 
   4854 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4855 
   4856 	switch (adapter->hw.mac.type) {
   4857 	case ixgbe_mac_82599EB:
   4858 		mask |= IXGBE_EIMS_ECC;
   4859 		/* Temperature sensor on some adapters */
   4860 		mask |= IXGBE_EIMS_GPI_SDP0;
   4861 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4862 		mask |= IXGBE_EIMS_GPI_SDP1;
   4863 		mask |= IXGBE_EIMS_GPI_SDP2;
   4864 		break;
   4865 	case ixgbe_mac_X540:
   4866 		/* Detect if Thermal Sensor is enabled */
   4867 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4868 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4869 			mask |= IXGBE_EIMS_TS;
   4870 		mask |= IXGBE_EIMS_ECC;
   4871 		break;
   4872 	case ixgbe_mac_X550:
   4873 		/* MAC thermal sensor is automatically enabled */
   4874 		mask |= IXGBE_EIMS_TS;
   4875 		mask |= IXGBE_EIMS_ECC;
   4876 		break;
   4877 	case ixgbe_mac_X550EM_x:
   4878 	case ixgbe_mac_X550EM_a:
   4879 		/* Some devices use SDP0 for important information */
   4880 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4881 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4882 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4883 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4884 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4885 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4886 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4887 		mask |= IXGBE_EIMS_ECC;
   4888 		break;
   4889 	default:
   4890 		break;
   4891 	}
   4892 
   4893 	/* Enable Fan Failure detection */
   4894 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4895 		mask |= IXGBE_EIMS_GPI_SDP1;
   4896 	/* Enable SR-IOV */
   4897 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4898 		mask |= IXGBE_EIMS_MAILBOX;
   4899 	/* Enable Flow Director */
   4900 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4901 		mask |= IXGBE_EIMS_FLOW_DIR;
   4902 
   4903 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4904 
   4905 	/* With MSI-X we use auto clear */
   4906 	if (adapter->msix_mem) {
   4907 		mask = IXGBE_EIMS_ENABLE_MASK;
   4908 		/* Don't autoclear Link */
   4909 		mask &= ~IXGBE_EIMS_OTHER;
   4910 		mask &= ~IXGBE_EIMS_LSC;
   4911 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4912 			mask &= ~IXGBE_EIMS_MAILBOX;
   4913 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4914 	}
   4915 
   4916 	/*
   4917 	 * Now enable all queues, this is done separately to
   4918 	 * allow for handling the extended (beyond 32) MSI-X
   4919 	 * vectors that can be used by 82599
   4920 	 */
   4921         for (int i = 0; i < adapter->num_queues; i++, que++)
   4922                 ixgbe_enable_queue(adapter, que->msix);
   4923 
   4924 	IXGBE_WRITE_FLUSH(hw);
   4925 
   4926 } /* ixgbe_enable_intr */
   4927 
   4928 /************************************************************************
   4929  * ixgbe_disable_intr_internal
   4930  ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue	*que = adapter->queues;

	/* disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	/* With MSI-X, also clear the auto-clear mask. */
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	/*
	 * Mask each queue vector. "nestok" is forwarded per queue; callers
	 * pass true (ixgbe_disable_intr) or false (ixgbe_ensure_disabled_intr)
	 * -- presumably whether a nested/repeated disable is acceptable.
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */
   4948 
   4949 /************************************************************************
   4950  * ixgbe_disable_intr
   4951  ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	/* Mask all device interrupts (nestok = true). */
	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */
   4958 
   4959 /************************************************************************
   4960  * ixgbe_ensure_disabled_intr
   4961  ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

	/* Mask all device interrupts (nestok = false). */
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */
   4968 
   4969 /************************************************************************
   4970  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4971  ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct 		tx_ring *txr = adapter->tx_rings;
	bool		more = false;
	u32             eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Latch the pending interrupt causes. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	/* Spurious (shared-line) interrupt: count it and bail out. */
	if (eicr == 0) {
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * The same as ixgbe_msix_que() about "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		/* Reap completed transmit descriptors. */
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: ack cause, defer to softint. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed-fiber: ack cause, defer to softint. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/*
	 * If RX work remains, reschedule the queue handler and leave
	 * interrupts masked; otherwise unmask before returning.
	 */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
   5062 
   5063 /************************************************************************
   5064  * ixgbe_free_pciintr_resources
   5065  ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int		rid;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector;
	else
		rid = 0;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	/* Finally give the interrupt vectors themselves back. */
	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */
   5101 
   5102 /************************************************************************
   5103  * ixgbe_free_pci_resources
   5104  ************************************************************************/
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{

	/* Tear down interrupt handlers and vectors first. */
	ixgbe_free_pciintr_resources(adapter);

	/* Then unmap the device register window, if it was mapped. */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

} /* ixgbe_free_pci_resources */
   5118 
   5119 /************************************************************************
   5120  * ixgbe_set_sysctl_value
   5121  ************************************************************************/
   5122 static void
   5123 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   5124     const char *description, int *limit, int value)
   5125 {
   5126 	device_t dev =  adapter->dev;
   5127 	struct sysctllog **log;
   5128 	const struct sysctlnode *rnode, *cnode;
   5129 
   5130 	/*
   5131 	 * It's not required to check recovery mode because this function never
   5132 	 * touches hardware.
   5133 	 */
   5134 
   5135 	log = &adapter->sysctllog;
   5136 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   5137 		aprint_error_dev(dev, "could not create sysctl root\n");
   5138 		return;
   5139 	}
   5140 	if (sysctl_createv(log, 0, &rnode, &cnode,
   5141 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   5142 	    name, SYSCTL_DESCR(description),
   5143 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   5144 		aprint_error_dev(dev, "could not create sysctl\n");
   5145 	*limit = value;
   5146 } /* ixgbe_set_sysctl_value */
   5147 
   5148 /************************************************************************
   5149  * ixgbe_sysctl_flowcntl
   5150  *
   5151  *   SYSCTL wrapper around setting Flow Control
   5152  ************************************************************************/
   5153 static int
   5154 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   5155 {
   5156 	struct sysctlnode node = *rnode;
   5157 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5158 	int error, fc;
   5159 
   5160 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5161 		return (EPERM);
   5162 
   5163 	fc = adapter->hw.fc.current_mode;
   5164 	node.sysctl_data = &fc;
   5165 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5166 	if (error != 0 || newp == NULL)
   5167 		return error;
   5168 
   5169 	/* Don't bother if it's not changed */
   5170 	if (fc == adapter->hw.fc.current_mode)
   5171 		return (0);
   5172 
   5173 	return ixgbe_set_flowcntl(adapter, fc);
   5174 } /* ixgbe_sysctl_flowcntl */
   5175 
   5176 /************************************************************************
   5177  * ixgbe_set_flowcntl - Set flow control
   5178  *
   5179  *   Flow control values:
   5180  *     0 - off
   5181  *     1 - rx pause
   5182  *     2 - tx pause
   5183  *     3 - full
   5184  ************************************************************************/
   5185 static int
   5186 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   5187 {
   5188 	switch (fc) {
   5189 		case ixgbe_fc_rx_pause:
   5190 		case ixgbe_fc_tx_pause:
   5191 		case ixgbe_fc_full:
   5192 			adapter->hw.fc.requested_mode = fc;
   5193 			if (adapter->num_queues > 1)
   5194 				ixgbe_disable_rx_drop(adapter);
   5195 			break;
   5196 		case ixgbe_fc_none:
   5197 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   5198 			if (adapter->num_queues > 1)
   5199 				ixgbe_enable_rx_drop(adapter);
   5200 			break;
   5201 		default:
   5202 			return (EINVAL);
   5203 	}
   5204 
   5205 #if 0 /* XXX NetBSD */
   5206 	/* Don't autoneg if forcing a value */
   5207 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   5208 #endif
   5209 	ixgbe_fc_enable(&adapter->hw);
   5210 
   5211 	return (0);
   5212 } /* ixgbe_set_flowcntl */
   5213 
   5214 /************************************************************************
   5215  * ixgbe_enable_rx_drop
   5216  *
   5217  *   Enable the hardware to drop packets when the buffer is
   5218  *   full. This is useful with multiqueue, so that no single
   5219  *   queue being full stalls the entire RX engine. We only
   5220  *   enable this when Multiqueue is enabled AND Flow Control
   5221  *   is disabled.
   5222  ************************************************************************/
   5223 static void
   5224 ixgbe_enable_rx_drop(struct adapter *adapter)
   5225 {
   5226 	struct ixgbe_hw *hw = &adapter->hw;
   5227 	struct rx_ring  *rxr;
   5228 	u32             srrctl;
   5229 
   5230 	for (int i = 0; i < adapter->num_queues; i++) {
   5231 		rxr = &adapter->rx_rings[i];
   5232 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5233 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   5234 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5235 	}
   5236 
   5237 	/* enable drop for each vf */
   5238 	for (int i = 0; i < adapter->num_vfs; i++) {
   5239 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5240 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   5241 		    IXGBE_QDE_ENABLE));
   5242 	}
   5243 } /* ixgbe_enable_rx_drop */
   5244 
   5245 /************************************************************************
   5246  * ixgbe_disable_rx_drop
   5247  ************************************************************************/
   5248 static void
   5249 ixgbe_disable_rx_drop(struct adapter *adapter)
   5250 {
   5251 	struct ixgbe_hw *hw = &adapter->hw;
   5252 	struct rx_ring  *rxr;
   5253 	u32             srrctl;
   5254 
   5255 	for (int i = 0; i < adapter->num_queues; i++) {
   5256 		rxr = &adapter->rx_rings[i];
   5257         	srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5258         	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   5259         	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5260 	}
   5261 
   5262 	/* disable drop for each vf */
   5263 	for (int i = 0; i < adapter->num_vfs; i++) {
   5264 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5265 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   5266 	}
   5267 } /* ixgbe_disable_rx_drop */
   5268 
   5269 /************************************************************************
   5270  * ixgbe_sysctl_advertise
   5271  *
   5272  *   SYSCTL wrapper around setting advertised speed
   5273  ************************************************************************/
   5274 static int
   5275 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   5276 {
   5277 	struct sysctlnode node = *rnode;
   5278 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5279 	int            error = 0, advertise;
   5280 
   5281 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5282 		return (EPERM);
   5283 
   5284 	advertise = adapter->advertise;
   5285 	node.sysctl_data = &advertise;
   5286 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5287 	if (error != 0 || newp == NULL)
   5288 		return error;
   5289 
   5290 	return ixgbe_set_advertise(adapter, advertise);
   5291 } /* ixgbe_sysctl_advertise */
   5292 
   5293 /************************************************************************
   5294  * ixgbe_set_advertise - Control advertised link speed
   5295  *
   5296  *   Flags:
   5297  *     0x00 - Default (all capable link speed)
   5298  *     0x01 - advertise 100 Mb
   5299  *     0x02 - advertise 1G
   5300  *     0x04 - advertise 10G
   5301  *     0x08 - advertise 10 Mb
   5302  *     0x10 - advertise 2.5G
   5303  *     0x20 - advertise 5G
   5304  ************************************************************************/
   5305 static int
   5306 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5307 {
   5308 	device_t         dev;
   5309 	struct ixgbe_hw  *hw;
   5310 	ixgbe_link_speed speed = 0;
   5311 	ixgbe_link_speed link_caps = 0;
   5312 	s32              err = IXGBE_NOT_IMPLEMENTED;
   5313 	bool             negotiate = FALSE;
   5314 
   5315 	/* Checks to validate new value */
   5316 	if (adapter->advertise == advertise) /* no change */
   5317 		return (0);
   5318 
   5319 	dev = adapter->dev;
   5320 	hw = &adapter->hw;
   5321 
   5322 	/* No speed changes for backplane media */
   5323 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5324 		return (ENODEV);
   5325 
   5326 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5327 	    (hw->phy.multispeed_fiber))) {
   5328 		device_printf(dev,
   5329 		    "Advertised speed can only be set on copper or "
   5330 		    "multispeed fiber media types.\n");
   5331 		return (EINVAL);
   5332 	}
   5333 
   5334 	if (advertise < 0x0 || advertise > 0x2f) {
   5335 		device_printf(dev,
   5336 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5337 		return (EINVAL);
   5338 	}
   5339 
   5340 	if (hw->mac.ops.get_link_capabilities) {
   5341 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5342 		    &negotiate);
   5343 		if (err != IXGBE_SUCCESS) {
   5344 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5345 			return (ENODEV);
   5346 		}
   5347 	}
   5348 
   5349 	/* Set new value and report new advertised mode */
   5350 	if (advertise & 0x1) {
   5351 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5352 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5353 			return (EINVAL);
   5354 		}
   5355 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5356 	}
   5357 	if (advertise & 0x2) {
   5358 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5359 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5360 			return (EINVAL);
   5361 		}
   5362 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5363 	}
   5364 	if (advertise & 0x4) {
   5365 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5366 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5367 			return (EINVAL);
   5368 		}
   5369 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5370 	}
   5371 	if (advertise & 0x8) {
   5372 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5373 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5374 			return (EINVAL);
   5375 		}
   5376 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5377 	}
   5378 	if (advertise & 0x10) {
   5379 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5380 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5381 			return (EINVAL);
   5382 		}
   5383 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5384 	}
   5385 	if (advertise & 0x20) {
   5386 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5387 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5388 			return (EINVAL);
   5389 		}
   5390 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5391 	}
   5392 	if (advertise == 0)
   5393 		speed = link_caps; /* All capable link speed */
   5394 
   5395 	hw->mac.autotry_restart = TRUE;
   5396 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5397 	adapter->advertise = advertise;
   5398 
   5399 	return (0);
   5400 } /* ixgbe_set_advertise */
   5401 
   5402 /************************************************************************
   5403  * ixgbe_get_advertise - Get current advertised speed settings
   5404  *
   5405  *   Formatted for sysctl usage.
   5406  *   Flags:
   5407  *     0x01 - advertise 100 Mb
   5408  *     0x02 - advertise 1G
   5409  *     0x04 - advertise 10G
   5410  *     0x08 - advertise 10 Mb (yes, Mb)
   5411  *     0x10 - advertise 2.5G
   5412  *     0x20 - advertise 5G
   5413  ************************************************************************/
   5414 static int
   5415 ixgbe_get_advertise(struct adapter *adapter)
   5416 {
   5417 	struct ixgbe_hw  *hw = &adapter->hw;
   5418 	int              speed;
   5419 	ixgbe_link_speed link_caps = 0;
   5420 	s32              err;
   5421 	bool             negotiate = FALSE;
   5422 
   5423 	/*
   5424 	 * Advertised speed means nothing unless it's copper or
   5425 	 * multi-speed fiber
   5426 	 */
   5427 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5428 	    !(hw->phy.multispeed_fiber))
   5429 		return (0);
   5430 
   5431 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5432 	if (err != IXGBE_SUCCESS)
   5433 		return (0);
   5434 
   5435 	speed =
   5436 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5437 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5438 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5439 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5440 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5441 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5442 
   5443 	return speed;
   5444 } /* ixgbe_get_advertise */
   5445 
   5446 /************************************************************************
   5447  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5448  *
   5449  *   Control values:
   5450  *     0/1 - off / on (use default value of 1000)
   5451  *
   5452  *     Legal timer values are:
   5453  *     50,100,250,500,1000,2000,5000,10000
   5454  *
   5455  *     Turning off interrupt moderation will also turn this off.
   5456  ************************************************************************/
static int
ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	struct ifnet   *ifp = adapter->ifp;
	int            error;
	int            newval;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Export current value; userland may write a new one. */
	newval = adapter->dmac;
	node.sysctl_data = &newval;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	/* Accept 0 (off), 1 (on, default timer), or a legal timer value. */
	switch (newval) {
	case 0:
		/* Disabled */
		adapter->dmac = 0;
		break;
	case 1:
		/* Enable and use default */
		adapter->dmac = 1000;
		break;
	case 50:
	case 100:
	case 250:
	case 500:
	case 1000:
	case 2000:
	case 5000:
	case 10000:
		/* Legal values - allow */
		adapter->dmac = newval;
		break;
	default:
		/* Do nothing, illegal value */
		return (EINVAL);
	}

	/* Re-initialize hardware if it's already running */
	if (ifp->if_flags & IFF_RUNNING)
		ifp->if_init(ifp);

	return (0);
} /* ixgbe_sysctl_dmac */
   5506 
   5507 #ifdef IXGBE_DEBUG
   5508 /************************************************************************
   5509  * ixgbe_sysctl_power_state
   5510  *
   5511  *   Sysctl to test power states
   5512  *   Values:
   5513  *     0      - set device to D0
   5514  *     3      - set device to D3
   5515  *     (none) - get current device power state
   5516  ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * Disabled: uses FreeBSD-only interfaces (pci_get_powerstate,
	 * DEVICE_SUSPEND/DEVICE_RESUME, req->newp) with no NetBSD port yet.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t       dev =  adapter->dev;
	int            curr_ps, new_ps, error = 0;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are supported. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* No-op until the code above is ported. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
   5552 #endif
   5553 
   5554 /************************************************************************
   5555  * ixgbe_sysctl_wol_enable
   5556  *
   5557  *   Sysctl to enable/disable the WoL capability,
   5558  *   if supported by the adapter.
   5559  *
   5560  *   Values:
   5561  *     0 - disabled
   5562  *     1 - enabled
   5563  ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool            new_wol_enabled;
	int             error = 0;

	/*
	 * It's not required to check recovery mode because this function never
	 * touches hardware.
	 */
	/*
	 * NOTE(review): sysctl_data points at a bool; this assumes the node
	 * was created with a matching bool-sized type (CTLTYPE_BOOL) -- an
	 * int-typed node would overwrite adjacent stack bytes. Confirm at
	 * the sysctl_createv() call site.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* Refuse to enable WoL on hardware without support. */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
   5592 
   5593 /************************************************************************
   5594  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5595  *
   5596  *   Sysctl to enable/disable the types of packets that the
   5597  *   adapter will wake up on upon receipt.
   5598  *   Flags:
   5599  *     0x1  - Link Status Change
   5600  *     0x2  - Magic Packet
   5601  *     0x4  - Direct Exact
   5602  *     0x8  - Directed Multicast
   5603  *     0x10 - Broadcast
   5604  *     0x20 - ARP/IPv4 Request Packet
   5605  *     0x40 - Direct IPv4 Packet
   5606  *     0x80 - Direct IPv6 Packet
   5607  *
   5608  *   Settings not listed above will cause the sysctl to return an error.
   5609  ************************************************************************/
   5610 static int
   5611 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5612 {
   5613 	struct sysctlnode node = *rnode;
   5614 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5615 	int error = 0;
   5616 	u32 new_wufc;
   5617 
   5618 	/*
   5619 	 * It's not required to check recovery mode because this function never
   5620 	 * touches hardware.
   5621 	 */
   5622 	new_wufc = adapter->wufc;
   5623 	node.sysctl_data = &new_wufc;
   5624 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5625 	if ((error) || (newp == NULL))
   5626 		return (error);
   5627 	if (new_wufc == adapter->wufc)
   5628 		return (0);
   5629 
   5630 	if (new_wufc & 0xffffff00)
   5631 		return (EINVAL);
   5632 
   5633 	new_wufc &= 0xff;
   5634 	new_wufc |= (0xffffff & adapter->wufc);
   5635 	adapter->wufc = new_wufc;
   5636 
   5637 	return (0);
   5638 } /* ixgbe_sysctl_wufc */
   5639 
   5640 #ifdef IXGBE_DEBUG
   5641 /************************************************************************
   5642  * ixgbe_sysctl_print_rss_config
   5643  ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/* Disabled: relies on FreeBSD sbuf(9) and req-based sysctl I/O. */
	struct sysctlnode node = *rnode;
	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	struct sbuf     *buf;
	int             error = 0, reta_size;
	u32             reg;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		/* Entries 0-31 live in RETA, the rest in ERETA. */
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
   5700 #endif /* IXGBE_DEBUG */
   5701 
   5702 /************************************************************************
   5703  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5704  *
   5705  *   For X552/X557-AT devices using an external PHY
   5706  ************************************************************************/
static int
ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	int val;
	u16 reg;
	int		error;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/* Only the X552/X557-AT external PHY exposes this sensor. */
	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
		device_printf(adapter->dev,
		    "Device has no supported external thermal sensor.\n");
		return (ENODEV);
	}

	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
		device_printf(adapter->dev,
		    "Error reading from PHY's current temperature register\n");
		return (EAGAIN);
	}

	node.sysctl_data = &val;

	/* Shift temp for output */
	val = reg >> 8;

	/* Read-only node: export val; writes are effectively ignored. */
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);

	return (0);
} /* ixgbe_sysctl_phy_temp */
   5744 
   5745 /************************************************************************
   5746  * ixgbe_sysctl_phy_overtemp_occurred
   5747  *
   5748  *   Reports (directly from the PHY) whether the current PHY
   5749  *   temperature is over the overtemp threshold.
   5750  ************************************************************************/
   5751 static int
   5752 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5753 {
   5754 	struct sysctlnode node = *rnode;
   5755 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5756 	struct ixgbe_hw *hw = &adapter->hw;
   5757 	int val, error;
   5758 	u16 reg;
   5759 
   5760 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5761 		return (EPERM);
   5762 
   5763 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5764 		device_printf(adapter->dev,
   5765 		    "Device has no supported external thermal sensor.\n");
   5766 		return (ENODEV);
   5767 	}
   5768 
   5769 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5770 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5771 		device_printf(adapter->dev,
   5772 		    "Error reading from PHY's temperature status register\n");
   5773 		return (EAGAIN);
   5774 	}
   5775 
   5776 	node.sysctl_data = &val;
   5777 
   5778 	/* Get occurrence bit */
   5779 	val = !!(reg & 0x4000);
   5780 
   5781 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5782 	if ((error) || (newp == NULL))
   5783 		return (error);
   5784 
   5785 	return (0);
   5786 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5787 
   5788 /************************************************************************
   5789  * ixgbe_sysctl_eee_state
   5790  *
   5791  *   Sysctl to set EEE power saving feature
   5792  *   Values:
   5793  *     0      - disable EEE
   5794  *     1      - enable EEE
   5795  *     (none) - get current device EEE state
   5796  ************************************************************************/
   5797 static int
   5798 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5799 {
   5800 	struct sysctlnode node = *rnode;
   5801 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5802 	struct ifnet   *ifp = adapter->ifp;
   5803 	device_t       dev = adapter->dev;
   5804 	int            curr_eee, new_eee, error = 0;
   5805 	s32            retval;
   5806 
   5807 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5808 		return (EPERM);
   5809 
   5810 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5811 	node.sysctl_data = &new_eee;
   5812 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5813 	if ((error) || (newp == NULL))
   5814 		return (error);
   5815 
   5816 	/* Nothing to do */
   5817 	if (new_eee == curr_eee)
   5818 		return (0);
   5819 
   5820 	/* Not supported */
   5821 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5822 		return (EINVAL);
   5823 
   5824 	/* Bounds checking */
   5825 	if ((new_eee < 0) || (new_eee > 1))
   5826 		return (EINVAL);
   5827 
   5828 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5829 	if (retval) {
   5830 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5831 		return (EINVAL);
   5832 	}
   5833 
   5834 	/* Restart auto-neg */
   5835 	ifp->if_init(ifp);
   5836 
   5837 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5838 
   5839 	/* Cache new value */
   5840 	if (new_eee)
   5841 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5842 	else
   5843 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5844 
   5845 	return (error);
   5846 } /* ixgbe_sysctl_eee_state */
   5847 
/*
 * PRINTQS - print one per-queue register across all queues on one line.
 *
 * Emits "<devname>: <regname>" followed by the 32-bit contents of
 * IXGBE_<regname>(i) for every queue i, tab-separated from the label and
 * space-separated from each other.  Console-only debug helper used by
 * ixgbe_print_debug_info().
 */
#define PRINTQS(adapter, regname)					\
	do {								\
		struct ixgbe_hw	*_hw = &(adapter)->hw;			\
		int _i;							\
									\
		printf("%s: %s", device_xname((adapter)->dev), #regname); \
		for (_i = 0; _i < (adapter)->num_queues; _i++) {	\
			printf((_i == 0) ? "\t" : " ");			\
			printf("%08x", IXGBE_READ_REG(_hw,		\
				IXGBE_##regname(_i)));			\
		}							\
		printf("\n");						\
	} while (0)
   5861 
   5862 /************************************************************************
   5863  * ixgbe_print_debug_info
   5864  *
   5865  *   Called only when em_display_debug_stats is enabled.
   5866  *   Provides a way to take a look at important statistics
   5867  *   maintained by the driver and hardware.
   5868  ************************************************************************/
   5869 static void
   5870 ixgbe_print_debug_info(struct adapter *adapter)
   5871 {
   5872         device_t        dev = adapter->dev;
   5873         struct ixgbe_hw *hw = &adapter->hw;
   5874 	int table_size;
   5875 	int i;
   5876 
   5877 	switch (adapter->hw.mac.type) {
   5878 	case ixgbe_mac_X550:
   5879 	case ixgbe_mac_X550EM_x:
   5880 	case ixgbe_mac_X550EM_a:
   5881 		table_size = 128;
   5882 		break;
   5883 	default:
   5884 		table_size = 32;
   5885 		break;
   5886 	}
   5887 
   5888 	device_printf(dev, "[E]RETA:\n");
   5889 	for (i = 0; i < table_size; i++) {
   5890 		if (i < 32)
   5891 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5892 				IXGBE_RETA(i)));
   5893 		else
   5894 			printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
   5895 				IXGBE_ERETA(i - 32)));
   5896 	}
   5897 
   5898 	device_printf(dev, "queue:");
   5899 	for (i = 0; i < adapter->num_queues; i++) {
   5900 		printf((i == 0) ? "\t" : " ");
   5901 		printf("%8d", i);
   5902 	}
   5903 	printf("\n");
   5904 	PRINTQS(adapter, RDBAL);
   5905 	PRINTQS(adapter, RDBAH);
   5906 	PRINTQS(adapter, RDLEN);
   5907 	PRINTQS(adapter, SRRCTL);
   5908 	PRINTQS(adapter, RDH);
   5909 	PRINTQS(adapter, RDT);
   5910 	PRINTQS(adapter, RXDCTL);
   5911 
   5912 	device_printf(dev, "RQSMR:");
   5913 	for (i = 0; i < adapter->num_queues / 4; i++) {
   5914 		printf((i == 0) ? "\t" : " ");
   5915 		printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
   5916 	}
   5917 	printf("\n");
   5918 
   5919 	device_printf(dev, "disabled_count:");
   5920 	for (i = 0; i < adapter->num_queues; i++) {
   5921 		printf((i == 0) ? "\t" : " ");
   5922 		printf("%8d", adapter->queues[i].disabled_count);
   5923 	}
   5924 	printf("\n");
   5925 
   5926 	device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
   5927 	if (hw->mac.type != ixgbe_mac_82598EB) {
   5928 		device_printf(dev, "EIMS_EX(0):\t%08x\n",
   5929 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
   5930 		device_printf(dev, "EIMS_EX(1):\t%08x\n",
   5931 			      IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
   5932 	}
   5933 } /* ixgbe_print_debug_info */
   5934 
   5935 /************************************************************************
   5936  * ixgbe_sysctl_debug
   5937  ************************************************************************/
   5938 static int
   5939 ixgbe_sysctl_debug(SYSCTLFN_ARGS)
   5940 {
   5941 	struct sysctlnode node = *rnode;
   5942 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5943 	int            error, result = 0;
   5944 
   5945 	if (ixgbe_fw_recovery_mode_swflag(adapter))
   5946 		return (EPERM);
   5947 
   5948 	node.sysctl_data = &result;
   5949 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5950 
   5951 	if (error || newp == NULL)
   5952 		return error;
   5953 
   5954 	if (result == 1)
   5955 		ixgbe_print_debug_info(adapter);
   5956 
   5957 	return 0;
   5958 } /* ixgbe_sysctl_debug */
   5959 
   5960 /************************************************************************
   5961  * ixgbe_init_device_features
   5962  ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/* Start from the feature set common to all supported MACs. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	                  | IXGBE_FEATURE_RSS
	                  | IXGBE_FEATURE_MSI
	                  | IXGBE_FEATURE_MSIX
	                  | IXGBE_FEATURE_LEGACY_IRQ
	                  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* Fan-failure reporting exists only on the 82598AT SKU. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass support: only on PCI function 0 of the bypass SKU. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* EEE capability: only the KR variant of X550EM-X. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		/*
		 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
		 * NVM Image version.
		 */
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM-A cannot use INTx. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass support: only on PCI function 0 of the bypass SKU. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		/* The QSFP+ SKU cannot use INTx. */
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
	/*
	 * Recovery mode:
	 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
	 * NVM Image version.
	 */

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X: RSS and SR-IOV both require per-queue vectors. */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
   6082 
   6083 /************************************************************************
   6084  * ixgbe_probe - Device identification routine
   6085  *
   6086  *   Determines if the driver should be loaded on
   6087  *   adapter based on its PCI vendor/device ID.
   6088  *
   6089  *   return BUS_PROBE_DEFAULT on success, positive on failure
   6090  ************************************************************************/
   6091 static int
   6092 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   6093 {
   6094 	const struct pci_attach_args *pa = aux;
   6095 
   6096 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   6097 }
   6098 
   6099 static const ixgbe_vendor_info_t *
   6100 ixgbe_lookup(const struct pci_attach_args *pa)
   6101 {
   6102 	const ixgbe_vendor_info_t *ent;
   6103 	pcireg_t subid;
   6104 
   6105 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   6106 
   6107 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   6108 		return NULL;
   6109 
   6110 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   6111 
   6112 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   6113 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   6114 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   6115 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   6116 			(ent->subvendor_id == 0)) &&
   6117 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   6118 			(ent->subdevice_id == 0))) {
   6119 			return ent;
   6120 		}
   6121 	}
   6122 	return NULL;
   6123 }
   6124 
   6125 static int
   6126 ixgbe_ifflags_cb(struct ethercom *ec)
   6127 {
   6128 	struct ifnet *ifp = &ec->ec_if;
   6129 	struct adapter *adapter = ifp->if_softc;
   6130 	int change, rc = 0;
   6131 
   6132 	IXGBE_CORE_LOCK(adapter);
   6133 
   6134 	change = ifp->if_flags ^ adapter->if_flags;
   6135 	if (change != 0)
   6136 		adapter->if_flags = ifp->if_flags;
   6137 
   6138 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   6139 		rc = ENETRESET;
   6140 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   6141 		ixgbe_set_promisc(adapter);
   6142 
   6143 	/* Set up VLAN support and filter */
   6144 	ixgbe_setup_vlan_hw_support(adapter);
   6145 
   6146 	IXGBE_CORE_UNLOCK(adapter);
   6147 
   6148 	return rc;
   6149 }
   6150 
   6151 /************************************************************************
   6152  * ixgbe_ioctl - Ioctl entry point
   6153  *
   6154  *   Called when the user wants to configure the interface.
   6155  *
   6156  *   return 0 on success, positive on failure
   6157  ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int             error = 0;
	int l4csum_en;
	/* Rx L4 checksum offloads that must be toggled as one unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/* Refuse all configuration while firmware is in recovery mode. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * First pass: debug-trace the request.  Note that SIOCZIFDATA also
	 * performs its real work here (clearing HW and event counters)
	 * before falling through to the common handling below.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Clear both the hardware counters and the evcnt copies. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second pass: actually dispatch the request. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media requests go straight to the ifmedia layer. */
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP ID (0xA0) and diagnostics (0xA2) addresses. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/*
		 * Let the generic ethernet layer handle the request first;
		 * ENETRESET means the hardware must be re-programmed.
		 */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* Interface not running: nothing to re-program now. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU changes require a full reinit. */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
   6295 
   6296 /************************************************************************
   6297  * ixgbe_check_fan_failure
   6298  ************************************************************************/
   6299 static void
   6300 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   6301 {
   6302 	u32 mask;
   6303 
   6304 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   6305 	    IXGBE_ESDP_SDP1;
   6306 
   6307 	if (reg & mask)
   6308 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   6309 } /* ixgbe_check_fan_failure */
   6310 
   6311 /************************************************************************
   6312  * ixgbe_handle_que
   6313  ************************************************************************/
   6314 static void
   6315 ixgbe_handle_que(void *context)
   6316 {
   6317 	struct ix_queue *que = context;
   6318 	struct adapter  *adapter = que->adapter;
   6319 	struct tx_ring  *txr = que->txr;
   6320 	struct ifnet    *ifp = adapter->ifp;
   6321 	bool		more = false;
   6322 
   6323 	que->handleq.ev_count++;
   6324 
   6325 	if (ifp->if_flags & IFF_RUNNING) {
   6326 		more = ixgbe_rxeof(que);
   6327 		IXGBE_TX_LOCK(txr);
   6328 		more |= ixgbe_txeof(txr);
   6329 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6330 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   6331 				ixgbe_mq_start_locked(ifp, txr);
   6332 		/* Only for queue 0 */
   6333 		/* NetBSD still needs this for CBQ */
   6334 		if ((&adapter->queues[0] == que)
   6335 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   6336 			ixgbe_legacy_start_locked(ifp, txr);
   6337 		IXGBE_TX_UNLOCK(txr);
   6338 	}
   6339 
   6340 	if (more) {
   6341 		que->req.ev_count++;
   6342 		ixgbe_sched_handle_que(adapter, que);
   6343 	} else if (que->res != NULL) {
   6344 		/* Re-enable this interrupt */
   6345 		ixgbe_enable_queue(adapter, que->msix);
   6346 	} else
   6347 		ixgbe_enable_intr(adapter);
   6348 
   6349 	return;
   6350 } /* ixgbe_handle_que */
   6351 
   6352 /************************************************************************
   6353  * ixgbe_handle_que_work
   6354  ************************************************************************/
   6355 static void
   6356 ixgbe_handle_que_work(struct work *wk, void *context)
   6357 {
   6358 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   6359 
   6360 	/*
   6361 	 * "enqueued flag" is not required here.
   6362 	 * See ixgbe_msix_que().
   6363 	 */
   6364 	ixgbe_handle_que(que);
   6365 }
   6366 
   6367 /************************************************************************
   6368  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   6369  ************************************************************************/
   6370 static int
   6371 ixgbe_allocate_legacy(struct adapter *adapter,
   6372     const struct pci_attach_args *pa)
   6373 {
   6374 	device_t	dev = adapter->dev;
   6375 	struct ix_queue *que = adapter->queues;
   6376 	struct tx_ring  *txr = adapter->tx_rings;
   6377 	int		counts[PCI_INTR_TYPE_SIZE];
   6378 	pci_intr_type_t intr_type, max_type;
   6379 	char            intrbuf[PCI_INTRSTR_LEN];
   6380 	const char	*intrstr = NULL;
   6381 
   6382 	/* We allocate a single interrupt resource */
   6383 	max_type = PCI_INTR_TYPE_MSI;
   6384 	counts[PCI_INTR_TYPE_MSIX] = 0;
   6385 	counts[PCI_INTR_TYPE_MSI] =
   6386 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   6387 	/* Check not feat_en but feat_cap to fallback to INTx */
   6388 	counts[PCI_INTR_TYPE_INTX] =
   6389 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   6390 
   6391 alloc_retry:
   6392 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   6393 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   6394 		return ENXIO;
   6395 	}
   6396 	adapter->osdep.nintrs = 1;
   6397 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   6398 	    intrbuf, sizeof(intrbuf));
   6399 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   6400 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   6401 	    device_xname(dev));
   6402 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   6403 	if (adapter->osdep.ihs[0] == NULL) {
   6404 		aprint_error_dev(dev,"unable to establish %s\n",
   6405 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6406 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6407 		adapter->osdep.intrs = NULL;
   6408 		switch (intr_type) {
   6409 		case PCI_INTR_TYPE_MSI:
   6410 			/* The next try is for INTx: Disable MSI */
   6411 			max_type = PCI_INTR_TYPE_INTX;
   6412 			counts[PCI_INTR_TYPE_INTX] = 1;
   6413 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6414 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   6415 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6416 				goto alloc_retry;
   6417 			} else
   6418 				break;
   6419 		case PCI_INTR_TYPE_INTX:
   6420 		default:
   6421 			/* See below */
   6422 			break;
   6423 		}
   6424 	}
   6425 	if (intr_type == PCI_INTR_TYPE_INTX) {
   6426 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6427 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6428 	}
   6429 	if (adapter->osdep.ihs[0] == NULL) {
   6430 		aprint_error_dev(dev,
   6431 		    "couldn't establish interrupt%s%s\n",
   6432 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   6433 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6434 		adapter->osdep.intrs = NULL;
   6435 		return ENXIO;
   6436 	}
   6437 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   6438 	/*
   6439 	 * Try allocating a fast interrupt and the associated deferred
   6440 	 * processing contexts.
   6441 	 */
   6442 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6443 		txr->txr_si =
   6444 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6445 			ixgbe_deferred_mq_start, txr);
   6446 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6447 	    ixgbe_handle_que, que);
   6448 
   6449 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   6450 		& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   6451 		aprint_error_dev(dev,
   6452 		    "could not establish software interrupts\n");
   6453 
   6454 		return ENXIO;
   6455 	}
   6456 	/* For simplicity in the handlers */
   6457 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   6458 
   6459 	return (0);
   6460 } /* ixgbe_allocate_legacy */
   6461 
   6462 /************************************************************************
   6463  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6464  ************************************************************************/
   6465 static int
   6466 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6467 {
   6468 	device_t        dev = adapter->dev;
   6469 	struct 		ix_queue *que = adapter->queues;
   6470 	struct  	tx_ring *txr = adapter->tx_rings;
   6471 	pci_chipset_tag_t pc;
   6472 	char		intrbuf[PCI_INTRSTR_LEN];
   6473 	char		intr_xname[32];
   6474 	char		wqname[MAXCOMLEN];
   6475 	const char	*intrstr = NULL;
   6476 	int 		error, vector = 0;
   6477 	int		cpu_id = 0;
   6478 	kcpuset_t	*affinity;
   6479 #ifdef RSS
   6480 	unsigned int    rss_buckets = 0;
   6481 	kcpuset_t	cpu_mask;
   6482 #endif
   6483 
   6484 	pc = adapter->osdep.pc;
   6485 #ifdef	RSS
   6486 	/*
   6487 	 * If we're doing RSS, the number of queues needs to
   6488 	 * match the number of RSS buckets that are configured.
   6489 	 *
   6490 	 * + If there's more queues than RSS buckets, we'll end
   6491 	 *   up with queues that get no traffic.
   6492 	 *
   6493 	 * + If there's more RSS buckets than queues, we'll end
   6494 	 *   up having multiple RSS buckets map to the same queue,
   6495 	 *   so there'll be some contention.
   6496 	 */
   6497 	rss_buckets = rss_getnumbuckets();
   6498 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6499 	    (adapter->num_queues != rss_buckets)) {
   6500 		device_printf(dev,
   6501 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6502 		    "; performance will be impacted.\n",
   6503 		    __func__, adapter->num_queues, rss_buckets);
   6504 	}
   6505 #endif
   6506 
   6507 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6508 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6509 	    adapter->osdep.nintrs) != 0) {
   6510 		aprint_error_dev(dev,
   6511 		    "failed to allocate MSI-X interrupt\n");
   6512 		return (ENXIO);
   6513 	}
   6514 
   6515 	kcpuset_create(&affinity, false);
   6516 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6517 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6518 		    device_xname(dev), i);
   6519 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6520 		    sizeof(intrbuf));
   6521 #ifdef IXGBE_MPSAFE
   6522 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6523 		    true);
   6524 #endif
   6525 		/* Set the handler function */
   6526 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6527 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6528 		    intr_xname);
   6529 		if (que->res == NULL) {
   6530 			aprint_error_dev(dev,
   6531 			    "Failed to register QUE handler\n");
   6532 			error = ENXIO;
   6533 			goto err_out;
   6534 		}
   6535 		que->msix = vector;
   6536 		adapter->active_queues |= (u64)(1 << que->msix);
   6537 
   6538 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6539 #ifdef	RSS
   6540 			/*
   6541 			 * The queue ID is used as the RSS layer bucket ID.
   6542 			 * We look up the queue ID -> RSS CPU ID and select
   6543 			 * that.
   6544 			 */
   6545 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6546 			CPU_SETOF(cpu_id, &cpu_mask);
   6547 #endif
   6548 		} else {
   6549 			/*
   6550 			 * Bind the MSI-X vector, and thus the
   6551 			 * rings to the corresponding CPU.
   6552 			 *
   6553 			 * This just happens to match the default RSS
   6554 			 * round-robin bucket -> queue -> CPU allocation.
   6555 			 */
   6556 			if (adapter->num_queues > 1)
   6557 				cpu_id = i;
   6558 		}
   6559 		/* Round-robin affinity */
   6560 		kcpuset_zero(affinity);
   6561 		kcpuset_set(affinity, cpu_id % ncpu);
   6562 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6563 		    NULL);
   6564 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6565 		    intrstr);
   6566 		if (error == 0) {
   6567 #if 1 /* def IXGBE_DEBUG */
   6568 #ifdef	RSS
   6569 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6570 			    cpu_id % ncpu);
   6571 #else
   6572 			aprint_normal(", bound queue %d to cpu %d", i,
   6573 			    cpu_id % ncpu);
   6574 #endif
   6575 #endif /* IXGBE_DEBUG */
   6576 		}
   6577 		aprint_normal("\n");
   6578 
   6579 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6580 			txr->txr_si = softint_establish(
   6581 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6582 				ixgbe_deferred_mq_start, txr);
   6583 			if (txr->txr_si == NULL) {
   6584 				aprint_error_dev(dev,
   6585 				    "couldn't establish software interrupt\n");
   6586 				error = ENXIO;
   6587 				goto err_out;
   6588 			}
   6589 		}
   6590 		que->que_si
   6591 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6592 			ixgbe_handle_que, que);
   6593 		if (que->que_si == NULL) {
   6594 			aprint_error_dev(dev,
   6595 			    "couldn't establish software interrupt\n");
   6596 			error = ENXIO;
   6597 			goto err_out;
   6598 		}
   6599 	}
   6600 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6601 	error = workqueue_create(&adapter->txr_wq, wqname,
   6602 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6603 	    IXGBE_WORKQUEUE_FLAGS);
   6604 	if (error) {
   6605 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6606 		goto err_out;
   6607 	}
   6608 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6609 
   6610 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6611 	error = workqueue_create(&adapter->que_wq, wqname,
   6612 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6613 	    IXGBE_WORKQUEUE_FLAGS);
   6614 	if (error) {
   6615 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6616 		goto err_out;
   6617 	}
   6618 
   6619 	/* and Link */
   6620 	cpu_id++;
   6621 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6622 	adapter->vector = vector;
   6623 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6624 	    sizeof(intrbuf));
   6625 #ifdef IXGBE_MPSAFE
   6626 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6627 	    true);
   6628 #endif
   6629 	/* Set the link handler function */
   6630 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6631 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6632 	    intr_xname);
   6633 	if (adapter->osdep.ihs[vector] == NULL) {
   6634 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6635 		error = ENXIO;
   6636 		goto err_out;
   6637 	}
   6638 	/* Round-robin affinity */
   6639 	kcpuset_zero(affinity);
   6640 	kcpuset_set(affinity, cpu_id % ncpu);
   6641 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6642 	    NULL);
   6643 
   6644 	aprint_normal_dev(dev,
   6645 	    "for link, interrupting at %s", intrstr);
   6646 	if (error == 0)
   6647 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6648 	else
   6649 		aprint_normal("\n");
   6650 
   6651 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6652 		adapter->mbx_si =
   6653 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6654 			ixgbe_handle_mbx, adapter);
   6655 		if (adapter->mbx_si == NULL) {
   6656 			aprint_error_dev(dev,
   6657 			    "could not establish software interrupts\n");
   6658 
   6659 			error = ENXIO;
   6660 			goto err_out;
   6661 		}
   6662 	}
   6663 
   6664 	kcpuset_destroy(affinity);
   6665 	aprint_normal_dev(dev,
   6666 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6667 
   6668 	return (0);
   6669 
   6670 err_out:
   6671 	kcpuset_destroy(affinity);
   6672 	ixgbe_free_softint(adapter);
   6673 	ixgbe_free_pciintr_resources(adapter);
   6674 	return (error);
   6675 } /* ixgbe_allocate_msix */
   6676 
   6677 /************************************************************************
   6678  * ixgbe_configure_interrupts
   6679  *
   6680  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6681  *   This will also depend on user settings.
   6682  ************************************************************************/
   6683 static int
   6684 ixgbe_configure_interrupts(struct adapter *adapter)
   6685 {
   6686 	device_t dev = adapter->dev;
   6687 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6688 	int want, queues, msgs;
   6689 
   6690 	/* Default to 1 queue if MSI-X setup fails */
   6691 	adapter->num_queues = 1;
   6692 
   6693 	/* Override by tuneable */
   6694 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6695 		goto msi;
   6696 
   6697 	/*
   6698 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6699 	 * interrupt slot.
   6700 	 */
   6701 	if (ncpu == 1)
   6702 		goto msi;
   6703 
   6704 	/* First try MSI-X */
   6705 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6706 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6707 	if (msgs < 2)
   6708 		goto msi;
   6709 
   6710 	adapter->msix_mem = (void *)1; /* XXX */
   6711 
   6712 	/* Figure out a reasonable auto config value */
   6713 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6714 
   6715 #ifdef	RSS
   6716 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6717 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6718 		queues = uimin(queues, rss_getnumbuckets());
   6719 #endif
   6720 	if (ixgbe_num_queues > queues) {
   6721 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6722 		ixgbe_num_queues = queues;
   6723 	}
   6724 
   6725 	if (ixgbe_num_queues != 0)
   6726 		queues = ixgbe_num_queues;
   6727 	else
   6728 		queues = uimin(queues,
   6729 		    uimin(mac->max_tx_queues, mac->max_rx_queues));
   6730 
   6731 	/* reflect correct sysctl value */
   6732 	ixgbe_num_queues = queues;
   6733 
   6734 	/*
   6735 	 * Want one vector (RX/TX pair) per queue
   6736 	 * plus an additional for Link.
   6737 	 */
   6738 	want = queues + 1;
   6739 	if (msgs >= want)
   6740 		msgs = want;
   6741 	else {
   6742                	aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6743 		    "%d vectors but %d queues wanted!\n",
   6744 		    msgs, want);
   6745 		goto msi;
   6746 	}
   6747 	adapter->num_queues = queues;
   6748 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6749 	return (0);
   6750 
   6751 	/*
   6752 	 * MSI-X allocation failed or provided us with
   6753 	 * less vectors than needed. Free MSI-X resources
   6754 	 * and we'll try enabling MSI.
   6755 	 */
   6756 msi:
   6757 	/* Without MSI-X, some features are no longer supported */
   6758 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6759 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6760 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6761 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6762 
   6763        	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6764 	adapter->msix_mem = NULL; /* XXX */
   6765 	if (msgs > 1)
   6766 		msgs = 1;
   6767 	if (msgs != 0) {
   6768 		msgs = 1;
   6769 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6770 		return (0);
   6771 	}
   6772 
   6773 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6774 		aprint_error_dev(dev,
   6775 		    "Device does not support legacy interrupts.\n");
   6776 		return 1;
   6777 	}
   6778 
   6779 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6780 
   6781 	return (0);
   6782 } /* ixgbe_configure_interrupts */
   6783 
   6784 
   6785 /************************************************************************
   6786  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6787  *
   6788  *   Done outside of interrupt context since the driver might sleep
   6789  ************************************************************************/
   6790 static void
   6791 ixgbe_handle_link(void *context)
   6792 {
   6793 	struct adapter  *adapter = context;
   6794 	struct ixgbe_hw *hw = &adapter->hw;
   6795 
   6796 	IXGBE_CORE_LOCK(adapter);
   6797 	++adapter->link_sicount.ev_count;
   6798 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6799 	ixgbe_update_link_status(adapter);
   6800 
   6801 	/* Re-enable link interrupts */
   6802 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6803 
   6804 	IXGBE_CORE_UNLOCK(adapter);
   6805 } /* ixgbe_handle_link */
   6806 
#if 0
/************************************************************************
 * ixgbe_rearm_queues
 *
 *   Write the EICS register(s) to raise (software-trigger) interrupts
 *   for the queues named in the 64-bit 'queues' bitmap.  Currently
 *   compiled out (#if 0) — no callers in this file.
 ************************************************************************/
static __inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has a single 32-bit EICS; only queue bits apply. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Newer MACs split the 64 queue bits across two registers. */
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		/* Unknown MAC type: nothing to rearm. */
		break;
	}
} /* ixgbe_rearm_queues */
#endif
   6836