      1 /* $NetBSD: ixgbe.c,v 1.120 2018/01/26 09:07:46 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
      88  *   Used by probe to select which devices to attach to
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void      ixgbe_free_softint(struct adapter *);
    176 static void	ixgbe_get_slot_info(struct adapter *);
    177 static int      ixgbe_allocate_msix(struct adapter *,
    178 		    const struct pci_attach_args *);
    179 static int      ixgbe_allocate_legacy(struct adapter *,
    180 		    const struct pci_attach_args *);
    181 static int      ixgbe_configure_interrupts(struct adapter *);
    182 static void	ixgbe_free_pciintr_resources(struct adapter *);
    183 static void	ixgbe_free_pci_resources(struct adapter *);
    184 static void	ixgbe_local_timer(void *);
    185 static void	ixgbe_local_timer1(void *);
    186 static int	ixgbe_setup_interface(device_t, struct adapter *);
    187 static void	ixgbe_config_gpie(struct adapter *);
    188 static void	ixgbe_config_dmac(struct adapter *);
    189 static void	ixgbe_config_delay_values(struct adapter *);
    190 static void	ixgbe_config_link(struct adapter *);
    191 static void	ixgbe_check_wol_support(struct adapter *);
    192 static int	ixgbe_setup_low_power_mode(struct adapter *);
    193 static void	ixgbe_rearm_queues(struct adapter *, u64);
    194 
    195 static void     ixgbe_initialize_transmit_units(struct adapter *);
    196 static void     ixgbe_initialize_receive_units(struct adapter *);
    197 static void	ixgbe_enable_rx_drop(struct adapter *);
    198 static void	ixgbe_disable_rx_drop(struct adapter *);
    199 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    200 
    201 static void     ixgbe_enable_intr(struct adapter *);
    202 static void     ixgbe_disable_intr(struct adapter *);
    203 static void     ixgbe_update_stats_counters(struct adapter *);
    204 static void     ixgbe_set_promisc(struct adapter *);
    205 static void     ixgbe_set_multi(struct adapter *);
    206 static void     ixgbe_update_link_status(struct adapter *);
    207 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    208 static void	ixgbe_configure_ivars(struct adapter *);
    209 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    210 
    211 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    212 #if 0
    213 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    214 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    215 #endif
    216 
    217 static void	ixgbe_add_device_sysctls(struct adapter *);
    218 static void     ixgbe_add_hw_stats(struct adapter *);
    219 static void	ixgbe_clear_evcnt(struct adapter *);
    220 static int	ixgbe_set_flowcntl(struct adapter *, int);
    221 static int	ixgbe_set_advertise(struct adapter *, int);
    222 static int      ixgbe_get_advertise(struct adapter *);
    223 
    224 /* Sysctl handlers */
    225 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    226 		     const char *, int *, int);
    227 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    229 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    231 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    232 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    233 #ifdef IXGBE_DEBUG
    234 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    235 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    236 #endif
    237 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    240 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    241 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    242 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    243 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    244 
    245 /* Support for pluggable optic modules */
    246 static bool	ixgbe_sfp_probe(struct adapter *);
    247 
    248 /* Legacy (single vector) interrupt handler */
    249 static int	ixgbe_legacy_irq(void *);
    250 
    251 /* The MSI/MSI-X Interrupt handlers */
    252 static int	ixgbe_msix_que(void *);
    253 static int	ixgbe_msix_link(void *);
    254 
    255 /* Software interrupts for deferred work */
    256 static void	ixgbe_handle_que(void *);
    257 static void	ixgbe_handle_link(void *);
    258 static void	ixgbe_handle_msf(void *);
    259 static void	ixgbe_handle_mod(void *);
    260 static void	ixgbe_handle_phy(void *);
    261 
    262 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    263 
    264 /************************************************************************
    265  *  NetBSD Device Interface Entry Points
    266  ************************************************************************/
    267 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    268     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    269     DVF_DETACH_SHUTDOWN);
    270 
    271 #if 0
    272 devclass_t ix_devclass;
    273 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    274 
    275 MODULE_DEPEND(ix, pci, 1, 1, 1);
    276 MODULE_DEPEND(ix, ether, 1, 1, 1);
    277 #ifdef DEV_NETMAP
    278 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    279 #endif
    280 #endif
    281 
    282 /*
    283  * TUNEABLE PARAMETERS:
    284  */
    285 
     286 /*
     287  * AIM: Adaptive Interrupt Moderation.
     288  * The interrupt rate is varied over time
     289  * based on the traffic observed on each
     290  * interrupt vector.
     291  */
    292 static bool ixgbe_enable_aim = true;
    293 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    294 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    295     "Enable adaptive interrupt moderation");
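/*
 * With AIM enabled, the per-queue interrupt handler recomputes each
 * vector's EITR interval from the traffic observed since the previous
 * interrupt, trading a little latency for fewer interrupts under bulk
 * traffic.
 */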
    296 
    297 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    298 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    299     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    300 
    301 /* How many packets rxeof tries to clean at a time */
    302 static int ixgbe_rx_process_limit = 256;
    303 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    304     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    305 
    306 /* How many packets txeof tries to clean at a time */
    307 static int ixgbe_tx_process_limit = 256;
    308 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    309     &ixgbe_tx_process_limit, 0,
    310     "Maximum number of sent packets to process at a time, -1 means unlimited");
    311 
    312 /* Flow control setting, default to full */
    313 static int ixgbe_flow_control = ixgbe_fc_full;
    314 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    315     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    316 
     317 /*
     318  * Smart speed setting, default to on.
     319  * This only works as a compile-time option
     320  * right now since it is set during attach;
     321  * set this to 'ixgbe_smart_speed_off' to
     322  * disable.
     323  */
    324 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    325 
    326 /*
    327  * MSI-X should be the default for best performance,
    328  * but this allows it to be forced off for testing.
    329  */
    330 static int ixgbe_enable_msix = 1;
    331 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    332     "Enable MSI-X interrupts");
    333 
     334 /*
     335  * Number of queues. If set to 0, the driver
     336  * autoconfigures based on the number of
     337  * CPUs, with a maximum of 8. This can be
     338  * overridden manually here.
     339  */
    340 static int ixgbe_num_queues = 0;
    341 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    342     "Number of queues to configure, 0 indicates autoconfigure");
    343 
     344 /*
     345  * Number of TX descriptors per ring,
     346  * set higher than RX as this seems to be
     347  * the better-performing choice.
     348  */
    349 static int ixgbe_txd = PERFORM_TXD;
    350 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    351     "Number of transmit descriptors per queue");
    352 
    353 /* Number of RX descriptors per ring */
    354 static int ixgbe_rxd = PERFORM_RXD;
    355 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    356     "Number of receive descriptors per queue");
    357 
     358 /*
     359  * Enabling this allows the use of
     360  * unsupported SFP+ modules; note that in
     361  * doing so you are on your own :)
     362  */
    363 static int allow_unsupported_sfp = false;
    364 #define TUNABLE_INT(__x, __y)
    365 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    366 
    367 /*
    368  * Not sure if Flow Director is fully baked,
    369  * so we'll default to turning it off.
    370  */
    371 static int ixgbe_enable_fdir = 0;
    372 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    373     "Enable Flow Director");
    374 
    375 /* Legacy Transmit (single queue) */
    376 static int ixgbe_enable_legacy_tx = 0;
    377 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    378     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    379 
    380 /* Receive-Side Scaling */
    381 static int ixgbe_enable_rss = 1;
    382 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    383     "Enable Receive-Side Scaling (RSS)");
    384 
     385 /* Keep a running tab on the total number of ports for sanity checks */
    386 static int ixgbe_total_ports;
    387 
    388 #if 0
    389 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    390 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    391 #endif
    392 
    393 #ifdef NET_MPSAFE
    394 #define IXGBE_MPSAFE		1
    395 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    396 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    397 #else
    398 #define IXGBE_CALLOUT_FLAGS	0
    399 #define IXGBE_SOFTINFT_FLAGS	0
    400 #endif
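/*
 * With NET_MPSAFE, the callout and softints established below are
 * created MP-safe and run without the kernel lock; otherwise the
 * default KERNEL_LOCK-protected behaviour is used.
 */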
    401 
    402 /************************************************************************
    403  * ixgbe_initialize_rss_mapping
    404  ************************************************************************/
    405 static void
    406 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    407 {
    408 	struct ixgbe_hw	*hw = &adapter->hw;
    409 	u32             reta = 0, mrqc, rss_key[10];
    410 	int             queue_id, table_size, index_mult;
    411 	int             i, j;
    412 	u32             rss_hash_config;
    413 
    414 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    415 		/* Fetch the configured RSS key */
    416 		rss_getkey((uint8_t *) &rss_key);
    417 	} else {
    418 		/* set up random bits */
    419 		cprng_fast(&rss_key, sizeof(rss_key));
    420 	}
    421 
    422 	/* Set multiplier for RETA setup and table size based on MAC */
    423 	index_mult = 0x1;
    424 	table_size = 128;
    425 	switch (adapter->hw.mac.type) {
    426 	case ixgbe_mac_82598EB:
    427 		index_mult = 0x11;
    428 		break;
    429 	case ixgbe_mac_X550:
    430 	case ixgbe_mac_X550EM_x:
    431 	case ixgbe_mac_X550EM_a:
    432 		table_size = 512;
    433 		break;
    434 	default:
    435 		break;
    436 	}
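	/*
	 * X550-class MACs use a 512-entry redirection table; entries
	 * beyond the first 128 are written via the ERETA registers in
	 * the loop below.
	 */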
    437 
    438 	/* Set up the redirection table */
    439 	for (i = 0, j = 0; i < table_size; i++, j++) {
    440 		if (j == adapter->num_queues)
    441 			j = 0;
    442 
    443 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    444 			/*
    445 			 * Fetch the RSS bucket id for the given indirection
    446 			 * entry. Cap it at the number of configured buckets
    447 			 * (which is num_queues.)
    448 			 */
    449 			queue_id = rss_get_indirection_to_bucket(i);
    450 			queue_id = queue_id % adapter->num_queues;
    451 		} else
    452 			queue_id = (j * index_mult);
    453 
    454 		/*
    455 		 * The low 8 bits are for hash value (n+0);
    456 		 * The next 8 bits are for hash value (n+1), etc.
    457 		 */
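		/*
		 * e.g. entries 0..3 are packed into RETA(0) as
		 * (q3 << 24) | (q2 << 16) | (q1 << 8) | q0.
		 */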
    458 		reta = reta >> 8;
    459 		reta = reta | (((uint32_t) queue_id) << 24);
    460 		if ((i & 3) == 3) {
    461 			if (i < 128)
    462 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    463 			else
    464 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    465 				    reta);
    466 			reta = 0;
    467 		}
    468 	}
    469 
    470 	/* Now fill our hash function seeds */
    471 	for (i = 0; i < 10; i++)
    472 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    473 
    474 	/* Perform hash on these packet types */
    475 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    476 		rss_hash_config = rss_gethashconfig();
    477 	else {
    478 		/*
    479 		 * Disable UDP - IP fragments aren't currently being handled
    480 		 * and so we end up with a mix of 2-tuple and 4-tuple
    481 		 * traffic.
    482 		 */
    483 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    484 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    485 		                | RSS_HASHTYPE_RSS_IPV6
    486 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    487 		                | RSS_HASHTYPE_RSS_IPV6_EX
    488 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    489 	}
    490 
    491 	mrqc = IXGBE_MRQC_RSSEN;
    492 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    493 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    494 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    495 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    496 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    497 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    503 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    504 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    505 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    506 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    507 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    508 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    509 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    510 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    511 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    512 } /* ixgbe_initialize_rss_mapping */
    513 
    514 /************************************************************************
    515  * ixgbe_initialize_receive_units - Setup receive registers and features.
    516  ************************************************************************/
    517 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
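/*
 * BSIZEPKT_ROUNDUP is the addend used below to round rx_mbuf_sz up to
 * the next SRRCTL.BSIZEPKT unit before it is shifted into the SRRCTL
 * packet buffer size field.
 */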
    518 
    519 static void
    520 ixgbe_initialize_receive_units(struct adapter *adapter)
    521 {
    522 	struct	rx_ring	*rxr = adapter->rx_rings;
    523 	struct ixgbe_hw	*hw = &adapter->hw;
    524 	struct ifnet    *ifp = adapter->ifp;
    525 	int             i, j;
    526 	u32		bufsz, fctrl, srrctl, rxcsum;
    527 	u32		hlreg;
    528 
    529 	/*
    530 	 * Make sure receives are disabled while
    531 	 * setting up the descriptor ring
    532 	 */
    533 	ixgbe_disable_rx(hw);
    534 
    535 	/* Enable broadcasts */
    536 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    537 	fctrl |= IXGBE_FCTRL_BAM;
    538 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    539 		fctrl |= IXGBE_FCTRL_DPF;
    540 		fctrl |= IXGBE_FCTRL_PMCF;
    541 	}
    542 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    543 
    544 	/* Set for Jumbo Frames? */
    545 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    546 	if (ifp->if_mtu > ETHERMTU)
    547 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    548 	else
    549 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    550 
    551 #ifdef DEV_NETMAP
    552 	/* CRC stripping is conditional in Netmap */
    553 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    554 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    555 	    !ix_crcstrip)
    556 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    557 	else
    558 #endif /* DEV_NETMAP */
    559 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    560 
    561 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    562 
    563 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    564 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    565 
    566 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    567 		u64 rdba = rxr->rxdma.dma_paddr;
    568 		u32 tqsmreg, reg;
    569 		int regnum = i / 4;	/* 1 register per 4 queues */
     570 		int regshift = i % 4;	/* 8 bits per queue */
    571 		j = rxr->me;
    572 
    573 		/* Setup the Base and Length of the Rx Descriptor Ring */
    574 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    575 		    (rdba & 0x00000000ffffffffULL));
    576 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    577 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    578 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    579 
    580 		/* Set up the SRRCTL register */
    581 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    582 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    583 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    584 		srrctl |= bufsz;
    585 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    586 
    587 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    588 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    589 		reg &= ~(0x000000ff << (regshift * 8));
    590 		reg |= i << (regshift * 8);
    591 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
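		/*
		 * RX queue i is mapped to statistics set i, so the per-queue
		 * QPRC/QPRDC counters read back in ring order.
		 */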
    592 
     593 		/*
     594 		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
     595 		 * The register location for queues 0...7 differs between
     596 		 * 82598 and newer devices.
     597 		 */
    598 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    599 			tqsmreg = IXGBE_TQSMR(regnum);
    600 		else
    601 			tqsmreg = IXGBE_TQSM(regnum);
    602 		reg = IXGBE_READ_REG(hw, tqsmreg);
    603 		reg &= ~(0x000000ff << (regshift * 8));
    604 		reg |= i << (regshift * 8);
    605 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    606 
    607 		/*
    608 		 * Set DROP_EN iff we have no flow control and >1 queue.
    609 		 * Note that srrctl was cleared shortly before during reset,
    610 		 * so we do not need to clear the bit, but do it just in case
    611 		 * this code is moved elsewhere.
    612 		 */
    613 		if (adapter->num_queues > 1 &&
    614 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    615 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    616 		} else {
    617 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    618 		}
    619 
    620 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    621 
    622 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    623 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    624 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    625 
    626 		/* Set the driver rx tail address */
    627 		rxr->tail =  IXGBE_RDT(rxr->me);
    628 	}
    629 
    630 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    631 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    632 		            | IXGBE_PSRTYPE_UDPHDR
    633 		            | IXGBE_PSRTYPE_IPV4HDR
    634 		            | IXGBE_PSRTYPE_IPV6HDR;
    635 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    636 	}
    637 
    638 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    639 
    640 	ixgbe_initialize_rss_mapping(adapter);
    641 
    642 	if (adapter->num_queues > 1) {
    643 		/* RSS and RX IPP Checksum are mutually exclusive */
    644 		rxcsum |= IXGBE_RXCSUM_PCSD;
    645 	}
    646 
    647 	if (ifp->if_capenable & IFCAP_RXCSUM)
    648 		rxcsum |= IXGBE_RXCSUM_PCSD;
    649 
    650 	/* This is useful for calculating UDP/IP fragment checksums */
    651 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    652 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
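	/*
	 * When PCSD is set, the Rx descriptor carries the RSS hash rather
	 * than the fragment checksum, so IPPCSE only matters while PCSD
	 * is clear.
	 */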
    653 
    654 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    655 
    656 	return;
    657 } /* ixgbe_initialize_receive_units */
    658 
    659 /************************************************************************
    660  * ixgbe_initialize_transmit_units - Enable transmit units.
    661  ************************************************************************/
    662 static void
    663 ixgbe_initialize_transmit_units(struct adapter *adapter)
    664 {
    665 	struct tx_ring  *txr = adapter->tx_rings;
    666 	struct ixgbe_hw	*hw = &adapter->hw;
    667 
    668 	/* Setup the Base and Length of the Tx Descriptor Ring */
    669 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    670 		u64 tdba = txr->txdma.dma_paddr;
    671 		u32 txctrl = 0;
    672 		int j = txr->me;
    673 
    674 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    675 		    (tdba & 0x00000000ffffffffULL));
    676 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    677 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    678 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    679 
    680 		/* Setup the HW Tx Head and Tail descriptor pointers */
    681 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    682 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    683 
    684 		/* Cache the tail address */
    685 		txr->tail = IXGBE_TDT(j);
    686 
    687 		/* Disable Head Writeback */
    688 		/*
    689 		 * Note: for X550 series devices, these registers are actually
     690 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    691 		 * fields remain the same.
    692 		 */
    693 		switch (hw->mac.type) {
    694 		case ixgbe_mac_82598EB:
    695 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    696 			break;
    697 		default:
    698 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    699 			break;
    700 		}
    701 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    702 		switch (hw->mac.type) {
    703 		case ixgbe_mac_82598EB:
    704 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    705 			break;
    706 		default:
    707 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    708 			break;
    709 		}
    710 
    711 	}
    712 
    713 	if (hw->mac.type != ixgbe_mac_82598EB) {
    714 		u32 dmatxctl, rttdcs;
    715 
    716 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    717 		dmatxctl |= IXGBE_DMATXCTL_TE;
    718 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    719 		/* Disable arbiter to set MTQC */
    720 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    721 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    722 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    723 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    724 		    ixgbe_get_mtqc(adapter->iov_mode));
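		/* Re-enable the TX descriptor arbiter now that MTQC is set */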
    725 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    726 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    727 	}
    728 
    729 	return;
    730 } /* ixgbe_initialize_transmit_units */
    731 
    732 /************************************************************************
    733  * ixgbe_attach - Device initialization routine
    734  *
    735  *   Called when the driver is being loaded.
    736  *   Identifies the type of hardware, allocates all resources
    737  *   and initializes the hardware.
    738  *
     739  *   Errors during attach are reported via aprint_error_dev().
    740  ************************************************************************/
    741 static void
    742 ixgbe_attach(device_t parent, device_t dev, void *aux)
    743 {
    744 	struct adapter  *adapter;
    745 	struct ixgbe_hw *hw;
    746 	int             error = -1;
    747 	u32		ctrl_ext;
    748 	u16		high, low, nvmreg;
    749 	pcireg_t	id, subid;
    750 	ixgbe_vendor_info_t *ent;
    751 	struct pci_attach_args *pa = aux;
    752 	const char *str;
    753 	char buf[256];
    754 
    755 	INIT_DEBUGOUT("ixgbe_attach: begin");
    756 
    757 	/* Allocate, clear, and link in our adapter structure */
    758 	adapter = device_private(dev);
    759 	adapter->hw.back = adapter;
    760 	adapter->dev = dev;
    761 	hw = &adapter->hw;
    762 	adapter->osdep.pc = pa->pa_pc;
    763 	adapter->osdep.tag = pa->pa_tag;
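	/* Prefer the 64-bit DMA tag when the PCI bus supports it */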
    764 	if (pci_dma64_available(pa))
    765 		adapter->osdep.dmat = pa->pa_dmat64;
    766 	else
    767 		adapter->osdep.dmat = pa->pa_dmat;
    768 	adapter->osdep.attached = false;
    769 
    770 	ent = ixgbe_lookup(pa);
    771 
    772 	KASSERT(ent != NULL);
    773 
    774 	aprint_normal(": %s, Version - %s\n",
    775 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    776 
     777 	/* Core Lock Init */
    778 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    779 
    780 	/* Set up the timer callout */
    781 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    782 
    783 	/* Determine hardware revision */
    784 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    785 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    786 
    787 	hw->vendor_id = PCI_VENDOR(id);
    788 	hw->device_id = PCI_PRODUCT(id);
    789 	hw->revision_id =
    790 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    791 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    792 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    793 
    794 	/*
    795 	 * Make sure BUSMASTER is set
    796 	 */
    797 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    798 
    799 	/* Do base PCI setup - map BAR0 */
    800 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    801 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    802 		error = ENXIO;
    803 		goto err_out;
    804 	}
    805 
    806 	/* let hardware know driver is loaded */
    807 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    808 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    809 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    810 
    811 	/*
    812 	 * Initialize the shared code
    813 	 */
    814 	if (ixgbe_init_shared_code(hw)) {
    815 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    816 		error = ENXIO;
    817 		goto err_out;
    818 	}
    819 
    820 	switch (hw->mac.type) {
    821 	case ixgbe_mac_82598EB:
    822 		str = "82598EB";
    823 		break;
    824 	case ixgbe_mac_82599EB:
    825 		str = "82599EB";
    826 		break;
    827 	case ixgbe_mac_X540:
    828 		str = "X540";
    829 		break;
    830 	case ixgbe_mac_X550:
    831 		str = "X550";
    832 		break;
    833 	case ixgbe_mac_X550EM_x:
    834 		str = "X550EM";
    835 		break;
    836 	case ixgbe_mac_X550EM_a:
    837 		str = "X550EM A";
    838 		break;
    839 	default:
    840 		str = "Unknown";
    841 		break;
    842 	}
    843 	aprint_normal_dev(dev, "device %s\n", str);
    844 
    845 	if (hw->mbx.ops.init_params)
    846 		hw->mbx.ops.init_params(hw);
    847 
    848 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    849 
    850 	/* Pick up the 82599 settings */
    851 	if (hw->mac.type != ixgbe_mac_82598EB) {
    852 		hw->phy.smart_speed = ixgbe_smart_speed;
    853 		adapter->num_segs = IXGBE_82599_SCATTER;
    854 	} else
    855 		adapter->num_segs = IXGBE_82598_SCATTER;
    856 
    857 	hw->mac.ops.set_lan_id(hw);
    858 	ixgbe_init_device_features(adapter);
    859 
    860 	if (ixgbe_configure_interrupts(adapter)) {
    861 		error = ENXIO;
    862 		goto err_out;
    863 	}
    864 
    865 	/* Allocate multicast array memory. */
    866 	adapter->mta = malloc(sizeof(*adapter->mta) *
    867 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    868 	if (adapter->mta == NULL) {
    869 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    870 		error = ENOMEM;
    871 		goto err_out;
    872 	}
    873 
    874 	/* Enable WoL (if supported) */
    875 	ixgbe_check_wol_support(adapter);
    876 
    877 	/* Verify adapter fan is still functional (if applicable) */
    878 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    879 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    880 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    881 	}
    882 
    883 	/* Ensure SW/FW semaphore is free */
    884 	ixgbe_init_swfw_semaphore(hw);
    885 
    886 	/* Enable EEE power saving */
    887 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    888 		hw->mac.ops.setup_eee(hw, TRUE);
    889 
    890 	/* Set an initial default flow control value */
    891 	hw->fc.requested_mode = ixgbe_flow_control;
    892 
    893 	/* Sysctls for limiting the amount of work done in the taskqueues */
    894 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    895 	    "max number of rx packets to process",
    896 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    897 
    898 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    899 	    "max number of tx packets to process",
    900 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    901 
    902 	/* Do descriptor calc and sanity checks */
    903 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    904 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    905 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    906 		adapter->num_tx_desc = DEFAULT_TXD;
    907 	} else
    908 		adapter->num_tx_desc = ixgbe_txd;
    909 
    910 	/*
    911 	 * With many RX rings it is easy to exceed the
    912 	 * system mbuf allocation. Tuning nmbclusters
    913 	 * can alleviate this.
    914 	 */
    915 	if (nmbclusters > 0) {
    916 		int s;
    917 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    918 		if (s > nmbclusters) {
    919 			aprint_error_dev(dev, "RX Descriptors exceed "
    920 			    "system mbuf max, using default instead!\n");
    921 			ixgbe_rxd = DEFAULT_RXD;
    922 		}
    923 	}
    924 
    925 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    926 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    927 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    928 		adapter->num_rx_desc = DEFAULT_RXD;
    929 	} else
    930 		adapter->num_rx_desc = ixgbe_rxd;
    931 
    932 	/* Allocate our TX/RX Queues */
    933 	if (ixgbe_allocate_queues(adapter)) {
    934 		error = ENOMEM;
    935 		goto err_out;
    936 	}
    937 
    938 	hw->phy.reset_if_overtemp = TRUE;
    939 	error = ixgbe_reset_hw(hw);
    940 	hw->phy.reset_if_overtemp = FALSE;
    941 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    942 		/*
    943 		 * No optics in this port, set up
    944 		 * so the timer routine will probe
    945 		 * for later insertion.
    946 		 */
    947 		adapter->sfp_probe = TRUE;
    948 		error = IXGBE_SUCCESS;
    949 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    950 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    951 		error = EIO;
    952 		goto err_late;
    953 	} else if (error) {
    954 		aprint_error_dev(dev, "Hardware initialization failed\n");
    955 		error = EIO;
    956 		goto err_late;
    957 	}
    958 
    959 	/* Make sure we have a good EEPROM before we read from it */
    960 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    961 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    962 		error = EIO;
    963 		goto err_late;
    964 	}
    965 
    966 	aprint_normal("%s:", device_xname(dev));
    967 	/* NVM Image Version */
    968 	switch (hw->mac.type) {
    969 	case ixgbe_mac_X540:
    970 	case ixgbe_mac_X550EM_a:
    971 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    972 		if (nvmreg == 0xffff)
    973 			break;
    974 		high = (nvmreg >> 12) & 0x0f;
    975 		low = (nvmreg >> 4) & 0xff;
    976 		id = nvmreg & 0x0f;
    977 		aprint_normal(" NVM Image Version %u.", high);
    978 		if (hw->mac.type == ixgbe_mac_X540)
    979 			str = "%x";
    980 		else
    981 			str = "%02x";
    982 		aprint_normal(str, low);
    983 		aprint_normal(" ID 0x%x,", id);
    984 		break;
    985 	case ixgbe_mac_X550EM_x:
    986 	case ixgbe_mac_X550:
    987 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    988 		if (nvmreg == 0xffff)
    989 			break;
    990 		high = (nvmreg >> 12) & 0x0f;
    991 		low = nvmreg & 0xff;
    992 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    993 		break;
    994 	default:
    995 		break;
    996 	}
    997 
    998 	/* PHY firmware revision */
    999 	switch (hw->mac.type) {
   1000 	case ixgbe_mac_X540:
   1001 	case ixgbe_mac_X550:
   1002 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1003 		if (nvmreg == 0xffff)
   1004 			break;
   1005 		high = (nvmreg >> 12) & 0x0f;
   1006 		low = (nvmreg >> 4) & 0xff;
   1007 		id = nvmreg & 0x000f;
   1008 		aprint_normal(" PHY FW Revision %u.", high);
   1009 		if (hw->mac.type == ixgbe_mac_X540)
   1010 			str = "%x";
   1011 		else
   1012 			str = "%02x";
   1013 		aprint_normal(str, low);
   1014 		aprint_normal(" ID 0x%x,", id);
   1015 		break;
   1016 	default:
   1017 		break;
   1018 	}
   1019 
   1020 	/* NVM Map version & OEM NVM Image version */
   1021 	switch (hw->mac.type) {
   1022 	case ixgbe_mac_X550:
   1023 	case ixgbe_mac_X550EM_x:
   1024 	case ixgbe_mac_X550EM_a:
   1025 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1026 		if (nvmreg != 0xffff) {
   1027 			high = (nvmreg >> 12) & 0x0f;
   1028 			low = nvmreg & 0x00ff;
   1029 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1030 		}
   1031 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1032 		if (nvmreg != 0xffff) {
   1033 			high = (nvmreg >> 12) & 0x0f;
   1034 			low = nvmreg & 0x00ff;
   1035 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1036 			    low);
   1037 		}
   1038 		break;
   1039 	default:
   1040 		break;
   1041 	}
   1042 
   1043 	/* Print the ETrackID */
   1044 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1045 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1046 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1047 
   1048 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1049 		error = ixgbe_allocate_msix(adapter, pa);
   1050 		if (error) {
   1051 			/* Free allocated queue structures first */
   1052 			ixgbe_free_transmit_structures(adapter);
   1053 			ixgbe_free_receive_structures(adapter);
   1054 			free(adapter->queues, M_DEVBUF);
   1055 
   1056 			/* Fallback to legacy interrupt */
   1057 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1058 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1059 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1060 			adapter->num_queues = 1;
   1061 
   1062 			/* Allocate our TX/RX Queues again */
   1063 			if (ixgbe_allocate_queues(adapter)) {
   1064 				error = ENOMEM;
   1065 				goto err_out;
   1066 			}
   1067 		}
   1068 	}
   1069 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1070 		error = ixgbe_allocate_legacy(adapter, pa);
   1071 	if (error)
   1072 		goto err_late;
   1073 
    1074 	/* Softints for Link, SFP, Multispeed Fiber and Flow Director */
   1075 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1076 	    ixgbe_handle_link, adapter);
   1077 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1078 	    ixgbe_handle_mod, adapter);
   1079 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1080 	    ixgbe_handle_msf, adapter);
   1081 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1082 	    ixgbe_handle_phy, adapter);
   1083 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1084 		adapter->fdir_si =
   1085 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1086 			ixgbe_reinit_fdir, adapter);
   1087 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1088 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1089 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1090 		&& (adapter->fdir_si == NULL))) {
   1091 		aprint_error_dev(dev,
    1092 		    "could not establish software interrupts\n");
   1093 		goto err_out;
   1094 	}
   1095 
   1096 	error = ixgbe_start_hw(hw);
   1097 	switch (error) {
   1098 	case IXGBE_ERR_EEPROM_VERSION:
   1099 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1100 		    "LOM.  Please be aware there may be issues associated "
   1101 		    "with your hardware.\nIf you are experiencing problems "
   1102 		    "please contact your Intel or hardware representative "
   1103 		    "who provided you with this hardware.\n");
   1104 		break;
   1105 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1106 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1107 		error = EIO;
   1108 		goto err_late;
   1109 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1110 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1111 		/* falls thru */
   1112 	default:
   1113 		break;
   1114 	}
   1115 
   1116 	/* Setup OS specific network interface */
   1117 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1118 		goto err_late;
   1119 
    1120 	/*
    1121 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+)
    1122 	 * cage and a module inserted, phy.id is not an MII PHY ID but an SFF-8024 ID.
    1123 	 */
   1124 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1125 		uint16_t id1, id2;
   1126 		int oui, model, rev;
   1127 		const char *descr;
   1128 
   1129 		id1 = hw->phy.id >> 16;
   1130 		id2 = hw->phy.id & 0xffff;
   1131 		oui = MII_OUI(id1, id2);
   1132 		model = MII_MODEL(id2);
   1133 		rev = MII_REV(id2);
   1134 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1135 			aprint_normal_dev(dev,
   1136 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1137 			    descr, oui, model, rev);
   1138 		else
   1139 			aprint_normal_dev(dev,
   1140 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1141 			    oui, model, rev);
   1142 	}
   1143 
   1144 	/* Enable the optics for 82599 SFP+ fiber */
   1145 	ixgbe_enable_tx_laser(hw);
   1146 
   1147 	/* Enable power to the phy. */
   1148 	ixgbe_set_phy_power(hw, TRUE);
   1149 
   1150 	/* Initialize statistics */
   1151 	ixgbe_update_stats_counters(adapter);
   1152 
   1153 	/* Check PCIE slot type/speed/width */
   1154 	ixgbe_get_slot_info(adapter);
   1155 
   1156 	/*
   1157 	 * Do time init and sysctl init here, but
   1158 	 * only on the first port of a bypass adapter.
   1159 	 */
   1160 	ixgbe_bypass_init(adapter);
   1161 
   1162 	/* Set an initial dmac value */
   1163 	adapter->dmac = 0;
   1164 	/* Set initial advertised speeds (if applicable) */
   1165 	adapter->advertise = ixgbe_get_advertise(adapter);
   1166 
   1167 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1168 		ixgbe_define_iov_schemas(dev, &error);
   1169 
   1170 	/* Add sysctls */
   1171 	ixgbe_add_device_sysctls(adapter);
   1172 	ixgbe_add_hw_stats(adapter);
   1173 
   1174 	/* For Netmap */
   1175 	adapter->init_locked = ixgbe_init_locked;
   1176 	adapter->stop_locked = ixgbe_stop;
   1177 
   1178 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1179 		ixgbe_netmap_attach(adapter);
   1180 
   1181 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1182 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1183 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1184 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1185 
   1186 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1187 		pmf_class_network_register(dev, adapter->ifp);
   1188 	else
   1189 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1190 
   1191 	INIT_DEBUGOUT("ixgbe_attach: end");
   1192 	adapter->osdep.attached = true;
   1193 
   1194 	return;
   1195 
   1196 err_late:
   1197 	ixgbe_free_transmit_structures(adapter);
   1198 	ixgbe_free_receive_structures(adapter);
   1199 	free(adapter->queues, M_DEVBUF);
   1200 err_out:
   1201 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1202 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1203 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1204 	ixgbe_free_softint(adapter);
   1205 	ixgbe_free_pci_resources(adapter);
   1206 	if (adapter->mta != NULL)
   1207 		free(adapter->mta, M_DEVBUF);
   1208 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1209 
   1210 	return;
   1211 } /* ixgbe_attach */
   1212 
   1213 /************************************************************************
   1214  * ixgbe_check_wol_support
   1215  *
   1216  *   Checks whether the adapter's ports are capable of
   1217  *   Wake On LAN by reading the adapter's NVM.
   1218  *
   1219  *   Sets each port's hw->wol_enabled value depending
   1220  *   on the value read here.
   1221  ************************************************************************/
   1222 static void
   1223 ixgbe_check_wol_support(struct adapter *adapter)
   1224 {
   1225 	struct ixgbe_hw *hw = &adapter->hw;
   1226 	u16             dev_caps = 0;
   1227 
   1228 	/* Find out WoL support for port */
   1229 	adapter->wol_support = hw->wol_enabled = 0;
   1230 	ixgbe_get_device_caps(hw, &dev_caps);
   1231 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1232 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1233 	     hw->bus.func == 0))
   1234 		adapter->wol_support = hw->wol_enabled = 1;
   1235 
   1236 	/* Save initial wake up filter configuration */
   1237 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1238 
   1239 	return;
   1240 } /* ixgbe_check_wol_support */
   1241 
   1242 /************************************************************************
   1243  * ixgbe_setup_interface
   1244  *
   1245  *   Setup networking device structure and register an interface.
   1246  ************************************************************************/
   1247 static int
   1248 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1249 {
   1250 	struct ethercom *ec = &adapter->osdep.ec;
   1251 	struct ifnet   *ifp;
   1252 	int rv;
   1253 
   1254 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1255 
   1256 	ifp = adapter->ifp = &ec->ec_if;
   1257 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1258 	ifp->if_baudrate = IF_Gbps(10);
   1259 	ifp->if_init = ixgbe_init;
   1260 	ifp->if_stop = ixgbe_ifstop;
   1261 	ifp->if_softc = adapter;
   1262 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1263 #ifdef IXGBE_MPSAFE
   1264 	ifp->if_extflags = IFEF_MPSAFE;
   1265 #endif
   1266 	ifp->if_ioctl = ixgbe_ioctl;
   1267 #if __FreeBSD_version >= 1100045
   1268 	/* TSO parameters */
   1269 	ifp->if_hw_tsomax = 65518;
   1270 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1271 	ifp->if_hw_tsomaxsegsize = 2048;
   1272 #endif
   1273 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1274 #if 0
   1275 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1276 #endif
   1277 	} else {
   1278 		ifp->if_transmit = ixgbe_mq_start;
   1279 #if 0
   1280 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1281 #endif
   1282 	}
   1283 	ifp->if_start = ixgbe_legacy_start;
   1284 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1285 	IFQ_SET_READY(&ifp->if_snd);
   1286 
   1287 	rv = if_initialize(ifp);
   1288 	if (rv != 0) {
   1289 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1290 		return rv;
   1291 	}
   1292 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1293 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1294 	/*
    1295 	 * We use per-TX-queue softints, so if_deferred_start_init() isn't
    1296 	 * used.
   1297 	 */
   1298 	if_register(ifp);
   1299 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1300 
   1301 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1302 
   1303 	/*
   1304 	 * Tell the upper layer(s) we support long frames.
   1305 	 */
   1306 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1307 
   1308 	/* Set capability flags */
   1309 	ifp->if_capabilities |= IFCAP_RXCSUM
   1310 			     |  IFCAP_TXCSUM
   1311 			     |  IFCAP_TSOv4
   1312 			     |  IFCAP_TSOv6
   1313 			     |  IFCAP_LRO;
   1314 	ifp->if_capenable = 0;
   1315 
   1316 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1317 	    		    |  ETHERCAP_VLAN_HWCSUM
   1318 	    		    |  ETHERCAP_JUMBO_MTU
   1319 	    		    |  ETHERCAP_VLAN_MTU;
   1320 
   1321 	/* Enable the above capabilities by default */
   1322 	ec->ec_capenable = ec->ec_capabilities;
   1323 
    1324 	/*
    1325 	 * Don't turn this on by default: if vlans are
    1326 	 * created on another pseudo device (e.g. lagg),
    1327 	 * then vlan events are not passed through, breaking
    1328 	 * operation, but with HW FILTER off it works. If
    1329 	 * you use vlans directly on the ixgbe driver you can
    1330 	 * enable this and get full hardware tag filtering.
    1331 	 */
   1332 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
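	/*
	 * Note: HWFILTER is added to ec_capabilities after ec_capenable
	 * was copied above, so it is advertised but left disabled by
	 * default, as the comment above intends.
	 */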
   1333 
   1334 	/*
   1335 	 * Specify the media types supported by this adapter and register
   1336 	 * callbacks to update media and link information
   1337 	 */
   1338 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1339 	    ixgbe_media_status);
   1340 
   1341 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1342 	ixgbe_add_media_types(adapter);
   1343 
   1344 	/* Set autoselect media by default */
   1345 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1346 
   1347 	return (0);
   1348 } /* ixgbe_setup_interface */
   1349 
   1350 /************************************************************************
   1351  * ixgbe_add_media_types
   1352  ************************************************************************/
   1353 static void
   1354 ixgbe_add_media_types(struct adapter *adapter)
   1355 {
   1356 	struct ixgbe_hw *hw = &adapter->hw;
   1357 	device_t        dev = adapter->dev;
   1358 	u64             layer;
   1359 
   1360 	layer = adapter->phy_layer;
   1361 
   1362 #define	ADD(mm, dd)							\
   1363 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1364 
   1365 	/* Media types with matching NetBSD media defines */
   1366 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1367 		ADD(IFM_10G_T | IFM_FDX, 0);
   1368 	}
   1369 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1370 		ADD(IFM_1000_T | IFM_FDX, 0);
   1371 	}
   1372 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1373 		ADD(IFM_100_TX | IFM_FDX, 0);
   1374 	}
   1375 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1376 		ADD(IFM_10_T | IFM_FDX, 0);
   1377 	}
   1378 
   1379 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1380 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1381 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1382 	}
   1383 
   1384 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1385 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1386 		if (hw->phy.multispeed_fiber) {
   1387 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1388 		}
   1389 	}
   1390 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1391 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1392 		if (hw->phy.multispeed_fiber) {
   1393 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1394 		}
   1395 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1396 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1397 	}
   1398 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1399 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1400 	}
   1401 
   1402 #ifdef IFM_ETH_XTYPE
   1403 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1404 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1405 	}
   1406 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1407 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1408 	}
   1409 #else
   1410 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1411 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1412 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1413 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1414 	}
   1415 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1416 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1417 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1418 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1419 	}
   1420 #endif
   1421 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1422 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1423 	}
   1424 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1425 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1426 	}
   1427 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1428 		ADD(IFM_2500_T | IFM_FDX, 0);
   1429 	}
   1430 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1431 		ADD(IFM_5000_T | IFM_FDX, 0);
   1432 	}
   1433 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1434 		device_printf(dev, "Media supported: 1000baseBX\n");
   1435 	/* XXX no ifmedia_set? */
   1436 
   1437 	ADD(IFM_AUTO, 0);
   1438 
   1439 #undef ADD
   1440 } /* ixgbe_add_media_types */
   1441 
   1442 /************************************************************************
   1443  * ixgbe_is_sfp
   1444  ************************************************************************/
   1445 static inline bool
   1446 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1447 {
   1448 	switch (hw->mac.type) {
   1449 	case ixgbe_mac_82598EB:
   1450 		if (hw->phy.type == ixgbe_phy_nl)
   1451 			return TRUE;
   1452 		return FALSE;
   1453 	case ixgbe_mac_82599EB:
   1454 		switch (hw->mac.ops.get_media_type(hw)) {
   1455 		case ixgbe_media_type_fiber:
   1456 		case ixgbe_media_type_fiber_qsfp:
   1457 			return TRUE;
   1458 		default:
   1459 			return FALSE;
   1460 		}
   1461 	case ixgbe_mac_X550EM_x:
   1462 	case ixgbe_mac_X550EM_a:
   1463 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1464 			return TRUE;
   1465 		return FALSE;
   1466 	default:
   1467 		return FALSE;
   1468 	}
   1469 } /* ixgbe_is_sfp */
   1470 
   1471 /************************************************************************
   1472  * ixgbe_config_link
   1473  ************************************************************************/
   1474 static void
   1475 ixgbe_config_link(struct adapter *adapter)
   1476 {
   1477 	struct ixgbe_hw *hw = &adapter->hw;
   1478 	u32             autoneg, err = 0;
   1479 	bool            sfp, negotiate = false;
   1480 
   1481 	sfp = ixgbe_is_sfp(hw);
   1482 
   1483 	if (sfp) {
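         		/*
         		 * Defer SFP module and multispeed-fiber setup to the
         		 * mod/msf softints; kpreempt_disable() keeps us on the
         		 * current CPU across softint_schedule().
         		 */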
   1484 		if (hw->phy.multispeed_fiber) {
   1485 			hw->mac.ops.setup_sfp(hw);
   1486 			ixgbe_enable_tx_laser(hw);
   1487 			kpreempt_disable();
   1488 			softint_schedule(adapter->msf_si);
   1489 			kpreempt_enable();
   1490 		} else {
   1491 			kpreempt_disable();
   1492 			softint_schedule(adapter->mod_si);
   1493 			kpreempt_enable();
   1494 		}
   1495 	} else {
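         		/* Non-SFP media: check link and let the MAC negotiate/set it up */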
   1496 		if (hw->mac.ops.check_link)
   1497 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1498 			    &adapter->link_up, FALSE);
   1499 		if (err)
   1500 			goto out;
   1501 		autoneg = hw->phy.autoneg_advertised;
   1502 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1503 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1504 			    &negotiate);
   1505 		if (err)
   1506 			goto out;
   1507 		if (hw->mac.ops.setup_link)
    1508 			err = hw->mac.ops.setup_link(hw, autoneg,
   1509 			    adapter->link_up);
   1510 	}
   1511 out:
   1512 
   1513 	return;
   1514 } /* ixgbe_config_link */
   1515 
   1516 /************************************************************************
   1517  * ixgbe_update_stats_counters - Update board statistics counters.
   1518  ************************************************************************/
   1519 static void
   1520 ixgbe_update_stats_counters(struct adapter *adapter)
   1521 {
   1522 	struct ifnet          *ifp = adapter->ifp;
   1523 	struct ixgbe_hw       *hw = &adapter->hw;
   1524 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1525 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1526 	u64                   total_missed_rx = 0;
   1527 	uint64_t              crcerrs, rlec;
   1528 
   1529 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1530 	stats->crcerrs.ev_count += crcerrs;
   1531 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1532 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1533 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1534 	if (hw->mac.type == ixgbe_mac_X550)
   1535 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1536 
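         	/*
         	 * The MAC keeps more per-queue counter registers than we have
         	 * active queues; fold register i onto queue (i % num_queues)
         	 * so no counts are lost.
         	 */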
   1537 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1538 		int j = i % adapter->num_queues;
   1539 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1540 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1541 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1542 	}
   1543 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1544 		uint32_t mp;
   1545 		int j = i % adapter->num_queues;
   1546 
   1547 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1548 		/* global total per queue */
   1549 		stats->mpc[j].ev_count += mp;
   1550 		/* running comprehensive total for stats display */
   1551 		total_missed_rx += mp;
   1552 
   1553 		if (hw->mac.type == ixgbe_mac_82598EB)
   1554 			stats->rnbc[j].ev_count
   1555 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1556 
   1557 	}
   1558 	stats->mpctotal.ev_count += total_missed_rx;
   1559 
    1560 	/* The datasheet says M[LR]FC are valid only when link is up at 10Gb/s */
   1561 	if ((adapter->link_active == TRUE)
   1562 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1563 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1564 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1565 	}
   1566 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1567 	stats->rlec.ev_count += rlec;
   1568 
   1569 	/* Hardware workaround, gprc counts missed packets */
   1570 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1571 
   1572 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1573 	stats->lxontxc.ev_count += lxon;
   1574 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1575 	stats->lxofftxc.ev_count += lxoff;
   1576 	total = lxon + lxoff;
   1577 
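         	/*
         	 * On 82599 and later the octet counters are split across
         	 * low/high registers.  Transmitted pause frames (lxon + lxoff)
         	 * are subtracted from the good transmit counts; the driver
         	 * assumes each pause frame is ETHER_MIN_LEN octets.
         	 */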
   1578 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1579 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1580 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1581 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1582 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1583 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1584 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1585 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1586 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1587 	} else {
   1588 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1589 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1590 		/* 82598 only has a counter in the high register */
   1591 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1592 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1593 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1594 	}
   1595 
   1596 	/*
   1597 	 * Workaround: mprc hardware is incorrectly counting
   1598 	 * broadcasts, so for now we subtract those.
   1599 	 */
   1600 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1601 	stats->bprc.ev_count += bprc;
   1602 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1603 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1604 
   1605 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1606 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1607 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1608 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1609 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1610 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1611 
   1612 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1613 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1614 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1615 
   1616 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1617 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1618 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1619 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1620 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1621 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1622 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1623 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1624 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1625 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1626 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1627 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1628 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1629 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1630 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1631 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1632 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1633 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1634 	/* Only read FCOE on 82599 */
   1635 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1636 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1637 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1638 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1639 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1640 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1641 	}
   1642 
   1643 	/* Fill out the OS statistics structure */
   1644 	/*
   1645 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1646 	 * adapter->stats counters. It's required to make ifconfig -z
   1647 	 * (SOICZIFDATA) work.
   1648 	 */
   1649 	ifp->if_collisions = 0;
   1650 
   1651 	/* Rx Errors */
   1652 	ifp->if_iqdrops += total_missed_rx;
   1653 	ifp->if_ierrors += crcerrs + rlec;
   1654 } /* ixgbe_update_stats_counters */
   1655 
   1656 /************************************************************************
   1657  * ixgbe_add_hw_stats
   1658  *
   1659  *   Add sysctl variables, one per statistic, to the system.
   1660  ************************************************************************/
   1661 static void
   1662 ixgbe_add_hw_stats(struct adapter *adapter)
   1663 {
   1664 	device_t dev = adapter->dev;
   1665 	const struct sysctlnode *rnode, *cnode;
   1666 	struct sysctllog **log = &adapter->sysctllog;
   1667 	struct tx_ring *txr = adapter->tx_rings;
   1668 	struct rx_ring *rxr = adapter->rx_rings;
   1669 	struct ixgbe_hw *hw = &adapter->hw;
   1670 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1671 	const char *xname = device_xname(dev);
   1672 
   1673 	/* Driver Statistics */
   1674 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1675 	    NULL, xname, "Handled queue in softint");
   1676 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1677 	    NULL, xname, "Requeued in softint");
   1678 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1679 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1680 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1681 	    NULL, xname, "m_defrag() failed");
   1682 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1683 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1684 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1685 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1686 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1687 	    NULL, xname, "Driver tx dma hard fail other");
   1688 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1689 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1690 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1691 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1692 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1693 	    NULL, xname, "Watchdog timeouts");
   1694 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1695 	    NULL, xname, "TSO errors");
   1696 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1697 	    NULL, xname, "Link MSI-X IRQ Handled");
   1698 
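         	/* Per-queue counters and sysctls; one "q<N>" subtree per queue */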
   1699 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1700 		snprintf(adapter->queues[i].evnamebuf,
   1701 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1702 		    xname, i);
   1703 		snprintf(adapter->queues[i].namebuf,
   1704 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1705 
   1706 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1707 			aprint_error_dev(dev, "could not create sysctl root\n");
   1708 			break;
   1709 		}
   1710 
   1711 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1712 		    0, CTLTYPE_NODE,
   1713 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1714 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1715 			break;
   1716 
   1717 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1718 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1719 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1720 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1721 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1722 			break;
   1723 
   1724 #if 0 /* XXX msaitoh */
   1725 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1726 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1727 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1728 			NULL, 0, &(adapter->queues[i].irqs),
   1729 		    0, CTL_CREATE, CTL_EOL) != 0)
   1730 			break;
   1731 #endif
   1732 
   1733 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1734 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1735 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1736 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1737 		    0, CTL_CREATE, CTL_EOL) != 0)
   1738 			break;
   1739 
   1740 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1741 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1742 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1743 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1744 		    0, CTL_CREATE, CTL_EOL) != 0)
   1745 			break;
   1746 
   1747 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1748 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1749 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1750 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1751 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1752 		    NULL, adapter->queues[i].evnamebuf,
   1753 		    "Queue No Descriptor Available");
   1754 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1755 		    NULL, adapter->queues[i].evnamebuf,
   1756 		    "Queue Packets Transmitted");
   1757 #ifndef IXGBE_LEGACY_TX
   1758 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1759 		    NULL, adapter->queues[i].evnamebuf,
   1760 		    "Packets dropped in pcq");
   1761 #endif
   1762 
   1763 #ifdef LRO
   1764 		struct lro_ctrl *lro = &rxr->lro;
   1765 #endif /* LRO */
   1766 
   1767 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1768 		    CTLFLAG_READONLY,
   1769 		    CTLTYPE_INT,
   1770 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1771 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1772 		    CTL_CREATE, CTL_EOL) != 0)
   1773 			break;
   1774 
   1775 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1776 		    CTLFLAG_READONLY,
   1777 		    CTLTYPE_INT,
   1778 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1779 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1780 		    CTL_CREATE, CTL_EOL) != 0)
   1781 			break;
   1782 
   1783 		if (i < __arraycount(stats->mpc)) {
   1784 			evcnt_attach_dynamic(&stats->mpc[i],
   1785 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1786 			    "RX Missed Packet Count");
   1787 			if (hw->mac.type == ixgbe_mac_82598EB)
   1788 				evcnt_attach_dynamic(&stats->rnbc[i],
   1789 				    EVCNT_TYPE_MISC, NULL,
   1790 				    adapter->queues[i].evnamebuf,
   1791 				    "Receive No Buffers");
   1792 		}
   1793 		if (i < __arraycount(stats->pxontxc)) {
   1794 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1795 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1796 			    "pxontxc");
   1797 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1798 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1799 			    "pxonrxc");
   1800 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1801 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1802 			    "pxofftxc");
   1803 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1804 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1805 			    "pxoffrxc");
   1806 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1807 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1808 			    "pxon2offc");
   1809 		}
   1810 		if (i < __arraycount(stats->qprc)) {
   1811 			evcnt_attach_dynamic(&stats->qprc[i],
   1812 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1813 			    "qprc");
   1814 			evcnt_attach_dynamic(&stats->qptc[i],
   1815 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1816 			    "qptc");
   1817 			evcnt_attach_dynamic(&stats->qbrc[i],
   1818 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1819 			    "qbrc");
   1820 			evcnt_attach_dynamic(&stats->qbtc[i],
   1821 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1822 			    "qbtc");
   1823 			evcnt_attach_dynamic(&stats->qprdc[i],
   1824 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1825 			    "qprdc");
   1826 		}
   1827 
   1828 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1829 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1830 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1831 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1832 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1833 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1834 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1835 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1836 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1837 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1838 #ifdef LRO
   1839 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1840 				CTLFLAG_RD, &lro->lro_queued, 0,
   1841 				"LRO Queued");
   1842 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1843 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1844 				"LRO Flushed");
   1845 #endif /* LRO */
   1846 	}
   1847 
   1848 	/* MAC stats get their own sub node */
   1849 
   1850 	snprintf(stats->namebuf,
   1851 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1852 
   1853 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1854 	    stats->namebuf, "rx csum offload - IP");
   1855 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1856 	    stats->namebuf, "rx csum offload - L4");
   1857 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1858 	    stats->namebuf, "rx csum offload - IP bad");
   1859 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1860 	    stats->namebuf, "rx csum offload - L4 bad");
   1861 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1862 	    stats->namebuf, "Interrupt conditions zero");
   1863 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1864 	    stats->namebuf, "Legacy interrupts");
   1865 
   1866 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1867 	    stats->namebuf, "CRC Errors");
   1868 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1869 	    stats->namebuf, "Illegal Byte Errors");
   1870 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1871 	    stats->namebuf, "Byte Errors");
   1872 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1873 	    stats->namebuf, "MAC Short Packets Discarded");
   1874 	if (hw->mac.type >= ixgbe_mac_X550)
   1875 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1876 		    stats->namebuf, "Bad SFD");
   1877 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1878 	    stats->namebuf, "Total Packets Missed");
   1879 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1880 	    stats->namebuf, "MAC Local Faults");
   1881 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1882 	    stats->namebuf, "MAC Remote Faults");
   1883 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1884 	    stats->namebuf, "Receive Length Errors");
   1885 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1886 	    stats->namebuf, "Link XON Transmitted");
   1887 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1888 	    stats->namebuf, "Link XON Received");
   1889 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1890 	    stats->namebuf, "Link XOFF Transmitted");
   1891 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1892 	    stats->namebuf, "Link XOFF Received");
   1893 
   1894 	/* Packet Reception Stats */
   1895 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "Total Octets Received");
   1897 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "Good Octets Received");
   1899 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1900 	    stats->namebuf, "Total Packets Received");
   1901 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Good Packets Received");
   1903 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "Multicast Packets Received");
   1905 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "Broadcast Packets Received");
   1907 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "64 byte frames received ");
   1909 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1910 	    stats->namebuf, "65-127 byte frames received");
   1911 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1912 	    stats->namebuf, "128-255 byte frames received");
   1913 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "256-511 byte frames received");
   1915 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1916 	    stats->namebuf, "512-1023 byte frames received");
   1917 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   1918 	    stats->namebuf, "1023-1522 byte frames received");
   1919 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1920 	    stats->namebuf, "Receive Undersized");
   1921 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1922 	    stats->namebuf, "Fragmented Packets Received ");
   1923 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1924 	    stats->namebuf, "Oversized Packets Received");
   1925 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1926 	    stats->namebuf, "Received Jabber");
   1927 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1928 	    stats->namebuf, "Management Packets Received");
   1929 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1930 	    stats->namebuf, "Management Packets Dropped");
   1931 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1932 	    stats->namebuf, "Checksum Errors");
   1933 
   1934 	/* Packet Transmission Stats */
   1935 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1936 	    stats->namebuf, "Good Octets Transmitted");
   1937 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1938 	    stats->namebuf, "Total Packets Transmitted");
   1939 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1940 	    stats->namebuf, "Good Packets Transmitted");
   1941 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "Broadcast Packets Transmitted");
   1943 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "Multicast Packets Transmitted");
   1945 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "Management Packets Transmitted");
   1947 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "64 byte frames transmitted ");
   1949 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1950 	    stats->namebuf, "65-127 byte frames transmitted");
   1951 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1952 	    stats->namebuf, "128-255 byte frames transmitted");
   1953 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1954 	    stats->namebuf, "256-511 byte frames transmitted");
   1955 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1956 	    stats->namebuf, "512-1023 byte frames transmitted");
   1957 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1958 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1959 } /* ixgbe_add_hw_stats */
   1960 
   1961 static void
   1962 ixgbe_clear_evcnt(struct adapter *adapter)
   1963 {
   1964 	struct tx_ring *txr = adapter->tx_rings;
   1965 	struct rx_ring *rxr = adapter->rx_rings;
   1966 	struct ixgbe_hw *hw = &adapter->hw;
   1967 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1968 
   1969 	adapter->handleq.ev_count = 0;
   1970 	adapter->req.ev_count = 0;
   1971 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1972 	adapter->mbuf_defrag_failed.ev_count = 0;
   1973 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1974 	adapter->einval_tx_dma_setup.ev_count = 0;
   1975 	adapter->other_tx_dma_setup.ev_count = 0;
   1976 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1977 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1978 	adapter->watchdog_events.ev_count = 0;
   1979 	adapter->tso_err.ev_count = 0;
   1980 	adapter->link_irq.ev_count = 0;
   1981 
   1982 	txr = adapter->tx_rings;
   1983 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1984 		adapter->queues[i].irqs.ev_count = 0;
   1985 		txr->no_desc_avail.ev_count = 0;
   1986 		txr->total_packets.ev_count = 0;
   1987 		txr->tso_tx.ev_count = 0;
   1988 #ifndef IXGBE_LEGACY_TX
   1989 		txr->pcq_drops.ev_count = 0;
   1990 #endif
   1991 
   1992 		if (i < __arraycount(stats->mpc)) {
   1993 			stats->mpc[i].ev_count = 0;
   1994 			if (hw->mac.type == ixgbe_mac_82598EB)
   1995 				stats->rnbc[i].ev_count = 0;
   1996 		}
   1997 		if (i < __arraycount(stats->pxontxc)) {
   1998 			stats->pxontxc[i].ev_count = 0;
   1999 			stats->pxonrxc[i].ev_count = 0;
   2000 			stats->pxofftxc[i].ev_count = 0;
   2001 			stats->pxoffrxc[i].ev_count = 0;
   2002 			stats->pxon2offc[i].ev_count = 0;
   2003 		}
   2004 		if (i < __arraycount(stats->qprc)) {
   2005 			stats->qprc[i].ev_count = 0;
   2006 			stats->qptc[i].ev_count = 0;
   2007 			stats->qbrc[i].ev_count = 0;
   2008 			stats->qbtc[i].ev_count = 0;
   2009 			stats->qprdc[i].ev_count = 0;
   2010 		}
   2011 
   2012 		rxr->rx_packets.ev_count = 0;
   2013 		rxr->rx_bytes.ev_count = 0;
   2014 		rxr->rx_copies.ev_count = 0;
   2015 		rxr->no_jmbuf.ev_count = 0;
   2016 		rxr->rx_discarded.ev_count = 0;
   2017 	}
   2018 	stats->ipcs.ev_count = 0;
   2019 	stats->l4cs.ev_count = 0;
   2020 	stats->ipcs_bad.ev_count = 0;
   2021 	stats->l4cs_bad.ev_count = 0;
   2022 	stats->intzero.ev_count = 0;
   2023 	stats->legint.ev_count = 0;
   2024 	stats->crcerrs.ev_count = 0;
   2025 	stats->illerrc.ev_count = 0;
   2026 	stats->errbc.ev_count = 0;
   2027 	stats->mspdc.ev_count = 0;
   2028 	stats->mbsdc.ev_count = 0;
   2029 	stats->mpctotal.ev_count = 0;
   2030 	stats->mlfc.ev_count = 0;
   2031 	stats->mrfc.ev_count = 0;
   2032 	stats->rlec.ev_count = 0;
   2033 	stats->lxontxc.ev_count = 0;
   2034 	stats->lxonrxc.ev_count = 0;
   2035 	stats->lxofftxc.ev_count = 0;
   2036 	stats->lxoffrxc.ev_count = 0;
   2037 
   2038 	/* Packet Reception Stats */
   2039 	stats->tor.ev_count = 0;
   2040 	stats->gorc.ev_count = 0;
   2041 	stats->tpr.ev_count = 0;
   2042 	stats->gprc.ev_count = 0;
   2043 	stats->mprc.ev_count = 0;
   2044 	stats->bprc.ev_count = 0;
   2045 	stats->prc64.ev_count = 0;
   2046 	stats->prc127.ev_count = 0;
   2047 	stats->prc255.ev_count = 0;
   2048 	stats->prc511.ev_count = 0;
   2049 	stats->prc1023.ev_count = 0;
   2050 	stats->prc1522.ev_count = 0;
   2051 	stats->ruc.ev_count = 0;
   2052 	stats->rfc.ev_count = 0;
   2053 	stats->roc.ev_count = 0;
   2054 	stats->rjc.ev_count = 0;
   2055 	stats->mngprc.ev_count = 0;
   2056 	stats->mngpdc.ev_count = 0;
   2057 	stats->xec.ev_count = 0;
   2058 
   2059 	/* Packet Transmission Stats */
   2060 	stats->gotc.ev_count = 0;
   2061 	stats->tpt.ev_count = 0;
   2062 	stats->gptc.ev_count = 0;
   2063 	stats->bptc.ev_count = 0;
   2064 	stats->mptc.ev_count = 0;
   2065 	stats->mngptc.ev_count = 0;
   2066 	stats->ptc64.ev_count = 0;
   2067 	stats->ptc127.ev_count = 0;
   2068 	stats->ptc255.ev_count = 0;
   2069 	stats->ptc511.ev_count = 0;
   2070 	stats->ptc1023.ev_count = 0;
   2071 	stats->ptc1522.ev_count = 0;
   2072 }
   2073 
   2074 /************************************************************************
   2075  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2076  *
   2077  *   Retrieves the TDH value from the hardware
   2078  ************************************************************************/
   2079 static int
   2080 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2081 {
   2082 	struct sysctlnode node = *rnode;
   2083 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2084 	uint32_t val;
   2085 
   2086 	if (!txr)
   2087 		return (0);
   2088 
   2089 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2090 	node.sysctl_data = &val;
   2091 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2092 } /* ixgbe_sysctl_tdh_handler */
   2093 
   2094 /************************************************************************
   2095  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2096  *
   2097  *   Retrieves the TDT value from the hardware
   2098  ************************************************************************/
   2099 static int
   2100 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2101 {
   2102 	struct sysctlnode node = *rnode;
   2103 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2104 	uint32_t val;
   2105 
   2106 	if (!txr)
   2107 		return (0);
   2108 
   2109 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2110 	node.sysctl_data = &val;
   2111 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2112 } /* ixgbe_sysctl_tdt_handler */
   2113 
   2114 /************************************************************************
   2115  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2116  *
   2117  *   Retrieves the RDH value from the hardware
   2118  ************************************************************************/
   2119 static int
   2120 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2121 {
   2122 	struct sysctlnode node = *rnode;
   2123 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2124 	uint32_t val;
   2125 
   2126 	if (!rxr)
   2127 		return (0);
   2128 
   2129 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2130 	node.sysctl_data = &val;
   2131 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2132 } /* ixgbe_sysctl_rdh_handler */
   2133 
   2134 /************************************************************************
   2135  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2136  *
   2137  *   Retrieves the RDT value from the hardware
   2138  ************************************************************************/
   2139 static int
   2140 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2141 {
   2142 	struct sysctlnode node = *rnode;
   2143 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2144 	uint32_t val;
   2145 
   2146 	if (!rxr)
   2147 		return (0);
   2148 
   2149 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2150 	node.sysctl_data = &val;
   2151 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2152 } /* ixgbe_sysctl_rdt_handler */
   2153 
   2154 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2155 /************************************************************************
   2156  * ixgbe_register_vlan
   2157  *
   2158  *   Run via vlan config EVENT, it enables us to use the
   2159  *   HW Filter table since we can get the vlan id. This
   2160  *   just creates the entry in the soft version of the
   2161  *   VFTA, init will repopulate the real table.
   2162  ************************************************************************/
   2163 static void
   2164 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2165 {
   2166 	struct adapter	*adapter = ifp->if_softc;
   2167 	u16		index, bit;
   2168 
   2169 	if (ifp->if_softc != arg)   /* Not our event */
   2170 		return;
   2171 
   2172 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2173 		return;
   2174 
   2175 	IXGBE_CORE_LOCK(adapter);
   2176 	index = (vtag >> 5) & 0x7F;
   2177 	bit = vtag & 0x1F;
   2178 	adapter->shadow_vfta[index] |= (1 << bit);
   2179 	ixgbe_setup_vlan_hw_support(adapter);
   2180 	IXGBE_CORE_UNLOCK(adapter);
   2181 } /* ixgbe_register_vlan */
   2182 
   2183 /************************************************************************
   2184  * ixgbe_unregister_vlan
   2185  *
   2186  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2187  ************************************************************************/
   2188 static void
   2189 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2190 {
   2191 	struct adapter	*adapter = ifp->if_softc;
   2192 	u16		index, bit;
   2193 
   2194 	if (ifp->if_softc != arg)
   2195 		return;
   2196 
   2197 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2198 		return;
   2199 
   2200 	IXGBE_CORE_LOCK(adapter);
   2201 	index = (vtag >> 5) & 0x7F;
   2202 	bit = vtag & 0x1F;
   2203 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2204 	/* Re-init to load the changes */
   2205 	ixgbe_setup_vlan_hw_support(adapter);
   2206 	IXGBE_CORE_UNLOCK(adapter);
   2207 } /* ixgbe_unregister_vlan */
   2208 #endif
   2209 
   2210 static void
   2211 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2212 {
   2213 	struct ethercom *ec = &adapter->osdep.ec;
   2214 	struct ixgbe_hw *hw = &adapter->hw;
   2215 	struct rx_ring	*rxr;
   2216 	int             i;
   2217 	u32		ctrl;
   2218 
   2219 
    2220 	/*
    2221 	 * We get here via init_locked, i.e. after a soft reset which
    2222 	 * has already cleared the VFTA and other state.  If no VLANs
    2223 	 * have been registered on this interface, there is nothing
    2224 	 * to do.
    2225 	 */
   2226 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2227 		return;
   2228 
   2229 	/* Setup the queues for vlans */
   2230 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2231 		for (i = 0; i < adapter->num_queues; i++) {
   2232 			rxr = &adapter->rx_rings[i];
   2233 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2234 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2235 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2236 				ctrl |= IXGBE_RXDCTL_VME;
   2237 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2238 			}
   2239 			rxr->vtag_strip = TRUE;
   2240 		}
   2241 	}
   2242 
   2243 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2244 		return;
   2245 	/*
    2246 	 * A soft reset zeroes out the VFTA, so
   2247 	 * we need to repopulate it now.
   2248 	 */
   2249 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2250 		if (adapter->shadow_vfta[i] != 0)
   2251 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2252 			    adapter->shadow_vfta[i]);
   2253 
   2254 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2255 	/* Enable the Filter Table if enabled */
   2256 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2257 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2258 		ctrl |= IXGBE_VLNCTRL_VFE;
   2259 	}
   2260 	if (hw->mac.type == ixgbe_mac_82598EB)
   2261 		ctrl |= IXGBE_VLNCTRL_VME;
   2262 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2263 } /* ixgbe_setup_vlan_hw_support */
   2264 
   2265 /************************************************************************
   2266  * ixgbe_get_slot_info
   2267  *
   2268  *   Get the width and transaction speed of
   2269  *   the slot this adapter is plugged into.
   2270  ************************************************************************/
   2271 static void
   2272 ixgbe_get_slot_info(struct adapter *adapter)
   2273 {
   2274 	device_t		dev = adapter->dev;
   2275 	struct ixgbe_hw		*hw = &adapter->hw;
   2276 	u32                   offset;
   2277 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2278 	u16			link;
   2279 	int                   bus_info_valid = TRUE;
   2280 
   2281 	/* Some devices are behind an internal bridge */
   2282 	switch (hw->device_id) {
   2283 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2284 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2285 		goto get_parent_info;
   2286 	default:
   2287 		break;
   2288 	}
   2289 
   2290 	ixgbe_get_bus_info(hw);
   2291 
   2292 	/*
    2293 	 * Some devices don't use PCI-E; for those, return here rather
    2294 	 * than display "Unknown" for bus speed and width.
   2295 	 */
   2296 	switch (hw->mac.type) {
   2297 	case ixgbe_mac_X550EM_x:
   2298 	case ixgbe_mac_X550EM_a:
   2299 		return;
   2300 	default:
   2301 		goto display;
   2302 	}
   2303 
   2304 get_parent_info:
   2305 	/*
   2306 	 * For the Quad port adapter we need to parse back
   2307 	 * up the PCI tree to find the speed of the expansion
   2308 	 * slot into which this adapter is plugged. A bit more work.
   2309 	 */
   2310 	dev = device_parent(device_parent(dev));
   2311 #if 0
   2312 #ifdef IXGBE_DEBUG
   2313 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2314 	    pci_get_slot(dev), pci_get_function(dev));
   2315 #endif
   2316 	dev = device_parent(device_parent(dev));
   2317 #ifdef IXGBE_DEBUG
   2318 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2319 	    pci_get_slot(dev), pci_get_function(dev));
   2320 #endif
   2321 #endif
   2322 	/* Now get the PCI Express Capabilities offset */
   2323 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2324 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2325 		/*
   2326 		 * Hmm...can't get PCI-Express capabilities.
   2327 		 * Falling back to default method.
   2328 		 */
   2329 		bus_info_valid = FALSE;
   2330 		ixgbe_get_bus_info(hw);
   2331 		goto display;
   2332 	}
   2333 	/* ...and read the Link Status Register */
   2334 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2335 	    offset + PCIE_LCSR) >> 16;
   2336 	ixgbe_set_pci_config_data_generic(hw, link);
   2337 
   2338 display:
   2339 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2340 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2341 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2342 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2343 	     "Unknown"),
   2344 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2345 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2346 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2347 	     "Unknown"));
   2348 
   2349 	if (bus_info_valid) {
   2350 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2351 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2352 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2353 			device_printf(dev, "PCI-Express bandwidth available"
   2354 			    " for this card\n     is not sufficient for"
   2355 			    " optimal performance.\n");
   2356 			device_printf(dev, "For optimal performance a x8 "
   2357 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2358 		}
   2359 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2360 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2361 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2362 			device_printf(dev, "PCI-Express bandwidth available"
   2363 			    " for this card\n     is not sufficient for"
   2364 			    " optimal performance.\n");
   2365 			device_printf(dev, "For optimal performance a x8 "
   2366 			    "PCIE Gen3 slot is required.\n");
   2367 		}
   2368 	} else
   2369 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2370 
   2371 	return;
   2372 } /* ixgbe_get_slot_info */
   2373 
   2374 /************************************************************************
   2375  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2376  ************************************************************************/
   2377 static inline void
   2378 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2379 {
   2380 	struct ixgbe_hw *hw = &adapter->hw;
   2381 	u64             queue = (u64)(1ULL << vector);
   2382 	u32             mask;
   2383 
   2384 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2385 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2386 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2387 	} else {
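         		/*
         		 * 82599 and newer split the per-queue interrupt bits
         		 * across two 32-bit EIMS_EX registers.
         		 */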
   2388 		mask = (queue & 0xFFFFFFFF);
   2389 		if (mask)
   2390 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2391 		mask = (queue >> 32);
   2392 		if (mask)
   2393 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2394 	}
   2395 } /* ixgbe_enable_queue */
   2396 
   2397 /************************************************************************
   2398  * ixgbe_disable_queue
   2399  ************************************************************************/
   2400 static inline void
   2401 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2402 {
   2403 	struct ixgbe_hw *hw = &adapter->hw;
   2404 	u64             queue = (u64)(1ULL << vector);
   2405 	u32             mask;
   2406 
   2407 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2408 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2409 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2410 	} else {
   2411 		mask = (queue & 0xFFFFFFFF);
   2412 		if (mask)
   2413 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2414 		mask = (queue >> 32);
   2415 		if (mask)
   2416 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2417 	}
   2418 } /* ixgbe_disable_queue */
   2419 
   2420 /************************************************************************
   2421  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2422  ************************************************************************/
   2423 static int
   2424 ixgbe_msix_que(void *arg)
   2425 {
   2426 	struct ix_queue	*que = arg;
   2427 	struct adapter  *adapter = que->adapter;
   2428 	struct ifnet    *ifp = adapter->ifp;
   2429 	struct tx_ring	*txr = que->txr;
   2430 	struct rx_ring	*rxr = que->rxr;
   2431 	bool		more;
   2432 	u32		newitr = 0;
   2433 
   2434 	/* Protect against spurious interrupts */
   2435 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2436 		return 0;
   2437 
   2438 	ixgbe_disable_queue(adapter, que->msix);
   2439 	++que->irqs.ev_count;
   2440 
   2441 #ifdef __NetBSD__
   2442 	/* Don't run ixgbe_rxeof in interrupt context */
   2443 	more = true;
   2444 #else
   2445 	more = ixgbe_rxeof(que);
   2446 #endif
   2447 
   2448 	IXGBE_TX_LOCK(txr);
   2449 	ixgbe_txeof(txr);
   2450 	IXGBE_TX_UNLOCK(txr);
   2451 
   2452 	/* Do AIM now? */
   2453 
   2454 	if (adapter->enable_aim == false)
   2455 		goto no_calc;
   2456 	/*
   2457 	 * Do Adaptive Interrupt Moderation:
   2458 	 *  - Write out last calculated setting
   2459 	 *  - Calculate based on average size over
   2460 	 *    the last interval.
   2461 	 */
   2462 	if (que->eitr_setting)
   2463 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2464 		    que->eitr_setting);
   2465 
   2466 	que->eitr_setting = 0;
   2467 
   2468 	/* Idle, do nothing */
    2469 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2470 		goto no_calc;
   2471 
   2472 	if ((txr->bytes) && (txr->packets))
   2473 		newitr = txr->bytes/txr->packets;
   2474 	if ((rxr->bytes) && (rxr->packets))
   2475 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2476 	newitr += 24; /* account for hardware frame, crc */
   2477 
   2478 	/* set an upper boundary */
   2479 	newitr = min(newitr, 3000);
   2480 
   2481 	/* Be nice to the mid range */
   2482 	if ((newitr > 300) && (newitr < 1200))
   2483 		newitr = (newitr / 3);
   2484 	else
   2485 		newitr = (newitr / 2);
   2486 
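         	/*
         	 * On 82598 the interval must be written to both halves of
         	 * EITR; newer MACs instead set CNT_WDIS so this write does
         	 * not also clear the internal interrupt counter.
         	 */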
    2487 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2488 		newitr |= newitr << 16;
    2489 	else
    2490 		newitr |= IXGBE_EITR_CNT_WDIS;
    2491 
    2492 	/* save for next interrupt */
    2493 	que->eitr_setting = newitr;
   2494 
   2495 	/* Reset state */
   2496 	txr->bytes = 0;
   2497 	txr->packets = 0;
   2498 	rxr->bytes = 0;
   2499 	rxr->packets = 0;
   2500 
   2501 no_calc:
   2502 	if (more)
   2503 		softint_schedule(que->que_si);
   2504 	else
   2505 		ixgbe_enable_queue(adapter, que->msix);
   2506 
   2507 	return 1;
   2508 } /* ixgbe_msix_que */
   2509 
   2510 /************************************************************************
   2511  * ixgbe_media_status - Media Ioctl callback
   2512  *
   2513  *   Called whenever the user queries the status of
   2514  *   the interface using ifconfig.
   2515  ************************************************************************/
   2516 static void
   2517 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2518 {
   2519 	struct adapter *adapter = ifp->if_softc;
   2520 	struct ixgbe_hw *hw = &adapter->hw;
   2521 	int layer;
   2522 
   2523 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2524 	IXGBE_CORE_LOCK(adapter);
   2525 	ixgbe_update_link_status(adapter);
   2526 
   2527 	ifmr->ifm_status = IFM_AVALID;
   2528 	ifmr->ifm_active = IFM_ETHER;
   2529 
   2530 	if (!adapter->link_active) {
   2531 		ifmr->ifm_active |= IFM_NONE;
   2532 		IXGBE_CORE_UNLOCK(adapter);
   2533 		return;
   2534 	}
   2535 
   2536 	ifmr->ifm_status |= IFM_ACTIVE;
   2537 	layer = adapter->phy_layer;
   2538 
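         	/* Map the supported physical layer(s) and current link speed to an ifmedia word */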
   2539 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2540 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2541 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2542 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2543 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2544 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2545 		switch (adapter->link_speed) {
   2546 		case IXGBE_LINK_SPEED_10GB_FULL:
   2547 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2548 			break;
   2549 		case IXGBE_LINK_SPEED_5GB_FULL:
   2550 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2551 			break;
   2552 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2553 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2554 			break;
   2555 		case IXGBE_LINK_SPEED_1GB_FULL:
   2556 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2557 			break;
   2558 		case IXGBE_LINK_SPEED_100_FULL:
   2559 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2560 			break;
   2561 		case IXGBE_LINK_SPEED_10_FULL:
   2562 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2563 			break;
   2564 		}
   2565 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2566 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2567 		switch (adapter->link_speed) {
   2568 		case IXGBE_LINK_SPEED_10GB_FULL:
   2569 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2570 			break;
   2571 		}
   2572 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2573 		switch (adapter->link_speed) {
   2574 		case IXGBE_LINK_SPEED_10GB_FULL:
   2575 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2576 			break;
   2577 		case IXGBE_LINK_SPEED_1GB_FULL:
   2578 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2579 			break;
   2580 		}
   2581 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2582 		switch (adapter->link_speed) {
   2583 		case IXGBE_LINK_SPEED_10GB_FULL:
   2584 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2585 			break;
   2586 		case IXGBE_LINK_SPEED_1GB_FULL:
   2587 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2588 			break;
   2589 		}
   2590 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2591 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2592 		switch (adapter->link_speed) {
   2593 		case IXGBE_LINK_SPEED_10GB_FULL:
   2594 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2595 			break;
   2596 		case IXGBE_LINK_SPEED_1GB_FULL:
   2597 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2598 			break;
   2599 		}
   2600 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2601 		switch (adapter->link_speed) {
   2602 		case IXGBE_LINK_SPEED_10GB_FULL:
   2603 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2604 			break;
   2605 		}
   2606 	/*
   2607 	 * XXX: These need to use the proper media types once
   2608 	 * they're added.
   2609 	 */
   2610 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2611 		switch (adapter->link_speed) {
   2612 		case IXGBE_LINK_SPEED_10GB_FULL:
   2613 #ifndef IFM_ETH_XTYPE
   2614 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2615 #else
   2616 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2617 #endif
   2618 			break;
   2619 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2620 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2621 			break;
   2622 		case IXGBE_LINK_SPEED_1GB_FULL:
   2623 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2624 			break;
   2625 		}
   2626 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2627 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2628 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2629 		switch (adapter->link_speed) {
   2630 		case IXGBE_LINK_SPEED_10GB_FULL:
   2631 #ifndef IFM_ETH_XTYPE
   2632 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2633 #else
   2634 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2635 #endif
   2636 			break;
   2637 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2638 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2639 			break;
   2640 		case IXGBE_LINK_SPEED_1GB_FULL:
   2641 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2642 			break;
   2643 		}
   2644 
   2645 	/* If nothing is recognized... */
   2646 #if 0
   2647 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2648 		ifmr->ifm_active |= IFM_UNKNOWN;
   2649 #endif
   2650 
   2651 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2652 
   2653 	/* Display current flow control setting used on link */
   2654 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2655 	    hw->fc.current_mode == ixgbe_fc_full)
   2656 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2657 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2658 	    hw->fc.current_mode == ixgbe_fc_full)
   2659 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2660 
   2661 	IXGBE_CORE_UNLOCK(adapter);
   2662 
   2663 	return;
   2664 } /* ixgbe_media_status */
   2665 
   2666 /************************************************************************
   2667  * ixgbe_media_change - Media Ioctl callback
   2668  *
   2669  *   Called when the user changes speed/duplex using
    2670  *   media/mediaopt options with ifconfig.
   2671  ************************************************************************/
   2672 static int
   2673 ixgbe_media_change(struct ifnet *ifp)
   2674 {
   2675 	struct adapter   *adapter = ifp->if_softc;
   2676 	struct ifmedia   *ifm = &adapter->media;
   2677 	struct ixgbe_hw  *hw = &adapter->hw;
   2678 	ixgbe_link_speed speed = 0;
   2679 	ixgbe_link_speed link_caps = 0;
   2680 	bool negotiate = false;
   2681 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2682 
   2683 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2684 
   2685 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2686 		return (EINVAL);
   2687 
   2688 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2689 		return (ENODEV);
   2690 
   2691 	/*
   2692 	 * We don't actually need to check against the supported
   2693 	 * media types of the adapter; ifmedia will take care of
   2694 	 * that for us.
   2695 	 */
   2696 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2697 	case IFM_AUTO:
   2698 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2699 		    &negotiate);
   2700 		if (err != IXGBE_SUCCESS) {
   2701 			device_printf(adapter->dev, "Unable to determine "
   2702 			    "supported advertise speeds\n");
   2703 			return (ENODEV);
   2704 		}
   2705 		speed |= link_caps;
   2706 		break;
   2707 	case IFM_10G_T:
   2708 	case IFM_10G_LRM:
   2709 	case IFM_10G_LR:
   2710 	case IFM_10G_TWINAX:
   2711 #ifndef IFM_ETH_XTYPE
   2712 	case IFM_10G_SR: /* KR, too */
   2713 	case IFM_10G_CX4: /* KX4 */
   2714 #else
   2715 	case IFM_10G_KR:
   2716 	case IFM_10G_KX4:
   2717 #endif
   2718 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2719 		break;
   2720 	case IFM_5000_T:
   2721 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2722 		break;
   2723 	case IFM_2500_T:
   2724 	case IFM_2500_KX:
   2725 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2726 		break;
   2727 	case IFM_1000_T:
   2728 	case IFM_1000_LX:
   2729 	case IFM_1000_SX:
   2730 	case IFM_1000_KX:
   2731 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2732 		break;
   2733 	case IFM_100_TX:
   2734 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2735 		break;
   2736 	case IFM_10_T:
   2737 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2738 		break;
   2739 	default:
   2740 		goto invalid;
   2741 	}
   2742 
   2743 	hw->mac.autotry_restart = TRUE;
   2744 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2745 	adapter->advertise = 0;
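         	/* advertise bits: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M, 0x10 = 2.5G, 0x20 = 5G */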
   2746 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2747 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2748 			adapter->advertise |= 1 << 2;
   2749 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2750 			adapter->advertise |= 1 << 1;
   2751 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2752 			adapter->advertise |= 1 << 0;
   2753 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2754 			adapter->advertise |= 1 << 3;
   2755 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2756 			adapter->advertise |= 1 << 4;
   2757 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2758 			adapter->advertise |= 1 << 5;
   2759 	}
   2760 
   2761 	return (0);
   2762 
   2763 invalid:
   2764 	device_printf(adapter->dev, "Invalid media type!\n");
   2765 
   2766 	return (EINVAL);
   2767 } /* ixgbe_media_change */
   2768 
   2769 /************************************************************************
   2770  * ixgbe_set_promisc
   2771  ************************************************************************/
   2772 static void
   2773 ixgbe_set_promisc(struct adapter *adapter)
   2774 {
   2775 	struct ifnet *ifp = adapter->ifp;
   2776 	int          mcnt = 0;
   2777 	u32          rctl;
   2778 	struct ether_multi *enm;
   2779 	struct ether_multistep step;
   2780 	struct ethercom *ec = &adapter->osdep.ec;
   2781 
   2782 	KASSERT(mutex_owned(&adapter->core_mtx));
   2783 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2784 	rctl &= (~IXGBE_FCTRL_UPE);
   2785 	if (ifp->if_flags & IFF_ALLMULTI)
   2786 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2787 	else {
   2788 		ETHER_LOCK(ec);
   2789 		ETHER_FIRST_MULTI(step, ec, enm);
   2790 		while (enm != NULL) {
   2791 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2792 				break;
   2793 			mcnt++;
   2794 			ETHER_NEXT_MULTI(step, enm);
   2795 		}
   2796 		ETHER_UNLOCK(ec);
   2797 	}
   2798 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2799 		rctl &= (~IXGBE_FCTRL_MPE);
   2800 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2801 
   2802 	if (ifp->if_flags & IFF_PROMISC) {
   2803 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2804 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2805 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2806 		rctl |= IXGBE_FCTRL_MPE;
   2807 		rctl &= ~IXGBE_FCTRL_UPE;
   2808 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2809 	}
   2810 } /* ixgbe_set_promisc */
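
/*
 * Summary of the FCTRL filter bits programmed above (informational only,
 * derived from the logic in ixgbe_set_promisc()):
 *
 *	IFF_PROMISC			-> UPE | MPE	(accept everything)
 *	IFF_ALLMULTI or table overflow	-> MPE only	(all multicast)
 *	otherwise			-> neither	(exact filtering)
 */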
   2811 
   2812 /************************************************************************
   2813  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2814  ************************************************************************/
   2815 static int
   2816 ixgbe_msix_link(void *arg)
   2817 {
   2818 	struct adapter	*adapter = arg;
   2819 	struct ixgbe_hw *hw = &adapter->hw;
   2820 	u32		eicr, eicr_mask;
   2821 	s32             retval;
   2822 
   2823 	++adapter->link_irq.ev_count;
   2824 
   2825 	/* Pause other interrupts */
   2826 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2827 
   2828 	/* First get the cause */
   2829 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2830 	/* Be sure the queue bits are not cleared */
   2831 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2832 	/* Clear interrupt with write */
   2833 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2834 
   2835 	/* Link status change */
   2836 	if (eicr & IXGBE_EICR_LSC) {
   2837 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2838 		softint_schedule(adapter->link_si);
   2839 	}
   2840 
   2841 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2842 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2843 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2844 			/* This is probably overkill :) */
   2845 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   2846 				return 1;
   2847 			/* Disable the interrupt */
   2848 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2849 			softint_schedule(adapter->fdir_si);
   2850 		}
   2851 
   2852 		if (eicr & IXGBE_EICR_ECC) {
   2853 			device_printf(adapter->dev,
   2854 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2855 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2856 		}
   2857 
   2858 		/* Check for over temp condition */
   2859 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2860 			switch (adapter->hw.mac.type) {
   2861 			case ixgbe_mac_X550EM_a:
   2862 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2863 					break;
   2864 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2865 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2866 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2867 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2868 				retval = hw->phy.ops.check_overtemp(hw);
   2869 				if (retval != IXGBE_ERR_OVERTEMP)
   2870 					break;
   2871 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2872 				device_printf(adapter->dev, "System shutdown required!\n");
   2873 				break;
   2874 			default:
   2875 				if (!(eicr & IXGBE_EICR_TS))
   2876 					break;
   2877 				retval = hw->phy.ops.check_overtemp(hw);
   2878 				if (retval != IXGBE_ERR_OVERTEMP)
   2879 					break;
   2880 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2881 				device_printf(adapter->dev, "System shutdown required!\n");
   2882 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2883 				break;
   2884 			}
   2885 		}
   2886 
   2887 		/* Check for VF message */
   2888 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2889 		    (eicr & IXGBE_EICR_MAILBOX))
   2890 			softint_schedule(adapter->mbx_si);
   2891 	}
   2892 
   2893 	if (ixgbe_is_sfp(hw)) {
   2894 		/* Pluggable optics-related interrupt */
   2895 		if (hw->mac.type >= ixgbe_mac_X540)
   2896 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2897 		else
   2898 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2899 
   2900 		if (eicr & eicr_mask) {
   2901 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2902 			softint_schedule(adapter->mod_si);
   2903 		}
   2904 
   2905 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2906 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2907 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2908 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2909 			softint_schedule(adapter->msf_si);
   2910 		}
   2911 	}
   2912 
   2913 	/* Check for fan failure */
   2914 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2915 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2916 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2917 	}
   2918 
   2919 	/* External PHY interrupt */
   2920 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2921 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2922 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2923 		softint_schedule(adapter->phy_si);
    2924 	}
   2925 
   2926 	/* Re-enable other interrupts */
   2927 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2928 	return 1;
   2929 } /* ixgbe_msix_link */
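
/*
 * Note on the handler above: it masks EIMC_OTHER, reads and acknowledges
 * the asserted causes, and defers all of the slow work (link state, SFP
 * module, multispeed fiber, external PHY, mailbox, flow director) to
 * softints before re-enabling EIMS_OTHER, so none of those paths run at
 * hard interrupt priority.
 */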
   2930 
   2931 /************************************************************************
   2932  * ixgbe_sysctl_interrupt_rate_handler
   2933  ************************************************************************/
   2934 static int
   2935 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2936 {
   2937 	struct sysctlnode node = *rnode;
   2938 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2939 	uint32_t reg, usec, rate;
   2940 	int error;
   2941 
   2942 	if (que == NULL)
   2943 		return 0;
   2944 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2945 	usec = ((reg & 0x0FF8) >> 3);
   2946 	if (usec > 0)
   2947 		rate = 500000 / usec;
   2948 	else
   2949 		rate = 0;
   2950 	node.sysctl_data = &rate;
   2951 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2952 	if (error || newp == NULL)
   2953 		return error;
   2954 	reg &= ~0xfff; /* default, no limitation */
   2955 	ixgbe_max_interrupt_rate = 0;
   2956 	if (rate > 0 && rate < 500000) {
   2957 		if (rate < 1000)
   2958 			rate = 1000;
   2959 		ixgbe_max_interrupt_rate = rate;
   2960 		reg |= ((4000000/rate) & 0xff8);
   2961 	}
   2962 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2963 
   2964 	return (0);
   2965 } /* ixgbe_sysctl_interrupt_rate_handler */
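
/*
 * Worked example of the EITR conversion used above, assuming the 2 usec
 * interval granularity implied by the 500000/4000000 constants (a sketch
 * of the arithmetic only, not additional driver logic):
 *
 *	requested rate = 31250 interrupts/s
 *	write:	(4000000 / 31250) & 0xff8 = 0x80 into EITR
 *	read:	usec = (0x80 & 0x0ff8) >> 3 = 16 -> 500000 / 16 = 31250
 *
 * i.e. the interval field holds 16 units of 2 usec = 32 usec between
 * interrupts for this queue.
 */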
   2966 
   2967 const struct sysctlnode *
   2968 ixgbe_sysctl_instance(struct adapter *adapter)
   2969 {
   2970 	const char *dvname;
   2971 	struct sysctllog **log;
   2972 	int rc;
   2973 	const struct sysctlnode *rnode;
   2974 
   2975 	if (adapter->sysctltop != NULL)
   2976 		return adapter->sysctltop;
   2977 
   2978 	log = &adapter->sysctllog;
   2979 	dvname = device_xname(adapter->dev);
   2980 
   2981 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2982 	    0, CTLTYPE_NODE, dvname,
   2983 	    SYSCTL_DESCR("ixgbe information and settings"),
   2984 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2985 		goto err;
   2986 
   2987 	return rnode;
   2988 err:
   2989 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2990 	return NULL;
   2991 }
   2992 
   2993 /************************************************************************
   2994  * ixgbe_add_device_sysctls
   2995  ************************************************************************/
   2996 static void
   2997 ixgbe_add_device_sysctls(struct adapter *adapter)
   2998 {
   2999 	device_t               dev = adapter->dev;
   3000 	struct ixgbe_hw        *hw = &adapter->hw;
   3001 	struct sysctllog **log;
   3002 	const struct sysctlnode *rnode, *cnode;
   3003 
   3004 	log = &adapter->sysctllog;
   3005 
   3006 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3007 		aprint_error_dev(dev, "could not create sysctl root\n");
   3008 		return;
   3009 	}
   3010 
   3011 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3012 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3013 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3014 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3015 		aprint_error_dev(dev, "could not create sysctl\n");
   3016 
   3017 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3018 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3019 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3020 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3021 		aprint_error_dev(dev, "could not create sysctl\n");
   3022 
   3023 	/* Sysctls for all devices */
   3024 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3025 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3026 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3027 	    CTL_EOL) != 0)
   3028 		aprint_error_dev(dev, "could not create sysctl\n");
   3029 
   3030 	adapter->enable_aim = ixgbe_enable_aim;
   3031 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3032 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3033 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3034 		aprint_error_dev(dev, "could not create sysctl\n");
   3035 
   3036 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3037 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3038 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3039 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3040 	    CTL_EOL) != 0)
   3041 		aprint_error_dev(dev, "could not create sysctl\n");
   3042 
   3043 #ifdef IXGBE_DEBUG
   3044 	/* testing sysctls (for all devices) */
   3045 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3046 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3047 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3048 	    CTL_EOL) != 0)
   3049 		aprint_error_dev(dev, "could not create sysctl\n");
   3050 
   3051 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3052 	    CTLTYPE_STRING, "print_rss_config",
   3053 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3054 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3055 	    CTL_EOL) != 0)
   3056 		aprint_error_dev(dev, "could not create sysctl\n");
   3057 #endif
   3058 	/* for X550 series devices */
   3059 	if (hw->mac.type >= ixgbe_mac_X550)
   3060 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3061 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3062 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3063 		    CTL_EOL) != 0)
   3064 			aprint_error_dev(dev, "could not create sysctl\n");
   3065 
   3066 	/* for WoL-capable devices */
   3067 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3068 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3069 		    CTLTYPE_BOOL, "wol_enable",
   3070 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3071 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3072 		    CTL_EOL) != 0)
   3073 			aprint_error_dev(dev, "could not create sysctl\n");
   3074 
   3075 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3076 		    CTLTYPE_INT, "wufc",
   3077 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3078 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3079 		    CTL_EOL) != 0)
   3080 			aprint_error_dev(dev, "could not create sysctl\n");
   3081 	}
   3082 
   3083 	/* for X552/X557-AT devices */
   3084 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3085 		const struct sysctlnode *phy_node;
   3086 
   3087 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3088 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3089 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3090 			aprint_error_dev(dev, "could not create sysctl\n");
   3091 			return;
   3092 		}
   3093 
   3094 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3095 		    CTLTYPE_INT, "temp",
   3096 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3097 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3098 		    CTL_EOL) != 0)
   3099 			aprint_error_dev(dev, "could not create sysctl\n");
   3100 
   3101 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3102 		    CTLTYPE_INT, "overtemp_occurred",
   3103 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3104 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3105 		    CTL_CREATE, CTL_EOL) != 0)
   3106 			aprint_error_dev(dev, "could not create sysctl\n");
   3107 	}
   3108 
   3109 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3110 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3111 		    CTLTYPE_INT, "eee_state",
   3112 		    SYSCTL_DESCR("EEE Power Save State"),
   3113 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3114 		    CTL_EOL) != 0)
   3115 			aprint_error_dev(dev, "could not create sysctl\n");
   3116 	}
   3117 } /* ixgbe_add_device_sysctls */
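
/*
 * Example usage of the sysctl nodes created above (assuming the first
 * port attaches as ixg0; the node name is really the device's xname):
 *
 *	sysctl hw.ixg0.num_queues
 *	sysctl -w hw.ixg0.advertise_speed=4
 *
 * where 4 selects 10G only, per the advertise bit encoding noted after
 * ixgbe_media_change() above.
 */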
   3118 
   3119 /************************************************************************
   3120  * ixgbe_allocate_pci_resources
   3121  ************************************************************************/
   3122 static int
   3123 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3124     const struct pci_attach_args *pa)
   3125 {
   3126 	pcireg_t	memtype;
   3127 	device_t dev = adapter->dev;
   3128 	bus_addr_t addr;
   3129 	int flags;
   3130 
   3131 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3132 	switch (memtype) {
   3133 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3134 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3135 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3136 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3137 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3138 			goto map_err;
   3139 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3140 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3141 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3142 		}
   3143 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3144 		     adapter->osdep.mem_size, flags,
   3145 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3146 map_err:
   3147 			adapter->osdep.mem_size = 0;
   3148 			aprint_error_dev(dev, "unable to map BAR0\n");
   3149 			return ENXIO;
   3150 		}
   3151 		break;
   3152 	default:
   3153 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3154 		return ENXIO;
   3155 	}
   3156 
   3157 	return (0);
   3158 } /* ixgbe_allocate_pci_resources */
   3159 
   3160 static void
   3161 ixgbe_free_softint(struct adapter *adapter)
   3162 {
   3163 	struct ix_queue *que = adapter->queues;
   3164 	struct tx_ring *txr = adapter->tx_rings;
   3165 	int i;
   3166 
   3167 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3168 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3169 			if (txr->txr_si != NULL)
   3170 				softint_disestablish(txr->txr_si);
   3171 		}
   3172 		if (que->que_si != NULL)
   3173 			softint_disestablish(que->que_si);
   3174 	}
   3175 
   3176 	/* Drain the Link queue */
   3177 	if (adapter->link_si != NULL) {
   3178 		softint_disestablish(adapter->link_si);
   3179 		adapter->link_si = NULL;
   3180 	}
   3181 	if (adapter->mod_si != NULL) {
   3182 		softint_disestablish(adapter->mod_si);
   3183 		adapter->mod_si = NULL;
   3184 	}
   3185 	if (adapter->msf_si != NULL) {
   3186 		softint_disestablish(adapter->msf_si);
   3187 		adapter->msf_si = NULL;
   3188 	}
   3189 	if (adapter->phy_si != NULL) {
   3190 		softint_disestablish(adapter->phy_si);
   3191 		adapter->phy_si = NULL;
   3192 	}
   3193 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3194 		if (adapter->fdir_si != NULL) {
   3195 			softint_disestablish(adapter->fdir_si);
   3196 			adapter->fdir_si = NULL;
   3197 		}
   3198 	}
   3199 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3200 		if (adapter->mbx_si != NULL) {
   3201 			softint_disestablish(adapter->mbx_si);
   3202 			adapter->mbx_si = NULL;
   3203 		}
   3204 	}
   3205 } /* ixgbe_free_softint */
   3206 
   3207 /************************************************************************
   3208  * ixgbe_detach - Device removal routine
   3209  *
   3210  *   Called when the driver is being removed.
   3211  *   Stops the adapter and deallocates all the resources
   3212  *   that were allocated for driver operation.
   3213  *
   3214  *   return 0 on success, positive on failure
   3215  ************************************************************************/
   3216 static int
   3217 ixgbe_detach(device_t dev, int flags)
   3218 {
   3219 	struct adapter *adapter = device_private(dev);
   3220 	struct rx_ring *rxr = adapter->rx_rings;
   3221 	struct tx_ring *txr = adapter->tx_rings;
   3222 	struct ixgbe_hw *hw = &adapter->hw;
   3223 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3224 	u32	ctrl_ext;
   3225 
   3226 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3227 	if (adapter->osdep.attached == false)
   3228 		return 0;
   3229 
   3230 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3231 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3232 		return (EBUSY);
   3233 	}
   3234 
    3235 	/* Stop the interface. Callouts are stopped there as well. */
   3236 	ixgbe_ifstop(adapter->ifp, 1);
   3237 #if NVLAN > 0
   3238 	/* Make sure VLANs are not using driver */
   3239 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3240 		;	/* nothing to do: no VLANs */
   3241 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3242 		vlan_ifdetach(adapter->ifp);
   3243 	else {
   3244 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3245 		return (EBUSY);
   3246 	}
   3247 #endif
   3248 
   3249 	pmf_device_deregister(dev);
   3250 
   3251 	ether_ifdetach(adapter->ifp);
   3252 	/* Stop the adapter */
   3253 	IXGBE_CORE_LOCK(adapter);
   3254 	ixgbe_setup_low_power_mode(adapter);
   3255 	IXGBE_CORE_UNLOCK(adapter);
   3256 
   3257 	ixgbe_free_softint(adapter);
   3258 
   3259 	/* let hardware know driver is unloading */
   3260 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3261 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3262 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3263 
   3264 	callout_halt(&adapter->timer, NULL);
   3265 
   3266 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3267 		netmap_detach(adapter->ifp);
   3268 
   3269 	ixgbe_free_pci_resources(adapter);
   3270 #if 0	/* XXX the NetBSD port is probably missing something here */
   3271 	bus_generic_detach(dev);
   3272 #endif
   3273 	if_detach(adapter->ifp);
   3274 	if_percpuq_destroy(adapter->ipq);
   3275 
   3276 	sysctl_teardown(&adapter->sysctllog);
   3277 	evcnt_detach(&adapter->handleq);
   3278 	evcnt_detach(&adapter->req);
   3279 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3280 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3281 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3282 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3283 	evcnt_detach(&adapter->other_tx_dma_setup);
   3284 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3285 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3286 	evcnt_detach(&adapter->watchdog_events);
   3287 	evcnt_detach(&adapter->tso_err);
   3288 	evcnt_detach(&adapter->link_irq);
   3289 
   3290 	txr = adapter->tx_rings;
   3291 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3292 		evcnt_detach(&adapter->queues[i].irqs);
   3293 		evcnt_detach(&txr->no_desc_avail);
   3294 		evcnt_detach(&txr->total_packets);
   3295 		evcnt_detach(&txr->tso_tx);
   3296 #ifndef IXGBE_LEGACY_TX
   3297 		evcnt_detach(&txr->pcq_drops);
   3298 #endif
   3299 
   3300 		if (i < __arraycount(stats->mpc)) {
   3301 			evcnt_detach(&stats->mpc[i]);
   3302 			if (hw->mac.type == ixgbe_mac_82598EB)
   3303 				evcnt_detach(&stats->rnbc[i]);
   3304 		}
   3305 		if (i < __arraycount(stats->pxontxc)) {
   3306 			evcnt_detach(&stats->pxontxc[i]);
   3307 			evcnt_detach(&stats->pxonrxc[i]);
   3308 			evcnt_detach(&stats->pxofftxc[i]);
   3309 			evcnt_detach(&stats->pxoffrxc[i]);
   3310 			evcnt_detach(&stats->pxon2offc[i]);
   3311 		}
   3312 		if (i < __arraycount(stats->qprc)) {
   3313 			evcnt_detach(&stats->qprc[i]);
   3314 			evcnt_detach(&stats->qptc[i]);
   3315 			evcnt_detach(&stats->qbrc[i]);
   3316 			evcnt_detach(&stats->qbtc[i]);
   3317 			evcnt_detach(&stats->qprdc[i]);
   3318 		}
   3319 
   3320 		evcnt_detach(&rxr->rx_packets);
   3321 		evcnt_detach(&rxr->rx_bytes);
   3322 		evcnt_detach(&rxr->rx_copies);
   3323 		evcnt_detach(&rxr->no_jmbuf);
   3324 		evcnt_detach(&rxr->rx_discarded);
   3325 	}
   3326 	evcnt_detach(&stats->ipcs);
   3327 	evcnt_detach(&stats->l4cs);
   3328 	evcnt_detach(&stats->ipcs_bad);
   3329 	evcnt_detach(&stats->l4cs_bad);
   3330 	evcnt_detach(&stats->intzero);
   3331 	evcnt_detach(&stats->legint);
   3332 	evcnt_detach(&stats->crcerrs);
   3333 	evcnt_detach(&stats->illerrc);
   3334 	evcnt_detach(&stats->errbc);
   3335 	evcnt_detach(&stats->mspdc);
   3336 	if (hw->mac.type >= ixgbe_mac_X550)
   3337 		evcnt_detach(&stats->mbsdc);
   3338 	evcnt_detach(&stats->mpctotal);
   3339 	evcnt_detach(&stats->mlfc);
   3340 	evcnt_detach(&stats->mrfc);
   3341 	evcnt_detach(&stats->rlec);
   3342 	evcnt_detach(&stats->lxontxc);
   3343 	evcnt_detach(&stats->lxonrxc);
   3344 	evcnt_detach(&stats->lxofftxc);
   3345 	evcnt_detach(&stats->lxoffrxc);
   3346 
   3347 	/* Packet Reception Stats */
   3348 	evcnt_detach(&stats->tor);
   3349 	evcnt_detach(&stats->gorc);
   3350 	evcnt_detach(&stats->tpr);
   3351 	evcnt_detach(&stats->gprc);
   3352 	evcnt_detach(&stats->mprc);
   3353 	evcnt_detach(&stats->bprc);
   3354 	evcnt_detach(&stats->prc64);
   3355 	evcnt_detach(&stats->prc127);
   3356 	evcnt_detach(&stats->prc255);
   3357 	evcnt_detach(&stats->prc511);
   3358 	evcnt_detach(&stats->prc1023);
   3359 	evcnt_detach(&stats->prc1522);
   3360 	evcnt_detach(&stats->ruc);
   3361 	evcnt_detach(&stats->rfc);
   3362 	evcnt_detach(&stats->roc);
   3363 	evcnt_detach(&stats->rjc);
   3364 	evcnt_detach(&stats->mngprc);
   3365 	evcnt_detach(&stats->mngpdc);
   3366 	evcnt_detach(&stats->xec);
   3367 
   3368 	/* Packet Transmission Stats */
   3369 	evcnt_detach(&stats->gotc);
   3370 	evcnt_detach(&stats->tpt);
   3371 	evcnt_detach(&stats->gptc);
   3372 	evcnt_detach(&stats->bptc);
   3373 	evcnt_detach(&stats->mptc);
   3374 	evcnt_detach(&stats->mngptc);
   3375 	evcnt_detach(&stats->ptc64);
   3376 	evcnt_detach(&stats->ptc127);
   3377 	evcnt_detach(&stats->ptc255);
   3378 	evcnt_detach(&stats->ptc511);
   3379 	evcnt_detach(&stats->ptc1023);
   3380 	evcnt_detach(&stats->ptc1522);
   3381 
   3382 	ixgbe_free_transmit_structures(adapter);
   3383 	ixgbe_free_receive_structures(adapter);
   3384 	free(adapter->queues, M_DEVBUF);
   3385 	free(adapter->mta, M_DEVBUF);
   3386 
   3387 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3388 
   3389 	return (0);
   3390 } /* ixgbe_detach */
   3391 
   3392 /************************************************************************
   3393  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3394  *
   3395  *   Prepare the adapter/port for LPLU and/or WoL
   3396  ************************************************************************/
   3397 static int
   3398 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3399 {
   3400 	struct ixgbe_hw *hw = &adapter->hw;
   3401 	device_t        dev = adapter->dev;
   3402 	s32             error = 0;
   3403 
   3404 	KASSERT(mutex_owned(&adapter->core_mtx));
   3405 
   3406 	/* Limit power management flow to X550EM baseT */
   3407 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3408 	    hw->phy.ops.enter_lplu) {
   3409 		/* X550EM baseT adapters need a special LPLU flow */
   3410 		hw->phy.reset_disable = true;
   3411 		ixgbe_stop(adapter);
   3412 		error = hw->phy.ops.enter_lplu(hw);
   3413 		if (error)
   3414 			device_printf(dev,
   3415 			    "Error entering LPLU: %d\n", error);
   3416 		hw->phy.reset_disable = false;
   3417 	} else {
   3418 		/* Just stop for other adapters */
   3419 		ixgbe_stop(adapter);
   3420 	}
   3421 
   3422 	if (!hw->wol_enabled) {
   3423 		ixgbe_set_phy_power(hw, FALSE);
   3424 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3425 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3426 	} else {
   3427 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3428 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3429 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3430 
   3431 		/*
   3432 		 * Clear Wake Up Status register to prevent any previous wakeup
   3433 		 * events from waking us up immediately after we suspend.
   3434 		 */
   3435 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3436 
   3437 		/*
   3438 		 * Program the Wakeup Filter Control register with user filter
   3439 		 * settings
   3440 		 */
   3441 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3442 
   3443 		/* Enable wakeups and power management in Wakeup Control */
   3444 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3445 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3446 
   3447 	}
   3448 
   3449 	return error;
   3450 } /* ixgbe_setup_low_power_mode */
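
/*
 * Summary of the low-power flow above (informational comment only):
 * X550EM baseT parts with an enter_lplu method are stopped with PHY
 * resets disabled and then placed in LPLU; everything else is simply
 * stopped.  Without WoL the PHY is powered down and WUFC/WUC are
 * cleared; with WoL the APM wakeup bit in GRC is cleared, stale WUS
 * bits are acknowledged, WUFC is loaded from adapter->wufc, and WUC
 * is set to WKEN | PME_EN.
 */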
   3451 
   3452 /************************************************************************
   3453  * ixgbe_shutdown - Shutdown entry point
   3454  ************************************************************************/
   3455 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3456 static int
   3457 ixgbe_shutdown(device_t dev)
   3458 {
   3459 	struct adapter *adapter = device_private(dev);
   3460 	int error = 0;
   3461 
   3462 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3463 
   3464 	IXGBE_CORE_LOCK(adapter);
   3465 	error = ixgbe_setup_low_power_mode(adapter);
   3466 	IXGBE_CORE_UNLOCK(adapter);
   3467 
   3468 	return (error);
   3469 } /* ixgbe_shutdown */
   3470 #endif
   3471 
   3472 /************************************************************************
   3473  * ixgbe_suspend
   3474  *
   3475  *   From D0 to D3
   3476  ************************************************************************/
   3477 static bool
   3478 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3479 {
   3480 	struct adapter *adapter = device_private(dev);
   3481 	int            error = 0;
   3482 
   3483 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3484 
   3485 	IXGBE_CORE_LOCK(adapter);
   3486 
   3487 	error = ixgbe_setup_low_power_mode(adapter);
   3488 
   3489 	IXGBE_CORE_UNLOCK(adapter);
   3490 
   3491 	return (error);
   3492 } /* ixgbe_suspend */
   3493 
   3494 /************************************************************************
   3495  * ixgbe_resume
   3496  *
   3497  *   From D3 to D0
   3498  ************************************************************************/
   3499 static bool
   3500 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3501 {
   3502 	struct adapter  *adapter = device_private(dev);
   3503 	struct ifnet    *ifp = adapter->ifp;
   3504 	struct ixgbe_hw *hw = &adapter->hw;
   3505 	u32             wus;
   3506 
   3507 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3508 
   3509 	IXGBE_CORE_LOCK(adapter);
   3510 
   3511 	/* Read & clear WUS register */
   3512 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3513 	if (wus)
    3514 		device_printf(dev, "Woken up by (WUS): %#010x\n",
    3515 		    wus);
   3516 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3517 	/* And clear WUFC until next low-power transition */
   3518 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3519 
   3520 	/*
   3521 	 * Required after D3->D0 transition;
   3522 	 * will re-advertise all previous advertised speeds
   3523 	 */
   3524 	if (ifp->if_flags & IFF_UP)
   3525 		ixgbe_init_locked(adapter);
   3526 
   3527 	IXGBE_CORE_UNLOCK(adapter);
   3528 
   3529 	return true;
   3530 } /* ixgbe_resume */
   3531 
   3532 /*
   3533  * Set the various hardware offload abilities.
   3534  *
   3535  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3536  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3537  * mbuf offload flags the driver will understand.
   3538  */
   3539 static void
   3540 ixgbe_set_if_hwassist(struct adapter *adapter)
   3541 {
   3542 	/* XXX */
   3543 }
   3544 
   3545 /************************************************************************
   3546  * ixgbe_init_locked - Init entry point
   3547  *
    3548  *   Used in two ways: it is used by the stack as an init
    3549  *   entry point in the network interface structure, and it is
    3550  *   also used by the driver as a hw/sw initialization routine
    3551  *   to get to a consistent state.
    3552  *
    3553  *   Returns nothing; on failure the adapter is simply stopped.
   3554  ************************************************************************/
   3555 static void
   3556 ixgbe_init_locked(struct adapter *adapter)
   3557 {
   3558 	struct ifnet   *ifp = adapter->ifp;
   3559 	device_t 	dev = adapter->dev;
   3560 	struct ixgbe_hw *hw = &adapter->hw;
   3561 	struct tx_ring  *txr;
   3562 	struct rx_ring  *rxr;
   3563 	u32		txdctl, mhadd;
   3564 	u32		rxdctl, rxctrl;
   3565 	u32             ctrl_ext;
   3566 	int             err = 0;
   3567 
   3568 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3569 
   3570 	KASSERT(mutex_owned(&adapter->core_mtx));
   3571 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3572 
   3573 	hw->adapter_stopped = FALSE;
   3574 	ixgbe_stop_adapter(hw);
    3575 	callout_stop(&adapter->timer);
   3576 
   3577 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3578 	adapter->max_frame_size =
   3579 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3580 
   3581 	/* Queue indices may change with IOV mode */
   3582 	ixgbe_align_all_queue_indices(adapter);
   3583 
   3584 	/* reprogram the RAR[0] in case user changed it. */
   3585 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3586 
   3587 	/* Get the latest mac address, User can use a LAA */
   3588 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3589 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3590 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3591 	hw->addr_ctrl.rar_used_count = 1;
   3592 
   3593 	/* Set hardware offload abilities from ifnet flags */
   3594 	ixgbe_set_if_hwassist(adapter);
   3595 
   3596 	/* Prepare transmit descriptors and buffers */
   3597 	if (ixgbe_setup_transmit_structures(adapter)) {
   3598 		device_printf(dev, "Could not setup transmit structures\n");
   3599 		ixgbe_stop(adapter);
   3600 		return;
   3601 	}
   3602 
   3603 	ixgbe_init_hw(hw);
   3604 	ixgbe_initialize_iov(adapter);
   3605 	ixgbe_initialize_transmit_units(adapter);
   3606 
   3607 	/* Setup Multicast table */
   3608 	ixgbe_set_multi(adapter);
   3609 
   3610 	/* Determine the correct mbuf pool, based on frame size */
   3611 	if (adapter->max_frame_size <= MCLBYTES)
   3612 		adapter->rx_mbuf_sz = MCLBYTES;
   3613 	else
   3614 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3615 
   3616 	/* Prepare receive descriptors and buffers */
   3617 	if (ixgbe_setup_receive_structures(adapter)) {
   3618 		device_printf(dev, "Could not setup receive structures\n");
   3619 		ixgbe_stop(adapter);
   3620 		return;
   3621 	}
   3622 
   3623 	/* Configure RX settings */
   3624 	ixgbe_initialize_receive_units(adapter);
   3625 
   3626 	/* Enable SDP & MSI-X interrupts based on adapter */
   3627 	ixgbe_config_gpie(adapter);
   3628 
   3629 	/* Set MTU size */
   3630 	if (ifp->if_mtu > ETHERMTU) {
   3631 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3632 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3633 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3634 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3635 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3636 	}
   3637 
   3638 	/* Now enable all the queues */
   3639 	for (int i = 0; i < adapter->num_queues; i++) {
   3640 		txr = &adapter->tx_rings[i];
   3641 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3642 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3643 		/* Set WTHRESH to 8, burst writeback */
   3644 		txdctl |= (8 << 16);
   3645 		/*
   3646 		 * When the internal queue falls below PTHRESH (32),
   3647 		 * start prefetching as long as there are at least
   3648 		 * HTHRESH (1) buffers ready. The values are taken
   3649 		 * from the Intel linux driver 3.8.21.
   3650 		 * Prefetching enables tx line rate even with 1 queue.
   3651 		 */
   3652 		txdctl |= (32 << 0) | (1 << 8);
   3653 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3654 	}
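
	/*
	 * Worked TXDCTL example for the loop above (field positions follow
	 * from the shifts already used there: PTHRESH bits 6:0, HTHRESH
	 * bits 14:8, WTHRESH bits 22:16):
	 *	(32 << 0) | (1 << 8) | (8 << 16) = 0x00080120
	 * so prefetching starts once the internal queue falls below 32,
	 * requires at least 1 ready descriptor, and descriptor write-backs
	 * happen in bursts of 8.
	 */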
   3655 
   3656 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3657 		rxr = &adapter->rx_rings[i];
   3658 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3659 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3660 			/*
   3661 			 * PTHRESH = 21
   3662 			 * HTHRESH = 4
   3663 			 * WTHRESH = 8
   3664 			 */
   3665 			rxdctl &= ~0x3FFFFF;
   3666 			rxdctl |= 0x080420;
   3667 		}
   3668 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3669 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3670 		for (; j < 10; j++) {
   3671 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3672 			    IXGBE_RXDCTL_ENABLE)
   3673 				break;
   3674 			else
   3675 				msec_delay(1);
   3676 		}
   3677 		wmb();
   3678 
   3679 		/*
   3680 		 * In netmap mode, we must preserve the buffers made
   3681 		 * available to userspace before the if_init()
   3682 		 * (this is true by default on the TX side, because
   3683 		 * init makes all buffers available to userspace).
   3684 		 *
   3685 		 * netmap_reset() and the device specific routines
   3686 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3687 		 * buffers at the end of the NIC ring, so here we
   3688 		 * must set the RDT (tail) register to make sure
   3689 		 * they are not overwritten.
   3690 		 *
   3691 		 * In this driver the NIC ring starts at RDH = 0,
   3692 		 * RDT points to the last slot available for reception (?),
   3693 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3694 		 */
   3695 #ifdef DEV_NETMAP
   3696 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3697 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3698 			struct netmap_adapter *na = NA(adapter->ifp);
   3699 			struct netmap_kring *kring = &na->rx_rings[i];
   3700 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3701 
   3702 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3703 		} else
   3704 #endif /* DEV_NETMAP */
   3705 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3706 			    adapter->num_rx_desc - 1);
   3707 	}
   3708 
   3709 	/* Enable Receive engine */
   3710 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3711 	if (hw->mac.type == ixgbe_mac_82598EB)
   3712 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3713 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3714 	ixgbe_enable_rx_dma(hw, rxctrl);
   3715 
   3716 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3717 
   3718 	/* Set up MSI-X routing */
   3719 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3720 		ixgbe_configure_ivars(adapter);
   3721 		/* Set up auto-mask */
   3722 		if (hw->mac.type == ixgbe_mac_82598EB)
   3723 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3724 		else {
   3725 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3726 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3727 		}
   3728 	} else {  /* Simple settings for Legacy/MSI */
   3729 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3730 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3731 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3732 	}
   3733 
   3734 	ixgbe_init_fdir(adapter);
   3735 
   3736 	/*
   3737 	 * Check on any SFP devices that
   3738 	 * need to be kick-started
   3739 	 */
   3740 	if (hw->phy.type == ixgbe_phy_none) {
   3741 		err = hw->phy.ops.identify(hw);
   3742 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3743 			device_printf(dev,
   3744 			    "Unsupported SFP+ module type was detected.\n");
   3745 			return;
    3746 		}
   3747 	}
   3748 
   3749 	/* Set moderation on the Link interrupt */
   3750 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3751 
   3752 	/* Config/Enable Link */
   3753 	ixgbe_config_link(adapter);
   3754 
   3755 	/* Hardware Packet Buffer & Flow Control setup */
   3756 	ixgbe_config_delay_values(adapter);
   3757 
   3758 	/* Initialize the FC settings */
   3759 	ixgbe_start_hw(hw);
   3760 
   3761 	/* Set up VLAN support and filter */
   3762 	ixgbe_setup_vlan_hw_support(adapter);
   3763 
   3764 	/* Setup DMA Coalescing */
   3765 	ixgbe_config_dmac(adapter);
   3766 
   3767 	/* And now turn on interrupts */
   3768 	ixgbe_enable_intr(adapter);
   3769 
   3770 	/* Enable the use of the MBX by the VF's */
   3771 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3772 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3773 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3774 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3775 	}
   3776 
   3777 	/* Now inform the stack we're ready */
   3778 	ifp->if_flags |= IFF_RUNNING;
   3779 
   3780 	return;
   3781 } /* ixgbe_init_locked */
   3782 
   3783 /************************************************************************
   3784  * ixgbe_init
   3785  ************************************************************************/
   3786 static int
   3787 ixgbe_init(struct ifnet *ifp)
   3788 {
   3789 	struct adapter *adapter = ifp->if_softc;
   3790 
   3791 	IXGBE_CORE_LOCK(adapter);
   3792 	ixgbe_init_locked(adapter);
   3793 	IXGBE_CORE_UNLOCK(adapter);
   3794 
   3795 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3796 } /* ixgbe_init */
   3797 
   3798 /************************************************************************
   3799  * ixgbe_set_ivar
   3800  *
   3801  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3802  *     (yes this is all very magic and confusing :)
   3803  *    - entry is the register array entry
   3804  *    - vector is the MSI-X vector for this queue
   3805  *    - type is RX/TX/MISC
   3806  ************************************************************************/
   3807 static void
   3808 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3809 {
   3810 	struct ixgbe_hw *hw = &adapter->hw;
   3811 	u32 ivar, index;
   3812 
   3813 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3814 
   3815 	switch (hw->mac.type) {
   3816 
   3817 	case ixgbe_mac_82598EB:
   3818 		if (type == -1)
   3819 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3820 		else
   3821 			entry += (type * 64);
   3822 		index = (entry >> 2) & 0x1F;
   3823 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3824 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3825 		ivar |= (vector << (8 * (entry & 0x3)));
   3826 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3827 		break;
   3828 
   3829 	case ixgbe_mac_82599EB:
   3830 	case ixgbe_mac_X540:
   3831 	case ixgbe_mac_X550:
   3832 	case ixgbe_mac_X550EM_x:
   3833 	case ixgbe_mac_X550EM_a:
   3834 		if (type == -1) { /* MISC IVAR */
   3835 			index = (entry & 1) * 8;
   3836 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3837 			ivar &= ~(0xFF << index);
   3838 			ivar |= (vector << index);
   3839 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3840 		} else {	/* RX/TX IVARS */
   3841 			index = (16 * (entry & 1)) + (8 * type);
   3842 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3843 			ivar &= ~(0xFF << index);
   3844 			ivar |= (vector << index);
   3845 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   3846 		}
    3847 		break;
   3848 	default:
   3849 		break;
   3850 	}
   3851 } /* ixgbe_set_ivar */
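
/*
 * Worked example of the 82599-class IVAR mapping above (informational
 * only): binding RX queue 3 (entry = 3, type = 0) to MSI-X vector 5
 * writes 5 | IXGBE_IVAR_ALLOC_VAL into IVAR(3 >> 1) = IVAR(1) at byte
 * offset (16 * (3 & 1)) + (8 * 0) = 16, i.e. bits 23:16 of IVAR(1).
 */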
   3852 
   3853 /************************************************************************
   3854  * ixgbe_configure_ivars
   3855  ************************************************************************/
   3856 static void
   3857 ixgbe_configure_ivars(struct adapter *adapter)
   3858 {
   3859 	struct ix_queue *que = adapter->queues;
   3860 	u32             newitr;
   3861 
   3862 	if (ixgbe_max_interrupt_rate > 0)
   3863 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3864 	else {
   3865 		/*
   3866 		 * Disable DMA coalescing if interrupt moderation is
   3867 		 * disabled.
   3868 		 */
   3869 		adapter->dmac = 0;
   3870 		newitr = 0;
   3871 	}
   3872 
    3873 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3874 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3875 		struct tx_ring *txr = &adapter->tx_rings[i];
   3876 		/* First the RX queue entry */
    3877 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3878 		/* ... and the TX */
   3879 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3880 		/* Set an Initial EITR value */
   3881 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3882 	}
   3883 
   3884 	/* For the Link interrupt */
    3885 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3886 } /* ixgbe_configure_ivars */
   3887 
   3888 /************************************************************************
   3889  * ixgbe_config_gpie
   3890  ************************************************************************/
   3891 static void
   3892 ixgbe_config_gpie(struct adapter *adapter)
   3893 {
   3894 	struct ixgbe_hw *hw = &adapter->hw;
   3895 	u32             gpie;
   3896 
   3897 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3898 
   3899 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3900 		/* Enable Enhanced MSI-X mode */
   3901 		gpie |= IXGBE_GPIE_MSIX_MODE
   3902 		     |  IXGBE_GPIE_EIAME
   3903 		     |  IXGBE_GPIE_PBA_SUPPORT
   3904 		     |  IXGBE_GPIE_OCD;
   3905 	}
   3906 
   3907 	/* Fan Failure Interrupt */
   3908 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3909 		gpie |= IXGBE_SDP1_GPIEN;
   3910 
   3911 	/* Thermal Sensor Interrupt */
   3912 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3913 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3914 
   3915 	/* Link detection */
   3916 	switch (hw->mac.type) {
   3917 	case ixgbe_mac_82599EB:
   3918 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3919 		break;
   3920 	case ixgbe_mac_X550EM_x:
   3921 	case ixgbe_mac_X550EM_a:
   3922 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3923 		break;
   3924 	default:
   3925 		break;
   3926 	}
   3927 
   3928 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3929 
   3930 	return;
   3931 } /* ixgbe_config_gpie */
   3932 
   3933 /************************************************************************
   3934  * ixgbe_config_delay_values
   3935  *
   3936  *   Requires adapter->max_frame_size to be set.
   3937  ************************************************************************/
   3938 static void
   3939 ixgbe_config_delay_values(struct adapter *adapter)
   3940 {
   3941 	struct ixgbe_hw *hw = &adapter->hw;
   3942 	u32             rxpb, frame, size, tmp;
   3943 
   3944 	frame = adapter->max_frame_size;
   3945 
   3946 	/* Calculate High Water */
   3947 	switch (hw->mac.type) {
   3948 	case ixgbe_mac_X540:
   3949 	case ixgbe_mac_X550:
   3950 	case ixgbe_mac_X550EM_x:
   3951 	case ixgbe_mac_X550EM_a:
   3952 		tmp = IXGBE_DV_X540(frame, frame);
   3953 		break;
   3954 	default:
   3955 		tmp = IXGBE_DV(frame, frame);
   3956 		break;
   3957 	}
   3958 	size = IXGBE_BT2KB(tmp);
   3959 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   3960 	hw->fc.high_water[0] = rxpb - size;
   3961 
   3962 	/* Now calculate Low Water */
   3963 	switch (hw->mac.type) {
   3964 	case ixgbe_mac_X540:
   3965 	case ixgbe_mac_X550:
   3966 	case ixgbe_mac_X550EM_x:
   3967 	case ixgbe_mac_X550EM_a:
   3968 		tmp = IXGBE_LOW_DV_X540(frame);
   3969 		break;
   3970 	default:
   3971 		tmp = IXGBE_LOW_DV(frame);
   3972 		break;
   3973 	}
   3974 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3975 
   3976 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3977 	hw->fc.send_xon = TRUE;
   3978 } /* ixgbe_config_delay_values */
   3979 
   3980 /************************************************************************
   3981  * ixgbe_set_multi - Multicast Update
   3982  *
   3983  *   Called whenever multicast address list is updated.
   3984  ************************************************************************/
   3985 static void
   3986 ixgbe_set_multi(struct adapter *adapter)
   3987 {
   3988 	struct ixgbe_mc_addr	*mta;
   3989 	struct ifnet		*ifp = adapter->ifp;
   3990 	u8			*update_ptr;
   3991 	int			mcnt = 0;
   3992 	u32			fctrl;
   3993 	struct ethercom		*ec = &adapter->osdep.ec;
   3994 	struct ether_multi	*enm;
   3995 	struct ether_multistep	step;
   3996 
   3997 	KASSERT(mutex_owned(&adapter->core_mtx));
   3998 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   3999 
   4000 	mta = adapter->mta;
   4001 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4002 
   4003 	ifp->if_flags &= ~IFF_ALLMULTI;
   4004 	ETHER_LOCK(ec);
   4005 	ETHER_FIRST_MULTI(step, ec, enm);
   4006 	while (enm != NULL) {
   4007 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4008 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4009 			ETHER_ADDR_LEN) != 0)) {
   4010 			ifp->if_flags |= IFF_ALLMULTI;
   4011 			break;
   4012 		}
   4013 		bcopy(enm->enm_addrlo,
   4014 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4015 		mta[mcnt].vmdq = adapter->pool;
   4016 		mcnt++;
   4017 		ETHER_NEXT_MULTI(step, enm);
   4018 	}
   4019 	ETHER_UNLOCK(ec);
   4020 
   4021 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4022 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4023 	if (ifp->if_flags & IFF_PROMISC)
   4024 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4025 	else if (ifp->if_flags & IFF_ALLMULTI) {
   4026 		fctrl |= IXGBE_FCTRL_MPE;
   4027 	}
   4028 
   4029 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4030 
   4031 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4032 		update_ptr = (u8 *)mta;
   4033 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4034 		    ixgbe_mc_array_itr, TRUE);
   4035 	}
   4036 
   4037 	return;
   4038 } /* ixgbe_set_multi */
   4039 
   4040 /************************************************************************
   4041  * ixgbe_mc_array_itr
   4042  *
   4043  *   An iterator function needed by the multicast shared code.
    4044  *   It feeds the shared code routine the addresses from the
    4045  *   array built by ixgbe_set_multi(), one at a time.
   4046  ************************************************************************/
   4047 static u8 *
   4048 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4049 {
   4050 	struct ixgbe_mc_addr *mta;
   4051 
   4052 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4053 	*vmdq = mta->vmdq;
   4054 
   4055 	*update_ptr = (u8*)(mta + 1);
   4056 
   4057 	return (mta->addr);
   4058 } /* ixgbe_mc_array_itr */
   4059 
   4060 /************************************************************************
   4061  * ixgbe_local_timer - Timer routine
   4062  *
   4063  *   Checks for link status, updates statistics,
   4064  *   and runs the watchdog check.
   4065  ************************************************************************/
   4066 static void
   4067 ixgbe_local_timer(void *arg)
   4068 {
   4069 	struct adapter *adapter = arg;
   4070 
   4071 	IXGBE_CORE_LOCK(adapter);
   4072 	ixgbe_local_timer1(adapter);
   4073 	IXGBE_CORE_UNLOCK(adapter);
   4074 }
   4075 
   4076 static void
   4077 ixgbe_local_timer1(void *arg)
   4078 {
   4079 	struct adapter	*adapter = arg;
   4080 	device_t	dev = adapter->dev;
   4081 	struct ix_queue *que = adapter->queues;
   4082 	u64		queues = 0;
   4083 	int		hung = 0;
   4084 
   4085 	KASSERT(mutex_owned(&adapter->core_mtx));
   4086 
   4087 	/* Check for pluggable optics */
   4088 	if (adapter->sfp_probe)
   4089 		if (!ixgbe_sfp_probe(adapter))
   4090 			goto out; /* Nothing to do */
   4091 
   4092 	ixgbe_update_link_status(adapter);
   4093 	ixgbe_update_stats_counters(adapter);
   4094 
   4095 	/*
   4096 	 * Check the TX queues status
   4097 	 *      - mark hung queues so we don't schedule on them
   4098 	 *      - watchdog only if all queues show hung
   4099 	 */
   4100 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4101 		/* Keep track of queues with work for soft irq */
   4102 		if (que->txr->busy)
   4103 			queues |= ((u64)1 << que->me);
   4104 		/*
    4105 		 * Each time txeof runs without cleaning while there
    4106 		 * are uncleaned descriptors, it increments busy. If
    4107 		 * we reach the MAX, we declare the queue hung.
   4108 		 */
   4109 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4110 			++hung;
   4111 			/* Mark the queue as inactive */
   4112 			adapter->active_queues &= ~((u64)1 << que->me);
   4113 			continue;
   4114 		} else {
   4115 			/* Check if we've come back from hung */
   4116 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4117 				adapter->active_queues |= ((u64)1 << que->me);
   4118 		}
   4119 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4120 			device_printf(dev,
    4121 			    "Warning: queue %d appears to be hung!\n", i);
   4122 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4123 			++hung;
   4124 		}
   4125 	}
   4126 
    4127 	/* Only truly watchdog if all queues show hung */
   4128 	if (hung == adapter->num_queues)
   4129 		goto watchdog;
   4130 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4131 		ixgbe_rearm_queues(adapter, queues);
   4132 	}
   4133 
   4134 out:
   4135 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4136 	return;
   4137 
   4138 watchdog:
   4139 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4140 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4141 	adapter->watchdog_events.ev_count++;
   4142 	ixgbe_init_locked(adapter);
   4143 } /* ixgbe_local_timer */
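
/*
 * Note on the timer above (informational comment only): queues whose TX
 * ring still has work are collected in the "queues" bitmap and rearmed,
 * a queue counts as hung once its busy counter reaches the limit, and
 * only when every queue is hung does the routine fall through to the
 * watchdog, which clears IFF_RUNNING, bumps watchdog_events and
 * reinitializes the adapter via ixgbe_init_locked().
 */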
   4144 
   4145 /************************************************************************
   4146  * ixgbe_sfp_probe
   4147  *
   4148  *   Determine if a port had optics inserted.
   4149  ************************************************************************/
   4150 static bool
   4151 ixgbe_sfp_probe(struct adapter *adapter)
   4152 {
   4153 	struct ixgbe_hw	*hw = &adapter->hw;
   4154 	device_t	dev = adapter->dev;
   4155 	bool		result = FALSE;
   4156 
   4157 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4158 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4159 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4160 		if (ret)
   4161 			goto out;
   4162 		ret = hw->phy.ops.reset(hw);
   4163 		adapter->sfp_probe = FALSE;
   4164 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4165 			device_printf(dev, "Unsupported SFP+ module detected!\n");
    4166 			device_printf(dev,
    4167 			    "Reload driver with supported module.\n");
    4168 			goto out;
   4169 		} else
   4170 			device_printf(dev, "SFP+ module detected!\n");
   4171 		/* We now have supported optics */
   4172 		result = TRUE;
   4173 	}
   4174 out:
   4175 
   4176 	return (result);
   4177 } /* ixgbe_sfp_probe */
   4178 
   4179 /************************************************************************
   4180  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4181  ************************************************************************/
   4182 static void
   4183 ixgbe_handle_mod(void *context)
   4184 {
   4185 	struct adapter  *adapter = context;
   4186 	struct ixgbe_hw *hw = &adapter->hw;
   4187 	device_t	dev = adapter->dev;
   4188 	u32             err, cage_full = 0;
   4189 
   4190 	if (adapter->hw.need_crosstalk_fix) {
   4191 		switch (hw->mac.type) {
   4192 		case ixgbe_mac_82599EB:
   4193 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4194 			    IXGBE_ESDP_SDP2;
   4195 			break;
   4196 		case ixgbe_mac_X550EM_x:
   4197 		case ixgbe_mac_X550EM_a:
   4198 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4199 			    IXGBE_ESDP_SDP0;
   4200 			break;
   4201 		default:
   4202 			break;
   4203 		}
   4204 
   4205 		if (!cage_full)
   4206 			return;
   4207 	}
   4208 
   4209 	err = hw->phy.ops.identify_sfp(hw);
   4210 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4211 		device_printf(dev,
   4212 		    "Unsupported SFP+ module type was detected.\n");
   4213 		return;
   4214 	}
   4215 
   4216 	err = hw->mac.ops.setup_sfp(hw);
   4217 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4218 		device_printf(dev,
   4219 		    "Setup failure - unsupported SFP+ module type.\n");
   4220 		return;
   4221 	}
   4222 	softint_schedule(adapter->msf_si);
   4223 } /* ixgbe_handle_mod */
   4224 
   4225 
   4226 /************************************************************************
   4227  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4228  ************************************************************************/
   4229 static void
   4230 ixgbe_handle_msf(void *context)
   4231 {
   4232 	struct adapter  *adapter = context;
   4233 	struct ixgbe_hw *hw = &adapter->hw;
   4234 	u32             autoneg;
   4235 	bool            negotiate;
   4236 
   4237 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4238 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4239 
   4240 	autoneg = hw->phy.autoneg_advertised;
   4241 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4242 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4243 	else
   4244 		negotiate = 0;
   4245 	if (hw->mac.ops.setup_link)
   4246 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4247 
   4248 	/* Adjust media types shown in ifconfig */
   4249 	ifmedia_removeall(&adapter->media);
   4250 	ixgbe_add_media_types(adapter);
   4251 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4252 } /* ixgbe_handle_msf */
   4253 
   4254 /************************************************************************
   4255  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4256  ************************************************************************/
   4257 static void
   4258 ixgbe_handle_phy(void *context)
   4259 {
   4260 	struct adapter  *adapter = context;
   4261 	struct ixgbe_hw *hw = &adapter->hw;
   4262 	int error;
   4263 
   4264 	error = hw->phy.ops.handle_lasi(hw);
   4265 	if (error == IXGBE_ERR_OVERTEMP)
   4266 		device_printf(adapter->dev,
   4267 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
    4268 		    "PHY will downshift to lower power state!\n");
   4269 	else if (error)
   4270 		device_printf(adapter->dev,
   4271 		    "Error handling LASI interrupt: %d\n", error);
   4272 } /* ixgbe_handle_phy */
   4273 
   4274 static void
   4275 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4276 {
   4277 	struct adapter *adapter = ifp->if_softc;
   4278 
   4279 	IXGBE_CORE_LOCK(adapter);
   4280 	ixgbe_stop(adapter);
   4281 	IXGBE_CORE_UNLOCK(adapter);
   4282 }
   4283 
   4284 /************************************************************************
   4285  * ixgbe_stop - Stop the hardware
   4286  *
   4287  *   Disables all traffic on the adapter by issuing a
   4288  *   global reset on the MAC and deallocates TX/RX buffers.
   4289  ************************************************************************/
   4290 static void
   4291 ixgbe_stop(void *arg)
   4292 {
   4293 	struct ifnet    *ifp;
   4294 	struct adapter  *adapter = arg;
   4295 	struct ixgbe_hw *hw = &adapter->hw;
   4296 
   4297 	ifp = adapter->ifp;
   4298 
   4299 	KASSERT(mutex_owned(&adapter->core_mtx));
   4300 
   4301 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4302 	ixgbe_disable_intr(adapter);
   4303 	callout_stop(&adapter->timer);
   4304 
   4305 	/* Let the stack know...*/
   4306 	ifp->if_flags &= ~IFF_RUNNING;
   4307 
   4308 	ixgbe_reset_hw(hw);
   4309 	hw->adapter_stopped = FALSE;
   4310 	ixgbe_stop_adapter(hw);
   4311 	if (hw->mac.type == ixgbe_mac_82599EB)
   4312 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4313 	/* Turn off the laser - noop with no optics */
   4314 	ixgbe_disable_tx_laser(hw);
   4315 
   4316 	/* Update the stack */
   4317 	adapter->link_up = FALSE;
   4318 	ixgbe_update_link_status(adapter);
   4319 
   4320 	/* reprogram the RAR[0] in case user changed it. */
   4321 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4322 
   4323 	return;
   4324 } /* ixgbe_stop */
   4325 
   4326 /************************************************************************
   4327  * ixgbe_update_link_status - Update OS on link state
   4328  *
   4329  * Note: Only updates the OS on the cached link state.
   4330  *       The real check of the hardware only happens with
   4331  *       a link interrupt.
   4332  ************************************************************************/
   4333 static void
   4334 ixgbe_update_link_status(struct adapter *adapter)
   4335 {
   4336 	struct ifnet	*ifp = adapter->ifp;
   4337 	device_t        dev = adapter->dev;
   4338 	struct ixgbe_hw *hw = &adapter->hw;
   4339 
   4340 	if (adapter->link_up) {
   4341 		if (adapter->link_active == FALSE) {
    4342 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
    4343 				/*
    4344 				 * Reading MAC Local Fault and Remote Fault
    4345 				 * here discards any stale counts; those
    4346 				 * registers are valid only while the link
    4347 				 * is up at 10Gbps.
    4348 				 */
   4349 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4350 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4351 			}
   4352 
   4353 			if (bootverbose) {
   4354 				const char *bpsmsg;
   4355 
   4356 				switch (adapter->link_speed) {
   4357 				case IXGBE_LINK_SPEED_10GB_FULL:
   4358 					bpsmsg = "10 Gbps";
   4359 					break;
   4360 				case IXGBE_LINK_SPEED_5GB_FULL:
   4361 					bpsmsg = "5 Gbps";
   4362 					break;
   4363 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4364 					bpsmsg = "2.5 Gbps";
   4365 					break;
   4366 				case IXGBE_LINK_SPEED_1GB_FULL:
   4367 					bpsmsg = "1 Gbps";
   4368 					break;
   4369 				case IXGBE_LINK_SPEED_100_FULL:
   4370 					bpsmsg = "100 Mbps";
   4371 					break;
   4372 				case IXGBE_LINK_SPEED_10_FULL:
   4373 					bpsmsg = "10 Mbps";
   4374 					break;
   4375 				default:
   4376 					bpsmsg = "unknown speed";
   4377 					break;
   4378 				}
    4379 				device_printf(dev, "Link is up %s %s\n",
   4380 				    bpsmsg, "Full Duplex");
   4381 			}
   4382 			adapter->link_active = TRUE;
   4383 			/* Update any Flow Control changes */
   4384 			ixgbe_fc_enable(&adapter->hw);
   4385 			/* Update DMA coalescing config */
   4386 			ixgbe_config_dmac(adapter);
   4387 			if_link_state_change(ifp, LINK_STATE_UP);
   4388 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4389 				ixgbe_ping_all_vfs(adapter);
   4390 		}
   4391 	} else { /* Link down */
   4392 		if (adapter->link_active == TRUE) {
   4393 			if (bootverbose)
   4394 				device_printf(dev, "Link is Down\n");
   4395 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4396 			adapter->link_active = FALSE;
   4397 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4398 				ixgbe_ping_all_vfs(adapter);
   4399 		}
   4400 	}
   4401 
   4402 	return;
   4403 } /* ixgbe_update_link_status */
   4404 
   4405 /************************************************************************
   4406  * ixgbe_config_dmac - Configure DMA Coalescing
   4407  ************************************************************************/
   4408 static void
   4409 ixgbe_config_dmac(struct adapter *adapter)
   4410 {
   4411 	struct ixgbe_hw *hw = &adapter->hw;
   4412 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4413 
   4414 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4415 		return;
   4416 
   4417 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4418 	    dcfg->link_speed ^ adapter->link_speed) {
   4419 		dcfg->watchdog_timer = adapter->dmac;
   4420 		dcfg->fcoe_en = false;
   4421 		dcfg->link_speed = adapter->link_speed;
   4422 		dcfg->num_tcs = 1;
   4423 
   4424 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4425 		    dcfg->watchdog_timer, dcfg->link_speed);
   4426 
   4427 		hw->mac.ops.dmac_config(hw);
   4428 	}
   4429 } /* ixgbe_config_dmac */
   4430 
   4431 /************************************************************************
   4432  * ixgbe_enable_intr
   4433  ************************************************************************/
   4434 static void
   4435 ixgbe_enable_intr(struct adapter *adapter)
   4436 {
   4437 	struct ixgbe_hw	*hw = &adapter->hw;
   4438 	struct ix_queue	*que = adapter->queues;
   4439 	u32		mask, fwsm;
   4440 
   4441 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
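         	/*
         	 * The per-queue (RTX) causes are deliberately left out of this
         	 * mask; each queue vector is enabled individually through
         	 * ixgbe_enable_queue() at the end of this function.
         	 */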
   4442 
   4443 	switch (adapter->hw.mac.type) {
   4444 	case ixgbe_mac_82599EB:
   4445 		mask |= IXGBE_EIMS_ECC;
   4446 		/* Temperature sensor on some adapters */
   4447 		mask |= IXGBE_EIMS_GPI_SDP0;
   4448 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4449 		mask |= IXGBE_EIMS_GPI_SDP1;
   4450 		mask |= IXGBE_EIMS_GPI_SDP2;
   4451 		break;
   4452 	case ixgbe_mac_X540:
   4453 		/* Detect if Thermal Sensor is enabled */
   4454 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4455 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4456 			mask |= IXGBE_EIMS_TS;
   4457 		mask |= IXGBE_EIMS_ECC;
   4458 		break;
   4459 	case ixgbe_mac_X550:
   4460 		/* MAC thermal sensor is automatically enabled */
   4461 		mask |= IXGBE_EIMS_TS;
   4462 		mask |= IXGBE_EIMS_ECC;
   4463 		break;
   4464 	case ixgbe_mac_X550EM_x:
   4465 	case ixgbe_mac_X550EM_a:
   4466 		/* Some devices use SDP0 for important information */
   4467 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4468 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4469 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4470 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4471 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4472 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4473 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4474 		mask |= IXGBE_EIMS_ECC;
   4475 		break;
   4476 	default:
   4477 		break;
   4478 	}
   4479 
   4480 	/* Enable Fan Failure detection */
   4481 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4482 		mask |= IXGBE_EIMS_GPI_SDP1;
   4483 	/* Enable SR-IOV */
   4484 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4485 		mask |= IXGBE_EIMS_MAILBOX;
   4486 	/* Enable Flow Director */
   4487 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4488 		mask |= IXGBE_EIMS_FLOW_DIR;
   4489 
   4490 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4491 
   4492 	/* With MSI-X we use auto clear */
   4493 	if (adapter->msix_mem) {
   4494 		mask = IXGBE_EIMS_ENABLE_MASK;
   4495 		/* Don't autoclear Link */
   4496 		mask &= ~IXGBE_EIMS_OTHER;
   4497 		mask &= ~IXGBE_EIMS_LSC;
   4498 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4499 			mask &= ~IXGBE_EIMS_MAILBOX;
   4500 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4501 	}
   4502 
   4503 	/*
   4504 	 * Now enable all queues, this is done separately to
   4505 	 * allow for handling the extended (beyond 32) MSI-X
   4506 	 * vectors that can be used by 82599
   4507 	 */
    4508 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4509 		ixgbe_enable_queue(adapter, que->msix);
   4510 
   4511 	IXGBE_WRITE_FLUSH(hw);
   4512 
   4513 	return;
   4514 } /* ixgbe_enable_intr */
   4515 
   4516 /************************************************************************
   4517  * ixgbe_disable_intr
   4518  ************************************************************************/
   4519 static void
   4520 ixgbe_disable_intr(struct adapter *adapter)
   4521 {
   4522 	if (adapter->msix_mem)
   4523 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
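         	/*
         	 * 82598 keeps every cause in the single EIMC register; later
         	 * MACs also carry queue causes in the extended EIMC_EX(0) and
         	 * EIMC_EX(1) registers, so those are masked as well.
         	 */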
   4524 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4525 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4526 	} else {
   4527 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4528 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4529 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4530 	}
   4531 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4532 
   4533 	return;
   4534 } /* ixgbe_disable_intr */
   4535 
   4536 /************************************************************************
   4537  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4538  ************************************************************************/
   4539 static int
   4540 ixgbe_legacy_irq(void *arg)
   4541 {
   4542 	struct ix_queue *que = arg;
   4543 	struct adapter	*adapter = que->adapter;
   4544 	struct ixgbe_hw	*hw = &adapter->hw;
   4545 	struct ifnet    *ifp = adapter->ifp;
    4546 	struct tx_ring	*txr = adapter->tx_rings;
   4547 	bool		more = false;
   4548 	u32             eicr, eicr_mask;
   4549 
   4550 	/* Silicon errata #26 on 82598 */
   4551 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4552 
   4553 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4554 
   4555 	adapter->stats.pf.legint.ev_count++;
   4556 	++que->irqs.ev_count;
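         	/*
         	 * A zero EICR usually means a shared INTx line fired for some
         	 * other device: count it, re-enable our interrupts and report
         	 * the interrupt as not handled.
         	 */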
   4557 	if (eicr == 0) {
   4558 		adapter->stats.pf.intzero.ev_count++;
   4559 		if ((ifp->if_flags & IFF_UP) != 0)
   4560 			ixgbe_enable_intr(adapter);
   4561 		return 0;
   4562 	}
   4563 
   4564 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4565 #ifdef __NetBSD__
   4566 		/* Don't run ixgbe_rxeof in interrupt context */
   4567 		more = true;
   4568 #else
   4569 		more = ixgbe_rxeof(que);
   4570 #endif
   4571 
   4572 		IXGBE_TX_LOCK(txr);
   4573 		ixgbe_txeof(txr);
   4574 #ifdef notyet
   4575 		if (!ixgbe_ring_empty(ifp, txr->br))
   4576 			ixgbe_start_locked(ifp, txr);
   4577 #endif
   4578 		IXGBE_TX_UNLOCK(txr);
   4579 	}
   4580 
   4581 	/* Check for fan failure */
   4582 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4583 		ixgbe_check_fan_failure(adapter, eicr, true);
   4584 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4585 	}
   4586 
   4587 	/* Link status change */
   4588 	if (eicr & IXGBE_EICR_LSC)
   4589 		softint_schedule(adapter->link_si);
   4590 
   4591 	if (ixgbe_is_sfp(hw)) {
   4592 		/* Pluggable optics-related interrupt */
   4593 		if (hw->mac.type >= ixgbe_mac_X540)
   4594 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4595 		else
   4596 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4597 
   4598 		if (eicr & eicr_mask) {
   4599 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4600 			softint_schedule(adapter->mod_si);
   4601 		}
   4602 
   4603 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4604 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4605 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4606 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4607 			softint_schedule(adapter->msf_si);
   4608 		}
   4609 	}
   4610 
   4611 	/* External PHY interrupt */
   4612 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4613 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4614 		softint_schedule(adapter->phy_si);
   4615 
   4616 	if (more)
   4617 		softint_schedule(que->que_si);
   4618 	else
   4619 		ixgbe_enable_intr(adapter);
   4620 
   4621 	return 1;
   4622 } /* ixgbe_legacy_irq */
   4623 
   4624 /************************************************************************
   4625  * ixgbe_free_pciintr_resources
   4626  ************************************************************************/
   4627 static void
   4628 ixgbe_free_pciintr_resources(struct adapter *adapter)
   4629 {
   4630 	struct ix_queue *que = adapter->queues;
   4631 	int		rid;
   4632 
   4633 	/*
   4634 	 * Release all msix queue resources:
   4635 	 */
   4636 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4637 		if (que->res != NULL) {
   4638 			pci_intr_disestablish(adapter->osdep.pc,
   4639 			    adapter->osdep.ihs[i]);
   4640 			adapter->osdep.ihs[i] = NULL;
   4641 		}
   4642 	}
   4643 
   4644 	/* Clean the Legacy or Link interrupt last */
   4645 	if (adapter->vector) /* we are doing MSIX */
   4646 		rid = adapter->vector;
   4647 	else
   4648 		rid = 0;
   4649 
   4650 	if (adapter->osdep.ihs[rid] != NULL) {
   4651 		pci_intr_disestablish(adapter->osdep.pc,
   4652 		    adapter->osdep.ihs[rid]);
   4653 		adapter->osdep.ihs[rid] = NULL;
   4654 	}
   4655 
   4656 	if (adapter->osdep.intrs != NULL) {
   4657 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4658 		    adapter->osdep.nintrs);
   4659 		adapter->osdep.intrs = NULL;
   4660 	}
   4661 
   4662 	return;
   4663 } /* ixgbe_free_pciintr_resources */
   4664 
   4665 /************************************************************************
   4666  * ixgbe_free_pci_resources
   4667  ************************************************************************/
   4668 static void
   4669 ixgbe_free_pci_resources(struct adapter *adapter)
   4670 {
   4671 
   4672 	ixgbe_free_pciintr_resources(adapter);
   4673 
   4674 	if (adapter->osdep.mem_size != 0) {
   4675 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4676 		    adapter->osdep.mem_bus_space_handle,
   4677 		    adapter->osdep.mem_size);
   4678 	}
   4679 
   4680 	return;
   4681 } /* ixgbe_free_pci_resources */
   4682 
   4683 /************************************************************************
   4684  * ixgbe_set_sysctl_value
   4685  ************************************************************************/
   4686 static void
   4687 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4688     const char *description, int *limit, int value)
   4689 {
    4690 	device_t dev = adapter->dev;
   4691 	struct sysctllog **log;
   4692 	const struct sysctlnode *rnode, *cnode;
   4693 
   4694 	log = &adapter->sysctllog;
   4695 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4696 		aprint_error_dev(dev, "could not create sysctl root\n");
   4697 		return;
   4698 	}
   4699 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4700 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4701 	    name, SYSCTL_DESCR(description),
    4702 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4703 		aprint_error_dev(dev, "could not create sysctl\n");
   4704 	*limit = value;
   4705 } /* ixgbe_set_sysctl_value */
   4706 
   4707 /************************************************************************
   4708  * ixgbe_sysctl_flowcntl
   4709  *
   4710  *   SYSCTL wrapper around setting Flow Control
   4711  ************************************************************************/
   4712 static int
   4713 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4714 {
   4715 	struct sysctlnode node = *rnode;
   4716 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4717 	int error, fc;
   4718 
   4719 	fc = adapter->hw.fc.current_mode;
   4720 	node.sysctl_data = &fc;
   4721 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4722 	if (error != 0 || newp == NULL)
   4723 		return error;
   4724 
   4725 	/* Don't bother if it's not changed */
   4726 	if (fc == adapter->hw.fc.current_mode)
   4727 		return (0);
   4728 
   4729 	return ixgbe_set_flowcntl(adapter, fc);
   4730 } /* ixgbe_sysctl_flowcntl */
   4731 
   4732 /************************************************************************
   4733  * ixgbe_set_flowcntl - Set flow control
   4734  *
   4735  *   Flow control values:
   4736  *     0 - off
   4737  *     1 - rx pause
   4738  *     2 - tx pause
   4739  *     3 - full
   4740  ************************************************************************/
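         /*
          * Usage sketch (the sysctl node name below is illustrative only;
          * the actual path depends on how ixgbe_sysctl_instance() names the
          * device instance):
          *
          *	sysctl -w hw.ixg0.fc=3	# request full rx/tx pause frames
          *	sysctl -w hw.ixg0.fc=0	# turn flow control off
          */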
   4741 static int
   4742 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4743 {
    4744 	switch (fc) {
    4745 	case ixgbe_fc_rx_pause:
    4746 	case ixgbe_fc_tx_pause:
    4747 	case ixgbe_fc_full:
    4748 		adapter->hw.fc.requested_mode = fc;
    4749 		if (adapter->num_queues > 1)
    4750 			ixgbe_disable_rx_drop(adapter);
    4751 		break;
    4752 	case ixgbe_fc_none:
    4753 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
    4754 		if (adapter->num_queues > 1)
    4755 			ixgbe_enable_rx_drop(adapter);
    4756 		break;
    4757 	default:
    4758 		return (EINVAL);
    4759 	}
   4760 
   4761 #if 0 /* XXX NetBSD */
   4762 	/* Don't autoneg if forcing a value */
   4763 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4764 #endif
   4765 	ixgbe_fc_enable(&adapter->hw);
   4766 
   4767 	return (0);
   4768 } /* ixgbe_set_flowcntl */
   4769 
   4770 /************************************************************************
   4771  * ixgbe_enable_rx_drop
   4772  *
   4773  *   Enable the hardware to drop packets when the buffer is
   4774  *   full. This is useful with multiqueue, so that no single
   4775  *   queue being full stalls the entire RX engine. We only
   4776  *   enable this when Multiqueue is enabled AND Flow Control
   4777  *   is disabled.
   4778  ************************************************************************/
   4779 static void
   4780 ixgbe_enable_rx_drop(struct adapter *adapter)
   4781 {
   4782 	struct ixgbe_hw *hw = &adapter->hw;
   4783 	struct rx_ring  *rxr;
   4784 	u32             srrctl;
   4785 
   4786 	for (int i = 0; i < adapter->num_queues; i++) {
   4787 		rxr = &adapter->rx_rings[i];
   4788 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4789 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4790 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4791 	}
   4792 
   4793 	/* enable drop for each vf */
   4794 	for (int i = 0; i < adapter->num_vfs; i++) {
   4795 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4796 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4797 		    IXGBE_QDE_ENABLE));
   4798 	}
   4799 } /* ixgbe_enable_rx_drop */
   4800 
   4801 /************************************************************************
   4802  * ixgbe_disable_rx_drop
   4803  ************************************************************************/
   4804 static void
   4805 ixgbe_disable_rx_drop(struct adapter *adapter)
   4806 {
   4807 	struct ixgbe_hw *hw = &adapter->hw;
   4808 	struct rx_ring  *rxr;
   4809 	u32             srrctl;
   4810 
   4811 	for (int i = 0; i < adapter->num_queues; i++) {
   4812 		rxr = &adapter->rx_rings[i];
    4813 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4814 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4815 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4816 	}
   4817 
   4818 	/* disable drop for each vf */
   4819 	for (int i = 0; i < adapter->num_vfs; i++) {
   4820 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4821 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4822 	}
   4823 } /* ixgbe_disable_rx_drop */
   4824 
   4825 /************************************************************************
   4826  * ixgbe_sysctl_advertise
   4827  *
   4828  *   SYSCTL wrapper around setting advertised speed
   4829  ************************************************************************/
   4830 static int
   4831 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4832 {
   4833 	struct sysctlnode node = *rnode;
   4834 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4835 	int            error = 0, advertise;
   4836 
   4837 	advertise = adapter->advertise;
   4838 	node.sysctl_data = &advertise;
   4839 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4840 	if (error != 0 || newp == NULL)
   4841 		return error;
   4842 
   4843 	return ixgbe_set_advertise(adapter, advertise);
   4844 } /* ixgbe_sysctl_advertise */
   4845 
   4846 /************************************************************************
   4847  * ixgbe_set_advertise - Control advertised link speed
   4848  *
   4849  *   Flags:
   4850  *     0x00 - Default (all capable link speed)
   4851  *     0x01 - advertise 100 Mb
   4852  *     0x02 - advertise 1G
   4853  *     0x04 - advertise 10G
   4854  *     0x08 - advertise 10 Mb
   4855  *     0x10 - advertise 2.5G
   4856  *     0x20 - advertise 5G
   4857  ************************************************************************/
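         /*
          * The flags OR together: for example, a (hypothetical) value of
          * 0x06 (0x02 | 0x04) advertises only 1G and 10G, while 0 restores
          * every speed the link reports as supported.
          */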
   4858 static int
   4859 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4860 {
   4861 	device_t         dev;
   4862 	struct ixgbe_hw  *hw;
   4863 	ixgbe_link_speed speed = 0;
   4864 	ixgbe_link_speed link_caps = 0;
   4865 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4866 	bool             negotiate = FALSE;
   4867 
   4868 	/* Checks to validate new value */
   4869 	if (adapter->advertise == advertise) /* no change */
   4870 		return (0);
   4871 
   4872 	dev = adapter->dev;
   4873 	hw = &adapter->hw;
   4874 
   4875 	/* No speed changes for backplane media */
   4876 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4877 		return (ENODEV);
   4878 
   4879 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4880 	    (hw->phy.multispeed_fiber))) {
   4881 		device_printf(dev,
   4882 		    "Advertised speed can only be set on copper or "
   4883 		    "multispeed fiber media types.\n");
   4884 		return (EINVAL);
   4885 	}
   4886 
    4887 	if (advertise < 0x0 || advertise > 0x3f) {
    4888 		device_printf(dev,
    4889 		    "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
   4890 		return (EINVAL);
   4891 	}
   4892 
   4893 	if (hw->mac.ops.get_link_capabilities) {
   4894 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4895 		    &negotiate);
   4896 		if (err != IXGBE_SUCCESS) {
   4897 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   4898 			return (ENODEV);
   4899 		}
   4900 	}
   4901 
   4902 	/* Set new value and report new advertised mode */
   4903 	if (advertise & 0x1) {
   4904 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4905 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4906 			return (EINVAL);
   4907 		}
   4908 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4909 	}
   4910 	if (advertise & 0x2) {
   4911 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4912 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4913 			return (EINVAL);
   4914 		}
   4915 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4916 	}
   4917 	if (advertise & 0x4) {
   4918 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4919 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4920 			return (EINVAL);
   4921 		}
   4922 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4923 	}
   4924 	if (advertise & 0x8) {
   4925 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4926 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4927 			return (EINVAL);
   4928 		}
   4929 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4930 	}
   4931 	if (advertise & 0x10) {
   4932 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4933 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4934 			return (EINVAL);
   4935 		}
   4936 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4937 	}
   4938 	if (advertise & 0x20) {
   4939 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4940 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4941 			return (EINVAL);
   4942 		}
   4943 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4944 	}
   4945 	if (advertise == 0)
   4946 		speed = link_caps; /* All capable link speed */
   4947 
   4948 	hw->mac.autotry_restart = TRUE;
   4949 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4950 	adapter->advertise = advertise;
   4951 
   4952 	return (0);
   4953 } /* ixgbe_set_advertise */
   4954 
   4955 /************************************************************************
   4956  * ixgbe_get_advertise - Get current advertised speed settings
   4957  *
   4958  *   Formatted for sysctl usage.
   4959  *   Flags:
   4960  *     0x01 - advertise 100 Mb
   4961  *     0x02 - advertise 1G
   4962  *     0x04 - advertise 10G
   4963  *     0x08 - advertise 10 Mb (yes, Mb)
   4964  *     0x10 - advertise 2.5G
   4965  *     0x20 - advertise 5G
   4966  ************************************************************************/
   4967 static int
   4968 ixgbe_get_advertise(struct adapter *adapter)
   4969 {
   4970 	struct ixgbe_hw  *hw = &adapter->hw;
   4971 	int              speed;
   4972 	ixgbe_link_speed link_caps = 0;
   4973 	s32              err;
   4974 	bool             negotiate = FALSE;
   4975 
   4976 	/*
   4977 	 * Advertised speed means nothing unless it's copper or
   4978 	 * multi-speed fiber
   4979 	 */
   4980 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4981 	    !(hw->phy.multispeed_fiber))
   4982 		return (0);
   4983 
   4984 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4985 	if (err != IXGBE_SUCCESS)
   4986 		return (0);
   4987 
   4988 	speed =
   4989 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   4990 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   4991 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   4992 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   4993 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   4994 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   4995 
   4996 	return speed;
   4997 } /* ixgbe_get_advertise */
   4998 
   4999 /************************************************************************
   5000  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5001  *
   5002  *   Control values:
   5003  *     0/1 - off / on (use default value of 1000)
   5004  *
   5005  *     Legal timer values are:
   5006  *     50,100,250,500,1000,2000,5000,10000
   5007  *
   5008  *     Turning off interrupt moderation will also turn this off.
   5009  ************************************************************************/
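         /*
          * Usage sketch (node path is illustrative; see
          * ixgbe_sysctl_instance() for the real instance name):
          *
          *	sysctl -w hw.ixg0.dmac=1	# enable with the default timer
          *	sysctl -w hw.ixg0.dmac=250	# enable with a specific timer
          *	sysctl -w hw.ixg0.dmac=0	# disable DMA coalescing
          */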
   5010 static int
   5011 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5012 {
   5013 	struct sysctlnode node = *rnode;
   5014 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5015 	struct ifnet   *ifp = adapter->ifp;
   5016 	int            error;
   5017 	int            newval;
   5018 
   5019 	newval = adapter->dmac;
   5020 	node.sysctl_data = &newval;
   5021 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5022 	if ((error) || (newp == NULL))
   5023 		return (error);
   5024 
   5025 	switch (newval) {
   5026 	case 0:
   5027 		/* Disabled */
   5028 		adapter->dmac = 0;
   5029 		break;
   5030 	case 1:
   5031 		/* Enable and use default */
   5032 		adapter->dmac = 1000;
   5033 		break;
   5034 	case 50:
   5035 	case 100:
   5036 	case 250:
   5037 	case 500:
   5038 	case 1000:
   5039 	case 2000:
   5040 	case 5000:
   5041 	case 10000:
   5042 		/* Legal values - allow */
   5043 		adapter->dmac = newval;
   5044 		break;
   5045 	default:
   5046 		/* Do nothing, illegal value */
   5047 		return (EINVAL);
   5048 	}
   5049 
   5050 	/* Re-initialize hardware if it's already running */
   5051 	if (ifp->if_flags & IFF_RUNNING)
   5052 		ixgbe_init(ifp);
   5053 
   5054 	return (0);
   5055 }
   5056 
   5057 #ifdef IXGBE_DEBUG
   5058 /************************************************************************
   5059  * ixgbe_sysctl_power_state
   5060  *
   5061  *   Sysctl to test power states
   5062  *   Values:
   5063  *     0      - set device to D0
   5064  *     3      - set device to D3
   5065  *     (none) - get current device power state
   5066  ************************************************************************/
   5067 static int
   5068 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   5069 {
   5070 #ifdef notyet
   5071 	struct sysctlnode node = *rnode;
   5072 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5073 	device_t       dev =  adapter->dev;
   5074 	int            curr_ps, new_ps, error = 0;
   5075 
   5076 	curr_ps = new_ps = pci_get_powerstate(dev);
   5077 
   5078 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    5079 	if ((error) || (newp == NULL))
   5080 		return (error);
   5081 
   5082 	if (new_ps == curr_ps)
   5083 		return (0);
   5084 
   5085 	if (new_ps == 3 && curr_ps == 0)
   5086 		error = DEVICE_SUSPEND(dev);
   5087 	else if (new_ps == 0 && curr_ps == 3)
   5088 		error = DEVICE_RESUME(dev);
   5089 	else
   5090 		return (EINVAL);
   5091 
   5092 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5093 
   5094 	return (error);
   5095 #else
   5096 	return 0;
   5097 #endif
   5098 } /* ixgbe_sysctl_power_state */
   5099 #endif
   5100 
   5101 /************************************************************************
   5102  * ixgbe_sysctl_wol_enable
   5103  *
   5104  *   Sysctl to enable/disable the WoL capability,
   5105  *   if supported by the adapter.
   5106  *
   5107  *   Values:
   5108  *     0 - disabled
   5109  *     1 - enabled
   5110  ************************************************************************/
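         /*
          * Note: this only arms the WoL capability (and only when the
          * adapter reports wol_support); the packet types that actually
          * wake the system are selected through the wufc sysctl below.
          */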
   5111 static int
   5112 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5113 {
   5114 	struct sysctlnode node = *rnode;
   5115 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5116 	struct ixgbe_hw *hw = &adapter->hw;
   5117 	bool            new_wol_enabled;
   5118 	int             error = 0;
   5119 
   5120 	new_wol_enabled = hw->wol_enabled;
   5121 	node.sysctl_data = &new_wol_enabled;
   5122 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5123 	if ((error) || (newp == NULL))
   5124 		return (error);
   5125 	if (new_wol_enabled == hw->wol_enabled)
   5126 		return (0);
   5127 
   5128 	if (new_wol_enabled && !adapter->wol_support)
   5129 		return (ENODEV);
   5130 	else
   5131 		hw->wol_enabled = new_wol_enabled;
   5132 
   5133 	return (0);
   5134 } /* ixgbe_sysctl_wol_enable */
   5135 
   5136 /************************************************************************
   5137  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5138  *
   5139  *   Sysctl to enable/disable the types of packets that the
   5140  *   adapter will wake up on upon receipt.
   5141  *   Flags:
   5142  *     0x1  - Link Status Change
   5143  *     0x2  - Magic Packet
   5144  *     0x4  - Direct Exact
   5145  *     0x8  - Directed Multicast
   5146  *     0x10 - Broadcast
   5147  *     0x20 - ARP/IPv4 Request Packet
   5148  *     0x40 - Direct IPv4 Packet
   5149  *     0x80 - Direct IPv6 Packet
   5150  *
   5151  *   Settings not listed above will cause the sysctl to return an error.
   5152  ************************************************************************/
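         /*
          * The flags OR together; for example, a (hypothetical) value of
          * 0x3 wakes the system on either a link status change or a magic
          * packet.
          */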
   5153 static int
   5154 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5155 {
   5156 	struct sysctlnode node = *rnode;
   5157 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5158 	int error = 0;
   5159 	u32 new_wufc;
   5160 
   5161 	new_wufc = adapter->wufc;
   5162 	node.sysctl_data = &new_wufc;
   5163 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5164 	if ((error) || (newp == NULL))
   5165 		return (error);
   5166 	if (new_wufc == adapter->wufc)
   5167 		return (0);
   5168 
   5169 	if (new_wufc & 0xffffff00)
   5170 		return (EINVAL);
   5171 
   5172 	new_wufc &= 0xff;
   5173 	new_wufc |= (0xffffff & adapter->wufc);
   5174 	adapter->wufc = new_wufc;
   5175 
   5176 	return (0);
   5177 } /* ixgbe_sysctl_wufc */
   5178 
   5179 #ifdef IXGBE_DEBUG
   5180 /************************************************************************
   5181  * ixgbe_sysctl_print_rss_config
   5182  ************************************************************************/
   5183 static int
   5184 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5185 {
   5186 #ifdef notyet
   5187 	struct sysctlnode node = *rnode;
   5188 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5189 	struct ixgbe_hw *hw = &adapter->hw;
   5190 	device_t        dev = adapter->dev;
   5191 	struct sbuf     *buf;
   5192 	int             error = 0, reta_size;
   5193 	u32             reg;
   5194 
   5195 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5196 	if (!buf) {
   5197 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5198 		return (ENOMEM);
   5199 	}
   5200 
   5201 	// TODO: use sbufs to make a string to print out
   5202 	/* Set multiplier for RETA setup and table size based on MAC */
   5203 	switch (adapter->hw.mac.type) {
   5204 	case ixgbe_mac_X550:
   5205 	case ixgbe_mac_X550EM_x:
   5206 	case ixgbe_mac_X550EM_a:
   5207 		reta_size = 128;
   5208 		break;
   5209 	default:
   5210 		reta_size = 32;
   5211 		break;
   5212 	}
   5213 
   5214 	/* Print out the redirection table */
   5215 	sbuf_cat(buf, "\n");
   5216 	for (int i = 0; i < reta_size; i++) {
   5217 		if (i < 32) {
   5218 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5219 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5220 		} else {
   5221 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5222 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5223 		}
   5224 	}
   5225 
   5226 	// TODO: print more config
   5227 
   5228 	error = sbuf_finish(buf);
   5229 	if (error)
   5230 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5231 
   5232 	sbuf_delete(buf);
   5233 #endif
   5234 	return (0);
   5235 } /* ixgbe_sysctl_print_rss_config */
   5236 #endif /* IXGBE_DEBUG */
   5237 
   5238 /************************************************************************
   5239  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5240  *
   5241  *   For X552/X557-AT devices using an external PHY
   5242  ************************************************************************/
   5243 static int
   5244 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5245 {
   5246 	struct sysctlnode node = *rnode;
   5247 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5248 	struct ixgbe_hw *hw = &adapter->hw;
   5249 	int val;
   5250 	u16 reg;
   5251 	int		error;
   5252 
   5253 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5254 		device_printf(adapter->dev,
   5255 		    "Device has no supported external thermal sensor.\n");
   5256 		return (ENODEV);
   5257 	}
   5258 
   5259 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5260 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5261 		device_printf(adapter->dev,
   5262 		    "Error reading from PHY's current temperature register\n");
   5263 		return (EAGAIN);
   5264 	}
   5265 
   5266 	node.sysctl_data = &val;
   5267 
   5268 	/* Shift temp for output */
   5269 	val = reg >> 8;
   5270 
   5271 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5272 	if ((error) || (newp == NULL))
   5273 		return (error);
   5274 
   5275 	return (0);
   5276 } /* ixgbe_sysctl_phy_temp */
   5277 
   5278 /************************************************************************
   5279  * ixgbe_sysctl_phy_overtemp_occurred
   5280  *
   5281  *   Reports (directly from the PHY) whether the current PHY
   5282  *   temperature is over the overtemp threshold.
   5283  ************************************************************************/
   5284 static int
   5285 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5286 {
   5287 	struct sysctlnode node = *rnode;
   5288 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5289 	struct ixgbe_hw *hw = &adapter->hw;
   5290 	int val, error;
   5291 	u16 reg;
   5292 
   5293 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5294 		device_printf(adapter->dev,
   5295 		    "Device has no supported external thermal sensor.\n");
   5296 		return (ENODEV);
   5297 	}
   5298 
   5299 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5300 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5301 		device_printf(adapter->dev,
   5302 		    "Error reading from PHY's temperature status register\n");
   5303 		return (EAGAIN);
   5304 	}
   5305 
   5306 	node.sysctl_data = &val;
   5307 
   5308 	/* Get occurrence bit */
   5309 	val = !!(reg & 0x4000);
   5310 
   5311 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5312 	if ((error) || (newp == NULL))
   5313 		return (error);
   5314 
   5315 	return (0);
   5316 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5317 
   5318 /************************************************************************
   5319  * ixgbe_sysctl_eee_state
   5320  *
   5321  *   Sysctl to set EEE power saving feature
   5322  *   Values:
   5323  *     0      - disable EEE
   5324  *     1      - enable EEE
   5325  *     (none) - get current device EEE state
   5326  ************************************************************************/
   5327 static int
   5328 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5329 {
   5330 	struct sysctlnode node = *rnode;
   5331 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5332 	struct ifnet   *ifp = adapter->ifp;
   5333 	device_t       dev = adapter->dev;
   5334 	int            curr_eee, new_eee, error = 0;
   5335 	s32            retval;
   5336 
   5337 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5338 	node.sysctl_data = &new_eee;
   5339 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5340 	if ((error) || (newp == NULL))
   5341 		return (error);
   5342 
   5343 	/* Nothing to do */
   5344 	if (new_eee == curr_eee)
   5345 		return (0);
   5346 
   5347 	/* Not supported */
   5348 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5349 		return (EINVAL);
   5350 
   5351 	/* Bounds checking */
   5352 	if ((new_eee < 0) || (new_eee > 1))
   5353 		return (EINVAL);
   5354 
   5355 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5356 	if (retval) {
   5357 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5358 		return (EINVAL);
   5359 	}
   5360 
   5361 	/* Restart auto-neg */
   5362 	ixgbe_init(ifp);
   5363 
   5364 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5365 
   5366 	/* Cache new value */
   5367 	if (new_eee)
   5368 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5369 	else
   5370 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5371 
   5372 	return (error);
   5373 } /* ixgbe_sysctl_eee_state */
   5374 
   5375 /************************************************************************
   5376  * ixgbe_init_device_features
   5377  ************************************************************************/
   5378 static void
   5379 ixgbe_init_device_features(struct adapter *adapter)
   5380 {
   5381 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5382 	                  | IXGBE_FEATURE_RSS
   5383 	                  | IXGBE_FEATURE_MSI
   5384 	                  | IXGBE_FEATURE_MSIX
   5385 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5386 	                  | IXGBE_FEATURE_LEGACY_TX;
   5387 
   5388 	/* Set capabilities first... */
   5389 	switch (adapter->hw.mac.type) {
   5390 	case ixgbe_mac_82598EB:
   5391 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5392 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5393 		break;
   5394 	case ixgbe_mac_X540:
   5395 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5396 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5397 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5398 		    (adapter->hw.bus.func == 0))
   5399 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5400 		break;
   5401 	case ixgbe_mac_X550:
   5402 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5403 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5404 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5405 		break;
   5406 	case ixgbe_mac_X550EM_x:
   5407 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5408 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5409 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5410 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5411 		break;
   5412 	case ixgbe_mac_X550EM_a:
   5413 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5414 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5415 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5416 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5417 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5418 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5419 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5420 		}
   5421 		break;
   5422 	case ixgbe_mac_82599EB:
   5423 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5424 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5425 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5426 		    (adapter->hw.bus.func == 0))
   5427 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5428 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5429 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5430 		break;
   5431 	default:
   5432 		break;
   5433 	}
   5434 
   5435 	/* Enabled by default... */
   5436 	/* Fan failure detection */
   5437 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5438 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5439 	/* Netmap */
   5440 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5441 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5442 	/* EEE */
   5443 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5444 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5445 	/* Thermal Sensor */
   5446 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5447 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5448 
   5449 	/* Enabled via global sysctl... */
   5450 	/* Flow Director */
   5451 	if (ixgbe_enable_fdir) {
   5452 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5453 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5454 		else
    5455 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5456 	}
   5457 	/* Legacy (single queue) transmit */
   5458 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5459 	    ixgbe_enable_legacy_tx)
   5460 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5461 	/*
   5462 	 * Message Signal Interrupts - Extended (MSI-X)
   5463 	 * Normal MSI is only enabled if MSI-X calls fail.
   5464 	 */
   5465 	if (!ixgbe_enable_msix)
   5466 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5467 	/* Receive-Side Scaling (RSS) */
   5468 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5469 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5470 
   5471 	/* Disable features with unmet dependencies... */
   5472 	/* No MSI-X */
   5473 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5474 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5475 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5476 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5477 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5478 	}
   5479 } /* ixgbe_init_device_features */
   5480 
   5481 /************************************************************************
   5482  * ixgbe_probe - Device identification routine
   5483  *
   5484  *   Determines if the driver should be loaded on
   5485  *   adapter based on its PCI vendor/device ID.
   5486  *
   5487  *   return BUS_PROBE_DEFAULT on success, positive on failure
   5488  ************************************************************************/
   5489 static int
   5490 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5491 {
   5492 	const struct pci_attach_args *pa = aux;
   5493 
   5494 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5495 }
   5496 
   5497 static ixgbe_vendor_info_t *
   5498 ixgbe_lookup(const struct pci_attach_args *pa)
   5499 {
   5500 	ixgbe_vendor_info_t *ent;
   5501 	pcireg_t subid;
   5502 
   5503 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5504 
   5505 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5506 		return NULL;
   5507 
   5508 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5509 
   5510 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5511 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5512 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5513 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5514 			(ent->subvendor_id == 0)) &&
   5515 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5516 			(ent->subdevice_id == 0))) {
   5517 			++ixgbe_total_ports;
   5518 			return ent;
   5519 		}
   5520 	}
   5521 	return NULL;
   5522 }
   5523 
   5524 static int
   5525 ixgbe_ifflags_cb(struct ethercom *ec)
   5526 {
   5527 	struct ifnet *ifp = &ec->ec_if;
   5528 	struct adapter *adapter = ifp->if_softc;
   5529 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5530 
   5531 	IXGBE_CORE_LOCK(adapter);
   5532 
   5533 	if (change != 0)
   5534 		adapter->if_flags = ifp->if_flags;
   5535 
   5536 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5537 		rc = ENETRESET;
   5538 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5539 		ixgbe_set_promisc(adapter);
   5540 
   5541 	/* Set up VLAN support and filter */
   5542 	ixgbe_setup_vlan_hw_support(adapter);
   5543 
   5544 	IXGBE_CORE_UNLOCK(adapter);
   5545 
   5546 	return rc;
   5547 }
   5548 
   5549 /************************************************************************
   5550  * ixgbe_ioctl - Ioctl entry point
   5551  *
   5552  *   Called when the user wants to configure the interface.
   5553  *
   5554  *   return 0 on success, positive on failure
   5555  ************************************************************************/
   5556 static int
   5557 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5558 {
   5559 	struct adapter	*adapter = ifp->if_softc;
   5560 	struct ixgbe_hw *hw = &adapter->hw;
   5561 	struct ifcapreq *ifcr = data;
   5562 	struct ifreq	*ifr = data;
   5563 	int             error = 0;
   5564 	int l4csum_en;
   5565 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5566 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5567 
   5568 	switch (command) {
   5569 	case SIOCSIFFLAGS:
   5570 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5571 		break;
   5572 	case SIOCADDMULTI:
   5573 	case SIOCDELMULTI:
   5574 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5575 		break;
   5576 	case SIOCSIFMEDIA:
   5577 	case SIOCGIFMEDIA:
   5578 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5579 		break;
   5580 	case SIOCSIFCAP:
   5581 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5582 		break;
   5583 	case SIOCSIFMTU:
   5584 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5585 		break;
   5586 #ifdef __NetBSD__
   5587 	case SIOCINITIFADDR:
   5588 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5589 		break;
   5590 	case SIOCGIFFLAGS:
   5591 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5592 		break;
   5593 	case SIOCGIFAFLAG_IN:
   5594 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5595 		break;
   5596 	case SIOCGIFADDR:
   5597 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5598 		break;
   5599 	case SIOCGIFMTU:
   5600 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5601 		break;
   5602 	case SIOCGIFCAP:
   5603 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5604 		break;
   5605 	case SIOCGETHERCAP:
   5606 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5607 		break;
   5608 	case SIOCGLIFADDR:
   5609 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5610 		break;
   5611 	case SIOCZIFDATA:
   5612 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5613 		hw->mac.ops.clear_hw_cntrs(hw);
   5614 		ixgbe_clear_evcnt(adapter);
   5615 		break;
   5616 	case SIOCAIFADDR:
   5617 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5618 		break;
   5619 #endif
   5620 	default:
   5621 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5622 		break;
   5623 	}
   5624 
   5625 	switch (command) {
   5626 	case SIOCSIFMEDIA:
   5627 	case SIOCGIFMEDIA:
   5628 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5629 	case SIOCGI2C:
   5630 	{
   5631 		struct ixgbe_i2c_req	i2c;
   5632 
   5633 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5634 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5635 		if (error != 0)
   5636 			break;
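         		/*
         		 * Only the two standard SFP+ I2C target addresses are
         		 * accepted: 0xA0 (identification EEPROM) and 0xA2
         		 * (diagnostics), as defined by SFF-8472.
         		 */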
   5637 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5638 			error = EINVAL;
   5639 			break;
   5640 		}
   5641 		if (i2c.len > sizeof(i2c.data)) {
   5642 			error = EINVAL;
   5643 			break;
   5644 		}
   5645 
   5646 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5647 		    i2c.dev_addr, i2c.data);
   5648 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5649 		break;
   5650 	}
   5651 	case SIOCSIFCAP:
   5652 		/* Layer-4 Rx checksum offload has to be turned on and
   5653 		 * off as a unit.
   5654 		 */
   5655 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5656 		if (l4csum_en != l4csum && l4csum_en != 0)
   5657 			return EINVAL;
   5658 		/*FALLTHROUGH*/
   5659 	case SIOCADDMULTI:
   5660 	case SIOCDELMULTI:
   5661 	case SIOCSIFFLAGS:
   5662 	case SIOCSIFMTU:
   5663 	default:
   5664 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5665 			return error;
   5666 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5667 			;
   5668 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5669 			IXGBE_CORE_LOCK(adapter);
   5670 			ixgbe_init_locked(adapter);
   5671 			ixgbe_recalculate_max_frame(adapter);
   5672 			IXGBE_CORE_UNLOCK(adapter);
   5673 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5674 			/*
   5675 			 * Multicast list has changed; set the hardware filter
   5676 			 * accordingly.
   5677 			 */
   5678 			IXGBE_CORE_LOCK(adapter);
   5679 			ixgbe_disable_intr(adapter);
   5680 			ixgbe_set_multi(adapter);
   5681 			ixgbe_enable_intr(adapter);
   5682 			IXGBE_CORE_UNLOCK(adapter);
   5683 		}
   5684 		return 0;
   5685 	}
   5686 
   5687 	return error;
   5688 } /* ixgbe_ioctl */
   5689 
   5690 /************************************************************************
   5691  * ixgbe_check_fan_failure
   5692  ************************************************************************/
   5693 static void
   5694 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5695 {
   5696 	u32 mask;
   5697 
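         	/*
         	 * The fan-fail signal is routed to SDP1: in interrupt context
         	 * the caller passes the latched EICR value, otherwise the raw
         	 * ESDP pin state is examined.
         	 */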
   5698 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5699 	    IXGBE_ESDP_SDP1;
   5700 
   5701 	if (reg & mask)
   5702 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5703 } /* ixgbe_check_fan_failure */
   5704 
   5705 /************************************************************************
   5706  * ixgbe_handle_que
   5707  ************************************************************************/
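         /*
          * Deferred (softint) half of a queue interrupt: completes RX/TX
          * cleanup outside hard-interrupt context and then re-enables the
          * queue vector (or the global mask when not running with MSI-X).
          */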
   5708 static void
   5709 ixgbe_handle_que(void *context)
   5710 {
   5711 	struct ix_queue *que = context;
   5712 	struct adapter  *adapter = que->adapter;
   5713 	struct tx_ring  *txr = que->txr;
   5714 	struct ifnet    *ifp = adapter->ifp;
   5715 
   5716 	adapter->handleq.ev_count++;
   5717 
   5718 	if (ifp->if_flags & IFF_RUNNING) {
   5719 		ixgbe_rxeof(que);
   5720 		IXGBE_TX_LOCK(txr);
   5721 		ixgbe_txeof(txr);
   5722 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5723 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5724 				ixgbe_mq_start_locked(ifp, txr);
   5725 		/* Only for queue 0 */
   5726 		/* NetBSD still needs this for CBQ */
   5727 		if ((&adapter->queues[0] == que)
   5728 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5729 			ixgbe_legacy_start_locked(ifp, txr);
   5730 		IXGBE_TX_UNLOCK(txr);
   5731 	}
   5732 
   5733 	/* Re-enable this interrupt */
   5734 	if (que->res != NULL)
   5735 		ixgbe_enable_queue(adapter, que->msix);
   5736 	else
   5737 		ixgbe_enable_intr(adapter);
   5738 
   5739 	return;
   5740 } /* ixgbe_handle_que */
   5741 
   5742 /************************************************************************
   5743  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5744  ************************************************************************/
   5745 static int
   5746 ixgbe_allocate_legacy(struct adapter *adapter,
   5747     const struct pci_attach_args *pa)
   5748 {
   5749 	device_t	dev = adapter->dev;
   5750 	struct ix_queue *que = adapter->queues;
   5751 	struct tx_ring  *txr = adapter->tx_rings;
   5752 	int		counts[PCI_INTR_TYPE_SIZE];
   5753 	pci_intr_type_t intr_type, max_type;
   5754 	char            intrbuf[PCI_INTRSTR_LEN];
   5755 	const char	*intrstr = NULL;
   5756 
   5757 	/* We allocate a single interrupt resource */
   5758 	max_type = PCI_INTR_TYPE_MSI;
   5759 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5760 	counts[PCI_INTR_TYPE_MSI] =
   5761 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
    5762 	/* Check feat_cap, not feat_en, so we can still fall back to INTx */
   5763 	counts[PCI_INTR_TYPE_INTX] =
   5764 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5765 
   5766 alloc_retry:
   5767 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5768 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5769 		return ENXIO;
   5770 	}
   5771 	adapter->osdep.nintrs = 1;
   5772 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5773 	    intrbuf, sizeof(intrbuf));
   5774 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5775 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5776 	    device_xname(dev));
   5777 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   5778 	if (adapter->osdep.ihs[0] == NULL) {
    5779 		aprint_error_dev(dev, "unable to establish %s\n",
   5780 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5781 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5782 		adapter->osdep.intrs = NULL;
   5783 		switch (intr_type) {
   5784 		case PCI_INTR_TYPE_MSI:
   5785 			/* The next try is for INTx: Disable MSI */
   5786 			max_type = PCI_INTR_TYPE_INTX;
   5787 			counts[PCI_INTR_TYPE_INTX] = 1;
   5788 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5789 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   5790 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5791 				goto alloc_retry;
   5792 			} else
   5793 				break;
   5794 		case PCI_INTR_TYPE_INTX:
   5795 		default:
   5796 			/* See below */
   5797 			break;
   5798 		}
   5799 	}
   5800 	if (intr_type == PCI_INTR_TYPE_INTX) {
   5801 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5802 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5803 	}
   5804 	if (adapter->osdep.ihs[0] == NULL) {
   5805 		aprint_error_dev(dev,
   5806 		    "couldn't establish interrupt%s%s\n",
   5807 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5808 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5809 		adapter->osdep.intrs = NULL;
   5810 		return ENXIO;
   5811 	}
   5812 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5813 	/*
   5814 	 * Try allocating a fast interrupt and the associated deferred
   5815 	 * processing contexts.
   5816 	 */
   5817 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5818 		txr->txr_si =
   5819 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5820 			ixgbe_deferred_mq_start, txr);
   5821 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5822 	    ixgbe_handle_que, que);
   5823 
    5824 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
    5825 	    && (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   5826 		aprint_error_dev(dev,
   5827 		    "could not establish software interrupts\n");
   5828 
   5829 		return ENXIO;
   5830 	}
   5831 	/* For simplicity in the handlers */
   5832 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5833 
   5834 	return (0);
   5835 } /* ixgbe_allocate_legacy */
   5836 
   5837 
   5838 /************************************************************************
   5839  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5840  ************************************************************************/
   5841 static int
   5842 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5843 {
   5844 	device_t        dev = adapter->dev;
    5845 	struct ix_queue	*que = adapter->queues;
    5846 	struct tx_ring	*txr = adapter->tx_rings;
   5847 	pci_chipset_tag_t pc;
   5848 	char		intrbuf[PCI_INTRSTR_LEN];
   5849 	char		intr_xname[32];
   5850 	const char	*intrstr = NULL;
   5851 	int 		error, vector = 0;
   5852 	int		cpu_id = 0;
   5853 	kcpuset_t	*affinity;
   5854 #ifdef RSS
   5855 	unsigned int    rss_buckets = 0;
   5856 	kcpuset_t	cpu_mask;
   5857 #endif
   5858 
   5859 	pc = adapter->osdep.pc;
   5860 #ifdef	RSS
   5861 	/*
   5862 	 * If we're doing RSS, the number of queues needs to
   5863 	 * match the number of RSS buckets that are configured.
   5864 	 *
    5865 	 * + If there are more queues than RSS buckets, we'll end
   5866 	 *   up with queues that get no traffic.
   5867 	 *
    5868 	 * + If there are more RSS buckets than queues, we'll end
   5869 	 *   up having multiple RSS buckets map to the same queue,
   5870 	 *   so there'll be some contention.
   5871 	 */
   5872 	rss_buckets = rss_getnumbuckets();
   5873 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5874 	    (adapter->num_queues != rss_buckets)) {
   5875 		device_printf(dev,
   5876 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5877 		    "; performance will be impacted.\n",
   5878 		    __func__, adapter->num_queues, rss_buckets);
   5879 	}
   5880 #endif
   5881 
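         	/* One MSI-X vector per TX/RX queue pair, plus one for link events. */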
   5882 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5883 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5884 	    adapter->osdep.nintrs) != 0) {
   5885 		aprint_error_dev(dev,
   5886 		    "failed to allocate MSI-X interrupt\n");
   5887 		return (ENXIO);
   5888 	}
   5889 
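         	/*
         	 * Establish a handler for each TX/RX queue vector and bind
         	 * each vector to a CPU: round-robin over the available CPUs,
         	 * or following the RSS bucket -> CPU mapping when RSS is
         	 * enabled.
         	 */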
   5890 	kcpuset_create(&affinity, false);
   5891 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5892 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5893 		    device_xname(dev), i);
   5894 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5895 		    sizeof(intrbuf));
   5896 #ifdef IXGBE_MPSAFE
   5897 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5898 		    true);
   5899 #endif
   5900 		/* Set the handler function */
   5901 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5902 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5903 		    intr_xname);
   5904 		if (que->res == NULL) {
   5905 			aprint_error_dev(dev,
   5906 			    "Failed to register QUE handler\n");
   5907 			error = ENXIO;
   5908 			goto err_out;
   5909 		}
   5910 		que->msix = vector;
    5911 		adapter->active_queues |= (u64)1 << que->msix;
   5912 
   5913 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5914 #ifdef	RSS
   5915 			/*
   5916 			 * The queue ID is used as the RSS layer bucket ID.
   5917 			 * We look up the queue ID -> RSS CPU ID and select
   5918 			 * that.
   5919 			 */
   5920 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5921 			CPU_SETOF(cpu_id, &cpu_mask);
   5922 #endif
   5923 		} else {
   5924 			/*
   5925 			 * Bind the MSI-X vector, and thus the
   5926 			 * rings to the corresponding CPU.
   5927 			 *
   5928 			 * This just happens to match the default RSS
   5929 			 * round-robin bucket -> queue -> CPU allocation.
   5930 			 */
   5931 			if (adapter->num_queues > 1)
   5932 				cpu_id = i;
   5933 		}
   5934 		/* Round-robin affinity */
   5935 		kcpuset_zero(affinity);
   5936 		kcpuset_set(affinity, cpu_id % ncpu);
   5937 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5938 		    NULL);
   5939 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5940 		    intrstr);
   5941 		if (error == 0) {
   5942 #if 1 /* def IXGBE_DEBUG */
   5943 #ifdef	RSS
    5944 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
    5945 			    cpu_id % ncpu);
    5946 #else
    5947 			aprint_normal(", bound queue %d to CPU %d", i,
    5948 			    cpu_id % ncpu);
   5949 #endif
   5950 #endif /* IXGBE_DEBUG */
   5951 		}
   5952 		aprint_normal("\n");
   5953 
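         		/*
         		 * Deferred transmit starts and per-queue RX/TX cleanup
         		 * run in soft interrupt context.
         		 */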
   5954 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   5955 			txr->txr_si = softint_establish(
   5956 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5957 				ixgbe_deferred_mq_start, txr);
   5958 			if (txr->txr_si == NULL) {
   5959 				aprint_error_dev(dev,
   5960 				    "couldn't establish software interrupt\n");
   5961 				error = ENXIO;
   5962 				goto err_out;
   5963 			}
   5964 		}
   5965 		que->que_si
   5966 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5967 			ixgbe_handle_que, que);
   5968 		if (que->que_si == NULL) {
   5969 			aprint_error_dev(dev,
   5970 			    "couldn't establish software interrupt\n");
   5971 			error = ENXIO;
   5972 			goto err_out;
   5973 		}
   5974 	}
   5975 
    5976 	/* The remaining vector handles link state changes. */
   5977 	cpu_id++;
   5978 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5979 	adapter->vector = vector;
   5980 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5981 	    sizeof(intrbuf));
   5982 #ifdef IXGBE_MPSAFE
   5983 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5984 	    true);
   5985 #endif
   5986 	/* Set the link handler function */
   5987 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5988 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   5989 	    intr_xname);
   5990 	if (adapter->osdep.ihs[vector] == NULL) {
   5991 		adapter->res = NULL;
   5992 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   5993 		error = ENXIO;
   5994 		goto err_out;
   5995 	}
   5996 	/* Round-robin affinity */
   5997 	kcpuset_zero(affinity);
   5998 	kcpuset_set(affinity, cpu_id % ncpu);
   5999 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6000 	    NULL);
   6001 
   6002 	aprint_normal_dev(dev,
   6003 	    "for link, interrupting at %s", intrstr);
   6004 	if (error == 0)
   6005 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6006 	else
   6007 		aprint_normal("\n");
   6008 
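         	/*
         	 * With SR-IOV, mailbox messages from virtual functions are
         	 * also handled in soft interrupt context.
         	 */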
   6009 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6010 		adapter->mbx_si =
   6011 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6012 			ixgbe_handle_mbx, adapter);
   6013 		if (adapter->mbx_si == NULL) {
   6014 			aprint_error_dev(dev,
   6015 			    "could not establish software interrupts\n");
   6016 
   6017 			error = ENXIO;
   6018 			goto err_out;
   6019 		}
   6020 	}
   6021 
   6022 	kcpuset_destroy(affinity);
   6023 	aprint_normal_dev(dev,
   6024 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6025 
   6026 	return (0);
   6027 
   6028 err_out:
   6029 	kcpuset_destroy(affinity);
   6030 	ixgbe_free_softint(adapter);
   6031 	ixgbe_free_pciintr_resources(adapter);
   6032 	return (error);
   6033 } /* ixgbe_allocate_msix */
   6034 
   6035 /************************************************************************
   6036  * ixgbe_configure_interrupts
   6037  *
   6038  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6039  *   This will also depend on user settings.
   6040  ************************************************************************/
   6041 static int
   6042 ixgbe_configure_interrupts(struct adapter *adapter)
   6043 {
   6044 	device_t dev = adapter->dev;
   6045 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6046 	int want, queues, msgs;
   6047 
   6048 	/* Default to 1 queue if MSI-X setup fails */
   6049 	adapter->num_queues = 1;
   6050 
   6051 	/* Override by tuneable */
   6052 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6053 		goto msi;
   6054 
   6055 	/*
    6056 	 * NetBSD only: Use single vector MSI when the number of CPUs is 1
    6057 	 * to save an interrupt slot.
   6058 	 */
   6059 	if (ncpu == 1)
   6060 		goto msi;
   6061 
   6062 	/* First try MSI-X */
   6063 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6064 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6065 	if (msgs < 2)
   6066 		goto msi;
   6067 
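         	/* Non-NULL placeholder noting that MSI-X will be used (XXX). */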
   6068 	adapter->msix_mem = (void *)1; /* XXX */
   6069 
   6070 	/* Figure out a reasonable auto config value */
   6071 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6072 
   6073 #ifdef	RSS
   6074 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6075 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6076 		queues = min(queues, rss_getnumbuckets());
   6077 #endif
   6078 	if (ixgbe_num_queues > queues) {
    6079 		aprint_error_dev(adapter->dev,
         		    "ixgbe_num_queues (%d) is too large, using %d instead.\n",
         		    ixgbe_num_queues, queues);
   6080 		ixgbe_num_queues = queues;
   6081 	}
   6082 
   6083 	if (ixgbe_num_queues != 0)
   6084 		queues = ixgbe_num_queues;
   6085 	else
   6086 		queues = min(queues,
   6087 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6088 
   6089 	/* reflect correct sysctl value */
   6090 	ixgbe_num_queues = queues;
   6091 
   6092 	/*
   6093 	 * Want one vector (RX/TX pair) per queue
   6094 	 * plus an additional for Link.
   6095 	 */
   6096 	want = queues + 1;
   6097 	if (msgs >= want)
   6098 		msgs = want;
   6099 	else {
    6100 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
    6101 		    "%d vectors available but %d wanted!\n",
    6102 		    msgs, want);
   6103 		goto msi;
   6104 	}
   6105 	adapter->num_queues = queues;
   6106 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6107 	return (0);
   6108 
   6109 	/*
   6110 	 * MSI-X allocation failed or provided us with
    6111 	 * fewer vectors than needed. Free MSI-X resources
   6112 	 * and we'll try enabling MSI.
   6113 	 */
   6114 msi:
   6115 	/* Without MSI-X, some features are no longer supported */
   6116 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6117 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6118 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6119 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6120 
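         	/* Try a single MSI vector; fall back to INTx if none is available. */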
    6121 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
    6122 	adapter->msix_mem = NULL; /* XXX */
   6125 	if (msgs != 0) {
   6126 		msgs = 1;
   6127 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6128 		return (0);
   6129 	}
   6130 
   6131 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6132 		aprint_error_dev(dev,
   6133 		    "Device does not support legacy interrupts.\n");
   6134 		return 1;
   6135 	}
   6136 
   6137 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6138 
   6139 	return (0);
   6140 } /* ixgbe_configure_interrupts */
   6141 
   6142 
   6143 /************************************************************************
   6144  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6145  *
   6146  *   Done outside of interrupt context since the driver might sleep
   6147  ************************************************************************/
   6148 static void
   6149 ixgbe_handle_link(void *context)
   6150 {
   6151 	struct adapter  *adapter = context;
   6152 	struct ixgbe_hw *hw = &adapter->hw;
   6153 
   6154 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6155 	ixgbe_update_link_status(adapter);
   6156 
   6157 	/* Re-enable link interrupts */
   6158 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6159 } /* ixgbe_handle_link */
   6160 
   6161 /************************************************************************
   6162  * ixgbe_rearm_queues
   6163  ************************************************************************/
   6164 static void
   6165 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6166 {
   6167 	u32 mask;
   6168 
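         	/*
         	 * 82598 has a single 32-bit EICS register, while newer MACs
         	 * split the 64 queue bits across EICS_EX(0) and EICS_EX(1).
         	 */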
   6169 	switch (adapter->hw.mac.type) {
   6170 	case ixgbe_mac_82598EB:
   6171 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6172 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6173 		break;
   6174 	case ixgbe_mac_82599EB:
   6175 	case ixgbe_mac_X540:
   6176 	case ixgbe_mac_X550:
   6177 	case ixgbe_mac_X550EM_x:
   6178 	case ixgbe_mac_X550EM_a:
   6179 		mask = (queues & 0xFFFFFFFF);
   6180 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6181 		mask = (queues >> 32);
   6182 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6183 		break;
   6184 	default:
   6185 		break;
   6186 	}
   6187 } /* ixgbe_rearm_queues */
   6188