      1 /* $NetBSD: ixgbe.c,v 1.122 2018/02/16 04:50:19 knakahara Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
     88  *   Used by probe to select devices to load on
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void      ixgbe_free_softint(struct adapter *);
    176 static void	ixgbe_get_slot_info(struct adapter *);
    177 static int      ixgbe_allocate_msix(struct adapter *,
    178 		    const struct pci_attach_args *);
    179 static int      ixgbe_allocate_legacy(struct adapter *,
    180 		    const struct pci_attach_args *);
    181 static int      ixgbe_configure_interrupts(struct adapter *);
    182 static void	ixgbe_free_pciintr_resources(struct adapter *);
    183 static void	ixgbe_free_pci_resources(struct adapter *);
    184 static void	ixgbe_local_timer(void *);
    185 static void	ixgbe_local_timer1(void *);
    186 static int	ixgbe_setup_interface(device_t, struct adapter *);
    187 static void	ixgbe_config_gpie(struct adapter *);
    188 static void	ixgbe_config_dmac(struct adapter *);
    189 static void	ixgbe_config_delay_values(struct adapter *);
    190 static void	ixgbe_config_link(struct adapter *);
    191 static void	ixgbe_check_wol_support(struct adapter *);
    192 static int	ixgbe_setup_low_power_mode(struct adapter *);
    193 static void	ixgbe_rearm_queues(struct adapter *, u64);
    194 
    195 static void     ixgbe_initialize_transmit_units(struct adapter *);
    196 static void     ixgbe_initialize_receive_units(struct adapter *);
    197 static void	ixgbe_enable_rx_drop(struct adapter *);
    198 static void	ixgbe_disable_rx_drop(struct adapter *);
    199 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    200 
    201 static void     ixgbe_enable_intr(struct adapter *);
    202 static void     ixgbe_disable_intr(struct adapter *);
    203 static void     ixgbe_update_stats_counters(struct adapter *);
    204 static void     ixgbe_set_promisc(struct adapter *);
    205 static void     ixgbe_set_multi(struct adapter *);
    206 static void     ixgbe_update_link_status(struct adapter *);
    207 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    208 static void	ixgbe_configure_ivars(struct adapter *);
    209 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    210 
    211 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    212 #if 0
    213 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    214 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    215 #endif
    216 
    217 static void	ixgbe_add_device_sysctls(struct adapter *);
    218 static void     ixgbe_add_hw_stats(struct adapter *);
    219 static void	ixgbe_clear_evcnt(struct adapter *);
    220 static int	ixgbe_set_flowcntl(struct adapter *, int);
    221 static int	ixgbe_set_advertise(struct adapter *, int);
    222 static int      ixgbe_get_advertise(struct adapter *);
    223 
    224 /* Sysctl handlers */
    225 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    226 		     const char *, int *, int);
    227 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    229 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    231 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    232 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    233 #ifdef IXGBE_DEBUG
    234 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    235 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    236 #endif
    237 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    240 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    241 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    242 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    243 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    244 
    245 /* Support for pluggable optic modules */
    246 static bool	ixgbe_sfp_probe(struct adapter *);
    247 
    248 /* Legacy (single vector) interrupt handler */
    249 static int	ixgbe_legacy_irq(void *);
    250 
    251 /* The MSI/MSI-X Interrupt handlers */
    252 static int	ixgbe_msix_que(void *);
    253 static int	ixgbe_msix_link(void *);
    254 
    255 /* Software interrupts for deferred work */
    256 static void	ixgbe_handle_que(void *);
    257 static void	ixgbe_handle_link(void *);
    258 static void	ixgbe_handle_msf(void *);
    259 static void	ixgbe_handle_mod(void *);
    260 static void	ixgbe_handle_phy(void *);
    261 
    262 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    263 
    264 /************************************************************************
    265  *  NetBSD Device Interface Entry Points
    266  ************************************************************************/
    267 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    268     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    269     DVF_DETACH_SHUTDOWN);
    270 
    271 #if 0
    272 devclass_t ix_devclass;
    273 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    274 
    275 MODULE_DEPEND(ix, pci, 1, 1, 1);
    276 MODULE_DEPEND(ix, ether, 1, 1, 1);
    277 #ifdef DEV_NETMAP
    278 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    279 #endif
    280 #endif
    281 
    282 /*
    283  * TUNEABLE PARAMETERS:
    284  */
    285 
    286 /*
    287  * AIM: Adaptive Interrupt Moderation,
    288  * which means that the interrupt rate
    289  * is varied over time based on the
    290  * traffic seen on that interrupt vector.
    291  */
    292 static bool ixgbe_enable_aim = true;
    293 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
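        /*
         * Note: on NetBSD the FreeBSD-style SYSCTL_INT() declarations below
         * are compiled out by the empty macro above, so these tunables remain
         * plain globals; per-device sysctl nodes are instead created later via
         * ixgbe_add_device_sysctls() and ixgbe_set_sysctl_value().
         */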
    294 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    295     "Enable adaptive interrupt moderation");
    296 
    297 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    298 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    299     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    300 
    301 /* How many packets rxeof tries to clean at a time */
    302 static int ixgbe_rx_process_limit = 256;
    303 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    304     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    305 
    306 /* How many packets txeof tries to clean at a time */
    307 static int ixgbe_tx_process_limit = 256;
    308 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    309     &ixgbe_tx_process_limit, 0,
    310     "Maximum number of sent packets to process at a time, -1 means unlimited");
    311 
    312 /* Flow control setting, default to full */
    313 static int ixgbe_flow_control = ixgbe_fc_full;
    314 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    315     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    316 
    317 /*
    318  * Smart speed setting, default to on.
    319  * This only works as a compile-time option
    320  * right now, as it is applied during attach; set
    321  * this to 'ixgbe_smart_speed_off' to
    322  * disable.
    323  */
    324 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    325 
    326 /*
    327  * MSI-X should be the default for best performance,
    328  * but this allows it to be forced off for testing.
    329  */
    330 static int ixgbe_enable_msix = 1;
    331 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    332     "Enable MSI-X interrupts");
    333 
    334 /*
    335  * Number of queues. If set to 0,
    336  * it autoconfigures based on the
    337  * number of CPUs, with a maximum of 8. This
    338  * can be overridden manually here.
    339  */
    340 static int ixgbe_num_queues = 0;
    341 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    342     "Number of queues to configure, 0 indicates autoconfigure");
    343 
    344 /*
    345  * Number of TX descriptors per ring,
    346  * set higher than RX as this seems to be
    347  * the better performing choice.
    348  */
    349 static int ixgbe_txd = PERFORM_TXD;
    350 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    351     "Number of transmit descriptors per queue");
    352 
    353 /* Number of RX descriptors per ring */
    354 static int ixgbe_rxd = PERFORM_RXD;
    355 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    356     "Number of receive descriptors per queue");
    357 
    358 /*
    359  * Setting this on allows the use
    360  * of unsupported SFP+ modules; note that
    361  * in doing so you are on your own :)
    362  */
    363 static int allow_unsupported_sfp = false;
    364 #define TUNABLE_INT(__x, __y)
    365 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    366 
    367 /*
    368  * Not sure if Flow Director is fully baked,
    369  * so we'll default to turning it off.
    370  */
    371 static int ixgbe_enable_fdir = 0;
    372 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    373     "Enable Flow Director");
    374 
    375 /* Legacy Transmit (single queue) */
    376 static int ixgbe_enable_legacy_tx = 0;
    377 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    378     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    379 
    380 /* Receive-Side Scaling */
    381 static int ixgbe_enable_rss = 1;
    382 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    383     "Enable Receive-Side Scaling (RSS)");
    384 
    385 /* Keep a running tab on the number of ports for sanity checking */
    386 static int ixgbe_total_ports;
    387 
    388 #if 0
    389 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    390 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    391 #endif
    392 
    393 #ifdef NET_MPSAFE
    394 #define IXGBE_MPSAFE		1
    395 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    396 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    397 #else
    398 #define IXGBE_CALLOUT_FLAGS	0
    399 #define IXGBE_SOFTINFT_FLAGS	0
    400 #endif
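        /*
         * With NET_MPSAFE, the callout and softints are established with the
         * MPSAFE flags so their handlers run without the kernel lock;
         * otherwise they fall back to the default, KERNEL_LOCK-protected
         * behaviour.
         */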
    401 
    402 /************************************************************************
    403  * ixgbe_initialize_rss_mapping
    404  ************************************************************************/
    405 static void
    406 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    407 {
    408 	struct ixgbe_hw	*hw = &adapter->hw;
    409 	u32             reta = 0, mrqc, rss_key[10];
    410 	int             queue_id, table_size, index_mult;
    411 	int             i, j;
    412 	u32             rss_hash_config;
    413 
    414 	/* Force use of the default RSS key. */
    415 #ifdef __NetBSD__
    416 	rss_getkey((uint8_t *) &rss_key);
    417 #else
    418 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    419 		/* Fetch the configured RSS key */
    420 		rss_getkey((uint8_t *) &rss_key);
    421 	} else {
    422 		/* set up random bits */
    423 		cprng_fast(&rss_key, sizeof(rss_key));
    424 	}
    425 #endif
    426 
    427 	/* Set multiplier for RETA setup and table size based on MAC */
    428 	index_mult = 0x1;
    429 	table_size = 128;
    430 	switch (adapter->hw.mac.type) {
    431 	case ixgbe_mac_82598EB:
    432 		index_mult = 0x11;
    433 		break;
    434 	case ixgbe_mac_X550:
    435 	case ixgbe_mac_X550EM_x:
    436 	case ixgbe_mac_X550EM_a:
    437 		table_size = 512;
    438 		break;
    439 	default:
    440 		break;
    441 	}
    442 
    443 	/* Set up the redirection table */
    444 	for (i = 0, j = 0; i < table_size; i++, j++) {
    445 		if (j == adapter->num_queues)
    446 			j = 0;
    447 
    448 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    449 			/*
    450 			 * Fetch the RSS bucket id for the given indirection
    451 			 * entry. Cap it at the number of configured buckets
    452 			 * (which is num_queues.)
    453 			 */
    454 			queue_id = rss_get_indirection_to_bucket(i);
    455 			queue_id = queue_id % adapter->num_queues;
    456 		} else
    457 			queue_id = (j * index_mult);
    458 
    459 		/*
    460 		 * The low 8 bits are for hash value (n+0);
    461 		 * The next 8 bits are for hash value (n+1), etc.
    462 		 */
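        		/*
        		 * Illustrative example (assuming 4 queues and an index
        		 * multiplier of 1): entries 0..3 accumulate to
        		 * reta == 0x03020100, which is flushed to RETA(0) below;
        		 * entries 4..7 then fill RETA(1), and so on.
        		 */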
    463 		reta = reta >> 8;
    464 		reta = reta | (((uint32_t) queue_id) << 24);
    465 		if ((i & 3) == 3) {
    466 			if (i < 128)
    467 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    468 			else
    469 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    470 				    reta);
    471 			reta = 0;
    472 		}
    473 	}
    474 
    475 	/* Now fill our hash function seeds */
    476 	for (i = 0; i < 10; i++)
    477 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    478 
    479 	/* Perform hash on these packet types */
    480 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    481 		rss_hash_config = rss_gethashconfig();
    482 	else {
    483 		/*
    484 		 * Disable UDP - IP fragments aren't currently being handled
    485 		 * and so we end up with a mix of 2-tuple and 4-tuple
    486 		 * traffic.
    487 		 */
    488 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    489 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    490 		                | RSS_HASHTYPE_RSS_IPV6
    491 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    492 		                | RSS_HASHTYPE_RSS_IPV6_EX
    493 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    494 	}
    495 
    496 	mrqc = IXGBE_MRQC_RSSEN;
    497 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    498 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    499 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    500 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    501 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    502 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    503 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    504 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    505 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    506 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    507 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    508 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    509 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    510 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    511 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    512 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    513 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    514 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    515 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    516 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    517 } /* ixgbe_initialize_rss_mapping */
    518 
    519 /************************************************************************
    520  * ixgbe_initialize_receive_units - Setup receive registers and features.
    521  ************************************************************************/
    522 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    523 
    524 static void
    525 ixgbe_initialize_receive_units(struct adapter *adapter)
    526 {
    527 	struct	rx_ring	*rxr = adapter->rx_rings;
    528 	struct ixgbe_hw	*hw = &adapter->hw;
    529 	struct ifnet    *ifp = adapter->ifp;
    530 	int             i, j;
    531 	u32		bufsz, fctrl, srrctl, rxcsum;
    532 	u32		hlreg;
    533 
    534 	/*
    535 	 * Make sure receives are disabled while
    536 	 * setting up the descriptor ring
    537 	 */
    538 	ixgbe_disable_rx(hw);
    539 
    540 	/* Enable broadcasts */
    541 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    542 	fctrl |= IXGBE_FCTRL_BAM;
    543 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    544 		fctrl |= IXGBE_FCTRL_DPF;
    545 		fctrl |= IXGBE_FCTRL_PMCF;
    546 	}
    547 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    548 
    549 	/* Set for Jumbo Frames? */
    550 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    551 	if (ifp->if_mtu > ETHERMTU)
    552 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    553 	else
    554 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    555 
    556 #ifdef DEV_NETMAP
    557 	/* CRC stripping is conditional in Netmap */
    558 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    559 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    560 	    !ix_crcstrip)
    561 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    562 	else
    563 #endif /* DEV_NETMAP */
    564 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    565 
    566 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    567 
    568 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    569 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
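        	/*
        	 * SRRCTL.BSIZEPKT is programmed in units of
        	 * (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes (1 KB), so e.g. a
        	 * standard 2 KB receive mbuf cluster yields bufsz == 2.
        	 */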
    570 
    571 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    572 		u64 rdba = rxr->rxdma.dma_paddr;
    573 		u32 tqsmreg, reg;
    574 		int regnum = i / 4;	/* 1 register per 4 queues */
    575 		int regshift = i % 4;	/* 8 bits per 1 queue */
    576 		j = rxr->me;
    577 
    578 		/* Setup the Base and Length of the Rx Descriptor Ring */
    579 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    580 		    (rdba & 0x00000000ffffffffULL));
    581 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    582 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    583 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    584 
    585 		/* Set up the SRRCTL register */
    586 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    587 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    588 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    589 		srrctl |= bufsz;
    590 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    591 
    592 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    593 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    594 		reg &= ~(0x000000ff << (regshift * 8));
    595 		reg |= i << (regshift * 8);
    596 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
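        		/*
        		 * E.g. for i == 5 this sets bits 15:8 of RQSMR(1)
        		 * (regnum == 1, regshift == 1) to 5, mapping queue 5 to
        		 * statistics counter set 5.
        		 */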
    597 
    598 		/*
    599 		 * Set TQSMR/TQSM (Transmit Queue Statistic Mapping) register.
    600 		 * The register location for queues 0...7 differs between
    601 		 * 82598 and newer MACs.
    602 		 */
    603 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    604 			tqsmreg = IXGBE_TQSMR(regnum);
    605 		else
    606 			tqsmreg = IXGBE_TQSM(regnum);
    607 		reg = IXGBE_READ_REG(hw, tqsmreg);
    608 		reg &= ~(0x000000ff << (regshift * 8));
    609 		reg |= i << (regshift * 8);
    610 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    611 
    612 		/*
    613 		 * Set DROP_EN iff we have no flow control and >1 queue.
    614 		 * Note that srrctl was cleared shortly before during reset,
    615 		 * so we do not need to clear the bit, but do it just in case
    616 		 * this code is moved elsewhere.
    617 		 */
    618 		if (adapter->num_queues > 1 &&
    619 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    620 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    621 		} else {
    622 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    623 		}
    624 
    625 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    626 
    627 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    628 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    629 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    630 
    631 		/* Set the driver rx tail address */
    632 		rxr->tail =  IXGBE_RDT(rxr->me);
    633 	}
    634 
    635 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    636 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    637 		            | IXGBE_PSRTYPE_UDPHDR
    638 		            | IXGBE_PSRTYPE_IPV4HDR
    639 		            | IXGBE_PSRTYPE_IPV6HDR;
    640 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    641 	}
    642 
    643 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    644 
    645 	ixgbe_initialize_rss_mapping(adapter);
    646 
    647 	if (adapter->num_queues > 1) {
    648 		/* RSS and RX IPP Checksum are mutually exclusive */
    649 		rxcsum |= IXGBE_RXCSUM_PCSD;
    650 	}
    651 
    652 	if (ifp->if_capenable & IFCAP_RXCSUM)
    653 		rxcsum |= IXGBE_RXCSUM_PCSD;
    654 
    655 	/* This is useful for calculating UDP/IP fragment checksums */
    656 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    657 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    658 
    659 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    660 
    661 	return;
    662 } /* ixgbe_initialize_receive_units */
    663 
    664 /************************************************************************
    665  * ixgbe_initialize_transmit_units - Enable transmit units.
    666  ************************************************************************/
    667 static void
    668 ixgbe_initialize_transmit_units(struct adapter *adapter)
    669 {
    670 	struct tx_ring  *txr = adapter->tx_rings;
    671 	struct ixgbe_hw	*hw = &adapter->hw;
    672 
    673 	/* Setup the Base and Length of the Tx Descriptor Ring */
    674 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    675 		u64 tdba = txr->txdma.dma_paddr;
    676 		u32 txctrl = 0;
    677 		int j = txr->me;
    678 
    679 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    680 		    (tdba & 0x00000000ffffffffULL));
    681 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    682 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    683 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    684 
    685 		/* Setup the HW Tx Head and Tail descriptor pointers */
    686 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    687 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    688 
    689 		/* Cache the tail address */
    690 		txr->tail = IXGBE_TDT(j);
    691 
    692 		/* Disable Head Writeback */
    693 		/*
    694 		 * Note: for X550 series devices, these registers are actually
    695 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    696 		 * fields remain the same.
    697 		 */
    698 		switch (hw->mac.type) {
    699 		case ixgbe_mac_82598EB:
    700 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    701 			break;
    702 		default:
    703 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    704 			break;
    705 		}
    706 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    707 		switch (hw->mac.type) {
    708 		case ixgbe_mac_82598EB:
    709 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    710 			break;
    711 		default:
    712 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    713 			break;
    714 		}
    715 
    716 	}
    717 
    718 	if (hw->mac.type != ixgbe_mac_82598EB) {
    719 		u32 dmatxctl, rttdcs;
    720 
    721 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    722 		dmatxctl |= IXGBE_DMATXCTL_TE;
    723 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    724 		/* Disable arbiter to set MTQC */
    725 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    726 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    727 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    728 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    729 		    ixgbe_get_mtqc(adapter->iov_mode));
    730 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    731 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    732 	}
    733 
    734 	return;
    735 } /* ixgbe_initialize_transmit_units */
    736 
    737 /************************************************************************
    738  * ixgbe_attach - Device initialization routine
    739  *
    740  *   Called when the driver is being loaded.
    741  *   Identifies the type of hardware, allocates all resources
    742  *   and initializes the hardware.
    743  *
    744  *   return 0 on success, positive on failure
    745  ************************************************************************/
    746 static void
    747 ixgbe_attach(device_t parent, device_t dev, void *aux)
    748 {
    749 	struct adapter  *adapter;
    750 	struct ixgbe_hw *hw;
    751 	int             error = -1;
    752 	u32		ctrl_ext;
    753 	u16		high, low, nvmreg;
    754 	pcireg_t	id, subid;
    755 	ixgbe_vendor_info_t *ent;
    756 	struct pci_attach_args *pa = aux;
    757 	const char *str;
    758 	char buf[256];
    759 
    760 	INIT_DEBUGOUT("ixgbe_attach: begin");
    761 
    762 	/* Allocate, clear, and link in our adapter structure */
    763 	adapter = device_private(dev);
    764 	adapter->hw.back = adapter;
    765 	adapter->dev = dev;
    766 	hw = &adapter->hw;
    767 	adapter->osdep.pc = pa->pa_pc;
    768 	adapter->osdep.tag = pa->pa_tag;
    769 	if (pci_dma64_available(pa))
    770 		adapter->osdep.dmat = pa->pa_dmat64;
    771 	else
    772 		adapter->osdep.dmat = pa->pa_dmat;
    773 	adapter->osdep.attached = false;
    774 
    775 	ent = ixgbe_lookup(pa);
    776 
    777 	KASSERT(ent != NULL);
    778 
    779 	aprint_normal(": %s, Version - %s\n",
    780 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    781 
    782 	/* Core Lock Init*/
    783 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    784 
    785 	/* Set up the timer callout */
    786 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    787 
    788 	/* Determine hardware revision */
    789 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    790 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    791 
    792 	hw->vendor_id = PCI_VENDOR(id);
    793 	hw->device_id = PCI_PRODUCT(id);
    794 	hw->revision_id =
    795 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    796 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    797 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    798 
    799 	/*
    800 	 * Make sure BUSMASTER is set
    801 	 */
    802 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    803 
    804 	/* Do base PCI setup - map BAR0 */
    805 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    806 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    807 		error = ENXIO;
    808 		goto err_out;
    809 	}
    810 
    811 	/* let hardware know driver is loaded */
    812 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    813 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    814 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    815 
    816 	/*
    817 	 * Initialize the shared code
    818 	 */
    819 	if (ixgbe_init_shared_code(hw)) {
    820 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    821 		error = ENXIO;
    822 		goto err_out;
    823 	}
    824 
    825 	switch (hw->mac.type) {
    826 	case ixgbe_mac_82598EB:
    827 		str = "82598EB";
    828 		break;
    829 	case ixgbe_mac_82599EB:
    830 		str = "82599EB";
    831 		break;
    832 	case ixgbe_mac_X540:
    833 		str = "X540";
    834 		break;
    835 	case ixgbe_mac_X550:
    836 		str = "X550";
    837 		break;
    838 	case ixgbe_mac_X550EM_x:
    839 		str = "X550EM";
    840 		break;
    841 	case ixgbe_mac_X550EM_a:
    842 		str = "X550EM A";
    843 		break;
    844 	default:
    845 		str = "Unknown";
    846 		break;
    847 	}
    848 	aprint_normal_dev(dev, "device %s\n", str);
    849 
    850 	if (hw->mbx.ops.init_params)
    851 		hw->mbx.ops.init_params(hw);
    852 
    853 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    854 
    855 	/* Pick up the 82599 settings */
    856 	if (hw->mac.type != ixgbe_mac_82598EB) {
    857 		hw->phy.smart_speed = ixgbe_smart_speed;
    858 		adapter->num_segs = IXGBE_82599_SCATTER;
    859 	} else
    860 		adapter->num_segs = IXGBE_82598_SCATTER;
    861 
    862 	hw->mac.ops.set_lan_id(hw);
    863 	ixgbe_init_device_features(adapter);
    864 
    865 	if (ixgbe_configure_interrupts(adapter)) {
    866 		error = ENXIO;
    867 		goto err_out;
    868 	}
    869 
    870 	/* Allocate multicast array memory. */
    871 	adapter->mta = malloc(sizeof(*adapter->mta) *
    872 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    873 	if (adapter->mta == NULL) {
    874 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    875 		error = ENOMEM;
    876 		goto err_out;
    877 	}
    878 
    879 	/* Enable WoL (if supported) */
    880 	ixgbe_check_wol_support(adapter);
    881 
    882 	/* Verify adapter fan is still functional (if applicable) */
    883 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    884 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    885 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    886 	}
    887 
    888 	/* Ensure SW/FW semaphore is free */
    889 	ixgbe_init_swfw_semaphore(hw);
    890 
    891 	/* Enable EEE power saving */
    892 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    893 		hw->mac.ops.setup_eee(hw, TRUE);
    894 
    895 	/* Set an initial default flow control value */
    896 	hw->fc.requested_mode = ixgbe_flow_control;
    897 
    898 	/* Sysctls for limiting the amount of work done in the taskqueues */
    899 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    900 	    "max number of rx packets to process",
    901 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    902 
    903 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    904 	    "max number of tx packets to process",
    905 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    906 
    907 	/* Do descriptor calc and sanity checks */
    908 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    909 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    910 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    911 		adapter->num_tx_desc = DEFAULT_TXD;
    912 	} else
    913 		adapter->num_tx_desc = ixgbe_txd;
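        	/*
        	 * (Assuming the usual 128-byte DBA_ALIGN and 16-byte advanced
        	 * descriptors, the alignment check above effectively requires the
        	 * ring length to be a multiple of 8 descriptors.)
        	 */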
    914 
    915 	/*
    916 	 * With many RX rings it is easy to exceed the
    917 	 * system mbuf allocation. Tuning nmbclusters
    918 	 * can alleviate this.
    919 	 */
    920 	if (nmbclusters > 0) {
    921 		int s;
    922 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    923 		if (s > nmbclusters) {
    924 			aprint_error_dev(dev, "RX Descriptors exceed "
    925 			    "system mbuf max, using default instead!\n");
    926 			ixgbe_rxd = DEFAULT_RXD;
    927 		}
    928 	}
    929 
    930 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    931 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    932 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    933 		adapter->num_rx_desc = DEFAULT_RXD;
    934 	} else
    935 		adapter->num_rx_desc = ixgbe_rxd;
    936 
    937 	/* Allocate our TX/RX Queues */
    938 	if (ixgbe_allocate_queues(adapter)) {
    939 		error = ENOMEM;
    940 		goto err_out;
    941 	}
    942 
    943 	hw->phy.reset_if_overtemp = TRUE;
    944 	error = ixgbe_reset_hw(hw);
    945 	hw->phy.reset_if_overtemp = FALSE;
    946 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    947 		/*
    948 		 * No optics in this port, set up
    949 		 * so the timer routine will probe
    950 		 * for later insertion.
    951 		 */
    952 		adapter->sfp_probe = TRUE;
    953 		error = IXGBE_SUCCESS;
    954 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    955 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    956 		error = EIO;
    957 		goto err_late;
    958 	} else if (error) {
    959 		aprint_error_dev(dev, "Hardware initialization failed\n");
    960 		error = EIO;
    961 		goto err_late;
    962 	}
    963 
    964 	/* Make sure we have a good EEPROM before we read from it */
    965 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    966 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    967 		error = EIO;
    968 		goto err_late;
    969 	}
    970 
    971 	aprint_normal("%s:", device_xname(dev));
    972 	/* NVM Image Version */
    973 	switch (hw->mac.type) {
    974 	case ixgbe_mac_X540:
    975 	case ixgbe_mac_X550EM_a:
    976 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    977 		if (nvmreg == 0xffff)
    978 			break;
    979 		high = (nvmreg >> 12) & 0x0f;
    980 		low = (nvmreg >> 4) & 0xff;
    981 		id = nvmreg & 0x0f;
    982 		aprint_normal(" NVM Image Version %u.", high);
    983 		if (hw->mac.type == ixgbe_mac_X540)
    984 			str = "%x";
    985 		else
    986 			str = "%02x";
    987 		aprint_normal(str, low);
    988 		aprint_normal(" ID 0x%x,", id);
    989 		break;
    990 	case ixgbe_mac_X550EM_x:
    991 	case ixgbe_mac_X550:
    992 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    993 		if (nvmreg == 0xffff)
    994 			break;
    995 		high = (nvmreg >> 12) & 0x0f;
    996 		low = nvmreg & 0xff;
    997 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    998 		break;
    999 	default:
   1000 		break;
   1001 	}
   1002 
   1003 	/* PHY firmware revision */
   1004 	switch (hw->mac.type) {
   1005 	case ixgbe_mac_X540:
   1006 	case ixgbe_mac_X550:
   1007 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1008 		if (nvmreg == 0xffff)
   1009 			break;
   1010 		high = (nvmreg >> 12) & 0x0f;
   1011 		low = (nvmreg >> 4) & 0xff;
   1012 		id = nvmreg & 0x000f;
   1013 		aprint_normal(" PHY FW Revision %u.", high);
   1014 		if (hw->mac.type == ixgbe_mac_X540)
   1015 			str = "%x";
   1016 		else
   1017 			str = "%02x";
   1018 		aprint_normal(str, low);
   1019 		aprint_normal(" ID 0x%x,", id);
   1020 		break;
   1021 	default:
   1022 		break;
   1023 	}
   1024 
   1025 	/* NVM Map version & OEM NVM Image version */
   1026 	switch (hw->mac.type) {
   1027 	case ixgbe_mac_X550:
   1028 	case ixgbe_mac_X550EM_x:
   1029 	case ixgbe_mac_X550EM_a:
   1030 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1031 		if (nvmreg != 0xffff) {
   1032 			high = (nvmreg >> 12) & 0x0f;
   1033 			low = nvmreg & 0x00ff;
   1034 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1035 		}
   1036 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1037 		if (nvmreg != 0xffff) {
   1038 			high = (nvmreg >> 12) & 0x0f;
   1039 			low = nvmreg & 0x00ff;
   1040 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1041 			    low);
   1042 		}
   1043 		break;
   1044 	default:
   1045 		break;
   1046 	}
   1047 
   1048 	/* Print the ETrackID */
   1049 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1050 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1051 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1052 
   1053 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1054 		error = ixgbe_allocate_msix(adapter, pa);
   1055 		if (error) {
   1056 			/* Free allocated queue structures first */
   1057 			ixgbe_free_transmit_structures(adapter);
   1058 			ixgbe_free_receive_structures(adapter);
   1059 			free(adapter->queues, M_DEVBUF);
   1060 
   1061 			/* Fallback to legacy interrupt */
   1062 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1063 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1064 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1065 			adapter->num_queues = 1;
   1066 
   1067 			/* Allocate our TX/RX Queues again */
   1068 			if (ixgbe_allocate_queues(adapter)) {
   1069 				error = ENOMEM;
   1070 				goto err_out;
   1071 			}
   1072 		}
   1073 	}
   1074 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1075 		error = ixgbe_allocate_legacy(adapter, pa);
   1076 	if (error)
   1077 		goto err_late;
   1078 
   1079 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1080 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1081 	    ixgbe_handle_link, adapter);
   1082 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1083 	    ixgbe_handle_mod, adapter);
   1084 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1085 	    ixgbe_handle_msf, adapter);
   1086 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1087 	    ixgbe_handle_phy, adapter);
   1088 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1089 		adapter->fdir_si =
   1090 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1091 			ixgbe_reinit_fdir, adapter);
   1092 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1093 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1094 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1095 		&& (adapter->fdir_si == NULL))) {
   1096 		aprint_error_dev(dev,
   1097 		    "could not establish software interrupts\n");
   1098 		goto err_out;
   1099 	}
   1100 
   1101 	error = ixgbe_start_hw(hw);
   1102 	switch (error) {
   1103 	case IXGBE_ERR_EEPROM_VERSION:
   1104 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1105 		    "LOM.  Please be aware there may be issues associated "
   1106 		    "with your hardware.\nIf you are experiencing problems "
   1107 		    "please contact your Intel or hardware representative "
   1108 		    "who provided you with this hardware.\n");
   1109 		break;
   1110 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1111 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1112 		error = EIO;
   1113 		goto err_late;
   1114 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1115 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1116 		/* falls thru */
   1117 	default:
   1118 		break;
   1119 	}
   1120 
   1121 	/* Setup OS specific network interface */
   1122 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1123 		goto err_late;
   1124 
   1125 	/*
   1126 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+) cage
   1127 	 * and a module inserted, phy.id is not the MII PHY ID but the SFF-8024 ID.
   1128 	 */
   1129 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1130 		uint16_t id1, id2;
   1131 		int oui, model, rev;
   1132 		const char *descr;
   1133 
   1134 		id1 = hw->phy.id >> 16;
   1135 		id2 = hw->phy.id & 0xffff;
   1136 		oui = MII_OUI(id1, id2);
   1137 		model = MII_MODEL(id2);
   1138 		rev = MII_REV(id2);
   1139 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1140 			aprint_normal_dev(dev,
   1141 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1142 			    descr, oui, model, rev);
   1143 		else
   1144 			aprint_normal_dev(dev,
   1145 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1146 			    oui, model, rev);
   1147 	}
   1148 
   1149 	/* Enable the optics for 82599 SFP+ fiber */
   1150 	ixgbe_enable_tx_laser(hw);
   1151 
   1152 	/* Enable power to the phy. */
   1153 	ixgbe_set_phy_power(hw, TRUE);
   1154 
   1155 	/* Initialize statistics */
   1156 	ixgbe_update_stats_counters(adapter);
   1157 
   1158 	/* Check PCIE slot type/speed/width */
   1159 	ixgbe_get_slot_info(adapter);
   1160 
   1161 	/*
   1162 	 * Do time init and sysctl init here, but
   1163 	 * only on the first port of a bypass adapter.
   1164 	 */
   1165 	ixgbe_bypass_init(adapter);
   1166 
   1167 	/* Set an initial dmac value */
   1168 	adapter->dmac = 0;
   1169 	/* Set initial advertised speeds (if applicable) */
   1170 	adapter->advertise = ixgbe_get_advertise(adapter);
   1171 
   1172 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1173 		ixgbe_define_iov_schemas(dev, &error);
   1174 
   1175 	/* Add sysctls */
   1176 	ixgbe_add_device_sysctls(adapter);
   1177 	ixgbe_add_hw_stats(adapter);
   1178 
   1179 	/* For Netmap */
   1180 	adapter->init_locked = ixgbe_init_locked;
   1181 	adapter->stop_locked = ixgbe_stop;
   1182 
   1183 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1184 		ixgbe_netmap_attach(adapter);
   1185 
   1186 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1187 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1188 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1189 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1190 
   1191 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1192 		pmf_class_network_register(dev, adapter->ifp);
   1193 	else
   1194 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1195 
   1196 	INIT_DEBUGOUT("ixgbe_attach: end");
   1197 	adapter->osdep.attached = true;
   1198 
   1199 	return;
   1200 
   1201 err_late:
   1202 	ixgbe_free_transmit_structures(adapter);
   1203 	ixgbe_free_receive_structures(adapter);
   1204 	free(adapter->queues, M_DEVBUF);
   1205 err_out:
   1206 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1207 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1208 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1209 	ixgbe_free_softint(adapter);
   1210 	ixgbe_free_pci_resources(adapter);
   1211 	if (adapter->mta != NULL)
   1212 		free(adapter->mta, M_DEVBUF);
   1213 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1214 
   1215 	return;
   1216 } /* ixgbe_attach */
   1217 
   1218 /************************************************************************
   1219  * ixgbe_check_wol_support
   1220  *
   1221  *   Checks whether the adapter's ports are capable of
   1222  *   Wake On LAN by reading the adapter's NVM.
   1223  *
   1224  *   Sets each port's hw->wol_enabled value depending
   1225  *   on the value read here.
   1226  ************************************************************************/
   1227 static void
   1228 ixgbe_check_wol_support(struct adapter *adapter)
   1229 {
   1230 	struct ixgbe_hw *hw = &adapter->hw;
   1231 	u16             dev_caps = 0;
   1232 
   1233 	/* Find out WoL support for port */
   1234 	adapter->wol_support = hw->wol_enabled = 0;
   1235 	ixgbe_get_device_caps(hw, &dev_caps);
   1236 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1237 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1238 	     hw->bus.func == 0))
   1239 		adapter->wol_support = hw->wol_enabled = 1;
   1240 
   1241 	/* Save initial wake up filter configuration */
   1242 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1243 
   1244 	return;
   1245 } /* ixgbe_check_wol_support */
   1246 
   1247 /************************************************************************
   1248  * ixgbe_setup_interface
   1249  *
   1250  *   Setup networking device structure and register an interface.
   1251  ************************************************************************/
   1252 static int
   1253 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1254 {
   1255 	struct ethercom *ec = &adapter->osdep.ec;
   1256 	struct ifnet   *ifp;
   1257 	int rv;
   1258 
   1259 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1260 
   1261 	ifp = adapter->ifp = &ec->ec_if;
   1262 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1263 	ifp->if_baudrate = IF_Gbps(10);
   1264 	ifp->if_init = ixgbe_init;
   1265 	ifp->if_stop = ixgbe_ifstop;
   1266 	ifp->if_softc = adapter;
   1267 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1268 #ifdef IXGBE_MPSAFE
   1269 	ifp->if_extflags = IFEF_MPSAFE;
   1270 #endif
   1271 	ifp->if_ioctl = ixgbe_ioctl;
   1272 #if __FreeBSD_version >= 1100045
   1273 	/* TSO parameters */
   1274 	ifp->if_hw_tsomax = 65518;
   1275 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1276 	ifp->if_hw_tsomaxsegsize = 2048;
   1277 #endif
   1278 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1279 #if 0
   1280 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1281 #endif
   1282 	} else {
   1283 		ifp->if_transmit = ixgbe_mq_start;
   1284 #if 0
   1285 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1286 #endif
   1287 	}
   1288 	ifp->if_start = ixgbe_legacy_start;
   1289 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1290 	IFQ_SET_READY(&ifp->if_snd);
   1291 
   1292 	rv = if_initialize(ifp);
   1293 	if (rv != 0) {
   1294 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1295 		return rv;
   1296 	}
   1297 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1298 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1299 	/*
   1300 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
   1301 	 * used.
   1302 	 */
   1303 	if_register(ifp);
   1304 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1305 
   1306 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
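        	/*
        	 * E.g. with the default 1500-byte MTU this is 1518 bytes
        	 * (14-byte Ethernet header + payload + 4-byte CRC, VLAN tag
        	 * not included here).
        	 */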
   1307 
   1308 	/*
   1309 	 * Tell the upper layer(s) we support long frames.
   1310 	 */
   1311 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1312 
   1313 	/* Set capability flags */
   1314 	ifp->if_capabilities |= IFCAP_RXCSUM
   1315 			     |  IFCAP_TXCSUM
   1316 			     |  IFCAP_TSOv4
   1317 			     |  IFCAP_TSOv6
   1318 			     |  IFCAP_LRO;
   1319 	ifp->if_capenable = 0;
   1320 
   1321 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1322 	    		    |  ETHERCAP_VLAN_HWCSUM
   1323 	    		    |  ETHERCAP_JUMBO_MTU
   1324 	    		    |  ETHERCAP_VLAN_MTU;
   1325 
   1326 	/* Enable the above capabilities by default */
   1327 	ec->ec_capenable = ec->ec_capabilities;
   1328 
   1329 	/*
   1330 	 * Don't turn this on by default. If VLANs are
   1331 	 * created on another pseudo device (e.g. lagg),
   1332 	 * then VLAN events are not passed through, breaking
   1333 	 * operation, but with HW FILTER off it works. If
   1334 	 * you use VLANs directly on the ixgbe driver you can
   1335 	 * enable this and get full hardware tag filtering.
   1336 	 */
   1337 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1338 
   1339 	/*
   1340 	 * Specify the media types supported by this adapter and register
   1341 	 * callbacks to update media and link information
   1342 	 */
   1343 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1344 	    ixgbe_media_status);
   1345 
   1346 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1347 	ixgbe_add_media_types(adapter);
   1348 
   1349 	/* Set autoselect media by default */
   1350 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1351 
   1352 	return (0);
   1353 } /* ixgbe_setup_interface */
   1354 
   1355 /************************************************************************
   1356  * ixgbe_add_media_types
   1357  ************************************************************************/
   1358 static void
   1359 ixgbe_add_media_types(struct adapter *adapter)
   1360 {
   1361 	struct ixgbe_hw *hw = &adapter->hw;
   1362 	device_t        dev = adapter->dev;
   1363 	u64             layer;
   1364 
   1365 	layer = adapter->phy_layer;
   1366 
   1367 #define	ADD(mm, dd)							\
   1368 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1369 
   1370 	/* Media types with matching NetBSD media defines */
   1371 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1372 		ADD(IFM_10G_T | IFM_FDX, 0);
   1373 	}
   1374 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1375 		ADD(IFM_1000_T | IFM_FDX, 0);
   1376 	}
   1377 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1378 		ADD(IFM_100_TX | IFM_FDX, 0);
   1379 	}
   1380 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1381 		ADD(IFM_10_T | IFM_FDX, 0);
   1382 	}
   1383 
   1384 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1385 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1386 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1387 	}
   1388 
   1389 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1390 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1391 		if (hw->phy.multispeed_fiber) {
   1392 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1393 		}
   1394 	}
   1395 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1396 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1397 		if (hw->phy.multispeed_fiber) {
   1398 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1399 		}
   1400 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1401 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1402 	}
   1403 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1404 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1405 	}
   1406 
   1407 #ifdef IFM_ETH_XTYPE
   1408 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1409 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1410 	}
   1411 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1412 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1413 	}
   1414 #else
   1415 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1416 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1417 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1418 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1419 	}
   1420 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1421 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1422 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1423 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1424 	}
   1425 #endif
   1426 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1427 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1428 	}
   1429 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1430 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1431 	}
   1432 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1433 		ADD(IFM_2500_T | IFM_FDX, 0);
   1434 	}
   1435 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1436 		ADD(IFM_5000_T | IFM_FDX, 0);
   1437 	}
   1438 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1439 		device_printf(dev, "Media supported: 1000baseBX\n");
   1440 	/* XXX no ifmedia_set? */
   1441 
   1442 	ADD(IFM_AUTO, 0);
   1443 
   1444 #undef ADD
   1445 } /* ixgbe_add_media_types */
   1446 
   1447 /************************************************************************
   1448  * ixgbe_is_sfp
   1449  ************************************************************************/
   1450 static inline bool
   1451 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1452 {
   1453 	switch (hw->mac.type) {
   1454 	case ixgbe_mac_82598EB:
   1455 		if (hw->phy.type == ixgbe_phy_nl)
   1456 			return TRUE;
   1457 		return FALSE;
   1458 	case ixgbe_mac_82599EB:
   1459 		switch (hw->mac.ops.get_media_type(hw)) {
   1460 		case ixgbe_media_type_fiber:
   1461 		case ixgbe_media_type_fiber_qsfp:
   1462 			return TRUE;
   1463 		default:
   1464 			return FALSE;
   1465 		}
   1466 	case ixgbe_mac_X550EM_x:
   1467 	case ixgbe_mac_X550EM_a:
   1468 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1469 			return TRUE;
   1470 		return FALSE;
   1471 	default:
   1472 		return FALSE;
   1473 	}
   1474 } /* ixgbe_is_sfp */
   1475 
   1476 /************************************************************************
   1477  * ixgbe_config_link
   1478  ************************************************************************/
   1479 static void
   1480 ixgbe_config_link(struct adapter *adapter)
   1481 {
   1482 	struct ixgbe_hw *hw = &adapter->hw;
   1483 	u32             autoneg, err = 0;
   1484 	bool            sfp, negotiate = false;
   1485 
   1486 	sfp = ixgbe_is_sfp(hw);
   1487 
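         	/*
         	 * SFP+ ports are handled asynchronously via the module
         	 * (mod_si) and multispeed fiber (msf_si) softints; other
         	 * media attempt to set up the link directly below.
         	 */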
   1488 	if (sfp) {
   1489 		if (hw->phy.multispeed_fiber) {
   1490 			hw->mac.ops.setup_sfp(hw);
   1491 			ixgbe_enable_tx_laser(hw);
   1492 			kpreempt_disable();
   1493 			softint_schedule(adapter->msf_si);
   1494 			kpreempt_enable();
   1495 		} else {
   1496 			kpreempt_disable();
   1497 			softint_schedule(adapter->mod_si);
   1498 			kpreempt_enable();
   1499 		}
   1500 	} else {
   1501 		if (hw->mac.ops.check_link)
   1502 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1503 			    &adapter->link_up, FALSE);
   1504 		if (err)
   1505 			goto out;
   1506 		autoneg = hw->phy.autoneg_advertised;
   1507 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1508 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1509 			    &negotiate);
   1510 		if (err)
   1511 			goto out;
   1512 		if (hw->mac.ops.setup_link)
    1513 			err = hw->mac.ops.setup_link(hw, autoneg,
   1514 			    adapter->link_up);
   1515 	}
   1516 out:
   1517 
   1518 	return;
   1519 } /* ixgbe_config_link */
   1520 
   1521 /************************************************************************
   1522  * ixgbe_update_stats_counters - Update board statistics counters.
   1523  ************************************************************************/
   1524 static void
   1525 ixgbe_update_stats_counters(struct adapter *adapter)
   1526 {
   1527 	struct ifnet          *ifp = adapter->ifp;
   1528 	struct ixgbe_hw       *hw = &adapter->hw;
   1529 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1530 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1531 	u64                   total_missed_rx = 0;
   1532 	uint64_t              crcerrs, rlec;
   1533 
   1534 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1535 	stats->crcerrs.ev_count += crcerrs;
   1536 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1537 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1538 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1539 	if (hw->mac.type == ixgbe_mac_X550)
   1540 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1541 
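         	/*
         	 * The hardware provides more per-queue counter registers than
         	 * the driver may have queues configured; fold the extra
         	 * registers into the active queues (modulo num_queues).
         	 */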
   1542 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1543 		int j = i % adapter->num_queues;
   1544 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1545 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1546 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1547 	}
   1548 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1549 		uint32_t mp;
   1550 		int j = i % adapter->num_queues;
   1551 
   1552 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1553 		/* global total per queue */
   1554 		stats->mpc[j].ev_count += mp;
   1555 		/* running comprehensive total for stats display */
   1556 		total_missed_rx += mp;
   1557 
   1558 		if (hw->mac.type == ixgbe_mac_82598EB)
   1559 			stats->rnbc[j].ev_count
   1560 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1561 
   1562 	}
   1563 	stats->mpctotal.ev_count += total_missed_rx;
   1564 
    1565 	/* The datasheet says M[LR]FC are valid only when the link is up at 10Gbps */
   1566 	if ((adapter->link_active == TRUE)
   1567 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1568 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1569 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1570 	}
   1571 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1572 	stats->rlec.ev_count += rlec;
   1573 
   1574 	/* Hardware workaround, gprc counts missed packets */
   1575 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1576 
   1577 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1578 	stats->lxontxc.ev_count += lxon;
   1579 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1580 	stats->lxofftxc.ev_count += lxoff;
   1581 	total = lxon + lxoff;
   1582 
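         	/*
         	 * "total" (XON + XOFF frames) is subtracted from the good
         	 * transmit counters below, apparently to exclude flow control
         	 * frames from the data statistics; each pause frame is assumed
         	 * to be ETHER_MIN_LEN bytes.
         	 */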
   1583 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1584 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1585 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1586 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1587 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1588 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1589 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1590 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1591 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1592 	} else {
   1593 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1594 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1595 		/* 82598 only has a counter in the high register */
   1596 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1597 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1598 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1599 	}
   1600 
   1601 	/*
    1602 	 * Workaround: the hardware mprc counter incorrectly includes
    1603 	 * broadcasts, so for now we subtract those.
   1604 	 */
   1605 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1606 	stats->bprc.ev_count += bprc;
   1607 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1608 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1609 
   1610 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1611 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1612 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1613 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1614 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1615 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1616 
   1617 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1618 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1619 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1620 
   1621 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1622 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1623 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1624 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1625 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1626 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1627 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1628 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1629 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1630 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1631 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1632 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1633 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1634 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1635 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1636 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1637 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1638 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1639 	/* Only read FCOE on 82599 */
   1640 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1641 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1642 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1643 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1644 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1645 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1646 	}
   1647 
   1648 	/* Fill out the OS statistics structure */
   1649 	/*
   1650 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1651 	 * adapter->stats counters. It's required to make ifconfig -z
   1652 	 * (SOICZIFDATA) work.
   1653 	 */
   1654 	ifp->if_collisions = 0;
   1655 
   1656 	/* Rx Errors */
   1657 	ifp->if_iqdrops += total_missed_rx;
   1658 	ifp->if_ierrors += crcerrs + rlec;
   1659 } /* ixgbe_update_stats_counters */
   1660 
   1661 /************************************************************************
   1662  * ixgbe_add_hw_stats
   1663  *
   1664  *   Add sysctl variables, one per statistic, to the system.
   1665  ************************************************************************/
   1666 static void
   1667 ixgbe_add_hw_stats(struct adapter *adapter)
   1668 {
   1669 	device_t dev = adapter->dev;
   1670 	const struct sysctlnode *rnode, *cnode;
   1671 	struct sysctllog **log = &adapter->sysctllog;
   1672 	struct tx_ring *txr = adapter->tx_rings;
   1673 	struct rx_ring *rxr = adapter->rx_rings;
   1674 	struct ixgbe_hw *hw = &adapter->hw;
   1675 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1676 	const char *xname = device_xname(dev);
   1677 
   1678 	/* Driver Statistics */
   1679 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1680 	    NULL, xname, "Handled queue in softint");
   1681 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1682 	    NULL, xname, "Requeued in softint");
   1683 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1684 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1685 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1686 	    NULL, xname, "m_defrag() failed");
   1687 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1688 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1689 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1690 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1691 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1692 	    NULL, xname, "Driver tx dma hard fail other");
   1693 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1694 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1695 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1696 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1697 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1698 	    NULL, xname, "Watchdog timeouts");
   1699 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1700 	    NULL, xname, "TSO errors");
   1701 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1702 	    NULL, xname, "Link MSI-X IRQ Handled");
   1703 
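         	/*
         	 * Per-queue statistics: create a sysctl subtree and a set of
         	 * event counters for each queue pair.
         	 */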
   1704 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1705 		snprintf(adapter->queues[i].evnamebuf,
   1706 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1707 		    xname, i);
   1708 		snprintf(adapter->queues[i].namebuf,
   1709 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1710 
   1711 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1712 			aprint_error_dev(dev, "could not create sysctl root\n");
   1713 			break;
   1714 		}
   1715 
   1716 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1717 		    0, CTLTYPE_NODE,
   1718 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1719 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1720 			break;
   1721 
   1722 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1723 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1724 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1725 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1726 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1727 			break;
   1728 
   1729 #if 0 /* XXX msaitoh */
   1730 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1731 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1732 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1733 			NULL, 0, &(adapter->queues[i].irqs),
   1734 		    0, CTL_CREATE, CTL_EOL) != 0)
   1735 			break;
   1736 #endif
   1737 
   1738 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1739 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1740 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1741 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1742 		    0, CTL_CREATE, CTL_EOL) != 0)
   1743 			break;
   1744 
   1745 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1746 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1747 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1748 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1749 		    0, CTL_CREATE, CTL_EOL) != 0)
   1750 			break;
   1751 
   1752 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1753 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1754 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1755 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1756 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1757 		    NULL, adapter->queues[i].evnamebuf,
   1758 		    "Queue No Descriptor Available");
   1759 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1760 		    NULL, adapter->queues[i].evnamebuf,
   1761 		    "Queue Packets Transmitted");
   1762 #ifndef IXGBE_LEGACY_TX
   1763 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1764 		    NULL, adapter->queues[i].evnamebuf,
   1765 		    "Packets dropped in pcq");
   1766 #endif
   1767 
   1768 #ifdef LRO
   1769 		struct lro_ctrl *lro = &rxr->lro;
   1770 #endif /* LRO */
   1771 
   1772 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1773 		    CTLFLAG_READONLY,
   1774 		    CTLTYPE_INT,
   1775 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1776 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1777 		    CTL_CREATE, CTL_EOL) != 0)
   1778 			break;
   1779 
   1780 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1781 		    CTLFLAG_READONLY,
   1782 		    CTLTYPE_INT,
   1783 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1784 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1785 		    CTL_CREATE, CTL_EOL) != 0)
   1786 			break;
   1787 
   1788 		if (i < __arraycount(stats->mpc)) {
   1789 			evcnt_attach_dynamic(&stats->mpc[i],
   1790 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1791 			    "RX Missed Packet Count");
   1792 			if (hw->mac.type == ixgbe_mac_82598EB)
   1793 				evcnt_attach_dynamic(&stats->rnbc[i],
   1794 				    EVCNT_TYPE_MISC, NULL,
   1795 				    adapter->queues[i].evnamebuf,
   1796 				    "Receive No Buffers");
   1797 		}
   1798 		if (i < __arraycount(stats->pxontxc)) {
   1799 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1800 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1801 			    "pxontxc");
   1802 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1803 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1804 			    "pxonrxc");
   1805 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1806 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1807 			    "pxofftxc");
   1808 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1809 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1810 			    "pxoffrxc");
   1811 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1812 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1813 			    "pxon2offc");
   1814 		}
   1815 		if (i < __arraycount(stats->qprc)) {
   1816 			evcnt_attach_dynamic(&stats->qprc[i],
   1817 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1818 			    "qprc");
   1819 			evcnt_attach_dynamic(&stats->qptc[i],
   1820 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1821 			    "qptc");
   1822 			evcnt_attach_dynamic(&stats->qbrc[i],
   1823 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1824 			    "qbrc");
   1825 			evcnt_attach_dynamic(&stats->qbtc[i],
   1826 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1827 			    "qbtc");
   1828 			evcnt_attach_dynamic(&stats->qprdc[i],
   1829 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1830 			    "qprdc");
   1831 		}
   1832 
   1833 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1834 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1835 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1836 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1837 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1838 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1839 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1840 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1841 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1842 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1843 #ifdef LRO
   1844 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1845 				CTLFLAG_RD, &lro->lro_queued, 0,
   1846 				"LRO Queued");
   1847 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1848 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1849 				"LRO Flushed");
   1850 #endif /* LRO */
   1851 	}
   1852 
   1853 	/* MAC stats get their own sub node */
   1854 
   1855 	snprintf(stats->namebuf,
   1856 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1857 
   1858 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1859 	    stats->namebuf, "rx csum offload - IP");
   1860 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1861 	    stats->namebuf, "rx csum offload - L4");
   1862 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1863 	    stats->namebuf, "rx csum offload - IP bad");
   1864 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1865 	    stats->namebuf, "rx csum offload - L4 bad");
   1866 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1867 	    stats->namebuf, "Interrupt conditions zero");
   1868 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1869 	    stats->namebuf, "Legacy interrupts");
   1870 
   1871 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1872 	    stats->namebuf, "CRC Errors");
   1873 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1874 	    stats->namebuf, "Illegal Byte Errors");
   1875 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1876 	    stats->namebuf, "Byte Errors");
   1877 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1878 	    stats->namebuf, "MAC Short Packets Discarded");
   1879 	if (hw->mac.type >= ixgbe_mac_X550)
   1880 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1881 		    stats->namebuf, "Bad SFD");
   1882 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1883 	    stats->namebuf, "Total Packets Missed");
   1884 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1885 	    stats->namebuf, "MAC Local Faults");
   1886 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1887 	    stats->namebuf, "MAC Remote Faults");
   1888 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1889 	    stats->namebuf, "Receive Length Errors");
   1890 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1891 	    stats->namebuf, "Link XON Transmitted");
   1892 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1893 	    stats->namebuf, "Link XON Received");
   1894 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1895 	    stats->namebuf, "Link XOFF Transmitted");
   1896 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1897 	    stats->namebuf, "Link XOFF Received");
   1898 
   1899 	/* Packet Reception Stats */
   1900 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1901 	    stats->namebuf, "Total Octets Received");
   1902 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1903 	    stats->namebuf, "Good Octets Received");
   1904 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1905 	    stats->namebuf, "Total Packets Received");
   1906 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1907 	    stats->namebuf, "Good Packets Received");
   1908 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1909 	    stats->namebuf, "Multicast Packets Received");
   1910 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1911 	    stats->namebuf, "Broadcast Packets Received");
   1912 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1913 	    stats->namebuf, "64 byte frames received ");
   1914 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1915 	    stats->namebuf, "65-127 byte frames received");
   1916 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1917 	    stats->namebuf, "128-255 byte frames received");
   1918 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1919 	    stats->namebuf, "256-511 byte frames received");
   1920 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1921 	    stats->namebuf, "512-1023 byte frames received");
   1922 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   1923 	    stats->namebuf, "1023-1522 byte frames received");
   1924 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1925 	    stats->namebuf, "Receive Undersized");
   1926 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1927 	    stats->namebuf, "Fragmented Packets Received ");
   1928 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1929 	    stats->namebuf, "Oversized Packets Received");
   1930 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1931 	    stats->namebuf, "Received Jabber");
   1932 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1933 	    stats->namebuf, "Management Packets Received");
   1934 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1935 	    stats->namebuf, "Management Packets Dropped");
   1936 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1937 	    stats->namebuf, "Checksum Errors");
   1938 
   1939 	/* Packet Transmission Stats */
   1940 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1941 	    stats->namebuf, "Good Octets Transmitted");
   1942 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1943 	    stats->namebuf, "Total Packets Transmitted");
   1944 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1945 	    stats->namebuf, "Good Packets Transmitted");
   1946 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1947 	    stats->namebuf, "Broadcast Packets Transmitted");
   1948 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1949 	    stats->namebuf, "Multicast Packets Transmitted");
   1950 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1951 	    stats->namebuf, "Management Packets Transmitted");
   1952 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1953 	    stats->namebuf, "64 byte frames transmitted ");
   1954 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1955 	    stats->namebuf, "65-127 byte frames transmitted");
   1956 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1957 	    stats->namebuf, "128-255 byte frames transmitted");
   1958 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1959 	    stats->namebuf, "256-511 byte frames transmitted");
   1960 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1961 	    stats->namebuf, "512-1023 byte frames transmitted");
   1962 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1963 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1964 } /* ixgbe_add_hw_stats */
   1965 
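         /************************************************************************
          * ixgbe_clear_evcnt
          *
          *   Reset the driver, per-queue and MAC event counters to zero.
          ************************************************************************/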
   1966 static void
   1967 ixgbe_clear_evcnt(struct adapter *adapter)
   1968 {
   1969 	struct tx_ring *txr = adapter->tx_rings;
   1970 	struct rx_ring *rxr = adapter->rx_rings;
   1971 	struct ixgbe_hw *hw = &adapter->hw;
   1972 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1973 
   1974 	adapter->handleq.ev_count = 0;
   1975 	adapter->req.ev_count = 0;
   1976 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1977 	adapter->mbuf_defrag_failed.ev_count = 0;
   1978 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1979 	adapter->einval_tx_dma_setup.ev_count = 0;
   1980 	adapter->other_tx_dma_setup.ev_count = 0;
   1981 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1982 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1983 	adapter->watchdog_events.ev_count = 0;
   1984 	adapter->tso_err.ev_count = 0;
   1985 	adapter->link_irq.ev_count = 0;
   1986 
   1987 	txr = adapter->tx_rings;
   1988 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1989 		adapter->queues[i].irqs.ev_count = 0;
   1990 		txr->no_desc_avail.ev_count = 0;
   1991 		txr->total_packets.ev_count = 0;
   1992 		txr->tso_tx.ev_count = 0;
   1993 #ifndef IXGBE_LEGACY_TX
   1994 		txr->pcq_drops.ev_count = 0;
   1995 #endif
   1996 
   1997 		if (i < __arraycount(stats->mpc)) {
   1998 			stats->mpc[i].ev_count = 0;
   1999 			if (hw->mac.type == ixgbe_mac_82598EB)
   2000 				stats->rnbc[i].ev_count = 0;
   2001 		}
   2002 		if (i < __arraycount(stats->pxontxc)) {
   2003 			stats->pxontxc[i].ev_count = 0;
   2004 			stats->pxonrxc[i].ev_count = 0;
   2005 			stats->pxofftxc[i].ev_count = 0;
   2006 			stats->pxoffrxc[i].ev_count = 0;
   2007 			stats->pxon2offc[i].ev_count = 0;
   2008 		}
   2009 		if (i < __arraycount(stats->qprc)) {
   2010 			stats->qprc[i].ev_count = 0;
   2011 			stats->qptc[i].ev_count = 0;
   2012 			stats->qbrc[i].ev_count = 0;
   2013 			stats->qbtc[i].ev_count = 0;
   2014 			stats->qprdc[i].ev_count = 0;
   2015 		}
   2016 
   2017 		rxr->rx_packets.ev_count = 0;
   2018 		rxr->rx_bytes.ev_count = 0;
   2019 		rxr->rx_copies.ev_count = 0;
   2020 		rxr->no_jmbuf.ev_count = 0;
   2021 		rxr->rx_discarded.ev_count = 0;
   2022 	}
   2023 	stats->ipcs.ev_count = 0;
   2024 	stats->l4cs.ev_count = 0;
   2025 	stats->ipcs_bad.ev_count = 0;
   2026 	stats->l4cs_bad.ev_count = 0;
   2027 	stats->intzero.ev_count = 0;
   2028 	stats->legint.ev_count = 0;
   2029 	stats->crcerrs.ev_count = 0;
   2030 	stats->illerrc.ev_count = 0;
   2031 	stats->errbc.ev_count = 0;
   2032 	stats->mspdc.ev_count = 0;
   2033 	stats->mbsdc.ev_count = 0;
   2034 	stats->mpctotal.ev_count = 0;
   2035 	stats->mlfc.ev_count = 0;
   2036 	stats->mrfc.ev_count = 0;
   2037 	stats->rlec.ev_count = 0;
   2038 	stats->lxontxc.ev_count = 0;
   2039 	stats->lxonrxc.ev_count = 0;
   2040 	stats->lxofftxc.ev_count = 0;
   2041 	stats->lxoffrxc.ev_count = 0;
   2042 
   2043 	/* Packet Reception Stats */
   2044 	stats->tor.ev_count = 0;
   2045 	stats->gorc.ev_count = 0;
   2046 	stats->tpr.ev_count = 0;
   2047 	stats->gprc.ev_count = 0;
   2048 	stats->mprc.ev_count = 0;
   2049 	stats->bprc.ev_count = 0;
   2050 	stats->prc64.ev_count = 0;
   2051 	stats->prc127.ev_count = 0;
   2052 	stats->prc255.ev_count = 0;
   2053 	stats->prc511.ev_count = 0;
   2054 	stats->prc1023.ev_count = 0;
   2055 	stats->prc1522.ev_count = 0;
   2056 	stats->ruc.ev_count = 0;
   2057 	stats->rfc.ev_count = 0;
   2058 	stats->roc.ev_count = 0;
   2059 	stats->rjc.ev_count = 0;
   2060 	stats->mngprc.ev_count = 0;
   2061 	stats->mngpdc.ev_count = 0;
   2062 	stats->xec.ev_count = 0;
   2063 
   2064 	/* Packet Transmission Stats */
   2065 	stats->gotc.ev_count = 0;
   2066 	stats->tpt.ev_count = 0;
   2067 	stats->gptc.ev_count = 0;
   2068 	stats->bptc.ev_count = 0;
   2069 	stats->mptc.ev_count = 0;
   2070 	stats->mngptc.ev_count = 0;
   2071 	stats->ptc64.ev_count = 0;
   2072 	stats->ptc127.ev_count = 0;
   2073 	stats->ptc255.ev_count = 0;
   2074 	stats->ptc511.ev_count = 0;
   2075 	stats->ptc1023.ev_count = 0;
   2076 	stats->ptc1522.ev_count = 0;
   2077 }
   2078 
   2079 /************************************************************************
   2080  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2081  *
   2082  *   Retrieves the TDH value from the hardware
   2083  ************************************************************************/
   2084 static int
   2085 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2086 {
   2087 	struct sysctlnode node = *rnode;
   2088 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2089 	uint32_t val;
   2090 
   2091 	if (!txr)
   2092 		return (0);
   2093 
   2094 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2095 	node.sysctl_data = &val;
   2096 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2097 } /* ixgbe_sysctl_tdh_handler */
   2098 
   2099 /************************************************************************
   2100  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2101  *
   2102  *   Retrieves the TDT value from the hardware
   2103  ************************************************************************/
   2104 static int
   2105 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2106 {
   2107 	struct sysctlnode node = *rnode;
   2108 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2109 	uint32_t val;
   2110 
   2111 	if (!txr)
   2112 		return (0);
   2113 
   2114 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2115 	node.sysctl_data = &val;
   2116 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2117 } /* ixgbe_sysctl_tdt_handler */
   2118 
   2119 /************************************************************************
   2120  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2121  *
   2122  *   Retrieves the RDH value from the hardware
   2123  ************************************************************************/
   2124 static int
   2125 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2126 {
   2127 	struct sysctlnode node = *rnode;
   2128 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2129 	uint32_t val;
   2130 
   2131 	if (!rxr)
   2132 		return (0);
   2133 
   2134 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2135 	node.sysctl_data = &val;
   2136 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2137 } /* ixgbe_sysctl_rdh_handler */
   2138 
   2139 /************************************************************************
   2140  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2141  *
   2142  *   Retrieves the RDT value from the hardware
   2143  ************************************************************************/
   2144 static int
   2145 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2146 {
   2147 	struct sysctlnode node = *rnode;
   2148 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2149 	uint32_t val;
   2150 
   2151 	if (!rxr)
   2152 		return (0);
   2153 
   2154 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2155 	node.sysctl_data = &val;
   2156 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2157 } /* ixgbe_sysctl_rdt_handler */
   2158 
   2159 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2160 /************************************************************************
   2161  * ixgbe_register_vlan
   2162  *
   2163  *   Run via vlan config EVENT, it enables us to use the
   2164  *   HW Filter table since we can get the vlan id. This
   2165  *   just creates the entry in the soft version of the
   2166  *   VFTA, init will repopulate the real table.
   2167  ************************************************************************/
   2168 static void
   2169 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2170 {
   2171 	struct adapter	*adapter = ifp->if_softc;
   2172 	u16		index, bit;
   2173 
   2174 	if (ifp->if_softc != arg)   /* Not our event */
   2175 		return;
   2176 
   2177 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2178 		return;
   2179 
   2180 	IXGBE_CORE_LOCK(adapter);
   2181 	index = (vtag >> 5) & 0x7F;
   2182 	bit = vtag & 0x1F;
   2183 	adapter->shadow_vfta[index] |= (1 << bit);
   2184 	ixgbe_setup_vlan_hw_support(adapter);
   2185 	IXGBE_CORE_UNLOCK(adapter);
   2186 } /* ixgbe_register_vlan */
   2187 
   2188 /************************************************************************
   2189  * ixgbe_unregister_vlan
   2190  *
   2191  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2192  ************************************************************************/
   2193 static void
   2194 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2195 {
   2196 	struct adapter	*adapter = ifp->if_softc;
   2197 	u16		index, bit;
   2198 
   2199 	if (ifp->if_softc != arg)
   2200 		return;
   2201 
   2202 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2203 		return;
   2204 
   2205 	IXGBE_CORE_LOCK(adapter);
   2206 	index = (vtag >> 5) & 0x7F;
   2207 	bit = vtag & 0x1F;
   2208 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2209 	/* Re-init to load the changes */
   2210 	ixgbe_setup_vlan_hw_support(adapter);
   2211 	IXGBE_CORE_UNLOCK(adapter);
   2212 } /* ixgbe_unregister_vlan */
   2213 #endif
   2214 
   2215 static void
   2216 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2217 {
   2218 	struct ethercom *ec = &adapter->osdep.ec;
   2219 	struct ixgbe_hw *hw = &adapter->hw;
   2220 	struct rx_ring	*rxr;
   2221 	int             i;
   2222 	u32		ctrl;
   2223 
   2224 
   2225 	/*
    2226 	 * We get here through init_locked, which means a soft
    2227 	 * reset has already cleared the VFTA and other VLAN
    2228 	 * state, so if no VLANs have been registered there is
    2229 	 * nothing to do.
   2230 	 */
   2231 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2232 		return;
   2233 
   2234 	/* Setup the queues for vlans */
   2235 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2236 		for (i = 0; i < adapter->num_queues; i++) {
   2237 			rxr = &adapter->rx_rings[i];
   2238 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2239 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2240 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2241 				ctrl |= IXGBE_RXDCTL_VME;
   2242 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2243 			}
   2244 			rxr->vtag_strip = TRUE;
   2245 		}
   2246 	}
   2247 
   2248 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2249 		return;
   2250 	/*
    2251 	 * A soft reset zeroes out the VFTA, so
   2252 	 * we need to repopulate it now.
   2253 	 */
   2254 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2255 		if (adapter->shadow_vfta[i] != 0)
   2256 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2257 			    adapter->shadow_vfta[i]);
   2258 
   2259 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2260 	/* Enable the Filter Table if enabled */
   2261 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2262 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2263 		ctrl |= IXGBE_VLNCTRL_VFE;
   2264 	}
   2265 	if (hw->mac.type == ixgbe_mac_82598EB)
   2266 		ctrl |= IXGBE_VLNCTRL_VME;
   2267 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2268 } /* ixgbe_setup_vlan_hw_support */
   2269 
   2270 /************************************************************************
   2271  * ixgbe_get_slot_info
   2272  *
   2273  *   Get the width and transaction speed of
   2274  *   the slot this adapter is plugged into.
   2275  ************************************************************************/
   2276 static void
   2277 ixgbe_get_slot_info(struct adapter *adapter)
   2278 {
   2279 	device_t		dev = adapter->dev;
   2280 	struct ixgbe_hw		*hw = &adapter->hw;
   2281 	u32                   offset;
   2282 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2283 	u16			link;
   2284 	int                   bus_info_valid = TRUE;
   2285 
   2286 	/* Some devices are behind an internal bridge */
   2287 	switch (hw->device_id) {
   2288 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2289 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2290 		goto get_parent_info;
   2291 	default:
   2292 		break;
   2293 	}
   2294 
   2295 	ixgbe_get_bus_info(hw);
   2296 
   2297 	/*
    2298 	 * Some devices don't use PCI-E, so there is no point in
    2299 	 * displaying "Unknown" for their bus speed and width.
   2300 	 */
   2301 	switch (hw->mac.type) {
   2302 	case ixgbe_mac_X550EM_x:
   2303 	case ixgbe_mac_X550EM_a:
   2304 		return;
   2305 	default:
   2306 		goto display;
   2307 	}
   2308 
   2309 get_parent_info:
   2310 	/*
   2311 	 * For the Quad port adapter we need to parse back
   2312 	 * up the PCI tree to find the speed of the expansion
   2313 	 * slot into which this adapter is plugged. A bit more work.
   2314 	 */
   2315 	dev = device_parent(device_parent(dev));
   2316 #if 0
   2317 #ifdef IXGBE_DEBUG
   2318 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2319 	    pci_get_slot(dev), pci_get_function(dev));
   2320 #endif
   2321 	dev = device_parent(device_parent(dev));
   2322 #ifdef IXGBE_DEBUG
   2323 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2324 	    pci_get_slot(dev), pci_get_function(dev));
   2325 #endif
   2326 #endif
   2327 	/* Now get the PCI Express Capabilities offset */
   2328 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2329 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2330 		/*
   2331 		 * Hmm...can't get PCI-Express capabilities.
   2332 		 * Falling back to default method.
   2333 		 */
   2334 		bus_info_valid = FALSE;
   2335 		ixgbe_get_bus_info(hw);
   2336 		goto display;
   2337 	}
   2338 	/* ...and read the Link Status Register */
   2339 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2340 	    offset + PCIE_LCSR) >> 16;
   2341 	ixgbe_set_pci_config_data_generic(hw, link);
   2342 
   2343 display:
   2344 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2345 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2346 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2347 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2348 	     "Unknown"),
   2349 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2350 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2351 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2352 	     "Unknown"));
   2353 
   2354 	if (bus_info_valid) {
   2355 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2356 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2357 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2358 			device_printf(dev, "PCI-Express bandwidth available"
   2359 			    " for this card\n     is not sufficient for"
   2360 			    " optimal performance.\n");
   2361 			device_printf(dev, "For optimal performance a x8 "
   2362 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2363 		}
   2364 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2365 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2366 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2367 			device_printf(dev, "PCI-Express bandwidth available"
   2368 			    " for this card\n     is not sufficient for"
   2369 			    " optimal performance.\n");
   2370 			device_printf(dev, "For optimal performance a x8 "
   2371 			    "PCIE Gen3 slot is required.\n");
   2372 		}
   2373 	} else
   2374 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2375 
   2376 	return;
   2377 } /* ixgbe_get_slot_info */
   2378 
   2379 /************************************************************************
   2380  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2381  ************************************************************************/
   2382 static inline void
   2383 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2384 {
   2385 	struct ixgbe_hw *hw = &adapter->hw;
   2386 	u64             queue = (u64)(1ULL << vector);
   2387 	u32             mask;
   2388 
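         	/*
         	 * 82598 has a single EIMS register; later MACs split the 64
         	 * possible queue vector bits across EIMS_EX(0) (bits 0-31)
         	 * and EIMS_EX(1) (bits 32-63).
         	 */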
   2389 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2390 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2391 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2392 	} else {
   2393 		mask = (queue & 0xFFFFFFFF);
   2394 		if (mask)
   2395 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2396 		mask = (queue >> 32);
   2397 		if (mask)
   2398 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2399 	}
   2400 } /* ixgbe_enable_queue */
   2401 
   2402 /************************************************************************
   2403  * ixgbe_disable_queue
   2404  ************************************************************************/
   2405 static inline void
   2406 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2407 {
   2408 	struct ixgbe_hw *hw = &adapter->hw;
   2409 	u64             queue = (u64)(1ULL << vector);
   2410 	u32             mask;
   2411 
   2412 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2413 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2414 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2415 	} else {
   2416 		mask = (queue & 0xFFFFFFFF);
   2417 		if (mask)
   2418 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2419 		mask = (queue >> 32);
   2420 		if (mask)
   2421 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2422 	}
   2423 } /* ixgbe_disable_queue */
   2424 
   2425 /************************************************************************
   2426  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2427  ************************************************************************/
   2428 static int
   2429 ixgbe_msix_que(void *arg)
   2430 {
   2431 	struct ix_queue	*que = arg;
   2432 	struct adapter  *adapter = que->adapter;
   2433 	struct ifnet    *ifp = adapter->ifp;
   2434 	struct tx_ring	*txr = que->txr;
   2435 	struct rx_ring	*rxr = que->rxr;
   2436 	bool		more;
   2437 	u32		newitr = 0;
   2438 
   2439 	/* Protect against spurious interrupts */
   2440 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2441 		return 0;
   2442 
   2443 	ixgbe_disable_queue(adapter, que->msix);
   2444 	++que->irqs.ev_count;
   2445 
   2446 #ifdef __NetBSD__
   2447 	/* Don't run ixgbe_rxeof in interrupt context */
   2448 	more = true;
   2449 #else
   2450 	more = ixgbe_rxeof(que);
   2451 #endif
   2452 
   2453 	IXGBE_TX_LOCK(txr);
   2454 	ixgbe_txeof(txr);
   2455 	IXGBE_TX_UNLOCK(txr);
   2456 
   2457 	/* Do AIM now? */
   2458 
   2459 	if (adapter->enable_aim == false)
   2460 		goto no_calc;
   2461 	/*
   2462 	 * Do Adaptive Interrupt Moderation:
   2463 	 *  - Write out last calculated setting
   2464 	 *  - Calculate based on average size over
   2465 	 *    the last interval.
   2466 	 */
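         	/*
         	 * The average frame size over the last interval serves as a
         	 * rough load estimate: larger frames yield a larger EITR
         	 * interval and therefore fewer interrupts per second.
         	 */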
   2467 	if (que->eitr_setting)
   2468 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2469 		    que->eitr_setting);
   2470 
   2471 	que->eitr_setting = 0;
   2472 
   2473 	/* Idle, do nothing */
    2474 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2475 		goto no_calc;
   2476 
   2477 	if ((txr->bytes) && (txr->packets))
   2478 		newitr = txr->bytes/txr->packets;
   2479 	if ((rxr->bytes) && (rxr->packets))
   2480 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2481 	newitr += 24; /* account for hardware frame, crc */
   2482 
   2483 	/* set an upper boundary */
   2484 	newitr = min(newitr, 3000);
   2485 
   2486 	/* Be nice to the mid range */
   2487 	if ((newitr > 300) && (newitr < 1200))
   2488 		newitr = (newitr / 3);
   2489 	else
   2490 		newitr = (newitr / 2);
   2491 
    2492 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2493 		newitr |= newitr << 16;
    2494 	else
    2495 		newitr |= IXGBE_EITR_CNT_WDIS;
    2496 
    2497 	/* save for next interrupt */
    2498 	que->eitr_setting = newitr;
   2499 
   2500 	/* Reset state */
   2501 	txr->bytes = 0;
   2502 	txr->packets = 0;
   2503 	rxr->bytes = 0;
   2504 	rxr->packets = 0;
   2505 
   2506 no_calc:
   2507 	if (more)
   2508 		softint_schedule(que->que_si);
   2509 	else
   2510 		ixgbe_enable_queue(adapter, que->msix);
   2511 
   2512 	return 1;
   2513 } /* ixgbe_msix_que */
   2514 
   2515 /************************************************************************
   2516  * ixgbe_media_status - Media Ioctl callback
   2517  *
   2518  *   Called whenever the user queries the status of
   2519  *   the interface using ifconfig.
   2520  ************************************************************************/
   2521 static void
   2522 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2523 {
   2524 	struct adapter *adapter = ifp->if_softc;
   2525 	struct ixgbe_hw *hw = &adapter->hw;
   2526 	int layer;
   2527 
   2528 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2529 	IXGBE_CORE_LOCK(adapter);
   2530 	ixgbe_update_link_status(adapter);
   2531 
   2532 	ifmr->ifm_status = IFM_AVALID;
   2533 	ifmr->ifm_active = IFM_ETHER;
   2534 
   2535 	if (!adapter->link_active) {
   2536 		ifmr->ifm_active |= IFM_NONE;
   2537 		IXGBE_CORE_UNLOCK(adapter);
   2538 		return;
   2539 	}
   2540 
   2541 	ifmr->ifm_status |= IFM_ACTIVE;
   2542 	layer = adapter->phy_layer;
   2543 
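         	/*
         	 * Map the supported PHY layer(s) and the negotiated link
         	 * speed to the matching ifmedia subtype below.
         	 */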
   2544 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2545 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2546 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2547 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2548 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2549 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2550 		switch (adapter->link_speed) {
   2551 		case IXGBE_LINK_SPEED_10GB_FULL:
   2552 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2553 			break;
   2554 		case IXGBE_LINK_SPEED_5GB_FULL:
   2555 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2556 			break;
   2557 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2558 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2559 			break;
   2560 		case IXGBE_LINK_SPEED_1GB_FULL:
   2561 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2562 			break;
   2563 		case IXGBE_LINK_SPEED_100_FULL:
   2564 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2565 			break;
   2566 		case IXGBE_LINK_SPEED_10_FULL:
   2567 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2568 			break;
   2569 		}
   2570 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2571 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2572 		switch (adapter->link_speed) {
   2573 		case IXGBE_LINK_SPEED_10GB_FULL:
   2574 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2575 			break;
   2576 		}
   2577 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2578 		switch (adapter->link_speed) {
   2579 		case IXGBE_LINK_SPEED_10GB_FULL:
   2580 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2581 			break;
   2582 		case IXGBE_LINK_SPEED_1GB_FULL:
   2583 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2584 			break;
   2585 		}
   2586 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2587 		switch (adapter->link_speed) {
   2588 		case IXGBE_LINK_SPEED_10GB_FULL:
   2589 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2590 			break;
   2591 		case IXGBE_LINK_SPEED_1GB_FULL:
   2592 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2593 			break;
   2594 		}
   2595 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2596 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2597 		switch (adapter->link_speed) {
   2598 		case IXGBE_LINK_SPEED_10GB_FULL:
   2599 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2600 			break;
   2601 		case IXGBE_LINK_SPEED_1GB_FULL:
   2602 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2603 			break;
   2604 		}
   2605 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2606 		switch (adapter->link_speed) {
   2607 		case IXGBE_LINK_SPEED_10GB_FULL:
   2608 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2609 			break;
   2610 		}
   2611 	/*
   2612 	 * XXX: These need to use the proper media types once
   2613 	 * they're added.
   2614 	 */
   2615 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2616 		switch (adapter->link_speed) {
   2617 		case IXGBE_LINK_SPEED_10GB_FULL:
   2618 #ifndef IFM_ETH_XTYPE
   2619 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2620 #else
   2621 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2622 #endif
   2623 			break;
   2624 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2625 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2626 			break;
   2627 		case IXGBE_LINK_SPEED_1GB_FULL:
   2628 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2629 			break;
   2630 		}
   2631 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2632 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2633 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2634 		switch (adapter->link_speed) {
   2635 		case IXGBE_LINK_SPEED_10GB_FULL:
   2636 #ifndef IFM_ETH_XTYPE
   2637 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2638 #else
   2639 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2640 #endif
   2641 			break;
   2642 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2643 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2644 			break;
   2645 		case IXGBE_LINK_SPEED_1GB_FULL:
   2646 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2647 			break;
   2648 		}
   2649 
   2650 	/* If nothing is recognized... */
   2651 #if 0
   2652 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2653 		ifmr->ifm_active |= IFM_UNKNOWN;
   2654 #endif
   2655 
   2656 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2657 
   2658 	/* Display current flow control setting used on link */
   2659 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2660 	    hw->fc.current_mode == ixgbe_fc_full)
   2661 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2662 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2663 	    hw->fc.current_mode == ixgbe_fc_full)
   2664 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2665 
   2666 	IXGBE_CORE_UNLOCK(adapter);
   2667 
   2668 	return;
   2669 } /* ixgbe_media_status */
   2670 
   2671 /************************************************************************
   2672  * ixgbe_media_change - Media Ioctl callback
   2673  *
   2674  *   Called when the user changes speed/duplex using
    2675  *   media/mediaopt options with ifconfig.
   2676  ************************************************************************/
   2677 static int
   2678 ixgbe_media_change(struct ifnet *ifp)
   2679 {
   2680 	struct adapter   *adapter = ifp->if_softc;
   2681 	struct ifmedia   *ifm = &adapter->media;
   2682 	struct ixgbe_hw  *hw = &adapter->hw;
   2683 	ixgbe_link_speed speed = 0;
   2684 	ixgbe_link_speed link_caps = 0;
   2685 	bool negotiate = false;
   2686 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2687 
   2688 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2689 
   2690 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2691 		return (EINVAL);
   2692 
   2693 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2694 		return (ENODEV);
   2695 
   2696 	/*
   2697 	 * We don't actually need to check against the supported
   2698 	 * media types of the adapter; ifmedia will take care of
   2699 	 * that for us.
   2700 	 */
   2701 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2702 	case IFM_AUTO:
   2703 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2704 		    &negotiate);
   2705 		if (err != IXGBE_SUCCESS) {
   2706 			device_printf(adapter->dev, "Unable to determine "
   2707 			    "supported advertise speeds\n");
   2708 			return (ENODEV);
   2709 		}
   2710 		speed |= link_caps;
   2711 		break;
   2712 	case IFM_10G_T:
   2713 	case IFM_10G_LRM:
   2714 	case IFM_10G_LR:
   2715 	case IFM_10G_TWINAX:
   2716 #ifndef IFM_ETH_XTYPE
   2717 	case IFM_10G_SR: /* KR, too */
   2718 	case IFM_10G_CX4: /* KX4 */
   2719 #else
   2720 	case IFM_10G_KR:
   2721 	case IFM_10G_KX4:
   2722 #endif
   2723 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2724 		break;
   2725 	case IFM_5000_T:
   2726 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2727 		break;
   2728 	case IFM_2500_T:
   2729 	case IFM_2500_KX:
   2730 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2731 		break;
   2732 	case IFM_1000_T:
   2733 	case IFM_1000_LX:
   2734 	case IFM_1000_SX:
   2735 	case IFM_1000_KX:
   2736 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2737 		break;
   2738 	case IFM_100_TX:
   2739 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2740 		break;
   2741 	case IFM_10_T:
   2742 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2743 		break;
   2744 	default:
   2745 		goto invalid;
   2746 	}
   2747 
   2748 	hw->mac.autotry_restart = TRUE;
   2749 	hw->mac.ops.setup_link(hw, speed, TRUE);
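         	/*
         	 * Record the newly advertised speeds as a bit mask:
         	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M,
         	 * 0x10 = 2.5G, 0x20 = 5G.
         	 */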
   2750 	adapter->advertise = 0;
   2751 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2752 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2753 			adapter->advertise |= 1 << 2;
   2754 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2755 			adapter->advertise |= 1 << 1;
   2756 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2757 			adapter->advertise |= 1 << 0;
   2758 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2759 			adapter->advertise |= 1 << 3;
   2760 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2761 			adapter->advertise |= 1 << 4;
   2762 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2763 			adapter->advertise |= 1 << 5;
   2764 	}
   2765 
   2766 	return (0);
   2767 
   2768 invalid:
   2769 	device_printf(adapter->dev, "Invalid media type!\n");
   2770 
   2771 	return (EINVAL);
   2772 } /* ixgbe_media_change */
   2773 
   2774 /************************************************************************
   2775  * ixgbe_set_promisc
   2776  ************************************************************************/
   2777 static void
   2778 ixgbe_set_promisc(struct adapter *adapter)
   2779 {
   2780 	struct ifnet *ifp = adapter->ifp;
   2781 	int          mcnt = 0;
   2782 	u32          rctl;
   2783 	struct ether_multi *enm;
   2784 	struct ether_multistep step;
   2785 	struct ethercom *ec = &adapter->osdep.ec;
   2786 
   2787 	KASSERT(mutex_owned(&adapter->core_mtx));
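	/*
	 * FCTRL_UPE is unicast promiscuous, FCTRL_MPE multicast promiscuous.
	 * Start with UPE cleared; MPE stays set only when the multicast
	 * list overflows or IFF_ALLMULTI/IFF_PROMISC is requested below.
	 */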
   2788 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2789 	rctl &= (~IXGBE_FCTRL_UPE);
   2790 	if (ifp->if_flags & IFF_ALLMULTI)
   2791 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2792 	else {
   2793 		ETHER_LOCK(ec);
   2794 		ETHER_FIRST_MULTI(step, ec, enm);
   2795 		while (enm != NULL) {
   2796 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2797 				break;
   2798 			mcnt++;
   2799 			ETHER_NEXT_MULTI(step, enm);
   2800 		}
   2801 		ETHER_UNLOCK(ec);
   2802 	}
   2803 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2804 		rctl &= (~IXGBE_FCTRL_MPE);
   2805 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2806 
   2807 	if (ifp->if_flags & IFF_PROMISC) {
   2808 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2809 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2810 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2811 		rctl |= IXGBE_FCTRL_MPE;
   2812 		rctl &= ~IXGBE_FCTRL_UPE;
   2813 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2814 	}
   2815 } /* ixgbe_set_promisc */
   2816 
   2817 /************************************************************************
   2818  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2819  ************************************************************************/
   2820 static int
   2821 ixgbe_msix_link(void *arg)
   2822 {
   2823 	struct adapter	*adapter = arg;
   2824 	struct ixgbe_hw *hw = &adapter->hw;
   2825 	u32		eicr, eicr_mask;
   2826 	s32             retval;
   2827 
   2828 	++adapter->link_irq.ev_count;
   2829 
   2830 	/* Pause other interrupts */
   2831 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2832 
   2833 	/* First get the cause */
   2834 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2835 	/* Be sure the queue bits are not cleared */
   2836 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2837 	/* Clear interrupt with write */
   2838 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2839 
   2840 	/* Link status change */
   2841 	if (eicr & IXGBE_EICR_LSC) {
   2842 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2843 		softint_schedule(adapter->link_si);
   2844 	}
   2845 
   2846 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2847 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2848 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2849 			/* This is probably overkill :) */
   2850 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   2851 				return 1;
   2852 			/* Disable the interrupt */
   2853 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2854 			softint_schedule(adapter->fdir_si);
   2855 		}
   2856 
   2857 		if (eicr & IXGBE_EICR_ECC) {
   2858 			device_printf(adapter->dev,
   2859 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2860 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2861 		}
   2862 
   2863 		/* Check for over temp condition */
   2864 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2865 			switch (adapter->hw.mac.type) {
   2866 			case ixgbe_mac_X550EM_a:
   2867 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2868 					break;
   2869 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2870 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2871 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2872 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2873 				retval = hw->phy.ops.check_overtemp(hw);
   2874 				if (retval != IXGBE_ERR_OVERTEMP)
   2875 					break;
   2876 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2877 				device_printf(adapter->dev, "System shutdown required!\n");
   2878 				break;
   2879 			default:
   2880 				if (!(eicr & IXGBE_EICR_TS))
   2881 					break;
   2882 				retval = hw->phy.ops.check_overtemp(hw);
   2883 				if (retval != IXGBE_ERR_OVERTEMP)
   2884 					break;
   2885 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2886 				device_printf(adapter->dev, "System shutdown required!\n");
   2887 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2888 				break;
   2889 			}
   2890 		}
   2891 
   2892 		/* Check for VF message */
   2893 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2894 		    (eicr & IXGBE_EICR_MAILBOX))
   2895 			softint_schedule(adapter->mbx_si);
   2896 	}
   2897 
   2898 	if (ixgbe_is_sfp(hw)) {
   2899 		/* Pluggable optics-related interrupt */
   2900 		if (hw->mac.type >= ixgbe_mac_X540)
   2901 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2902 		else
   2903 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2904 
   2905 		if (eicr & eicr_mask) {
   2906 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2907 			softint_schedule(adapter->mod_si);
   2908 		}
   2909 
   2910 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2911 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2912 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2913 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2914 			softint_schedule(adapter->msf_si);
   2915 		}
   2916 	}
   2917 
   2918 	/* Check for fan failure */
   2919 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2920 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2921 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2922 	}
   2923 
   2924 	/* External PHY interrupt */
   2925 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2926 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2927 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2928 		softint_schedule(adapter->phy_si);
   2929  	}
   2930 
   2931 	/* Re-enable other interrupts */
   2932 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2933 	return 1;
   2934 } /* ixgbe_msix_link */
   2935 
   2936 /************************************************************************
   2937  * ixgbe_sysctl_interrupt_rate_handler
   2938  ************************************************************************/
   2939 static int
   2940 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2941 {
   2942 	struct sysctlnode node = *rnode;
   2943 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2944 	uint32_t reg, usec, rate;
   2945 	int error;
   2946 
   2947 	if (que == NULL)
   2948 		return 0;
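	/*
	 * EITR bits [11:3] hold the interrupt throttle interval.  With the
	 * ~2 usec granularity assumed throughout this handler, the
	 * resulting rate is roughly 500000 / interval interrupts per second.
	 */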
   2949 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2950 	usec = ((reg & 0x0FF8) >> 3);
   2951 	if (usec > 0)
   2952 		rate = 500000 / usec;
   2953 	else
   2954 		rate = 0;
   2955 	node.sysctl_data = &rate;
   2956 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2957 	if (error || newp == NULL)
   2958 		return error;
   2959 	reg &= ~0xfff; /* default, no limitation */
   2960 	ixgbe_max_interrupt_rate = 0;
   2961 	if (rate > 0 && rate < 500000) {
   2962 		if (rate < 1000)
   2963 			rate = 1000;
   2964 		ixgbe_max_interrupt_rate = rate;
   2965 		reg |= ((4000000/rate) & 0xff8);
   2966 	}
   2967 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2968 
   2969 	return (0);
   2970 } /* ixgbe_sysctl_interrupt_rate_handler */
   2971 
   2972 const struct sysctlnode *
   2973 ixgbe_sysctl_instance(struct adapter *adapter)
   2974 {
   2975 	const char *dvname;
   2976 	struct sysctllog **log;
   2977 	int rc;
   2978 	const struct sysctlnode *rnode;
   2979 
   2980 	if (adapter->sysctltop != NULL)
   2981 		return adapter->sysctltop;
   2982 
   2983 	log = &adapter->sysctllog;
   2984 	dvname = device_xname(adapter->dev);
   2985 
   2986 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2987 	    0, CTLTYPE_NODE, dvname,
   2988 	    SYSCTL_DESCR("ixgbe information and settings"),
   2989 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2990 		goto err;
   2991 
   2992 	return rnode;
   2993 err:
   2994 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2995 	return NULL;
   2996 }
   2997 
   2998 /************************************************************************
   2999  * ixgbe_add_device_sysctls
   3000  ************************************************************************/
   3001 static void
   3002 ixgbe_add_device_sysctls(struct adapter *adapter)
   3003 {
   3004 	device_t               dev = adapter->dev;
   3005 	struct ixgbe_hw        *hw = &adapter->hw;
   3006 	struct sysctllog **log;
   3007 	const struct sysctlnode *rnode, *cnode;
   3008 
   3009 	log = &adapter->sysctllog;
   3010 
   3011 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3012 		aprint_error_dev(dev, "could not create sysctl root\n");
   3013 		return;
   3014 	}
   3015 
   3016 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3017 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3018 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3019 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3020 		aprint_error_dev(dev, "could not create sysctl\n");
   3021 
   3022 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3023 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3024 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3025 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3026 		aprint_error_dev(dev, "could not create sysctl\n");
   3027 
   3028 	/* Sysctls for all devices */
   3029 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3030 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3031 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3032 	    CTL_EOL) != 0)
   3033 		aprint_error_dev(dev, "could not create sysctl\n");
   3034 
   3035 	adapter->enable_aim = ixgbe_enable_aim;
   3036 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3037 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3038 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3039 		aprint_error_dev(dev, "could not create sysctl\n");
   3040 
   3041 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3042 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3043 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3044 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3045 	    CTL_EOL) != 0)
   3046 		aprint_error_dev(dev, "could not create sysctl\n");
   3047 
   3048 #ifdef IXGBE_DEBUG
   3049 	/* testing sysctls (for all devices) */
   3050 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3051 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3052 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3053 	    CTL_EOL) != 0)
   3054 		aprint_error_dev(dev, "could not create sysctl\n");
   3055 
   3056 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3057 	    CTLTYPE_STRING, "print_rss_config",
   3058 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3059 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3060 	    CTL_EOL) != 0)
   3061 		aprint_error_dev(dev, "could not create sysctl\n");
   3062 #endif
   3063 	/* for X550 series devices */
   3064 	if (hw->mac.type >= ixgbe_mac_X550)
   3065 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3066 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3067 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3068 		    CTL_EOL) != 0)
   3069 			aprint_error_dev(dev, "could not create sysctl\n");
   3070 
   3071 	/* for WoL-capable devices */
   3072 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3073 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3074 		    CTLTYPE_BOOL, "wol_enable",
   3075 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3076 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3077 		    CTL_EOL) != 0)
   3078 			aprint_error_dev(dev, "could not create sysctl\n");
   3079 
   3080 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3081 		    CTLTYPE_INT, "wufc",
   3082 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3083 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3084 		    CTL_EOL) != 0)
   3085 			aprint_error_dev(dev, "could not create sysctl\n");
   3086 	}
   3087 
   3088 	/* for X552/X557-AT devices */
   3089 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3090 		const struct sysctlnode *phy_node;
   3091 
   3092 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3093 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3094 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3095 			aprint_error_dev(dev, "could not create sysctl\n");
   3096 			return;
   3097 		}
   3098 
   3099 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3100 		    CTLTYPE_INT, "temp",
   3101 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3102 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3103 		    CTL_EOL) != 0)
   3104 			aprint_error_dev(dev, "could not create sysctl\n");
   3105 
   3106 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3107 		    CTLTYPE_INT, "overtemp_occurred",
   3108 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3109 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3110 		    CTL_CREATE, CTL_EOL) != 0)
   3111 			aprint_error_dev(dev, "could not create sysctl\n");
   3112 	}
   3113 
   3114 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3115 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3116 		    CTLTYPE_INT, "eee_state",
   3117 		    SYSCTL_DESCR("EEE Power Save State"),
   3118 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3119 		    CTL_EOL) != 0)
   3120 			aprint_error_dev(dev, "could not create sysctl\n");
   3121 	}
   3122 } /* ixgbe_add_device_sysctls */
   3123 
   3124 /************************************************************************
   3125  * ixgbe_allocate_pci_resources
   3126  ************************************************************************/
   3127 static int
   3128 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3129     const struct pci_attach_args *pa)
   3130 {
   3131 	pcireg_t	memtype;
   3132 	device_t dev = adapter->dev;
   3133 	bus_addr_t addr;
   3134 	int flags;
   3135 
   3136 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3137 	switch (memtype) {
   3138 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3139 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3140 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3141 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3142 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3143 			goto map_err;
   3144 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3145 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3146 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3147 		}
   3148 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3149 		     adapter->osdep.mem_size, flags,
   3150 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3151 map_err:
   3152 			adapter->osdep.mem_size = 0;
   3153 			aprint_error_dev(dev, "unable to map BAR0\n");
   3154 			return ENXIO;
   3155 		}
   3156 		break;
   3157 	default:
   3158 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3159 		return ENXIO;
   3160 	}
   3161 
   3162 	return (0);
   3163 } /* ixgbe_allocate_pci_resources */
   3164 
   3165 static void
   3166 ixgbe_free_softint(struct adapter *adapter)
   3167 {
   3168 	struct ix_queue *que = adapter->queues;
   3169 	struct tx_ring *txr = adapter->tx_rings;
   3170 	int i;
   3171 
   3172 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3173 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3174 			if (txr->txr_si != NULL)
   3175 				softint_disestablish(txr->txr_si);
   3176 		}
   3177 		if (que->que_si != NULL)
   3178 			softint_disestablish(que->que_si);
   3179 	}
   3180 
   3181 	/* Drain the Link queue */
   3182 	if (adapter->link_si != NULL) {
   3183 		softint_disestablish(adapter->link_si);
   3184 		adapter->link_si = NULL;
   3185 	}
   3186 	if (adapter->mod_si != NULL) {
   3187 		softint_disestablish(adapter->mod_si);
   3188 		adapter->mod_si = NULL;
   3189 	}
   3190 	if (adapter->msf_si != NULL) {
   3191 		softint_disestablish(adapter->msf_si);
   3192 		adapter->msf_si = NULL;
   3193 	}
   3194 	if (adapter->phy_si != NULL) {
   3195 		softint_disestablish(adapter->phy_si);
   3196 		adapter->phy_si = NULL;
   3197 	}
   3198 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3199 		if (adapter->fdir_si != NULL) {
   3200 			softint_disestablish(adapter->fdir_si);
   3201 			adapter->fdir_si = NULL;
   3202 		}
   3203 	}
   3204 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3205 		if (adapter->mbx_si != NULL) {
   3206 			softint_disestablish(adapter->mbx_si);
   3207 			adapter->mbx_si = NULL;
   3208 		}
   3209 	}
   3210 } /* ixgbe_free_softint */
   3211 
   3212 /************************************************************************
   3213  * ixgbe_detach - Device removal routine
   3214  *
   3215  *   Called when the driver is being removed.
   3216  *   Stops the adapter and deallocates all the resources
   3217  *   that were allocated for driver operation.
   3218  *
   3219  *   return 0 on success, positive on failure
   3220  ************************************************************************/
   3221 static int
   3222 ixgbe_detach(device_t dev, int flags)
   3223 {
   3224 	struct adapter *adapter = device_private(dev);
   3225 	struct rx_ring *rxr = adapter->rx_rings;
   3226 	struct tx_ring *txr = adapter->tx_rings;
   3227 	struct ixgbe_hw *hw = &adapter->hw;
   3228 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3229 	u32	ctrl_ext;
   3230 
   3231 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3232 	if (adapter->osdep.attached == false)
   3233 		return 0;
   3234 
   3235 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3236 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3237 		return (EBUSY);
   3238 	}
   3239 
   3240 	/* Stop the interface. Callouts are stopped in it. */
   3241 	ixgbe_ifstop(adapter->ifp, 1);
   3242 #if NVLAN > 0
   3243 	/* Make sure VLANs are not using driver */
   3244 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3245 		;	/* nothing to do: no VLANs */
   3246 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3247 		vlan_ifdetach(adapter->ifp);
   3248 	else {
   3249 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3250 		return (EBUSY);
   3251 	}
   3252 #endif
   3253 
   3254 	pmf_device_deregister(dev);
   3255 
   3256 	ether_ifdetach(adapter->ifp);
   3257 	/* Stop the adapter */
   3258 	IXGBE_CORE_LOCK(adapter);
   3259 	ixgbe_setup_low_power_mode(adapter);
   3260 	IXGBE_CORE_UNLOCK(adapter);
   3261 
   3262 	ixgbe_free_softint(adapter);
   3263 
   3264 	/* let hardware know driver is unloading */
   3265 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3266 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3267 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3268 
   3269 	callout_halt(&adapter->timer, NULL);
   3270 
   3271 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3272 		netmap_detach(adapter->ifp);
   3273 
   3274 	ixgbe_free_pci_resources(adapter);
   3275 #if 0	/* XXX the NetBSD port is probably missing something here */
   3276 	bus_generic_detach(dev);
   3277 #endif
   3278 	if_detach(adapter->ifp);
   3279 	if_percpuq_destroy(adapter->ipq);
   3280 
   3281 	sysctl_teardown(&adapter->sysctllog);
   3282 	evcnt_detach(&adapter->handleq);
   3283 	evcnt_detach(&adapter->req);
   3284 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3285 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3286 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3287 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3288 	evcnt_detach(&adapter->other_tx_dma_setup);
   3289 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3290 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3291 	evcnt_detach(&adapter->watchdog_events);
   3292 	evcnt_detach(&adapter->tso_err);
   3293 	evcnt_detach(&adapter->link_irq);
   3294 
   3295 	txr = adapter->tx_rings;
   3296 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3297 		evcnt_detach(&adapter->queues[i].irqs);
   3298 		evcnt_detach(&txr->no_desc_avail);
   3299 		evcnt_detach(&txr->total_packets);
   3300 		evcnt_detach(&txr->tso_tx);
   3301 #ifndef IXGBE_LEGACY_TX
   3302 		evcnt_detach(&txr->pcq_drops);
   3303 #endif
   3304 
   3305 		if (i < __arraycount(stats->mpc)) {
   3306 			evcnt_detach(&stats->mpc[i]);
   3307 			if (hw->mac.type == ixgbe_mac_82598EB)
   3308 				evcnt_detach(&stats->rnbc[i]);
   3309 		}
   3310 		if (i < __arraycount(stats->pxontxc)) {
   3311 			evcnt_detach(&stats->pxontxc[i]);
   3312 			evcnt_detach(&stats->pxonrxc[i]);
   3313 			evcnt_detach(&stats->pxofftxc[i]);
   3314 			evcnt_detach(&stats->pxoffrxc[i]);
   3315 			evcnt_detach(&stats->pxon2offc[i]);
   3316 		}
   3317 		if (i < __arraycount(stats->qprc)) {
   3318 			evcnt_detach(&stats->qprc[i]);
   3319 			evcnt_detach(&stats->qptc[i]);
   3320 			evcnt_detach(&stats->qbrc[i]);
   3321 			evcnt_detach(&stats->qbtc[i]);
   3322 			evcnt_detach(&stats->qprdc[i]);
   3323 		}
   3324 
   3325 		evcnt_detach(&rxr->rx_packets);
   3326 		evcnt_detach(&rxr->rx_bytes);
   3327 		evcnt_detach(&rxr->rx_copies);
   3328 		evcnt_detach(&rxr->no_jmbuf);
   3329 		evcnt_detach(&rxr->rx_discarded);
   3330 	}
   3331 	evcnt_detach(&stats->ipcs);
   3332 	evcnt_detach(&stats->l4cs);
   3333 	evcnt_detach(&stats->ipcs_bad);
   3334 	evcnt_detach(&stats->l4cs_bad);
   3335 	evcnt_detach(&stats->intzero);
   3336 	evcnt_detach(&stats->legint);
   3337 	evcnt_detach(&stats->crcerrs);
   3338 	evcnt_detach(&stats->illerrc);
   3339 	evcnt_detach(&stats->errbc);
   3340 	evcnt_detach(&stats->mspdc);
   3341 	if (hw->mac.type >= ixgbe_mac_X550)
   3342 		evcnt_detach(&stats->mbsdc);
   3343 	evcnt_detach(&stats->mpctotal);
   3344 	evcnt_detach(&stats->mlfc);
   3345 	evcnt_detach(&stats->mrfc);
   3346 	evcnt_detach(&stats->rlec);
   3347 	evcnt_detach(&stats->lxontxc);
   3348 	evcnt_detach(&stats->lxonrxc);
   3349 	evcnt_detach(&stats->lxofftxc);
   3350 	evcnt_detach(&stats->lxoffrxc);
   3351 
   3352 	/* Packet Reception Stats */
   3353 	evcnt_detach(&stats->tor);
   3354 	evcnt_detach(&stats->gorc);
   3355 	evcnt_detach(&stats->tpr);
   3356 	evcnt_detach(&stats->gprc);
   3357 	evcnt_detach(&stats->mprc);
   3358 	evcnt_detach(&stats->bprc);
   3359 	evcnt_detach(&stats->prc64);
   3360 	evcnt_detach(&stats->prc127);
   3361 	evcnt_detach(&stats->prc255);
   3362 	evcnt_detach(&stats->prc511);
   3363 	evcnt_detach(&stats->prc1023);
   3364 	evcnt_detach(&stats->prc1522);
   3365 	evcnt_detach(&stats->ruc);
   3366 	evcnt_detach(&stats->rfc);
   3367 	evcnt_detach(&stats->roc);
   3368 	evcnt_detach(&stats->rjc);
   3369 	evcnt_detach(&stats->mngprc);
   3370 	evcnt_detach(&stats->mngpdc);
   3371 	evcnt_detach(&stats->xec);
   3372 
   3373 	/* Packet Transmission Stats */
   3374 	evcnt_detach(&stats->gotc);
   3375 	evcnt_detach(&stats->tpt);
   3376 	evcnt_detach(&stats->gptc);
   3377 	evcnt_detach(&stats->bptc);
   3378 	evcnt_detach(&stats->mptc);
   3379 	evcnt_detach(&stats->mngptc);
   3380 	evcnt_detach(&stats->ptc64);
   3381 	evcnt_detach(&stats->ptc127);
   3382 	evcnt_detach(&stats->ptc255);
   3383 	evcnt_detach(&stats->ptc511);
   3384 	evcnt_detach(&stats->ptc1023);
   3385 	evcnt_detach(&stats->ptc1522);
   3386 
   3387 	ixgbe_free_transmit_structures(adapter);
   3388 	ixgbe_free_receive_structures(adapter);
   3389 	free(adapter->queues, M_DEVBUF);
   3390 	free(adapter->mta, M_DEVBUF);
   3391 
   3392 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3393 
   3394 	return (0);
   3395 } /* ixgbe_detach */
   3396 
   3397 /************************************************************************
   3398  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3399  *
   3400  *   Prepare the adapter/port for LPLU and/or WoL
   3401  ************************************************************************/
   3402 static int
   3403 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3404 {
   3405 	struct ixgbe_hw *hw = &adapter->hw;
   3406 	device_t        dev = adapter->dev;
   3407 	s32             error = 0;
   3408 
   3409 	KASSERT(mutex_owned(&adapter->core_mtx));
   3410 
   3411 	/* Limit power management flow to X550EM baseT */
   3412 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3413 	    hw->phy.ops.enter_lplu) {
   3414 		/* X550EM baseT adapters need a special LPLU flow */
   3415 		hw->phy.reset_disable = true;
   3416 		ixgbe_stop(adapter);
   3417 		error = hw->phy.ops.enter_lplu(hw);
   3418 		if (error)
   3419 			device_printf(dev,
   3420 			    "Error entering LPLU: %d\n", error);
   3421 		hw->phy.reset_disable = false;
   3422 	} else {
   3423 		/* Just stop for other adapters */
   3424 		ixgbe_stop(adapter);
   3425 	}
   3426 
   3427 	if (!hw->wol_enabled) {
   3428 		ixgbe_set_phy_power(hw, FALSE);
   3429 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3430 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3431 	} else {
   3432 		/* Turn off support for APM wakeup. (Using ACPI instead) */
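		/*
		 * The literal 2 below is assumed to be the APM wakeup
		 * enable bit (IXGBE_GRC_APME) in the GRC register.
		 */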
   3433 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3434 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3435 
   3436 		/*
   3437 		 * Clear Wake Up Status register to prevent any previous wakeup
   3438 		 * events from waking us up immediately after we suspend.
   3439 		 */
   3440 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3441 
   3442 		/*
   3443 		 * Program the Wakeup Filter Control register with user filter
   3444 		 * settings
   3445 		 */
   3446 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3447 
   3448 		/* Enable wakeups and power management in Wakeup Control */
   3449 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3450 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3451 
   3452 	}
   3453 
   3454 	return error;
   3455 } /* ixgbe_setup_low_power_mode */
   3456 
   3457 /************************************************************************
   3458  * ixgbe_shutdown - Shutdown entry point
   3459  ************************************************************************/
   3460 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3461 static int
   3462 ixgbe_shutdown(device_t dev)
   3463 {
   3464 	struct adapter *adapter = device_private(dev);
   3465 	int error = 0;
   3466 
   3467 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3468 
   3469 	IXGBE_CORE_LOCK(adapter);
   3470 	error = ixgbe_setup_low_power_mode(adapter);
   3471 	IXGBE_CORE_UNLOCK(adapter);
   3472 
   3473 	return (error);
   3474 } /* ixgbe_shutdown */
   3475 #endif
   3476 
   3477 /************************************************************************
   3478  * ixgbe_suspend
   3479  *
   3480  *   From D0 to D3
   3481  ************************************************************************/
   3482 static bool
   3483 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3484 {
   3485 	struct adapter *adapter = device_private(dev);
   3486 	int            error = 0;
   3487 
   3488 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3489 
   3490 	IXGBE_CORE_LOCK(adapter);
   3491 
   3492 	error = ixgbe_setup_low_power_mode(adapter);
   3493 
   3494 	IXGBE_CORE_UNLOCK(adapter);
   3495 
    3496 	return (error == 0);	/* pmf expects "true" on success */
   3497 } /* ixgbe_suspend */
   3498 
   3499 /************************************************************************
   3500  * ixgbe_resume
   3501  *
   3502  *   From D3 to D0
   3503  ************************************************************************/
   3504 static bool
   3505 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3506 {
   3507 	struct adapter  *adapter = device_private(dev);
   3508 	struct ifnet    *ifp = adapter->ifp;
   3509 	struct ixgbe_hw *hw = &adapter->hw;
   3510 	u32             wus;
   3511 
   3512 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3513 
   3514 	IXGBE_CORE_LOCK(adapter);
   3515 
   3516 	/* Read & clear WUS register */
   3517 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3518 	if (wus)
   3519 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3520 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3521 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3522 	/* And clear WUFC until next low-power transition */
   3523 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3524 
   3525 	/*
   3526 	 * Required after D3->D0 transition;
   3527 	 * will re-advertise all previous advertised speeds
   3528 	 */
   3529 	if (ifp->if_flags & IFF_UP)
   3530 		ixgbe_init_locked(adapter);
   3531 
   3532 	IXGBE_CORE_UNLOCK(adapter);
   3533 
   3534 	return true;
   3535 } /* ixgbe_resume */
   3536 
   3537 /*
   3538  * Set the various hardware offload abilities.
   3539  *
   3540  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3541  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3542  * mbuf offload flags the driver will understand.
   3543  */
   3544 static void
   3545 ixgbe_set_if_hwassist(struct adapter *adapter)
   3546 {
   3547 	/* XXX */
   3548 }
   3549 
   3550 /************************************************************************
   3551  * ixgbe_init_locked - Init entry point
   3552  *
   3553  *   Used in two ways: It is used by the stack as an init
    3554  *   entry point in the network interface structure. It is also
   3555  *   used by the driver as a hw/sw initialization routine to
   3556  *   get to a consistent state.
   3557  *
   3558  *   return 0 on success, positive on failure
    3559  *   Returns nothing; on failure it stops the adapter and returns early.
   3560 static void
   3561 ixgbe_init_locked(struct adapter *adapter)
   3562 {
   3563 	struct ifnet   *ifp = adapter->ifp;
   3564 	device_t 	dev = adapter->dev;
   3565 	struct ixgbe_hw *hw = &adapter->hw;
   3566 	struct tx_ring  *txr;
   3567 	struct rx_ring  *rxr;
   3568 	u32		txdctl, mhadd;
   3569 	u32		rxdctl, rxctrl;
   3570 	u32             ctrl_ext;
   3571 	int             err = 0;
   3572 
   3573 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3574 
   3575 	KASSERT(mutex_owned(&adapter->core_mtx));
   3576 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3577 
   3578 	hw->adapter_stopped = FALSE;
   3579 	ixgbe_stop_adapter(hw);
    3580 	callout_stop(&adapter->timer);
   3581 
   3582 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3583 	adapter->max_frame_size =
   3584 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3585 
   3586 	/* Queue indices may change with IOV mode */
   3587 	ixgbe_align_all_queue_indices(adapter);
   3588 
   3589 	/* reprogram the RAR[0] in case user changed it. */
   3590 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3591 
   3592 	/* Get the latest mac address, User can use a LAA */
   3593 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3594 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3595 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3596 	hw->addr_ctrl.rar_used_count = 1;
   3597 
   3598 	/* Set hardware offload abilities from ifnet flags */
   3599 	ixgbe_set_if_hwassist(adapter);
   3600 
   3601 	/* Prepare transmit descriptors and buffers */
   3602 	if (ixgbe_setup_transmit_structures(adapter)) {
   3603 		device_printf(dev, "Could not setup transmit structures\n");
   3604 		ixgbe_stop(adapter);
   3605 		return;
   3606 	}
   3607 
   3608 	ixgbe_init_hw(hw);
   3609 	ixgbe_initialize_iov(adapter);
   3610 	ixgbe_initialize_transmit_units(adapter);
   3611 
   3612 	/* Setup Multicast table */
   3613 	ixgbe_set_multi(adapter);
   3614 
   3615 	/* Determine the correct mbuf pool, based on frame size */
   3616 	if (adapter->max_frame_size <= MCLBYTES)
   3617 		adapter->rx_mbuf_sz = MCLBYTES;
   3618 	else
   3619 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3620 
   3621 	/* Prepare receive descriptors and buffers */
   3622 	if (ixgbe_setup_receive_structures(adapter)) {
   3623 		device_printf(dev, "Could not setup receive structures\n");
   3624 		ixgbe_stop(adapter);
   3625 		return;
   3626 	}
   3627 
   3628 	/* Configure RX settings */
   3629 	ixgbe_initialize_receive_units(adapter);
   3630 
   3631 	/* Enable SDP & MSI-X interrupts based on adapter */
   3632 	ixgbe_config_gpie(adapter);
   3633 
   3634 	/* Set MTU size */
   3635 	if (ifp->if_mtu > ETHERMTU) {
   3636 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3637 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3638 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3639 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3640 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3641 	}
   3642 
   3643 	/* Now enable all the queues */
   3644 	for (int i = 0; i < adapter->num_queues; i++) {
   3645 		txr = &adapter->tx_rings[i];
   3646 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3647 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3648 		/* Set WTHRESH to 8, burst writeback */
   3649 		txdctl |= (8 << 16);
   3650 		/*
   3651 		 * When the internal queue falls below PTHRESH (32),
   3652 		 * start prefetching as long as there are at least
   3653 		 * HTHRESH (1) buffers ready. The values are taken
   3654 		 * from the Intel linux driver 3.8.21.
   3655 		 * Prefetching enables tx line rate even with 1 queue.
   3656 		 */
   3657 		txdctl |= (32 << 0) | (1 << 8);
   3658 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3659 	}
   3660 
   3661 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3662 		rxr = &adapter->rx_rings[i];
   3663 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3664 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3665 			/*
   3666 			 * PTHRESH = 21
   3667 			 * HTHRESH = 4
   3668 			 * WTHRESH = 8
   3669 			 */
   3670 			rxdctl &= ~0x3FFFFF;
   3671 			rxdctl |= 0x080420;
   3672 		}
   3673 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3674 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3675 		for (; j < 10; j++) {
   3676 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3677 			    IXGBE_RXDCTL_ENABLE)
   3678 				break;
   3679 			else
   3680 				msec_delay(1);
   3681 		}
   3682 		wmb();
   3683 
   3684 		/*
   3685 		 * In netmap mode, we must preserve the buffers made
   3686 		 * available to userspace before the if_init()
   3687 		 * (this is true by default on the TX side, because
   3688 		 * init makes all buffers available to userspace).
   3689 		 *
   3690 		 * netmap_reset() and the device specific routines
   3691 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3692 		 * buffers at the end of the NIC ring, so here we
   3693 		 * must set the RDT (tail) register to make sure
   3694 		 * they are not overwritten.
   3695 		 *
   3696 		 * In this driver the NIC ring starts at RDH = 0,
   3697 		 * RDT points to the last slot available for reception (?),
   3698 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3699 		 */
   3700 #ifdef DEV_NETMAP
   3701 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3702 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3703 			struct netmap_adapter *na = NA(adapter->ifp);
   3704 			struct netmap_kring *kring = &na->rx_rings[i];
   3705 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3706 
   3707 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3708 		} else
   3709 #endif /* DEV_NETMAP */
   3710 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3711 			    adapter->num_rx_desc - 1);
   3712 	}
   3713 
   3714 	/* Enable Receive engine */
   3715 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3716 	if (hw->mac.type == ixgbe_mac_82598EB)
   3717 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3718 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3719 	ixgbe_enable_rx_dma(hw, rxctrl);
   3720 
   3721 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3722 
   3723 	/* Set up MSI-X routing */
   3724 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3725 		ixgbe_configure_ivars(adapter);
   3726 		/* Set up auto-mask */
   3727 		if (hw->mac.type == ixgbe_mac_82598EB)
   3728 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3729 		else {
   3730 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3731 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3732 		}
   3733 	} else {  /* Simple settings for Legacy/MSI */
   3734 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3735 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3736 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3737 	}
   3738 
   3739 	ixgbe_init_fdir(adapter);
   3740 
   3741 	/*
   3742 	 * Check on any SFP devices that
   3743 	 * need to be kick-started
   3744 	 */
   3745 	if (hw->phy.type == ixgbe_phy_none) {
   3746 		err = hw->phy.ops.identify(hw);
   3747 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3748 			device_printf(dev,
    3749 			    "Unsupported SFP+ module type was detected.\n");
    3750 			return;
    3751 		}
   3752 	}
   3753 
   3754 	/* Set moderation on the Link interrupt */
   3755 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3756 
   3757 	/* Config/Enable Link */
   3758 	ixgbe_config_link(adapter);
   3759 
   3760 	/* Hardware Packet Buffer & Flow Control setup */
   3761 	ixgbe_config_delay_values(adapter);
   3762 
   3763 	/* Initialize the FC settings */
   3764 	ixgbe_start_hw(hw);
   3765 
   3766 	/* Set up VLAN support and filter */
   3767 	ixgbe_setup_vlan_hw_support(adapter);
   3768 
   3769 	/* Setup DMA Coalescing */
   3770 	ixgbe_config_dmac(adapter);
   3771 
   3772 	/* And now turn on interrupts */
   3773 	ixgbe_enable_intr(adapter);
   3774 
   3775 	/* Enable the use of the MBX by the VF's */
   3776 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3777 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3778 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3779 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3780 	}
   3781 
   3782 	/* Now inform the stack we're ready */
   3783 	ifp->if_flags |= IFF_RUNNING;
   3784 
   3785 	return;
   3786 } /* ixgbe_init_locked */
   3787 
   3788 /************************************************************************
   3789  * ixgbe_init
   3790  ************************************************************************/
   3791 static int
   3792 ixgbe_init(struct ifnet *ifp)
   3793 {
   3794 	struct adapter *adapter = ifp->if_softc;
   3795 
   3796 	IXGBE_CORE_LOCK(adapter);
   3797 	ixgbe_init_locked(adapter);
   3798 	IXGBE_CORE_UNLOCK(adapter);
   3799 
   3800 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3801 } /* ixgbe_init */
   3802 
   3803 /************************************************************************
   3804  * ixgbe_set_ivar
   3805  *
   3806  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3807  *     (yes this is all very magic and confusing :)
   3808  *    - entry is the register array entry
   3809  *    - vector is the MSI-X vector for this queue
   3810  *    - type is RX/TX/MISC
   3811  ************************************************************************/
   3812 static void
   3813 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3814 {
   3815 	struct ixgbe_hw *hw = &adapter->hw;
   3816 	u32 ivar, index;
   3817 
   3818 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3819 
   3820 	switch (hw->mac.type) {
   3821 
   3822 	case ixgbe_mac_82598EB:
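		/*
		 * 82598: TX entries sit 64 slots above the RX entries, and
		 * each 32-bit IVAR register packs four 8-bit vector fields,
		 * hence the entry >> 2 / (entry & 0x3) arithmetic below.
		 */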
   3823 		if (type == -1)
   3824 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3825 		else
   3826 			entry += (type * 64);
   3827 		index = (entry >> 2) & 0x1F;
   3828 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3829 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3830 		ivar |= (vector << (8 * (entry & 0x3)));
   3831 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3832 		break;
   3833 
   3834 	case ixgbe_mac_82599EB:
   3835 	case ixgbe_mac_X540:
   3836 	case ixgbe_mac_X550:
   3837 	case ixgbe_mac_X550EM_x:
   3838 	case ixgbe_mac_X550EM_a:
   3839 		if (type == -1) { /* MISC IVAR */
   3840 			index = (entry & 1) * 8;
   3841 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3842 			ivar &= ~(0xFF << index);
   3843 			ivar |= (vector << index);
   3844 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3845 		} else {	/* RX/TX IVARS */
   3846 			index = (16 * (entry & 1)) + (8 * type);
   3847 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3848 			ivar &= ~(0xFF << index);
   3849 			ivar |= (vector << index);
   3850 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    3851 		}
		break;
    3852 
   3853 	default:
   3854 		break;
   3855 	}
   3856 } /* ixgbe_set_ivar */
   3857 
   3858 /************************************************************************
   3859  * ixgbe_configure_ivars
   3860  ************************************************************************/
   3861 static void
   3862 ixgbe_configure_ivars(struct adapter *adapter)
   3863 {
   3864 	struct ix_queue *que = adapter->queues;
   3865 	u32             newitr;
   3866 
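	/*
	 * (4000000 / rate) & 0x0FF8 drops the interval straight into EITR
	 * bits [11:3]; see ixgbe_sysctl_interrupt_rate_handler for the
	 * matching read-back conversion.
	 */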
   3867 	if (ixgbe_max_interrupt_rate > 0)
   3868 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3869 	else {
   3870 		/*
   3871 		 * Disable DMA coalescing if interrupt moderation is
   3872 		 * disabled.
   3873 		 */
   3874 		adapter->dmac = 0;
   3875 		newitr = 0;
   3876 	}
   3877 
    3878 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3879 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3880 		struct tx_ring *txr = &adapter->tx_rings[i];
   3881 		/* First the RX queue entry */
    3882 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3883 		/* ... and the TX */
   3884 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3885 		/* Set an Initial EITR value */
   3886 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3887 	}
   3888 
   3889 	/* For the Link interrupt */
    3890 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3891 } /* ixgbe_configure_ivars */
   3892 
   3893 /************************************************************************
   3894  * ixgbe_config_gpie
   3895  ************************************************************************/
   3896 static void
   3897 ixgbe_config_gpie(struct adapter *adapter)
   3898 {
   3899 	struct ixgbe_hw *hw = &adapter->hw;
   3900 	u32             gpie;
   3901 
   3902 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3903 
   3904 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3905 		/* Enable Enhanced MSI-X mode */
   3906 		gpie |= IXGBE_GPIE_MSIX_MODE
   3907 		     |  IXGBE_GPIE_EIAME
   3908 		     |  IXGBE_GPIE_PBA_SUPPORT
   3909 		     |  IXGBE_GPIE_OCD;
   3910 	}
   3911 
   3912 	/* Fan Failure Interrupt */
   3913 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3914 		gpie |= IXGBE_SDP1_GPIEN;
   3915 
   3916 	/* Thermal Sensor Interrupt */
   3917 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3918 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3919 
   3920 	/* Link detection */
   3921 	switch (hw->mac.type) {
   3922 	case ixgbe_mac_82599EB:
   3923 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3924 		break;
   3925 	case ixgbe_mac_X550EM_x:
   3926 	case ixgbe_mac_X550EM_a:
   3927 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3928 		break;
   3929 	default:
   3930 		break;
   3931 	}
   3932 
   3933 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3934 
   3935 	return;
   3936 } /* ixgbe_config_gpie */
   3937 
   3938 /************************************************************************
   3939  * ixgbe_config_delay_values
   3940  *
   3941  *   Requires adapter->max_frame_size to be set.
   3942  ************************************************************************/
   3943 static void
   3944 ixgbe_config_delay_values(struct adapter *adapter)
   3945 {
   3946 	struct ixgbe_hw *hw = &adapter->hw;
   3947 	u32             rxpb, frame, size, tmp;
   3948 
   3949 	frame = adapter->max_frame_size;
   3950 
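	/*
	 * High water = RX packet buffer size (in KB) minus the worst-case
	 * buffering delay for this frame size; low water comes from the
	 * corresponding LOW_DV macro.  Both feed the flow control logic.
	 */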
   3951 	/* Calculate High Water */
   3952 	switch (hw->mac.type) {
   3953 	case ixgbe_mac_X540:
   3954 	case ixgbe_mac_X550:
   3955 	case ixgbe_mac_X550EM_x:
   3956 	case ixgbe_mac_X550EM_a:
   3957 		tmp = IXGBE_DV_X540(frame, frame);
   3958 		break;
   3959 	default:
   3960 		tmp = IXGBE_DV(frame, frame);
   3961 		break;
   3962 	}
   3963 	size = IXGBE_BT2KB(tmp);
   3964 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   3965 	hw->fc.high_water[0] = rxpb - size;
   3966 
   3967 	/* Now calculate Low Water */
   3968 	switch (hw->mac.type) {
   3969 	case ixgbe_mac_X540:
   3970 	case ixgbe_mac_X550:
   3971 	case ixgbe_mac_X550EM_x:
   3972 	case ixgbe_mac_X550EM_a:
   3973 		tmp = IXGBE_LOW_DV_X540(frame);
   3974 		break;
   3975 	default:
   3976 		tmp = IXGBE_LOW_DV(frame);
   3977 		break;
   3978 	}
   3979 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3980 
   3981 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3982 	hw->fc.send_xon = TRUE;
   3983 } /* ixgbe_config_delay_values */
   3984 
   3985 /************************************************************************
   3986  * ixgbe_set_multi - Multicast Update
   3987  *
   3988  *   Called whenever multicast address list is updated.
   3989  ************************************************************************/
   3990 static void
   3991 ixgbe_set_multi(struct adapter *adapter)
   3992 {
   3993 	struct ixgbe_mc_addr	*mta;
   3994 	struct ifnet		*ifp = adapter->ifp;
   3995 	u8			*update_ptr;
   3996 	int			mcnt = 0;
   3997 	u32			fctrl;
   3998 	struct ethercom		*ec = &adapter->osdep.ec;
   3999 	struct ether_multi	*enm;
   4000 	struct ether_multistep	step;
   4001 
   4002 	KASSERT(mutex_owned(&adapter->core_mtx));
   4003 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4004 
   4005 	mta = adapter->mta;
   4006 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4007 
   4008 	ifp->if_flags &= ~IFF_ALLMULTI;
   4009 	ETHER_LOCK(ec);
   4010 	ETHER_FIRST_MULTI(step, ec, enm);
   4011 	while (enm != NULL) {
   4012 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4013 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4014 			ETHER_ADDR_LEN) != 0)) {
   4015 			ifp->if_flags |= IFF_ALLMULTI;
   4016 			break;
   4017 		}
   4018 		bcopy(enm->enm_addrlo,
   4019 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4020 		mta[mcnt].vmdq = adapter->pool;
   4021 		mcnt++;
   4022 		ETHER_NEXT_MULTI(step, enm);
   4023 	}
   4024 	ETHER_UNLOCK(ec);
   4025 
   4026 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4027 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4028 	if (ifp->if_flags & IFF_PROMISC)
   4029 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4030 	else if (ifp->if_flags & IFF_ALLMULTI) {
   4031 		fctrl |= IXGBE_FCTRL_MPE;
   4032 	}
   4033 
   4034 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4035 
   4036 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4037 		update_ptr = (u8 *)mta;
   4038 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4039 		    ixgbe_mc_array_itr, TRUE);
   4040 	}
   4041 
   4042 	return;
   4043 } /* ixgbe_set_multi */
   4044 
   4045 /************************************************************************
   4046  * ixgbe_mc_array_itr
   4047  *
   4048  *   An iterator function needed by the multicast shared code.
   4049  *   It feeds the shared code routine the addresses in the
   4050  *   array of ixgbe_set_multi() one by one.
   4051  ************************************************************************/
   4052 static u8 *
   4053 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4054 {
   4055 	struct ixgbe_mc_addr *mta;
   4056 
   4057 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4058 	*vmdq = mta->vmdq;
   4059 
   4060 	*update_ptr = (u8*)(mta + 1);
   4061 
   4062 	return (mta->addr);
   4063 } /* ixgbe_mc_array_itr */
   4064 
   4065 /************************************************************************
   4066  * ixgbe_local_timer - Timer routine
   4067  *
   4068  *   Checks for link status, updates statistics,
   4069  *   and runs the watchdog check.
   4070  ************************************************************************/
   4071 static void
   4072 ixgbe_local_timer(void *arg)
   4073 {
   4074 	struct adapter *adapter = arg;
   4075 
   4076 	IXGBE_CORE_LOCK(adapter);
   4077 	ixgbe_local_timer1(adapter);
   4078 	IXGBE_CORE_UNLOCK(adapter);
   4079 }
   4080 
   4081 static void
   4082 ixgbe_local_timer1(void *arg)
   4083 {
   4084 	struct adapter	*adapter = arg;
   4085 	device_t	dev = adapter->dev;
   4086 	struct ix_queue *que = adapter->queues;
   4087 	u64		queues = 0;
   4088 	int		hung = 0;
   4089 
   4090 	KASSERT(mutex_owned(&adapter->core_mtx));
   4091 
   4092 	/* Check for pluggable optics */
   4093 	if (adapter->sfp_probe)
   4094 		if (!ixgbe_sfp_probe(adapter))
   4095 			goto out; /* Nothing to do */
   4096 
   4097 	ixgbe_update_link_status(adapter);
   4098 	ixgbe_update_stats_counters(adapter);
   4099 
   4100 	/*
   4101 	 * Check the TX queues status
   4102 	 *      - mark hung queues so we don't schedule on them
   4103 	 *      - watchdog only if all queues show hung
   4104 	 */
   4105 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4106 		/* Keep track of queues with work for soft irq */
   4107 		if (que->txr->busy)
   4108 			queues |= ((u64)1 << que->me);
   4109 		/*
    4110 		 * Each time txeof runs without cleaning while there
    4111 		 * are still uncleaned descriptors, it increments busy.
    4112 		 * If we reach the MAX we declare the queue hung.
   4113 		 */
   4114 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4115 			++hung;
   4116 			/* Mark the queue as inactive */
   4117 			adapter->active_queues &= ~((u64)1 << que->me);
   4118 			continue;
   4119 		} else {
   4120 			/* Check if we've come back from hung */
   4121 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4122 				adapter->active_queues |= ((u64)1 << que->me);
   4123 		}
   4124 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4125 			device_printf(dev,
    4126 			    "Warning: queue %d appears to be hung!\n", i);
   4127 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4128 			++hung;
   4129 		}
   4130 	}
   4131 
    4132 	/* Only trigger the watchdog if all queues show hung */
   4133 	if (hung == adapter->num_queues)
   4134 		goto watchdog;
   4135 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4136 		ixgbe_rearm_queues(adapter, queues);
   4137 	}
   4138 
   4139 out:
   4140 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4141 	return;
   4142 
   4143 watchdog:
   4144 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4145 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4146 	adapter->watchdog_events.ev_count++;
   4147 	ixgbe_init_locked(adapter);
   4148 } /* ixgbe_local_timer */
   4149 
   4150 /************************************************************************
   4151  * ixgbe_sfp_probe
   4152  *
   4153  *   Determine if a port had optics inserted.
   4154  ************************************************************************/
   4155 static bool
   4156 ixgbe_sfp_probe(struct adapter *adapter)
   4157 {
   4158 	struct ixgbe_hw	*hw = &adapter->hw;
   4159 	device_t	dev = adapter->dev;
   4160 	bool		result = FALSE;
   4161 
   4162 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4163 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4164 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4165 		if (ret)
   4166 			goto out;
   4167 		ret = hw->phy.ops.reset(hw);
   4168 		adapter->sfp_probe = FALSE;
   4169 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4170 			device_printf(dev, "Unsupported SFP+ module detected!\n");
    4171 			device_printf(dev,
    4172 			    "Reload driver with supported module.\n");
    4173 			goto out;
   4174 		} else
   4175 			device_printf(dev, "SFP+ module detected!\n");
   4176 		/* We now have supported optics */
   4177 		result = TRUE;
   4178 	}
   4179 out:
   4180 
   4181 	return (result);
   4182 } /* ixgbe_sfp_probe */
   4183 
   4184 /************************************************************************
   4185  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4186  ************************************************************************/
   4187 static void
   4188 ixgbe_handle_mod(void *context)
   4189 {
   4190 	struct adapter  *adapter = context;
   4191 	struct ixgbe_hw *hw = &adapter->hw;
   4192 	device_t	dev = adapter->dev;
   4193 	u32             err, cage_full = 0;
   4194 
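	/*
	 * Adapters needing the crosstalk workaround first check the SDP
	 * cage-presence pin, so a module is only identified over I2C when
	 * one is actually seated.
	 */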
   4195 	if (adapter->hw.need_crosstalk_fix) {
   4196 		switch (hw->mac.type) {
   4197 		case ixgbe_mac_82599EB:
   4198 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4199 			    IXGBE_ESDP_SDP2;
   4200 			break;
   4201 		case ixgbe_mac_X550EM_x:
   4202 		case ixgbe_mac_X550EM_a:
   4203 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4204 			    IXGBE_ESDP_SDP0;
   4205 			break;
   4206 		default:
   4207 			break;
   4208 		}
   4209 
   4210 		if (!cage_full)
   4211 			return;
   4212 	}
   4213 
   4214 	err = hw->phy.ops.identify_sfp(hw);
   4215 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4216 		device_printf(dev,
   4217 		    "Unsupported SFP+ module type was detected.\n");
   4218 		return;
   4219 	}
   4220 
   4221 	err = hw->mac.ops.setup_sfp(hw);
   4222 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4223 		device_printf(dev,
   4224 		    "Setup failure - unsupported SFP+ module type.\n");
   4225 		return;
   4226 	}
   4227 	softint_schedule(adapter->msf_si);
   4228 } /* ixgbe_handle_mod */
   4229 
   4230 
   4231 /************************************************************************
   4232  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4233  ************************************************************************/
   4234 static void
   4235 ixgbe_handle_msf(void *context)
   4236 {
   4237 	struct adapter  *adapter = context;
   4238 	struct ixgbe_hw *hw = &adapter->hw;
   4239 	u32             autoneg;
   4240 	bool            negotiate;
   4241 
   4242 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4243 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4244 
   4245 	autoneg = hw->phy.autoneg_advertised;
   4246 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4247 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4248 	else
   4249 		negotiate = 0;
   4250 	if (hw->mac.ops.setup_link)
   4251 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4252 
   4253 	/* Adjust media types shown in ifconfig */
   4254 	ifmedia_removeall(&adapter->media);
   4255 	ixgbe_add_media_types(adapter);
   4256 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4257 } /* ixgbe_handle_msf */
   4258 
   4259 /************************************************************************
   4260  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4261  ************************************************************************/
   4262 static void
   4263 ixgbe_handle_phy(void *context)
   4264 {
   4265 	struct adapter  *adapter = context;
   4266 	struct ixgbe_hw *hw = &adapter->hw;
   4267 	int error;
   4268 
   4269 	error = hw->phy.ops.handle_lasi(hw);
   4270 	if (error == IXGBE_ERR_OVERTEMP)
   4271 		device_printf(adapter->dev,
    4272 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
    4273 		    "PHY will downshift to lower power state!\n");
   4274 	else if (error)
   4275 		device_printf(adapter->dev,
   4276 		    "Error handling LASI interrupt: %d\n", error);
   4277 } /* ixgbe_handle_phy */
   4278 
   4279 static void
   4280 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4281 {
   4282 	struct adapter *adapter = ifp->if_softc;
   4283 
   4284 	IXGBE_CORE_LOCK(adapter);
   4285 	ixgbe_stop(adapter);
   4286 	IXGBE_CORE_UNLOCK(adapter);
   4287 }
   4288 
   4289 /************************************************************************
   4290  * ixgbe_stop - Stop the hardware
   4291  *
   4292  *   Disables all traffic on the adapter by issuing a
   4293  *   global reset on the MAC and deallocates TX/RX buffers.
   4294  ************************************************************************/
   4295 static void
   4296 ixgbe_stop(void *arg)
   4297 {
   4298 	struct ifnet    *ifp;
   4299 	struct adapter  *adapter = arg;
   4300 	struct ixgbe_hw *hw = &adapter->hw;
   4301 
   4302 	ifp = adapter->ifp;
   4303 
   4304 	KASSERT(mutex_owned(&adapter->core_mtx));
   4305 
   4306 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4307 	ixgbe_disable_intr(adapter);
   4308 	callout_stop(&adapter->timer);
   4309 
   4310 	/* Let the stack know...*/
   4311 	ifp->if_flags &= ~IFF_RUNNING;
   4312 
   4313 	ixgbe_reset_hw(hw);
   4314 	hw->adapter_stopped = FALSE;
   4315 	ixgbe_stop_adapter(hw);
   4316 	if (hw->mac.type == ixgbe_mac_82599EB)
   4317 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4318 	/* Turn off the laser - noop with no optics */
   4319 	ixgbe_disable_tx_laser(hw);
   4320 
   4321 	/* Update the stack */
   4322 	adapter->link_up = FALSE;
   4323 	ixgbe_update_link_status(adapter);
   4324 
   4325 	/* reprogram the RAR[0] in case user changed it. */
   4326 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4327 
   4328 	return;
   4329 } /* ixgbe_stop */
   4330 
   4331 /************************************************************************
   4332  * ixgbe_update_link_status - Update OS on link state
   4333  *
   4334  * Note: Only updates the OS on the cached link state.
   4335  *       The real check of the hardware only happens with
   4336  *       a link interrupt.
   4337  ************************************************************************/
   4338 static void
   4339 ixgbe_update_link_status(struct adapter *adapter)
   4340 {
   4341 	struct ifnet	*ifp = adapter->ifp;
   4342 	device_t        dev = adapter->dev;
   4343 	struct ixgbe_hw *hw = &adapter->hw;
   4344 
   4345 	if (adapter->link_up) {
   4346 		if (adapter->link_active == FALSE) {
   4347 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4348 				/*
   4349 				 * Discard the counts of both MAC Local Fault
   4350 				 * and MAC Remote Fault because those registers
   4351 				 * are valid only when the link is up at
   4352 				 * 10Gbps.
   4353 				 */
   4354 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4355 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4356 			}
   4357 
   4358 			if (bootverbose) {
   4359 				const char *bpsmsg;
   4360 
   4361 				switch (adapter->link_speed) {
   4362 				case IXGBE_LINK_SPEED_10GB_FULL:
   4363 					bpsmsg = "10 Gbps";
   4364 					break;
   4365 				case IXGBE_LINK_SPEED_5GB_FULL:
   4366 					bpsmsg = "5 Gbps";
   4367 					break;
   4368 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4369 					bpsmsg = "2.5 Gbps";
   4370 					break;
   4371 				case IXGBE_LINK_SPEED_1GB_FULL:
   4372 					bpsmsg = "1 Gbps";
   4373 					break;
   4374 				case IXGBE_LINK_SPEED_100_FULL:
   4375 					bpsmsg = "100 Mbps";
   4376 					break;
   4377 				case IXGBE_LINK_SPEED_10_FULL:
   4378 					bpsmsg = "10 Mbps";
   4379 					break;
   4380 				default:
   4381 					bpsmsg = "unknown speed";
   4382 					break;
   4383 				}
   4384 				device_printf(dev, "Link is up %s %s \n",
   4385 				    bpsmsg, "Full Duplex");
   4386 			}
   4387 			adapter->link_active = TRUE;
   4388 			/* Update any Flow Control changes */
   4389 			ixgbe_fc_enable(&adapter->hw);
   4390 			/* Update DMA coalescing config */
   4391 			ixgbe_config_dmac(adapter);
   4392 			if_link_state_change(ifp, LINK_STATE_UP);
   4393 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4394 				ixgbe_ping_all_vfs(adapter);
   4395 		}
   4396 	} else { /* Link down */
   4397 		if (adapter->link_active == TRUE) {
   4398 			if (bootverbose)
   4399 				device_printf(dev, "Link is Down\n");
   4400 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4401 			adapter->link_active = FALSE;
   4402 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4403 				ixgbe_ping_all_vfs(adapter);
   4404 		}
   4405 	}
   4406 
   4407 	return;
   4408 } /* ixgbe_update_link_status */
   4409 
   4410 /************************************************************************
   4411  * ixgbe_config_dmac - Configure DMA Coalescing
   4412  ************************************************************************/
   4413 static void
   4414 ixgbe_config_dmac(struct adapter *adapter)
   4415 {
   4416 	struct ixgbe_hw *hw = &adapter->hw;
   4417 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4418 
   4419 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4420 		return;
   4421 
   4422 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4423 	    dcfg->link_speed ^ adapter->link_speed) {
   4424 		dcfg->watchdog_timer = adapter->dmac;
   4425 		dcfg->fcoe_en = false;
   4426 		dcfg->link_speed = adapter->link_speed;
   4427 		dcfg->num_tcs = 1;
   4428 
   4429 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4430 		    dcfg->watchdog_timer, dcfg->link_speed);
   4431 
   4432 		hw->mac.ops.dmac_config(hw);
   4433 	}
   4434 } /* ixgbe_config_dmac */
   4435 
   4436 /************************************************************************
   4437  * ixgbe_enable_intr
   4438  ************************************************************************/
   4439 static void
   4440 ixgbe_enable_intr(struct adapter *adapter)
   4441 {
   4442 	struct ixgbe_hw	*hw = &adapter->hw;
   4443 	struct ix_queue	*que = adapter->queues;
   4444 	u32		mask, fwsm;
   4445 
   4446 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4447 
   4448 	switch (adapter->hw.mac.type) {
   4449 	case ixgbe_mac_82599EB:
   4450 		mask |= IXGBE_EIMS_ECC;
   4451 		/* Temperature sensor on some adapters */
   4452 		mask |= IXGBE_EIMS_GPI_SDP0;
   4453 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4454 		mask |= IXGBE_EIMS_GPI_SDP1;
   4455 		mask |= IXGBE_EIMS_GPI_SDP2;
   4456 		break;
   4457 	case ixgbe_mac_X540:
   4458 		/* Detect if Thermal Sensor is enabled */
   4459 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4460 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4461 			mask |= IXGBE_EIMS_TS;
   4462 		mask |= IXGBE_EIMS_ECC;
   4463 		break;
   4464 	case ixgbe_mac_X550:
   4465 		/* MAC thermal sensor is automatically enabled */
   4466 		mask |= IXGBE_EIMS_TS;
   4467 		mask |= IXGBE_EIMS_ECC;
   4468 		break;
   4469 	case ixgbe_mac_X550EM_x:
   4470 	case ixgbe_mac_X550EM_a:
   4471 		/* Some devices use SDP0 for important information */
   4472 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4473 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4474 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4475 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4476 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4477 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4478 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4479 		mask |= IXGBE_EIMS_ECC;
   4480 		break;
   4481 	default:
   4482 		break;
   4483 	}
   4484 
   4485 	/* Enable Fan Failure detection */
   4486 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4487 		mask |= IXGBE_EIMS_GPI_SDP1;
   4488 	/* Enable SR-IOV */
   4489 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4490 		mask |= IXGBE_EIMS_MAILBOX;
   4491 	/* Enable Flow Director */
   4492 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4493 		mask |= IXGBE_EIMS_FLOW_DIR;
   4494 
   4495 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4496 
   4497 	/* With MSI-X we use auto clear */
   4498 	if (adapter->msix_mem) {
   4499 		mask = IXGBE_EIMS_ENABLE_MASK;
   4500 		/* Don't autoclear Link */
   4501 		mask &= ~IXGBE_EIMS_OTHER;
   4502 		mask &= ~IXGBE_EIMS_LSC;
   4503 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4504 			mask &= ~IXGBE_EIMS_MAILBOX;
   4505 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4506 	}
   4507 
   4508 	/*
   4509 	 * Now enable all queues; this is done separately to
   4510 	 * allow handling the extended (beyond 32) MSI-X
   4511 	 * vectors that can be used by 82599.
   4512 	 */
   4513 	for (int i = 0; i < adapter->num_queues; i++, que++)
   4514 		ixgbe_enable_queue(adapter, que->msix);
   4515 
   4516 	IXGBE_WRITE_FLUSH(hw);
   4517 
   4518 	return;
   4519 } /* ixgbe_enable_intr */
   4520 
   4521 /************************************************************************
   4522  * ixgbe_disable_intr
   4523  ************************************************************************/
   4524 static void
   4525 ixgbe_disable_intr(struct adapter *adapter)
   4526 {
   4527 	if (adapter->msix_mem)
   4528 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4529 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4530 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4531 	} else {
   4532 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4533 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4534 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4535 	}
   4536 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4537 
   4538 	return;
   4539 } /* ixgbe_disable_intr */
   4540 
   4541 /************************************************************************
   4542  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4543  ************************************************************************/
   4544 static int
   4545 ixgbe_legacy_irq(void *arg)
   4546 {
   4547 	struct ix_queue *que = arg;
   4548 	struct adapter	*adapter = que->adapter;
   4549 	struct ixgbe_hw	*hw = &adapter->hw;
   4550 	struct ifnet    *ifp = adapter->ifp;
   4551 	struct tx_ring	*txr = adapter->tx_rings;
   4552 	bool		more = false;
   4553 	u32             eicr, eicr_mask;
   4554 
   4555 	/* Silicon errata #26 on 82598 */
   4556 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4557 
   4558 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4559 
   4560 	adapter->stats.pf.legint.ev_count++;
   4561 	++que->irqs.ev_count;
   4562 	if (eicr == 0) {
   4563 		adapter->stats.pf.intzero.ev_count++;
   4564 		if ((ifp->if_flags & IFF_UP) != 0)
   4565 			ixgbe_enable_intr(adapter);
   4566 		return 0;
   4567 	}
   4568 
   4569 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4570 #ifdef __NetBSD__
   4571 		/* Don't run ixgbe_rxeof in interrupt context */
   4572 		more = true;
   4573 #else
   4574 		more = ixgbe_rxeof(que);
   4575 #endif
   4576 
   4577 		IXGBE_TX_LOCK(txr);
   4578 		ixgbe_txeof(txr);
   4579 #ifdef notyet
   4580 		if (!ixgbe_ring_empty(ifp, txr->br))
   4581 			ixgbe_start_locked(ifp, txr);
   4582 #endif
   4583 		IXGBE_TX_UNLOCK(txr);
   4584 	}
   4585 
   4586 	/* Check for fan failure */
   4587 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4588 		ixgbe_check_fan_failure(adapter, eicr, true);
   4589 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4590 	}
   4591 
   4592 	/* Link status change */
   4593 	if (eicr & IXGBE_EICR_LSC)
   4594 		softint_schedule(adapter->link_si);
   4595 
   4596 	if (ixgbe_is_sfp(hw)) {
   4597 		/* Pluggable optics-related interrupt */
   4598 		if (hw->mac.type >= ixgbe_mac_X540)
   4599 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4600 		else
   4601 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4602 
   4603 		if (eicr & eicr_mask) {
   4604 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4605 			softint_schedule(adapter->mod_si);
   4606 		}
   4607 
   4608 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4609 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4610 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4611 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4612 			softint_schedule(adapter->msf_si);
   4613 		}
   4614 	}
   4615 
   4616 	/* External PHY interrupt */
   4617 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4618 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4619 		softint_schedule(adapter->phy_si);
   4620 
   4621 	if (more)
   4622 		softint_schedule(que->que_si);
   4623 	else
   4624 		ixgbe_enable_intr(adapter);
   4625 
   4626 	return 1;
   4627 } /* ixgbe_legacy_irq */
   4628 
   4629 /************************************************************************
   4630  * ixgbe_free_pciintr_resources
   4631  ************************************************************************/
   4632 static void
   4633 ixgbe_free_pciintr_resources(struct adapter *adapter)
   4634 {
   4635 	struct ix_queue *que = adapter->queues;
   4636 	int		rid;
   4637 
   4638 	/*
   4639 	 * Release all msix queue resources:
   4640 	 */
   4641 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4642 		if (que->res != NULL) {
   4643 			pci_intr_disestablish(adapter->osdep.pc,
   4644 			    adapter->osdep.ihs[i]);
   4645 			adapter->osdep.ihs[i] = NULL;
   4646 		}
   4647 	}
   4648 
   4649 	/* Clean the Legacy or Link interrupt last */
   4650 	if (adapter->vector) /* we are doing MSIX */
   4651 		rid = adapter->vector;
   4652 	else
   4653 		rid = 0;
   4654 
   4655 	if (adapter->osdep.ihs[rid] != NULL) {
   4656 		pci_intr_disestablish(adapter->osdep.pc,
   4657 		    adapter->osdep.ihs[rid]);
   4658 		adapter->osdep.ihs[rid] = NULL;
   4659 	}
   4660 
   4661 	if (adapter->osdep.intrs != NULL) {
   4662 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4663 		    adapter->osdep.nintrs);
   4664 		adapter->osdep.intrs = NULL;
   4665 	}
   4666 
   4667 	return;
   4668 } /* ixgbe_free_pciintr_resources */
   4669 
   4670 /************************************************************************
   4671  * ixgbe_free_pci_resources
   4672  ************************************************************************/
   4673 static void
   4674 ixgbe_free_pci_resources(struct adapter *adapter)
   4675 {
   4676 
   4677 	ixgbe_free_pciintr_resources(adapter);
   4678 
   4679 	if (adapter->osdep.mem_size != 0) {
   4680 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4681 		    adapter->osdep.mem_bus_space_handle,
   4682 		    adapter->osdep.mem_size);
   4683 	}
   4684 
   4685 	return;
   4686 } /* ixgbe_free_pci_resources */
   4687 
   4688 /************************************************************************
   4689  * ixgbe_set_sysctl_value
   4690  ************************************************************************/
   4691 static void
   4692 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4693     const char *description, int *limit, int value)
   4694 {
   4695 	device_t dev = adapter->dev;
   4696 	struct sysctllog **log;
   4697 	const struct sysctlnode *rnode, *cnode;
   4698 
   4699 	log = &adapter->sysctllog;
   4700 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4701 		aprint_error_dev(dev, "could not create sysctl root\n");
   4702 		return;
   4703 	}
   4704 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4705 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4706 	    name, SYSCTL_DESCR(description),
   4707 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4708 		aprint_error_dev(dev, "could not create sysctl\n");
   4709 	*limit = value;
   4710 } /* ixgbe_set_sysctl_value */
   4711 
   4712 /************************************************************************
   4713  * ixgbe_sysctl_flowcntl
   4714  *
   4715  *   SYSCTL wrapper around setting Flow Control
   4716  ************************************************************************/
   4717 static int
   4718 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4719 {
   4720 	struct sysctlnode node = *rnode;
   4721 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4722 	int error, fc;
   4723 
   4724 	fc = adapter->hw.fc.current_mode;
   4725 	node.sysctl_data = &fc;
   4726 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4727 	if (error != 0 || newp == NULL)
   4728 		return error;
   4729 
   4730 	/* Don't bother if it's not changed */
   4731 	if (fc == adapter->hw.fc.current_mode)
   4732 		return (0);
   4733 
   4734 	return ixgbe_set_flowcntl(adapter, fc);
   4735 } /* ixgbe_sysctl_flowcntl */
   4736 
   4737 /************************************************************************
   4738  * ixgbe_set_flowcntl - Set flow control
   4739  *
   4740  *   Flow control values:
   4741  *     0 - off
   4742  *     1 - rx pause
   4743  *     2 - tx pause
   4744  *     3 - full
   4745  ************************************************************************/
   4746 static int
   4747 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4748 {
   4749 	switch (fc) {
   4750 	case ixgbe_fc_rx_pause:
   4751 	case ixgbe_fc_tx_pause:
   4752 	case ixgbe_fc_full:
   4753 		adapter->hw.fc.requested_mode = fc;
   4754 		if (adapter->num_queues > 1)
   4755 			ixgbe_disable_rx_drop(adapter);
   4756 		break;
   4757 	case ixgbe_fc_none:
   4758 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4759 		if (adapter->num_queues > 1)
   4760 			ixgbe_enable_rx_drop(adapter);
   4761 		break;
   4762 	default:
   4763 		return (EINVAL);
   4764 	}
   4765 
   4766 #if 0 /* XXX NetBSD */
   4767 	/* Don't autoneg if forcing a value */
   4768 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4769 #endif
   4770 	ixgbe_fc_enable(&adapter->hw);
   4771 
   4772 	return (0);
   4773 } /* ixgbe_set_flowcntl */
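
/*
 * Illustrative sketch (not part of the driver): mapping the flow control
 * values documented above ixgbe_set_flowcntl() (0 - off, 1 - rx pause,
 * 2 - tx pause, 3 - full) onto the ixgbe_fc_* enumeration.  The helper
 * name is hypothetical; only the value meanings come from the comment.
 */
#if 0	/* example only, never compiled */
static const char *
example_fc_name(int fc)
{
	switch (fc) {
	case ixgbe_fc_none:	return "off";
	case ixgbe_fc_rx_pause:	return "rx pause";
	case ixgbe_fc_tx_pause:	return "tx pause";
	case ixgbe_fc_full:	return "full";
	default:		return "invalid";
	}
}
#endif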
   4774 
   4775 /************************************************************************
   4776  * ixgbe_enable_rx_drop
   4777  *
   4778  *   Enable the hardware to drop packets when the buffer is
   4779  *   full. This is useful with multiqueue, so that no single
   4780  *   queue being full stalls the entire RX engine. We only
   4781  *   enable this when Multiqueue is enabled AND Flow Control
   4782  *   is disabled.
   4783  ************************************************************************/
   4784 static void
   4785 ixgbe_enable_rx_drop(struct adapter *adapter)
   4786 {
   4787 	struct ixgbe_hw *hw = &adapter->hw;
   4788 	struct rx_ring  *rxr;
   4789 	u32             srrctl;
   4790 
   4791 	for (int i = 0; i < adapter->num_queues; i++) {
   4792 		rxr = &adapter->rx_rings[i];
   4793 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4794 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4795 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4796 	}
   4797 
   4798 	/* enable drop for each vf */
   4799 	for (int i = 0; i < adapter->num_vfs; i++) {
   4800 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4801 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4802 		    IXGBE_QDE_ENABLE));
   4803 	}
   4804 } /* ixgbe_enable_rx_drop */
   4805 
   4806 /************************************************************************
   4807  * ixgbe_disable_rx_drop
   4808  ************************************************************************/
   4809 static void
   4810 ixgbe_disable_rx_drop(struct adapter *adapter)
   4811 {
   4812 	struct ixgbe_hw *hw = &adapter->hw;
   4813 	struct rx_ring  *rxr;
   4814 	u32             srrctl;
   4815 
   4816 	for (int i = 0; i < adapter->num_queues; i++) {
   4817 		rxr = &adapter->rx_rings[i];
   4818 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4819 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   4820 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4821 	}
   4822 
   4823 	/* disable drop for each vf */
   4824 	for (int i = 0; i < adapter->num_vfs; i++) {
   4825 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4826 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4827 	}
   4828 } /* ixgbe_disable_rx_drop */
   4829 
   4830 /************************************************************************
   4831  * ixgbe_sysctl_advertise
   4832  *
   4833  *   SYSCTL wrapper around setting advertised speed
   4834  ************************************************************************/
   4835 static int
   4836 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4837 {
   4838 	struct sysctlnode node = *rnode;
   4839 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4840 	int            error = 0, advertise;
   4841 
   4842 	advertise = adapter->advertise;
   4843 	node.sysctl_data = &advertise;
   4844 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4845 	if (error != 0 || newp == NULL)
   4846 		return error;
   4847 
   4848 	return ixgbe_set_advertise(adapter, advertise);
   4849 } /* ixgbe_sysctl_advertise */
   4850 
   4851 /************************************************************************
   4852  * ixgbe_set_advertise - Control advertised link speed
   4853  *
   4854  *   Flags:
   4855  *     0x00 - Default (all capable link speed)
   4856  *     0x01 - advertise 100 Mb
   4857  *     0x02 - advertise 1G
   4858  *     0x04 - advertise 10G
   4859  *     0x08 - advertise 10 Mb
   4860  *     0x10 - advertise 2.5G
   4861  *     0x20 - advertise 5G
   4862  ************************************************************************/
   4863 static int
   4864 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4865 {
   4866 	device_t         dev;
   4867 	struct ixgbe_hw  *hw;
   4868 	ixgbe_link_speed speed = 0;
   4869 	ixgbe_link_speed link_caps = 0;
   4870 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4871 	bool             negotiate = FALSE;
   4872 
   4873 	/* Checks to validate new value */
   4874 	if (adapter->advertise == advertise) /* no change */
   4875 		return (0);
   4876 
   4877 	dev = adapter->dev;
   4878 	hw = &adapter->hw;
   4879 
   4880 	/* No speed changes for backplane media */
   4881 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4882 		return (ENODEV);
   4883 
   4884 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4885 	    (hw->phy.multispeed_fiber))) {
   4886 		device_printf(dev,
   4887 		    "Advertised speed can only be set on copper or "
   4888 		    "multispeed fiber media types.\n");
   4889 		return (EINVAL);
   4890 	}
   4891 
   4892 	if (advertise < 0x0 || advertise > 0x2f) {
   4893 		device_printf(dev,
   4894 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   4895 		return (EINVAL);
   4896 	}
   4897 
   4898 	if (hw->mac.ops.get_link_capabilities) {
   4899 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4900 		    &negotiate);
   4901 		if (err != IXGBE_SUCCESS) {
   4902 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   4903 			return (ENODEV);
   4904 		}
   4905 	}
   4906 
   4907 	/* Set new value and report new advertised mode */
   4908 	if (advertise & 0x1) {
   4909 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4910 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4911 			return (EINVAL);
   4912 		}
   4913 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4914 	}
   4915 	if (advertise & 0x2) {
   4916 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4917 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4918 			return (EINVAL);
   4919 		}
   4920 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4921 	}
   4922 	if (advertise & 0x4) {
   4923 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4924 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4925 			return (EINVAL);
   4926 		}
   4927 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4928 	}
   4929 	if (advertise & 0x8) {
   4930 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4931 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4932 			return (EINVAL);
   4933 		}
   4934 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4935 	}
   4936 	if (advertise & 0x10) {
   4937 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4938 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4939 			return (EINVAL);
   4940 		}
   4941 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4942 	}
   4943 	if (advertise & 0x20) {
   4944 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4945 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4946 			return (EINVAL);
   4947 		}
   4948 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4949 	}
   4950 	if (advertise == 0)
   4951 		speed = link_caps; /* All capable link speed */
   4952 
   4953 	hw->mac.autotry_restart = TRUE;
   4954 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4955 	adapter->advertise = advertise;
   4956 
   4957 	return (0);
   4958 } /* ixgbe_set_advertise */
   4959 
   4960 /************************************************************************
   4961  * ixgbe_get_advertise - Get current advertised speed settings
   4962  *
   4963  *   Formatted for sysctl usage.
   4964  *   Flags:
   4965  *     0x01 - advertise 100 Mb
   4966  *     0x02 - advertise 1G
   4967  *     0x04 - advertise 10G
   4968  *     0x08 - advertise 10 Mb (yes, Mb)
   4969  *     0x10 - advertise 2.5G
   4970  *     0x20 - advertise 5G
   4971  ************************************************************************/
   4972 static int
   4973 ixgbe_get_advertise(struct adapter *adapter)
   4974 {
   4975 	struct ixgbe_hw  *hw = &adapter->hw;
   4976 	int              speed;
   4977 	ixgbe_link_speed link_caps = 0;
   4978 	s32              err;
   4979 	bool             negotiate = FALSE;
   4980 
   4981 	/*
   4982 	 * Advertised speed means nothing unless it's copper or
   4983 	 * multi-speed fiber
   4984 	 */
   4985 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4986 	    !(hw->phy.multispeed_fiber))
   4987 		return (0);
   4988 
   4989 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4990 	if (err != IXGBE_SUCCESS)
   4991 		return (0);
   4992 
   4993 	speed =
   4994 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   4995 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   4996 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   4997 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   4998 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   4999 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5000 
   5001 	return speed;
   5002 } /* ixgbe_get_advertise */
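
/*
 * Illustrative sketch (not part of the driver): decoding the advertise
 * bitmask documented above ixgbe_set_advertise()/ixgbe_get_advertise().
 * For example, 0x06 (0x02 | 0x04) means "advertise 1G and 10G".  The
 * helper below is hypothetical and only restates the documented bits.
 */
#if 0	/* example only, never compiled */
static void
example_print_advertise(device_t dev, int advertise)
{
	if (advertise == 0)
		device_printf(dev, "advertising all capable speeds\n");
	if (advertise & 0x01)
		device_printf(dev, "advertising 100 Mb\n");
	if (advertise & 0x02)
		device_printf(dev, "advertising 1G\n");
	if (advertise & 0x04)
		device_printf(dev, "advertising 10G\n");
	if (advertise & 0x08)
		device_printf(dev, "advertising 10 Mb\n");
	if (advertise & 0x10)
		device_printf(dev, "advertising 2.5G\n");
	if (advertise & 0x20)
		device_printf(dev, "advertising 5G\n");
}
#endif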
   5003 
   5004 /************************************************************************
   5005  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5006  *
   5007  *   Control values:
   5008  *     0/1 - off / on (use default value of 1000)
   5009  *
   5010  *     Legal timer values are:
   5011  *     50,100,250,500,1000,2000,5000,10000
   5012  *
   5013  *     Turning off interrupt moderation will also turn this off.
   5014  ************************************************************************/
   5015 static int
   5016 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5017 {
   5018 	struct sysctlnode node = *rnode;
   5019 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5020 	struct ifnet   *ifp = adapter->ifp;
   5021 	int            error;
   5022 	int            newval;
   5023 
   5024 	newval = adapter->dmac;
   5025 	node.sysctl_data = &newval;
   5026 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5027 	if ((error) || (newp == NULL))
   5028 		return (error);
   5029 
   5030 	switch (newval) {
   5031 	case 0:
   5032 		/* Disabled */
   5033 		adapter->dmac = 0;
   5034 		break;
   5035 	case 1:
   5036 		/* Enable and use default */
   5037 		adapter->dmac = 1000;
   5038 		break;
   5039 	case 50:
   5040 	case 100:
   5041 	case 250:
   5042 	case 500:
   5043 	case 1000:
   5044 	case 2000:
   5045 	case 5000:
   5046 	case 10000:
   5047 		/* Legal values - allow */
   5048 		adapter->dmac = newval;
   5049 		break;
   5050 	default:
   5051 		/* Do nothing, illegal value */
   5052 		return (EINVAL);
   5053 	}
   5054 
   5055 	/* Re-initialize hardware if it's already running */
   5056 	if (ifp->if_flags & IFF_RUNNING)
   5057 		ixgbe_init(ifp);
   5058 
   5059 	return (0);
   5060 }
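
/*
 * Illustrative sketch (not part of the driver): the values accepted by the
 * DMA coalescing sysctl above, expressed as a table.  0 disables, 1 enables
 * with the default of 1000, and the rest are the legal watchdog timer
 * values listed in the comment block.  The array name is hypothetical.
 */
#if 0	/* example only, never compiled */
static const int example_dmac_values[] = {
	0,					/* disabled */
	1,					/* enable, use default of 1000 */
	50, 100, 250, 500, 1000, 2000, 5000, 10000 /* legal timer values */
};
#endif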
   5061 
   5062 #ifdef IXGBE_DEBUG
   5063 /************************************************************************
   5064  * ixgbe_sysctl_power_state
   5065  *
   5066  *   Sysctl to test power states
   5067  *   Values:
   5068  *     0      - set device to D0
   5069  *     3      - set device to D3
   5070  *     (none) - get current device power state
   5071  ************************************************************************/
   5072 static int
   5073 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   5074 {
   5075 #ifdef notyet
   5076 	struct sysctlnode node = *rnode;
   5077 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5078 	device_t       dev =  adapter->dev;
   5079 	int            curr_ps, new_ps, error = 0;
   5080 
   5081 	curr_ps = new_ps = pci_get_powerstate(dev);
   5082 
   5083 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5084 	if ((error) || (req->newp == NULL))
   5085 		return (error);
   5086 
   5087 	if (new_ps == curr_ps)
   5088 		return (0);
   5089 
   5090 	if (new_ps == 3 && curr_ps == 0)
   5091 		error = DEVICE_SUSPEND(dev);
   5092 	else if (new_ps == 0 && curr_ps == 3)
   5093 		error = DEVICE_RESUME(dev);
   5094 	else
   5095 		return (EINVAL);
   5096 
   5097 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5098 
   5099 	return (error);
   5100 #else
   5101 	return 0;
   5102 #endif
   5103 } /* ixgbe_sysctl_power_state */
   5104 #endif
   5105 
   5106 /************************************************************************
   5107  * ixgbe_sysctl_wol_enable
   5108  *
   5109  *   Sysctl to enable/disable the WoL capability,
   5110  *   if supported by the adapter.
   5111  *
   5112  *   Values:
   5113  *     0 - disabled
   5114  *     1 - enabled
   5115  ************************************************************************/
   5116 static int
   5117 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5118 {
   5119 	struct sysctlnode node = *rnode;
   5120 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5121 	struct ixgbe_hw *hw = &adapter->hw;
   5122 	bool            new_wol_enabled;
   5123 	int             error = 0;
   5124 
   5125 	new_wol_enabled = hw->wol_enabled;
   5126 	node.sysctl_data = &new_wol_enabled;
   5127 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5128 	if ((error) || (newp == NULL))
   5129 		return (error);
   5130 	if (new_wol_enabled == hw->wol_enabled)
   5131 		return (0);
   5132 
   5133 	if (new_wol_enabled && !adapter->wol_support)
   5134 		return (ENODEV);
   5135 	else
   5136 		hw->wol_enabled = new_wol_enabled;
   5137 
   5138 	return (0);
   5139 } /* ixgbe_sysctl_wol_enable */
   5140 
   5141 /************************************************************************
   5142  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5143  *
   5144  *   Sysctl to enable/disable the types of packets whose
   5145  *   receipt will wake the adapter.
   5146  *   Flags:
   5147  *     0x1  - Link Status Change
   5148  *     0x2  - Magic Packet
   5149  *     0x4  - Direct Exact
   5150  *     0x8  - Directed Multicast
   5151  *     0x10 - Broadcast
   5152  *     0x20 - ARP/IPv4 Request Packet
   5153  *     0x40 - Direct IPv4 Packet
   5154  *     0x80 - Direct IPv6 Packet
   5155  *
   5156  *   Settings not listed above will cause the sysctl to return an error.
   5157  ************************************************************************/
   5158 static int
   5159 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5160 {
   5161 	struct sysctlnode node = *rnode;
   5162 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5163 	int error = 0;
   5164 	u32 new_wufc;
   5165 
   5166 	new_wufc = adapter->wufc;
   5167 	node.sysctl_data = &new_wufc;
   5168 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5169 	if ((error) || (newp == NULL))
   5170 		return (error);
   5171 	if (new_wufc == adapter->wufc)
   5172 		return (0);
   5173 
   5174 	if (new_wufc & 0xffffff00)
   5175 		return (EINVAL);
   5176 
   5177 	new_wufc &= 0xff;
   5178 	new_wufc |= (0xffffff & adapter->wufc);
   5179 	adapter->wufc = new_wufc;
   5180 
   5181 	return (0);
   5182 } /* ixgbe_sysctl_wufc */
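
/*
 * Illustrative sketch (not part of the driver): composing the Wake Up
 * Filter Control bitmask documented above ixgbe_sysctl_wufc().  Waking on
 * Link Status Change, Magic Packet and Broadcast is 0x1 | 0x2 | 0x10 = 0x13;
 * bits outside the low byte are rejected by the handler above.  The helper
 * name is hypothetical.
 */
#if 0	/* example only, never compiled */
static u32
example_wufc_mask(void)
{
	return 0x1	/* Link Status Change */
	     | 0x2	/* Magic Packet */
	     | 0x10;	/* Broadcast */
}
#endif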
   5183 
   5184 #ifdef IXGBE_DEBUG
   5185 /************************************************************************
   5186  * ixgbe_sysctl_print_rss_config
   5187  ************************************************************************/
   5188 static int
   5189 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5190 {
   5191 #ifdef notyet
   5192 	struct sysctlnode node = *rnode;
   5193 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5194 	struct ixgbe_hw *hw = &adapter->hw;
   5195 	device_t        dev = adapter->dev;
   5196 	struct sbuf     *buf;
   5197 	int             error = 0, reta_size;
   5198 	u32             reg;
   5199 
   5200 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5201 	if (!buf) {
   5202 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5203 		return (ENOMEM);
   5204 	}
   5205 
   5206 	// TODO: use sbufs to make a string to print out
   5207 	/* Set multiplier for RETA setup and table size based on MAC */
   5208 	switch (adapter->hw.mac.type) {
   5209 	case ixgbe_mac_X550:
   5210 	case ixgbe_mac_X550EM_x:
   5211 	case ixgbe_mac_X550EM_a:
   5212 		reta_size = 128;
   5213 		break;
   5214 	default:
   5215 		reta_size = 32;
   5216 		break;
   5217 	}
   5218 
   5219 	/* Print out the redirection table */
   5220 	sbuf_cat(buf, "\n");
   5221 	for (int i = 0; i < reta_size; i++) {
   5222 		if (i < 32) {
   5223 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5224 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5225 		} else {
   5226 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5227 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5228 		}
   5229 	}
   5230 
   5231 	// TODO: print more config
   5232 
   5233 	error = sbuf_finish(buf);
   5234 	if (error)
   5235 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5236 
   5237 	sbuf_delete(buf);
   5238 #endif
   5239 	return (0);
   5240 } /* ixgbe_sysctl_print_rss_config */
   5241 #endif /* IXGBE_DEBUG */
   5242 
   5243 /************************************************************************
   5244  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5245  *
   5246  *   For X552/X557-AT devices using an external PHY
   5247  ************************************************************************/
   5248 static int
   5249 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5250 {
   5251 	struct sysctlnode node = *rnode;
   5252 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5253 	struct ixgbe_hw *hw = &adapter->hw;
   5254 	int val;
   5255 	u16 reg;
   5256 	int		error;
   5257 
   5258 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5259 		device_printf(adapter->dev,
   5260 		    "Device has no supported external thermal sensor.\n");
   5261 		return (ENODEV);
   5262 	}
   5263 
   5264 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5265 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5266 		device_printf(adapter->dev,
   5267 		    "Error reading from PHY's current temperature register\n");
   5268 		return (EAGAIN);
   5269 	}
   5270 
   5271 	node.sysctl_data = &val;
   5272 
   5273 	/* Shift temp for output */
   5274 	val = reg >> 8;
   5275 
   5276 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5277 	if ((error) || (newp == NULL))
   5278 		return (error);
   5279 
   5280 	return (0);
   5281 } /* ixgbe_sysctl_phy_temp */
   5282 
   5283 /************************************************************************
   5284  * ixgbe_sysctl_phy_overtemp_occurred
   5285  *
   5286  *   Reports (directly from the PHY) whether the current PHY
   5287  *   temperature is over the overtemp threshold.
   5288  ************************************************************************/
   5289 static int
   5290 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5291 {
   5292 	struct sysctlnode node = *rnode;
   5293 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5294 	struct ixgbe_hw *hw = &adapter->hw;
   5295 	int val, error;
   5296 	u16 reg;
   5297 
   5298 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5299 		device_printf(adapter->dev,
   5300 		    "Device has no supported external thermal sensor.\n");
   5301 		return (ENODEV);
   5302 	}
   5303 
   5304 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5305 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5306 		device_printf(adapter->dev,
   5307 		    "Error reading from PHY's temperature status register\n");
   5308 		return (EAGAIN);
   5309 	}
   5310 
   5311 	node.sysctl_data = &val;
   5312 
   5313 	/* Get occurrence bit */
   5314 	val = !!(reg & 0x4000);
   5315 
   5316 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5317 	if ((error) || (newp == NULL))
   5318 		return (error);
   5319 
   5320 	return (0);
   5321 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5322 
   5323 /************************************************************************
   5324  * ixgbe_sysctl_eee_state
   5325  *
   5326  *   Sysctl to set EEE power saving feature
   5327  *   Values:
   5328  *     0      - disable EEE
   5329  *     1      - enable EEE
   5330  *     (none) - get current device EEE state
   5331  ************************************************************************/
   5332 static int
   5333 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5334 {
   5335 	struct sysctlnode node = *rnode;
   5336 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5337 	struct ifnet   *ifp = adapter->ifp;
   5338 	device_t       dev = adapter->dev;
   5339 	int            curr_eee, new_eee, error = 0;
   5340 	s32            retval;
   5341 
   5342 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5343 	node.sysctl_data = &new_eee;
   5344 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5345 	if ((error) || (newp == NULL))
   5346 		return (error);
   5347 
   5348 	/* Nothing to do */
   5349 	if (new_eee == curr_eee)
   5350 		return (0);
   5351 
   5352 	/* Not supported */
   5353 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5354 		return (EINVAL);
   5355 
   5356 	/* Bounds checking */
   5357 	if ((new_eee < 0) || (new_eee > 1))
   5358 		return (EINVAL);
   5359 
   5360 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5361 	if (retval) {
   5362 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5363 		return (EINVAL);
   5364 	}
   5365 
   5366 	/* Restart auto-neg */
   5367 	ixgbe_init(ifp);
   5368 
   5369 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5370 
   5371 	/* Cache new value */
   5372 	if (new_eee)
   5373 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5374 	else
   5375 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5376 
   5377 	return (error);
   5378 } /* ixgbe_sysctl_eee_state */
   5379 
   5380 /************************************************************************
   5381  * ixgbe_init_device_features
   5382  ************************************************************************/
   5383 static void
   5384 ixgbe_init_device_features(struct adapter *adapter)
   5385 {
   5386 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5387 	                  | IXGBE_FEATURE_RSS
   5388 	                  | IXGBE_FEATURE_MSI
   5389 	                  | IXGBE_FEATURE_MSIX
   5390 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5391 	                  | IXGBE_FEATURE_LEGACY_TX;
   5392 
   5393 	/* Set capabilities first... */
   5394 	switch (adapter->hw.mac.type) {
   5395 	case ixgbe_mac_82598EB:
   5396 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5397 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5398 		break;
   5399 	case ixgbe_mac_X540:
   5400 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5401 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5402 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5403 		    (adapter->hw.bus.func == 0))
   5404 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5405 		break;
   5406 	case ixgbe_mac_X550:
   5407 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5408 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5409 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5410 		break;
   5411 	case ixgbe_mac_X550EM_x:
   5412 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5413 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5414 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5415 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5416 		break;
   5417 	case ixgbe_mac_X550EM_a:
   5418 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5419 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5420 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5421 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5422 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5423 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5424 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5425 		}
   5426 		break;
   5427 	case ixgbe_mac_82599EB:
   5428 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5429 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5430 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5431 		    (adapter->hw.bus.func == 0))
   5432 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5433 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5434 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5435 		break;
   5436 	default:
   5437 		break;
   5438 	}
   5439 
   5440 	/* Enabled by default... */
   5441 	/* Fan failure detection */
   5442 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5443 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5444 	/* Netmap */
   5445 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5446 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5447 	/* EEE */
   5448 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5449 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5450 	/* Thermal Sensor */
   5451 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5452 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5453 
   5454 	/* Enabled via global sysctl... */
   5455 	/* Flow Director */
   5456 	if (ixgbe_enable_fdir) {
   5457 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5458 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5459 		else
   5460 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   5461 	}
   5462 	/* Legacy (single queue) transmit */
   5463 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5464 	    ixgbe_enable_legacy_tx)
   5465 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5466 	/*
   5467 	 * Message Signal Interrupts - Extended (MSI-X)
   5468 	 * Normal MSI is only enabled if MSI-X calls fail.
   5469 	 */
   5470 	if (!ixgbe_enable_msix)
   5471 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5472 	/* Receive-Side Scaling (RSS) */
   5473 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5474 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5475 
   5476 	/* Disable features with unmet dependencies... */
   5477 	/* No MSI-X */
   5478 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5479 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5480 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5481 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5482 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5483 	}
   5484 } /* ixgbe_init_device_features */
   5485 
   5486 /************************************************************************
   5487  * ixgbe_probe - Device identification routine
   5488  *
   5489  *   Determines if the driver should be loaded on
   5490  *   adapter based on its PCI vendor/device ID.
   5491  *
   5492  *   return BUS_PROBE_DEFAULT on success, positive on failure
   5493  ************************************************************************/
   5494 static int
   5495 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5496 {
   5497 	const struct pci_attach_args *pa = aux;
   5498 
   5499 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5500 }
   5501 
   5502 static ixgbe_vendor_info_t *
   5503 ixgbe_lookup(const struct pci_attach_args *pa)
   5504 {
   5505 	ixgbe_vendor_info_t *ent;
   5506 	pcireg_t subid;
   5507 
   5508 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5509 
   5510 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5511 		return NULL;
   5512 
   5513 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5514 
   5515 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5516 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5517 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5518 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5519 			(ent->subvendor_id == 0)) &&
   5520 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5521 			(ent->subdevice_id == 0))) {
   5522 			++ixgbe_total_ports;
   5523 			return ent;
   5524 		}
   5525 	}
   5526 	return NULL;
   5527 }
   5528 
   5529 static int
   5530 ixgbe_ifflags_cb(struct ethercom *ec)
   5531 {
   5532 	struct ifnet *ifp = &ec->ec_if;
   5533 	struct adapter *adapter = ifp->if_softc;
   5534 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5535 
   5536 	IXGBE_CORE_LOCK(adapter);
   5537 
   5538 	if (change != 0)
   5539 		adapter->if_flags = ifp->if_flags;
   5540 
   5541 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5542 		rc = ENETRESET;
   5543 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5544 		ixgbe_set_promisc(adapter);
   5545 
   5546 	/* Set up VLAN support and filter */
   5547 	ixgbe_setup_vlan_hw_support(adapter);
   5548 
   5549 	IXGBE_CORE_UNLOCK(adapter);
   5550 
   5551 	return rc;
   5552 }
   5553 
   5554 /************************************************************************
   5555  * ixgbe_ioctl - Ioctl entry point
   5556  *
   5557  *   Called when the user wants to configure the interface.
   5558  *
   5559  *   return 0 on success, positive on failure
   5560  ************************************************************************/
   5561 static int
   5562 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5563 {
   5564 	struct adapter	*adapter = ifp->if_softc;
   5565 	struct ixgbe_hw *hw = &adapter->hw;
   5566 	struct ifcapreq *ifcr = data;
   5567 	struct ifreq	*ifr = data;
   5568 	int             error = 0;
   5569 	int l4csum_en;
   5570 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   5571 	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   5572 
   5573 	switch (command) {
   5574 	case SIOCSIFFLAGS:
   5575 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5576 		break;
   5577 	case SIOCADDMULTI:
   5578 	case SIOCDELMULTI:
   5579 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5580 		break;
   5581 	case SIOCSIFMEDIA:
   5582 	case SIOCGIFMEDIA:
   5583 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5584 		break;
   5585 	case SIOCSIFCAP:
   5586 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5587 		break;
   5588 	case SIOCSIFMTU:
   5589 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5590 		break;
   5591 #ifdef __NetBSD__
   5592 	case SIOCINITIFADDR:
   5593 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5594 		break;
   5595 	case SIOCGIFFLAGS:
   5596 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5597 		break;
   5598 	case SIOCGIFAFLAG_IN:
   5599 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5600 		break;
   5601 	case SIOCGIFADDR:
   5602 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5603 		break;
   5604 	case SIOCGIFMTU:
   5605 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5606 		break;
   5607 	case SIOCGIFCAP:
   5608 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5609 		break;
   5610 	case SIOCGETHERCAP:
   5611 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5612 		break;
   5613 	case SIOCGLIFADDR:
   5614 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5615 		break;
   5616 	case SIOCZIFDATA:
   5617 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5618 		hw->mac.ops.clear_hw_cntrs(hw);
   5619 		ixgbe_clear_evcnt(adapter);
   5620 		break;
   5621 	case SIOCAIFADDR:
   5622 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5623 		break;
   5624 #endif
   5625 	default:
   5626 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5627 		break;
   5628 	}
   5629 
   5630 	switch (command) {
   5631 	case SIOCSIFMEDIA:
   5632 	case SIOCGIFMEDIA:
   5633 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5634 	case SIOCGI2C:
   5635 	{
   5636 		struct ixgbe_i2c_req	i2c;
   5637 
   5638 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5639 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5640 		if (error != 0)
   5641 			break;
   5642 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5643 			error = EINVAL;
   5644 			break;
   5645 		}
   5646 		if (i2c.len > sizeof(i2c.data)) {
   5647 			error = EINVAL;
   5648 			break;
   5649 		}
   5650 
   5651 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5652 		    i2c.dev_addr, i2c.data);
   5653 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5654 		break;
   5655 	}
   5656 	case SIOCSIFCAP:
   5657 		/* Layer-4 Rx checksum offload has to be turned on and
   5658 		 * off as a unit.
   5659 		 */
   5660 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5661 		if (l4csum_en != l4csum && l4csum_en != 0)
   5662 			return EINVAL;
   5663 		/*FALLTHROUGH*/
   5664 	case SIOCADDMULTI:
   5665 	case SIOCDELMULTI:
   5666 	case SIOCSIFFLAGS:
   5667 	case SIOCSIFMTU:
   5668 	default:
   5669 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5670 			return error;
   5671 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5672 			;
   5673 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5674 			IXGBE_CORE_LOCK(adapter);
   5675 			ixgbe_init_locked(adapter);
   5676 			ixgbe_recalculate_max_frame(adapter);
   5677 			IXGBE_CORE_UNLOCK(adapter);
   5678 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5679 			/*
   5680 			 * Multicast list has changed; set the hardware filter
   5681 			 * accordingly.
   5682 			 */
   5683 			IXGBE_CORE_LOCK(adapter);
   5684 			ixgbe_disable_intr(adapter);
   5685 			ixgbe_set_multi(adapter);
   5686 			ixgbe_enable_intr(adapter);
   5687 			IXGBE_CORE_UNLOCK(adapter);
   5688 		}
   5689 		return 0;
   5690 	}
   5691 
   5692 	return error;
   5693 } /* ixgbe_ioctl */
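
/*
 * Illustrative sketch (not part of the driver): filling an ixgbe_i2c_req
 * for the SIOCGI2C handler above.  The handler accepts only device
 * addresses 0xA0 (SFP module EEPROM) and 0xA2 (typically the diagnostics
 * page) and lengths no larger than sizeof(i2c.data).  Field names are
 * taken from the handler; the helper itself is hypothetical.
 */
#if 0	/* example only, never compiled */
static void
example_fill_i2c_req(struct ixgbe_i2c_req *req)
{
	memset(req, 0, sizeof(*req));
	req->dev_addr = 0xA0;	/* SFP module EEPROM */
	req->offset = 0;	/* read from the start of the EEPROM */
	req->len = 1;		/* must not exceed sizeof(req->data) */
}
#endif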
   5694 
   5695 /************************************************************************
   5696  * ixgbe_check_fan_failure
   5697  ************************************************************************/
   5698 static void
   5699 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5700 {
   5701 	u32 mask;
   5702 
   5703 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5704 	    IXGBE_ESDP_SDP1;
   5705 
   5706 	if (reg & mask)
   5707 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5708 } /* ixgbe_check_fan_failure */
   5709 
   5710 /************************************************************************
   5711  * ixgbe_handle_que
   5712  ************************************************************************/
   5713 static void
   5714 ixgbe_handle_que(void *context)
   5715 {
   5716 	struct ix_queue *que = context;
   5717 	struct adapter  *adapter = que->adapter;
   5718 	struct tx_ring  *txr = que->txr;
   5719 	struct ifnet    *ifp = adapter->ifp;
   5720 	bool		more = false;
   5721 
   5722 	adapter->handleq.ev_count++;
   5723 
   5724 	if (ifp->if_flags & IFF_RUNNING) {
   5725 		more = ixgbe_rxeof(que);
   5726 		IXGBE_TX_LOCK(txr);
   5727 		ixgbe_txeof(txr);
   5728 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5729 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5730 				ixgbe_mq_start_locked(ifp, txr);
   5731 		/* Only for queue 0 */
   5732 		/* NetBSD still needs this for CBQ */
   5733 		if ((&adapter->queues[0] == que)
   5734 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5735 			ixgbe_legacy_start_locked(ifp, txr);
   5736 		IXGBE_TX_UNLOCK(txr);
   5737 	}
   5738 
   5739 	if (more)
   5740 		softint_schedule(que->que_si);
   5741 	else if (que->res != NULL) {
   5742 		/* Re-enable this interrupt */
   5743 		ixgbe_enable_queue(adapter, que->msix);
   5744 	} else
   5745 		ixgbe_enable_intr(adapter);
   5746 
   5747 	return;
   5748 } /* ixgbe_handle_que */
   5749 
   5750 /************************************************************************
   5751  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5752  ************************************************************************/
   5753 static int
   5754 ixgbe_allocate_legacy(struct adapter *adapter,
   5755     const struct pci_attach_args *pa)
   5756 {
   5757 	device_t	dev = adapter->dev;
   5758 	struct ix_queue *que = adapter->queues;
   5759 	struct tx_ring  *txr = adapter->tx_rings;
   5760 	int		counts[PCI_INTR_TYPE_SIZE];
   5761 	pci_intr_type_t intr_type, max_type;
   5762 	char            intrbuf[PCI_INTRSTR_LEN];
   5763 	const char	*intrstr = NULL;
   5764 
   5765 	/* We allocate a single interrupt resource */
   5766 	max_type = PCI_INTR_TYPE_MSI;
   5767 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5768 	counts[PCI_INTR_TYPE_MSI] =
   5769 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5770 	/* Check feat_cap, not feat_en, so we can fall back to INTx */
   5771 	counts[PCI_INTR_TYPE_INTX] =
   5772 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5773 
   5774 alloc_retry:
   5775 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5776 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5777 		return ENXIO;
   5778 	}
   5779 	adapter->osdep.nintrs = 1;
   5780 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5781 	    intrbuf, sizeof(intrbuf));
   5782 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5783 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5784 	    device_xname(dev));
   5785 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   5786 	if (adapter->osdep.ihs[0] == NULL) {
   5787 		aprint_error_dev(dev,"unable to establish %s\n",
   5788 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5789 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5790 		adapter->osdep.intrs = NULL;
   5791 		switch (intr_type) {
   5792 		case PCI_INTR_TYPE_MSI:
   5793 			/* The next try is for INTx: Disable MSI */
   5794 			max_type = PCI_INTR_TYPE_INTX;
   5795 			counts[PCI_INTR_TYPE_INTX] = 1;
   5796 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5797 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   5798 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5799 				goto alloc_retry;
   5800 			} else
   5801 				break;
   5802 		case PCI_INTR_TYPE_INTX:
   5803 		default:
   5804 			/* See below */
   5805 			break;
   5806 		}
   5807 	}
   5808 	if (intr_type == PCI_INTR_TYPE_INTX) {
   5809 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5810 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5811 	}
   5812 	if (adapter->osdep.ihs[0] == NULL) {
   5813 		aprint_error_dev(dev,
   5814 		    "couldn't establish interrupt%s%s\n",
   5815 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5816 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5817 		adapter->osdep.intrs = NULL;
   5818 		return ENXIO;
   5819 	}
   5820 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5821 	/*
   5822 	 * Try allocating a fast interrupt and the associated deferred
   5823 	 * processing contexts.
   5824 	 */
   5825 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5826 		txr->txr_si =
   5827 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5828 			ixgbe_deferred_mq_start, txr);
   5829 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5830 	    ixgbe_handle_que, que);
   5831 
   5832 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   5833 		&& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   5834 		aprint_error_dev(dev,
   5835 		    "could not establish software interrupts\n");
   5836 
   5837 		return ENXIO;
   5838 	}
   5839 	/* For simplicity in the handlers */
   5840 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5841 
   5842 	return (0);
   5843 } /* ixgbe_allocate_legacy */
   5844 
   5845 
   5846 /************************************************************************
   5847  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5848  ************************************************************************/
   5849 static int
   5850 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5851 {
   5852 	device_t        dev = adapter->dev;
   5853 	struct ix_queue	*que = adapter->queues;
   5854 	struct tx_ring	*txr = adapter->tx_rings;
   5855 	pci_chipset_tag_t pc;
   5856 	char		intrbuf[PCI_INTRSTR_LEN];
   5857 	char		intr_xname[32];
   5858 	const char	*intrstr = NULL;
   5859 	int 		error, vector = 0;
   5860 	int		cpu_id = 0;
   5861 	kcpuset_t	*affinity;
   5862 #ifdef RSS
   5863 	unsigned int    rss_buckets = 0;
   5864 	kcpuset_t	cpu_mask;
   5865 #endif
   5866 
   5867 	pc = adapter->osdep.pc;
   5868 #ifdef	RSS
   5869 	/*
   5870 	 * If we're doing RSS, the number of queues needs to
   5871 	 * match the number of RSS buckets that are configured.
   5872 	 *
   5873 	 * + If there's more queues than RSS buckets, we'll end
   5874 	 *   up with queues that get no traffic.
   5875 	 *
   5876 	 * + If there's more RSS buckets than queues, we'll end
   5877 	 *   up having multiple RSS buckets map to the same queue,
   5878 	 *   so there'll be some contention.
   5879 	 */
   5880 	rss_buckets = rss_getnumbuckets();
   5881 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5882 	    (adapter->num_queues != rss_buckets)) {
   5883 		device_printf(dev,
   5884 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5885 		    "; performance will be impacted.\n",
   5886 		    __func__, adapter->num_queues, rss_buckets);
   5887 	}
   5888 #endif
   5889 
   5890 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5891 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5892 	    adapter->osdep.nintrs) != 0) {
   5893 		aprint_error_dev(dev,
   5894 		    "failed to allocate MSI-X interrupt\n");
   5895 		return (ENXIO);
   5896 	}
   5897 
   5898 	kcpuset_create(&affinity, false);
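        	/*
        	 * For each queue: establish the MSI-X handler, bind the vector
        	 * to a CPU (round-robin, or the RSS bucket's CPU), and create
        	 * the softints used for deferred TX/RX processing.
        	 */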
   5899 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5900 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5901 		    device_xname(dev), i);
   5902 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5903 		    sizeof(intrbuf));
   5904 #ifdef IXGBE_MPSAFE
   5905 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5906 		    true);
   5907 #endif
   5908 		/* Set the handler function */
   5909 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5910 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5911 		    intr_xname);
   5912 		if (que->res == NULL) {
   5913 			aprint_error_dev(dev,
   5914 			    "Failed to register QUE handler\n");
   5915 			error = ENXIO;
   5916 			goto err_out;
   5917 		}
   5918 		que->msix = vector;
   5919 		adapter->active_queues |= (u64)1 << que->msix;
   5920 
   5921 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5922 #ifdef	RSS
   5923 			/*
   5924 			 * The queue ID is used as the RSS layer bucket ID.
   5925 			 * We look up the queue ID -> RSS CPU ID and select
   5926 			 * that.
   5927 			 */
   5928 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5929 			CPU_SETOF(cpu_id, &cpu_mask);
   5930 #endif
   5931 		} else {
   5932 			/*
   5933 			 * Bind the MSI-X vector, and thus the
   5934 			 * rings to the corresponding CPU.
   5935 			 *
   5936 			 * This just happens to match the default RSS
   5937 			 * round-robin bucket -> queue -> CPU allocation.
   5938 			 */
   5939 			if (adapter->num_queues > 1)
   5940 				cpu_id = i;
   5941 		}
   5942 		/* Round-robin affinity */
   5943 		kcpuset_zero(affinity);
   5944 		kcpuset_set(affinity, cpu_id % ncpu);
   5945 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5946 		    NULL);
   5947 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5948 		    intrstr);
   5949 		if (error == 0) {
   5950 #if 1 /* def IXGBE_DEBUG */
   5951 #ifdef	RSS
   5952 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   5953 			    cpu_id % ncpu);
   5954 #else
   5955 			aprint_normal(", bound queue %d to cpu %d", i,
   5956 			    cpu_id % ncpu);
   5957 #endif
   5958 #endif /* IXGBE_DEBUG */
   5959 		}
   5960 		aprint_normal("\n");
   5961 
   5962 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   5963 			txr->txr_si = softint_establish(
   5964 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5965 				ixgbe_deferred_mq_start, txr);
   5966 			if (txr->txr_si == NULL) {
   5967 				aprint_error_dev(dev,
   5968 				    "couldn't establish software interrupt\n");
   5969 				error = ENXIO;
   5970 				goto err_out;
   5971 			}
   5972 		}
   5973 		que->que_si
   5974 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5975 			ixgbe_handle_que, que);
   5976 		if (que->que_si == NULL) {
   5977 			aprint_error_dev(dev,
   5978 			    "couldn't establish software interrupt\n");
   5979 			error = ENXIO;
   5980 			goto err_out;
   5981 		}
   5982 	}
   5983 
   5984 	/* and Link */
   5985 	cpu_id++;
   5986 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5987 	adapter->vector = vector;
   5988 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5989 	    sizeof(intrbuf));
   5990 #ifdef IXGBE_MPSAFE
   5991 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5992 	    true);
   5993 #endif
   5994 	/* Set the link handler function */
   5995 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5996 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   5997 	    intr_xname);
   5998 	if (adapter->osdep.ihs[vector] == NULL) {
   5999 		adapter->res = NULL;
   6000 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6001 		error = ENXIO;
   6002 		goto err_out;
   6003 	}
   6004 	/* Round-robin affinity */
   6005 	kcpuset_zero(affinity);
   6006 	kcpuset_set(affinity, cpu_id % ncpu);
   6007 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6008 	    NULL);
   6009 
   6010 	aprint_normal_dev(dev,
   6011 	    "for link, interrupting at %s", intrstr);
   6012 	if (error == 0)
   6013 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6014 	else
   6015 		aprint_normal("\n");
   6016 
   6017 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6018 		adapter->mbx_si =
   6019 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6020 			ixgbe_handle_mbx, adapter);
   6021 		if (adapter->mbx_si == NULL) {
   6022 			aprint_error_dev(dev,
   6023 			    "could not establish software interrupts\n");
   6024 
   6025 			error = ENXIO;
   6026 			goto err_out;
   6027 		}
   6028 	}
   6029 
   6030 	kcpuset_destroy(affinity);
   6031 	aprint_normal_dev(dev,
   6032 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6033 
   6034 	return (0);
   6035 
   6036 err_out:
   6037 	kcpuset_destroy(affinity);
   6038 	ixgbe_free_softint(adapter);
   6039 	ixgbe_free_pciintr_resources(adapter);
   6040 	return (error);
   6041 } /* ixgbe_allocate_msix */
   6042 
   6043 /************************************************************************
   6044  * ixgbe_configure_interrupts
   6045  *
   6046  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6047  *   This will also depend on user settings.
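 *
 *   Only the interrupt type and the queue count are decided here; the
 *   vectors themselves are allocated later by ixgbe_allocate_msix() or
 *   ixgbe_allocate_legacy().  For example, with 8 CPUs and 16 MSI-X
 *   messages available, 8 queues are used and 9 vectors (8 TX/RX pairs
 *   + 1 link) are requested, unless overridden by the ixgbe_num_queues
 *   tunable.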
   6048  ************************************************************************/
   6049 static int
   6050 ixgbe_configure_interrupts(struct adapter *adapter)
   6051 {
   6052 	device_t dev = adapter->dev;
   6053 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6054 	int want, queues, msgs;
   6055 
   6056 	/* Default to 1 queue if MSI-X setup fails */
   6057 	adapter->num_queues = 1;
   6058 
   6059 	/* Override by tuneable */
   6060 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6061 		goto msi;
   6062 
   6063 	/*
   6064 	 * NetBSD only: Use single vector MSI when the number of CPUs is 1,
   6065 	 * to save an interrupt slot.
   6066 	 */
   6067 	if (ncpu == 1)
   6068 		goto msi;
   6069 
   6070 	/* First try MSI-X */
   6071 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6072 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6073 	if (msgs < 2)
   6074 		goto msi;
   6075 
   6076 	adapter->msix_mem = (void *)1; /* XXX */
   6077 
   6078 	/* Figure out a reasonable auto config value */
   6079 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6080 
   6081 #ifdef	RSS
   6082 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6083 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6084 		queues = min(queues, rss_getnumbuckets());
   6085 #endif
   6086 	if (ixgbe_num_queues > queues) {
   6087 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6088 		ixgbe_num_queues = queues;
   6089 	}
   6090 
   6091 	if (ixgbe_num_queues != 0)
   6092 		queues = ixgbe_num_queues;
   6093 	else
   6094 		queues = min(queues,
   6095 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6096 
   6097 	/* reflect correct sysctl value */
   6098 	ixgbe_num_queues = queues;
   6099 
   6100 	/*
   6101 	 * Want one vector (RX/TX pair) per queue
   6102 	 * plus an additional for Link.
   6103 	 */
   6104 	want = queues + 1;
   6105 	if (msgs >= want)
   6106 		msgs = want;
   6107 	else {
   6108 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6109 		    "%d vectors but %d queues wanted!\n",
   6110 		    msgs, want);
   6111 		goto msi;
   6112 	}
   6113 	adapter->num_queues = queues;
   6114 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6115 	return (0);
   6116 
   6117 	/*
   6118 	 * MSI-X allocation failed or provided us with
   6119 	 * fewer vectors than needed. Free MSI-X resources
   6120 	 * and we'll try enabling MSI.
   6121 	 */
   6122 msi:
   6123 	/* Without MSI-X, some features are no longer supported */
   6124 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6125 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6126 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6127 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6128 
   6129 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6130 	adapter->msix_mem = NULL; /* XXX */
   6131 	if (msgs > 1)
   6132 		msgs = 1;
   6133 	if (msgs != 0) {
   6134 		msgs = 1;
   6135 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6136 		return (0);
   6137 	}
   6138 
   6139 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6140 		aprint_error_dev(dev,
   6141 		    "Device does not support legacy interrupts.\n");
   6142 		return 1;
   6143 	}
   6144 
   6145 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6146 
   6147 	return (0);
   6148 } /* ixgbe_configure_interrupts */
   6149 
   6150 
   6151 /************************************************************************
   6152  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6153  *
   6154  *   Done outside of interrupt context since the driver might sleep
   6155  ************************************************************************/
   6156 static void
   6157 ixgbe_handle_link(void *context)
   6158 {
   6159 	struct adapter  *adapter = context;
   6160 	struct ixgbe_hw *hw = &adapter->hw;
   6161 
   6162 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6163 	ixgbe_update_link_status(adapter);
   6164 
   6165 	/* Re-enable link interrupts */
   6166 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6167 } /* ixgbe_handle_link */
   6168 
   6169 /************************************************************************
   6170  * ixgbe_rearm_queues
   6171  ************************************************************************/
   6172 static void
   6173 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6174 {
   6175 	u32 mask;
   6176 
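        	/*
        	 * Raise a software interrupt (via EICS) for the selected queues.
        	 * The 82598 has a single 32-bit EICS register; newer MACs split
        	 * the 64-bit queue mask across EICS_EX(0) and EICS_EX(1).
        	 */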
   6177 	switch (adapter->hw.mac.type) {
   6178 	case ixgbe_mac_82598EB:
   6179 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6180 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6181 		break;
   6182 	case ixgbe_mac_82599EB:
   6183 	case ixgbe_mac_X540:
   6184 	case ixgbe_mac_X550:
   6185 	case ixgbe_mac_X550EM_x:
   6186 	case ixgbe_mac_X550EM_a:
   6187 		mask = (queues & 0xFFFFFFFF);
   6188 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6189 		mask = (queues >> 32);
   6190 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6191 		break;
   6192 	default:
   6193 		break;
   6194 	}
   6195 } /* ixgbe_rearm_queues */
   6196