      1 /* $NetBSD: ixgbe.c,v 1.102 2017/10/04 06:19:47 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
     88  *   Used by probe to select devices to load on
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void	ixgbe_get_slot_info(struct adapter *);
    176 static int      ixgbe_allocate_msix(struct adapter *,
    177 		    const struct pci_attach_args *);
    178 static int      ixgbe_allocate_legacy(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_configure_interrupts(struct adapter *);
    181 static void	ixgbe_free_pci_resources(struct adapter *);
    182 static void	ixgbe_local_timer(void *);
    183 static void	ixgbe_local_timer1(void *);
    184 static int	ixgbe_setup_interface(device_t, struct adapter *);
    185 static void	ixgbe_config_gpie(struct adapter *);
    186 static void	ixgbe_config_dmac(struct adapter *);
    187 static void	ixgbe_config_delay_values(struct adapter *);
    188 static void	ixgbe_config_link(struct adapter *);
    189 static void	ixgbe_check_wol_support(struct adapter *);
    190 static int	ixgbe_setup_low_power_mode(struct adapter *);
    191 static void	ixgbe_rearm_queues(struct adapter *, u64);
    192 
    193 static void     ixgbe_initialize_transmit_units(struct adapter *);
    194 static void     ixgbe_initialize_receive_units(struct adapter *);
    195 static void	ixgbe_enable_rx_drop(struct adapter *);
    196 static void	ixgbe_disable_rx_drop(struct adapter *);
    197 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    198 
    199 static void     ixgbe_enable_intr(struct adapter *);
    200 static void     ixgbe_disable_intr(struct adapter *);
    201 static void     ixgbe_update_stats_counters(struct adapter *);
    202 static void     ixgbe_set_promisc(struct adapter *);
    203 static void     ixgbe_set_multi(struct adapter *);
    204 static void     ixgbe_update_link_status(struct adapter *);
    205 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    206 static void	ixgbe_configure_ivars(struct adapter *);
    207 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    208 
    209 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    210 #if 0
    211 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    212 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    213 #endif
    214 
    215 static void	ixgbe_add_device_sysctls(struct adapter *);
    216 static void     ixgbe_add_hw_stats(struct adapter *);
    217 static void	ixgbe_clear_evcnt(struct adapter *);
    218 static int	ixgbe_set_flowcntl(struct adapter *, int);
    219 static int	ixgbe_set_advertise(struct adapter *, int);
    220 static int      ixgbe_get_advertise(struct adapter *);
    221 
    222 /* Sysctl handlers */
    223 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    224 		     const char *, int *, int);
    225 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    226 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    227 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    231 #ifdef IXGBE_DEBUG
    232 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    234 #endif
    235 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    236 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    237 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    242 
    243 /* Support for pluggable optic modules */
    244 static bool	ixgbe_sfp_probe(struct adapter *);
    245 
    246 /* Legacy (single vector) interrupt handler */
    247 static int	ixgbe_legacy_irq(void *);
    248 
    249 /* The MSI/MSI-X Interrupt handlers */
    250 static int	ixgbe_msix_que(void *);
    251 static int	ixgbe_msix_link(void *);
    252 
    253 /* Software interrupts for deferred work */
    254 static void	ixgbe_handle_que(void *);
    255 static void	ixgbe_handle_link(void *);
    256 static void	ixgbe_handle_msf(void *);
    257 static void	ixgbe_handle_mod(void *);
    258 static void	ixgbe_handle_phy(void *);
    259 
    260 const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
    261 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    262 
    263 /************************************************************************
    264  *  NetBSD Device Interface Entry Points
    265  ************************************************************************/
    266 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    267     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    268     DVF_DETACH_SHUTDOWN);
    269 
    270 #if 0
    271 devclass_t ix_devclass;
    272 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    273 
    274 MODULE_DEPEND(ix, pci, 1, 1, 1);
    275 MODULE_DEPEND(ix, ether, 1, 1, 1);
    276 #endif
    277 
    278 /*
    279  * TUNEABLE PARAMETERS:
    280  */
    281 
    282 /*
    283  * AIM: Adaptive Interrupt Moderation
    284  * which means that the interrupt rate
    285  * is varied over time based on the
    286  * traffic for that interrupt vector
    287  */
    288 static bool ixgbe_enable_aim = true;
    289 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    290 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    291     "Enable adaptive interrupt moderation");
    292 
    293 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    294 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    295     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    296 
    297 /* How many packets rxeof tries to clean at a time */
    298 static int ixgbe_rx_process_limit = 256;
    299 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    300     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    301 
    302 /* How many packets txeof tries to clean at a time */
    303 static int ixgbe_tx_process_limit = 256;
    304 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    305     &ixgbe_tx_process_limit, 0,
    306     "Maximum number of sent packets to process at a time, -1 means unlimited");
    307 
    308 /* Flow control setting, default to full */
    309 static int ixgbe_flow_control = ixgbe_fc_full;
    310 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    311     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    312 
    313 /*
     314  * Smart speed setting, default to on.
     315  * This only works as a compile option
     316  * right now as it's set during attach;
     317  * set this to 'ixgbe_smart_speed_off'
     318  * to disable.
    319  */
    320 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    321 
    322 /*
    323  * MSI-X should be the default for best performance,
    324  * but this allows it to be forced off for testing.
    325  */
    326 static int ixgbe_enable_msix = 1;
    327 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    328     "Enable MSI-X interrupts");
    329 
    330 /*
    331  * Number of Queues, can be set to 0,
    332  * it then autoconfigures based on the
    333  * number of cpus with a max of 8. This
     334  * can be overridden manually here.
    335  */
    336 static int ixgbe_num_queues = 0;
    337 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    338     "Number of queues to configure, 0 indicates autoconfigure");
    339 
    340 /*
    341  * Number of TX descriptors per ring,
    342  * setting higher than RX as this seems
    343  * the better performing choice.
    344  */
    345 static int ixgbe_txd = PERFORM_TXD;
    346 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    347     "Number of transmit descriptors per queue");
    348 
    349 /* Number of RX descriptors per ring */
    350 static int ixgbe_rxd = PERFORM_RXD;
    351 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    352     "Number of receive descriptors per queue");
    353 
    354 /*
     355  * Setting this on allows the use of
     356  * unsupported SFP+ modules; note that
     357  * in doing so you are on your own :)
    358  */
    359 static int allow_unsupported_sfp = false;
    360 #define TUNABLE_INT(__x, __y)
    361 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    362 
    363 /*
    364  * Not sure if Flow Director is fully baked,
    365  * so we'll default to turning it off.
    366  */
    367 static int ixgbe_enable_fdir = 0;
    368 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    369     "Enable Flow Director");
    370 
    371 /* Legacy Transmit (single queue) */
    372 static int ixgbe_enable_legacy_tx = 0;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    374     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    375 
    376 /* Receive-Side Scaling */
    377 static int ixgbe_enable_rss = 1;
    378 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    379     "Enable Receive-Side Scaling (RSS)");
    380 
    381 /* Keep running tab on them for sanity check */
    382 static int ixgbe_total_ports;
    383 
    384 #if 0
    385 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    386 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    387 #endif
    388 
    389 #ifdef NET_MPSAFE
    390 #define IXGBE_MPSAFE		1
    391 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    392 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    393 #else
    394 #define IXGBE_CALLOUT_FLAGS	0
    395 #define IXGBE_SOFTINFT_FLAGS	0
    396 #endif
    397 
    398 /************************************************************************
    399  * ixgbe_initialize_rss_mapping
    400  ************************************************************************/
    401 static void
    402 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    403 {
    404 	struct ixgbe_hw	*hw = &adapter->hw;
    405 	u32             reta = 0, mrqc, rss_key[10];
    406 	int             queue_id, table_size, index_mult;
    407 	int             i, j;
    408 	u32             rss_hash_config;
    409 
    410 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    411 		/* Fetch the configured RSS key */
    412 		rss_getkey((uint8_t *) &rss_key);
    413 	} else {
    414 		/* set up random bits */
    415 		cprng_fast(&rss_key, sizeof(rss_key));
    416 	}
    417 
    418 	/* Set multiplier for RETA setup and table size based on MAC */
    419 	index_mult = 0x1;
    420 	table_size = 128;
    421 	switch (adapter->hw.mac.type) {
    422 	case ixgbe_mac_82598EB:
    423 		index_mult = 0x11;
    424 		break;
    425 	case ixgbe_mac_X550:
    426 	case ixgbe_mac_X550EM_x:
    427 	case ixgbe_mac_X550EM_a:
    428 		table_size = 512;
    429 		break;
    430 	default:
    431 		break;
    432 	}
    433 
    434 	/* Set up the redirection table */
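	/*
	 * Each 32-bit RETA register packs four consecutive 8-bit table
	 * entries, so the loop below accumulates queue ids in 'reta' and
	 * writes one register every fourth iteration.  Entries 0..127 go
	 * to RETA(0..31); on X550-class MACs with a 512-entry table the
	 * remaining entries go to ERETA(0..95).
	 */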
    435 	for (i = 0, j = 0; i < table_size; i++, j++) {
    436 		if (j == adapter->num_queues)
    437 			j = 0;
    438 
    439 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    440 			/*
    441 			 * Fetch the RSS bucket id for the given indirection
    442 			 * entry. Cap it at the number of configured buckets
    443 			 * (which is num_queues.)
    444 			 */
    445 			queue_id = rss_get_indirection_to_bucket(i);
    446 			queue_id = queue_id % adapter->num_queues;
    447 		} else
    448 			queue_id = (j * index_mult);
    449 
    450 		/*
    451 		 * The low 8 bits are for hash value (n+0);
    452 		 * The next 8 bits are for hash value (n+1), etc.
    453 		 */
    454 		reta = reta >> 8;
    455 		reta = reta | (((uint32_t) queue_id) << 24);
    456 		if ((i & 3) == 3) {
    457 			if (i < 128)
    458 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    459 			else
    460 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    461 				    reta);
    462 			reta = 0;
    463 		}
    464 	}
    465 
    466 	/* Now fill our hash function seeds */
    467 	for (i = 0; i < 10; i++)
    468 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    469 
    470 	/* Perform hash on these packet types */
    471 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    472 		rss_hash_config = rss_gethashconfig();
    473 	else {
    474 		/*
    475 		 * Disable UDP - IP fragments aren't currently being handled
    476 		 * and so we end up with a mix of 2-tuple and 4-tuple
    477 		 * traffic.
    478 		 */
    479 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    480 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    481 		                | RSS_HASHTYPE_RSS_IPV6
    482 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    483 		                | RSS_HASHTYPE_RSS_IPV6_EX
    484 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    485 	}
    486 
    487 	mrqc = IXGBE_MRQC_RSSEN;
    488 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    489 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    490 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    491 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    492 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    493 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    494 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    495 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    496 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    497 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
    503 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
    504 		    __func__);
    505 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    506 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    507 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    508 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    509 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    510 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    511 } /* ixgbe_initialize_rss_mapping */
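
#if 0
/*
 * Illustration only, not compiled: a rough sketch of how the hardware
 * consumes the table programmed above.  The low bits of the 32-bit RSS
 * hash index the redirection table and the byte stored there selects
 * the destination RX queue (7 index bits for the 128-entry table shown
 * here; X550-class MACs use 9 bits for their 512-entry table).  The
 * helper name is made up for this sketch.
 */
static inline int
ixgbe_rss_hash_to_queue(uint32_t rss_hash, const uint8_t reta[128])
{
	return reta[rss_hash & 127];
}
#endif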
    512 
    513 /************************************************************************
    514  * ixgbe_initialize_receive_units - Setup receive registers and features.
    515  ************************************************************************/
    516 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    517 
    518 static void
    519 ixgbe_initialize_receive_units(struct adapter *adapter)
    520 {
    521 	struct	rx_ring	*rxr = adapter->rx_rings;
    522 	struct ixgbe_hw	*hw = &adapter->hw;
    523 	struct ifnet    *ifp = adapter->ifp;
    524 	int             i, j;
    525 	u32		bufsz, fctrl, srrctl, rxcsum;
    526 	u32		hlreg;
    527 
    528 	/*
    529 	 * Make sure receives are disabled while
    530 	 * setting up the descriptor ring
    531 	 */
    532 	ixgbe_disable_rx(hw);
    533 
    534 	/* Enable broadcasts */
    535 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    536 	fctrl |= IXGBE_FCTRL_BAM;
    537 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    538 		fctrl |= IXGBE_FCTRL_DPF;
    539 		fctrl |= IXGBE_FCTRL_PMCF;
    540 	}
    541 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    542 
    543 	/* Set for Jumbo Frames? */
    544 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    545 	if (ifp->if_mtu > ETHERMTU)
    546 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    547 	else
    548 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    549 
    550 #ifdef DEV_NETMAP
    551 	/* CRC stripping is conditional in Netmap */
    552 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    553 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    554 	    !ix_crcstrip)
    555 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    556 	else
    557 #endif /* DEV_NETMAP */
    558 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    559 
    560 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    561 
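	/*
	 * SRRCTL.BSIZEPKT is expressed in 1 KB units
	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT), so round the RX buffer size up
	 * to the next 1 KB boundary before converting.
	 */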
    562 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    563 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    564 
    565 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    566 		u64 rdba = rxr->rxdma.dma_paddr;
    567 		u32 tqsmreg, reg;
    568 		int regnum = i / 4;	/* 1 register per 4 queues */
     569 		int regshift = i % 4;	/* 8 bits per queue */
    570 		j = rxr->me;
    571 
    572 		/* Setup the Base and Length of the Rx Descriptor Ring */
    573 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    574 		    (rdba & 0x00000000ffffffffULL));
    575 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    576 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    577 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    578 
    579 		/* Set up the SRRCTL register */
    580 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    581 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    582 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    583 		srrctl |= bufsz;
    584 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    585 
    586 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    587 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    588 		reg &= ~(0x000000ff << (regshift * 8));
    589 		reg |= i << (regshift * 8);
    590 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    591 
    592 		/*
     593 		 * Set TQSMR/TQSM (Transmit Queue Statistic Mapping) register.
     594 		 * The register location for queues 0...7 differs between
     595 		 * 82598 and newer MACs.
    596 		 */
    597 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    598 			tqsmreg = IXGBE_TQSMR(regnum);
    599 		else
    600 			tqsmreg = IXGBE_TQSM(regnum);
    601 		reg = IXGBE_READ_REG(hw, tqsmreg);
    602 		reg &= ~(0x000000ff << (regshift * 8));
    603 		reg |= i << (regshift * 8);
    604 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
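		/*
		 * With this mapping, the per-queue QPRC/QPTC counters that
		 * ixgbe_update_stats_counters() reads reflect the traffic of
		 * the corresponding driver queue.
		 */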
    605 
    606 		/*
    607 		 * Set DROP_EN iff we have no flow control and >1 queue.
    608 		 * Note that srrctl was cleared shortly before during reset,
    609 		 * so we do not need to clear the bit, but do it just in case
    610 		 * this code is moved elsewhere.
    611 		 */
    612 		if (adapter->num_queues > 1 &&
    613 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    614 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    615 		} else {
    616 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    617 		}
    618 
    619 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    620 
    621 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    622 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    623 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    624 
    625 		/* Set the driver rx tail address */
    626 		rxr->tail =  IXGBE_RDT(rxr->me);
    627 	}
    628 
    629 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    630 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    631 		            | IXGBE_PSRTYPE_UDPHDR
    632 		            | IXGBE_PSRTYPE_IPV4HDR
    633 		            | IXGBE_PSRTYPE_IPV6HDR;
    634 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    635 	}
    636 
    637 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    638 
    639 	ixgbe_initialize_rss_mapping(adapter);
    640 
    641 	if (adapter->num_queues > 1) {
    642 		/* RSS and RX IPP Checksum are mutually exclusive */
    643 		rxcsum |= IXGBE_RXCSUM_PCSD;
    644 	}
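	/*
	 * PCSD tells the hardware not to report the fragment checksum in
	 * the RX descriptor; that descriptor field is shared with the RSS
	 * hash, which is why the two features are mutually exclusive.
	 */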
    645 
    646 	if (ifp->if_capenable & IFCAP_RXCSUM)
    647 		rxcsum |= IXGBE_RXCSUM_PCSD;
    648 
    649 	/* This is useful for calculating UDP/IP fragment checksums */
    650 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    651 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    652 
    653 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    654 
    655 	return;
    656 } /* ixgbe_initialize_receive_units */
    657 
    658 /************************************************************************
    659  * ixgbe_initialize_transmit_units - Enable transmit units.
    660  ************************************************************************/
    661 static void
    662 ixgbe_initialize_transmit_units(struct adapter *adapter)
    663 {
    664 	struct tx_ring  *txr = adapter->tx_rings;
    665 	struct ixgbe_hw	*hw = &adapter->hw;
    666 
    667 	/* Setup the Base and Length of the Tx Descriptor Ring */
    668 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    669 		u64 tdba = txr->txdma.dma_paddr;
    670 		u32 txctrl = 0;
    671 		int j = txr->me;
    672 
    673 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    674 		    (tdba & 0x00000000ffffffffULL));
    675 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    676 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    677 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    678 
    679 		/* Setup the HW Tx Head and Tail descriptor pointers */
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    681 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    682 
    683 		/* Cache the tail address */
    684 		txr->tail = IXGBE_TDT(j);
    685 
    686 		/* Disable Head Writeback */
    687 		/*
    688 		 * Note: for X550 series devices, these registers are actually
     689 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    690 		 * fields remain the same.
    691 		 */
    692 		switch (hw->mac.type) {
    693 		case ixgbe_mac_82598EB:
    694 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    695 			break;
    696 		default:
    697 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    698 			break;
    699 		}
    700 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    701 		switch (hw->mac.type) {
    702 		case ixgbe_mac_82598EB:
    703 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    704 			break;
    705 		default:
    706 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    707 			break;
    708 		}
    709 
    710 	}
    711 
    712 	if (hw->mac.type != ixgbe_mac_82598EB) {
    713 		u32 dmatxctl, rttdcs;
    714 
    715 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    716 		dmatxctl |= IXGBE_DMATXCTL_TE;
    717 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    718 		/* Disable arbiter to set MTQC */
    719 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    720 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    721 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    722 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    723 		    ixgbe_get_mtqc(adapter->iov_mode));
    724 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    725 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    726 	}
    727 
    728 	return;
    729 } /* ixgbe_initialize_transmit_units */
    730 
    731 /************************************************************************
    732  * ixgbe_attach - Device initialization routine
    733  *
    734  *   Called when the driver is being loaded.
    735  *   Identifies the type of hardware, allocates all resources
    736  *   and initializes the hardware.
    737  *
    738  *   return 0 on success, positive on failure
    739  ************************************************************************/
    740 static void
    741 ixgbe_attach(device_t parent, device_t dev, void *aux)
    742 {
    743 	struct adapter  *adapter;
    744 	struct ixgbe_hw *hw;
    745 	int             error = -1;
    746 	u32		ctrl_ext;
    747 	u16		high, low, nvmreg;
    748 	pcireg_t	id, subid;
    749 	ixgbe_vendor_info_t *ent;
    750 	struct pci_attach_args *pa = aux;
    751 	const char *str;
    752 	char buf[256];
    753 
    754 	INIT_DEBUGOUT("ixgbe_attach: begin");
    755 
    756 	/* Allocate, clear, and link in our adapter structure */
    757 	adapter = device_private(dev);
    758 	adapter->hw.back = adapter;
    759 	adapter->dev = dev;
    760 	hw = &adapter->hw;
    761 	adapter->osdep.pc = pa->pa_pc;
    762 	adapter->osdep.tag = pa->pa_tag;
    763 	if (pci_dma64_available(pa))
    764 		adapter->osdep.dmat = pa->pa_dmat64;
    765 	else
    766 		adapter->osdep.dmat = pa->pa_dmat;
    767 	adapter->osdep.attached = false;
    768 
    769 	ent = ixgbe_lookup(pa);
    770 
    771 	KASSERT(ent != NULL);
    772 
    773 	aprint_normal(": %s, Version - %s\n",
    774 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    775 
    776 	/* Core Lock Init*/
    777 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    778 
    779 	/* Set up the timer callout */
    780 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    781 
    782 	/* Determine hardware revision */
    783 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    784 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    785 
    786 	hw->vendor_id = PCI_VENDOR(id);
    787 	hw->device_id = PCI_PRODUCT(id);
    788 	hw->revision_id =
    789 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    790 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    791 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    792 
    793 	/*
    794 	 * Make sure BUSMASTER is set
    795 	 */
    796 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    797 
    798 	/* Do base PCI setup - map BAR0 */
    799 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    800 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    801 		error = ENXIO;
    802 		goto err_out;
    803 	}
    804 
    805 	/* let hardware know driver is loaded */
    806 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    807 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    808 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    809 
    810 	/*
    811 	 * Initialize the shared code
    812 	 */
    813 	if (ixgbe_init_shared_code(hw)) {
    814 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    815 		error = ENXIO;
    816 		goto err_out;
    817 	}
    818 
    819 	switch (hw->mac.type) {
    820 	case ixgbe_mac_82598EB:
    821 		str = "82598EB";
    822 		break;
    823 	case ixgbe_mac_82599EB:
    824 		str = "82599EB";
    825 		break;
    826 	case ixgbe_mac_X540:
    827 		str = "X540";
    828 		break;
    829 	case ixgbe_mac_X550:
    830 		str = "X550";
    831 		break;
    832 	case ixgbe_mac_X550EM_x:
    833 		str = "X550EM";
    834 		break;
    835 	case ixgbe_mac_X550EM_a:
    836 		str = "X550EM A";
    837 		break;
    838 	default:
    839 		str = "Unknown";
    840 		break;
    841 	}
    842 	aprint_normal_dev(dev, "device %s\n", str);
    843 
    844 	if (hw->mbx.ops.init_params)
    845 		hw->mbx.ops.init_params(hw);
    846 
    847 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    848 
    849 	/* Pick up the 82599 settings */
    850 	if (hw->mac.type != ixgbe_mac_82598EB) {
    851 		hw->phy.smart_speed = ixgbe_smart_speed;
    852 		adapter->num_segs = IXGBE_82599_SCATTER;
    853 	} else
    854 		adapter->num_segs = IXGBE_82598_SCATTER;
    855 
    856 	ixgbe_init_device_features(adapter);
    857 
    858 	if (ixgbe_configure_interrupts(adapter)) {
    859 		error = ENXIO;
    860 		goto err_out;
    861 	}
    862 
    863 	/* Allocate multicast array memory. */
    864 	adapter->mta = malloc(sizeof(*adapter->mta) *
    865 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    866 	if (adapter->mta == NULL) {
    867 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    868 		error = ENOMEM;
    869 		goto err_out;
    870 	}
    871 
    872 	/* Enable WoL (if supported) */
    873 	ixgbe_check_wol_support(adapter);
    874 
    875 	/* Verify adapter fan is still functional (if applicable) */
    876 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    877 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    878 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    879 	}
    880 
    881 	/* Ensure SW/FW semaphore is free */
    882 	ixgbe_init_swfw_semaphore(hw);
    883 
    884 	/* Enable EEE power saving */
    885 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    886 		hw->mac.ops.setup_eee(hw, TRUE);
    887 
    888 	/* Set an initial default flow control value */
    889 	hw->fc.requested_mode = ixgbe_flow_control;
    890 
    891 	/* Sysctls for limiting the amount of work done in the taskqueues */
    892 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    893 	    "max number of rx packets to process",
    894 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    895 
    896 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    897 	    "max number of tx packets to process",
    898 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    899 
    900 	/* Do descriptor calc and sanity checks */
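	/*
	 * An advanced descriptor is 16 bytes, so the checks below require
	 * the total ring size (count * descriptor size) to be a multiple
	 * of DBA_ALIGN and the count itself to be within the MIN/MAX
	 * bounds.
	 */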
    901 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    902 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    903 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    904 		adapter->num_tx_desc = DEFAULT_TXD;
    905 	} else
    906 		adapter->num_tx_desc = ixgbe_txd;
    907 
    908 	/*
    909 	 * With many RX rings it is easy to exceed the
    910 	 * system mbuf allocation. Tuning nmbclusters
    911 	 * can alleviate this.
    912 	 */
    913 	if (nmbclusters > 0) {
    914 		int s;
    915 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    916 		if (s > nmbclusters) {
    917 			aprint_error_dev(dev, "RX Descriptors exceed "
    918 			    "system mbuf max, using default instead!\n");
    919 			ixgbe_rxd = DEFAULT_RXD;
    920 		}
    921 	}
    922 
    923 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    924 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    925 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    926 		adapter->num_rx_desc = DEFAULT_RXD;
    927 	} else
    928 		adapter->num_rx_desc = ixgbe_rxd;
    929 
    930 	/* Allocate our TX/RX Queues */
    931 	if (ixgbe_allocate_queues(adapter)) {
    932 		error = ENOMEM;
    933 		goto err_out;
    934 	}
    935 
    936 	hw->phy.reset_if_overtemp = TRUE;
    937 	error = ixgbe_reset_hw(hw);
    938 	hw->phy.reset_if_overtemp = FALSE;
    939 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    940 		/*
    941 		 * No optics in this port, set up
    942 		 * so the timer routine will probe
    943 		 * for later insertion.
    944 		 */
    945 		adapter->sfp_probe = TRUE;
    946 		error = IXGBE_SUCCESS;
    947 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    948 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    949 		error = EIO;
    950 		goto err_late;
    951 	} else if (error) {
    952 		aprint_error_dev(dev, "Hardware initialization failed\n");
    953 		error = EIO;
    954 		goto err_late;
    955 	}
    956 
    957 	/* Make sure we have a good EEPROM before we read from it */
    958 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    959 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    960 		error = EIO;
    961 		goto err_late;
    962 	}
    963 
    964 	aprint_normal("%s:", device_xname(dev));
    965 	/* NVM Image Version */
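	/*
	 * An NVM word of 0xffff means the field is unprogrammed, so each
	 * version below is printed only when a valid word is read back.
	 */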
    966 	switch (hw->mac.type) {
    967 	case ixgbe_mac_X540:
    968 	case ixgbe_mac_X550EM_a:
    969 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    970 		if (nvmreg == 0xffff)
    971 			break;
    972 		high = (nvmreg >> 12) & 0x0f;
    973 		low = (nvmreg >> 4) & 0xff;
    974 		id = nvmreg & 0x0f;
    975 		aprint_normal(" NVM Image Version %u.%u ID 0x%x,", high, low,
    976 		    id);
    977 		break;
    978 	case ixgbe_mac_X550EM_x:
    979 	case ixgbe_mac_X550:
    980 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    981 		if (nvmreg == 0xffff)
    982 			break;
    983 		high = (nvmreg >> 12) & 0x0f;
    984 		low = nvmreg & 0xff;
    985 		aprint_normal(" NVM Image Version %u.%u,", high, low);
    986 		break;
    987 	default:
    988 		break;
    989 	}
    990 
    991 	/* PHY firmware revision */
    992 	switch (hw->mac.type) {
    993 	case ixgbe_mac_X540:
    994 	case ixgbe_mac_X550:
    995 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
    996 		if (nvmreg == 0xffff)
    997 			break;
    998 		high = (nvmreg >> 12) & 0x0f;
    999 		low = (nvmreg >> 4) & 0xff;
   1000 		id = nvmreg & 0x000f;
   1001 		aprint_normal(" PHY FW Revision %u.%u ID 0x%x,", high, low,
   1002 		    id);
   1003 		break;
   1004 	default:
   1005 		break;
   1006 	}
   1007 
   1008 	/* NVM Map version & OEM NVM Image version */
   1009 	switch (hw->mac.type) {
   1010 	case ixgbe_mac_X550:
   1011 	case ixgbe_mac_X550EM_x:
   1012 	case ixgbe_mac_X550EM_a:
   1013 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1014 		if (nvmreg != 0xffff) {
   1015 			high = (nvmreg >> 12) & 0x0f;
   1016 			low = nvmreg & 0x00ff;
   1017 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1018 		}
   1019 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
    1020 		if (nvmreg != 0xffff) {
   1021 			high = (nvmreg >> 12) & 0x0f;
   1022 			low = nvmreg & 0x00ff;
   1023 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1024 			    low);
   1025 		}
   1026 		break;
   1027 	default:
   1028 		break;
   1029 	}
   1030 
   1031 	/* Print the ETrackID */
   1032 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1033 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1034 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1035 
   1036 	/* Setup OS specific network interface */
   1037 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1038 		goto err_late;
   1039 
   1040 	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
   1041 		error = ixgbe_allocate_msix(adapter, pa);
   1042 	else
   1043 		error = ixgbe_allocate_legacy(adapter, pa);
   1044 	if (error)
   1045 		goto err_late;
   1046 
   1047 	error = ixgbe_start_hw(hw);
   1048 	switch (error) {
   1049 	case IXGBE_ERR_EEPROM_VERSION:
   1050 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1051 		    "LOM.  Please be aware there may be issues associated "
   1052 		    "with your hardware.\nIf you are experiencing problems "
   1053 		    "please contact your Intel or hardware representative "
   1054 		    "who provided you with this hardware.\n");
   1055 		break;
   1056 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1057 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1058 		error = EIO;
   1059 		goto err_late;
   1060 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1061 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1062 		/* falls thru */
   1063 	default:
   1064 		break;
   1065 	}
   1066 
   1067 	if (hw->phy.id != 0) {
   1068 		uint16_t id1, id2;
   1069 		int oui, model, rev;
   1070 		const char *descr;
   1071 
   1072 		id1 = hw->phy.id >> 16;
   1073 		id2 = hw->phy.id & 0xffff;
   1074 		oui = MII_OUI(id1, id2);
   1075 		model = MII_MODEL(id2);
   1076 		rev = MII_REV(id2);
   1077 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1078 			aprint_normal_dev(dev,
   1079 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1080 			    descr, oui, model, rev);
   1081 		else
   1082 			aprint_normal_dev(dev,
   1083 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1084 			    oui, model, rev);
   1085 	}
   1086 
   1087 	/* Enable the optics for 82599 SFP+ fiber */
   1088 	ixgbe_enable_tx_laser(hw);
   1089 
   1090 	/* Enable power to the phy. */
   1091 	ixgbe_set_phy_power(hw, TRUE);
   1092 
   1093 	/* Initialize statistics */
   1094 	ixgbe_update_stats_counters(adapter);
   1095 
   1096 	/* Check PCIE slot type/speed/width */
   1097 	ixgbe_get_slot_info(adapter);
   1098 
   1099 	/*
   1100 	 * Do time init and sysctl init here, but
   1101 	 * only on the first port of a bypass adapter.
   1102 	 */
   1103 	ixgbe_bypass_init(adapter);
   1104 
   1105 	/* Set an initial dmac value */
   1106 	adapter->dmac = 0;
   1107 	/* Set initial advertised speeds (if applicable) */
   1108 	adapter->advertise = ixgbe_get_advertise(adapter);
   1109 
   1110 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1111 		ixgbe_define_iov_schemas(dev, &error);
   1112 
   1113 	/* Add sysctls */
   1114 	ixgbe_add_device_sysctls(adapter);
   1115 	ixgbe_add_hw_stats(adapter);
   1116 
   1117 	/* For Netmap */
   1118 	adapter->init_locked = ixgbe_init_locked;
   1119 	adapter->stop_locked = ixgbe_stop;
   1120 
   1121 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1122 		ixgbe_netmap_attach(adapter);
   1123 
   1124 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1125 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1126 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1127 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1128 
   1129 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1130 		pmf_class_network_register(dev, adapter->ifp);
   1131 	else
   1132 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1133 
   1134 	INIT_DEBUGOUT("ixgbe_attach: end");
   1135 	adapter->osdep.attached = true;
   1136 
   1137 	return;
   1138 
   1139 err_late:
   1140 	ixgbe_free_transmit_structures(adapter);
   1141 	ixgbe_free_receive_structures(adapter);
   1142 	free(adapter->queues, M_DEVBUF);
   1143 err_out:
   1144 	if (adapter->ifp != NULL)
   1145 		if_free(adapter->ifp);
   1146 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1147 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1148 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1149 	ixgbe_free_pci_resources(adapter);
   1150 	if (adapter->mta != NULL)
   1151 		free(adapter->mta, M_DEVBUF);
   1152 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1153 
   1154 	return;
   1155 } /* ixgbe_attach */
   1156 
   1157 /************************************************************************
   1158  * ixgbe_check_wol_support
   1159  *
   1160  *   Checks whether the adapter's ports are capable of
   1161  *   Wake On LAN by reading the adapter's NVM.
   1162  *
   1163  *   Sets each port's hw->wol_enabled value depending
   1164  *   on the value read here.
   1165  ************************************************************************/
   1166 static void
   1167 ixgbe_check_wol_support(struct adapter *adapter)
   1168 {
   1169 	struct ixgbe_hw *hw = &adapter->hw;
   1170 	u16             dev_caps = 0;
   1171 
   1172 	/* Find out WoL support for port */
   1173 	adapter->wol_support = hw->wol_enabled = 0;
   1174 	ixgbe_get_device_caps(hw, &dev_caps);
   1175 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1176 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1177 	     hw->bus.func == 0))
   1178 		adapter->wol_support = hw->wol_enabled = 1;
   1179 
   1180 	/* Save initial wake up filter configuration */
   1181 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1182 
   1183 	return;
   1184 } /* ixgbe_check_wol_support */
   1185 
   1186 /************************************************************************
   1187  * ixgbe_setup_interface
   1188  *
   1189  *   Setup networking device structure and register an interface.
   1190  ************************************************************************/
   1191 static int
   1192 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1193 {
   1194 	struct ethercom *ec = &adapter->osdep.ec;
   1195 	struct ifnet   *ifp;
   1196 
   1197 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1198 
   1199 	ifp = adapter->ifp = &ec->ec_if;
   1200 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1201 	ifp->if_baudrate = IF_Gbps(10);
   1202 	ifp->if_init = ixgbe_init;
   1203 	ifp->if_stop = ixgbe_ifstop;
   1204 	ifp->if_softc = adapter;
   1205 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1206 #ifdef IXGBE_MPSAFE
   1207 	ifp->if_extflags = IFEF_START_MPSAFE;
   1208 #endif
   1209 	ifp->if_ioctl = ixgbe_ioctl;
   1210 #if __FreeBSD_version >= 1100045
   1211 	/* TSO parameters */
   1212 	ifp->if_hw_tsomax = 65518;
   1213 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1214 	ifp->if_hw_tsomaxsegsize = 2048;
   1215 #endif
   1216 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1217 #if 0
   1218 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1219 #endif
   1220 	} else {
   1221 		ifp->if_transmit = ixgbe_mq_start;
   1222 #if 0
   1223 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1224 #endif
   1225 	}
   1226 	ifp->if_start = ixgbe_legacy_start;
   1227 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1228 	IFQ_SET_READY(&ifp->if_snd);
   1229 
   1230 	if_initialize(ifp);
   1231 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1232 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1233 	/*
   1234 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1235 	 * used.
   1236 	 */
   1237 	if_register(ifp);
   1238 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1239 
   1240 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1241 
   1242 	/*
   1243 	 * Tell the upper layer(s) we support long frames.
   1244 	 */
   1245 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1246 
   1247 	/* Set capability flags */
   1248 	ifp->if_capabilities |= IFCAP_RXCSUM
   1249 			     |  IFCAP_TXCSUM
   1250 			     |  IFCAP_TSOv4
   1251 			     |  IFCAP_TSOv6
   1252 			     |  IFCAP_LRO;
   1253 	ifp->if_capenable = 0;
   1254 
   1255 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1256 	    		    |  ETHERCAP_VLAN_HWCSUM
   1257 	    		    |  ETHERCAP_JUMBO_MTU
   1258 	    		    |  ETHERCAP_VLAN_MTU;
   1259 
   1260 	/* Enable the above capabilities by default */
   1261 	ec->ec_capenable = ec->ec_capabilities;
   1262 
   1263 	/*
    1264 	 * Don't turn this on by default: if vlans are
    1265 	 * created on another pseudo device (e.g. lagg),
    1266 	 * vlan events are not passed through, breaking
    1267 	 * operation, whereas with HW FILTER off it works.
    1268 	 * If using vlans directly on the ixgbe driver you
    1269 	 * can enable this and get full hardware tag filtering.
   1270 	 */
   1271 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1272 
   1273 	/*
   1274 	 * Specify the media types supported by this adapter and register
   1275 	 * callbacks to update media and link information
   1276 	 */
   1277 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1278 	    ixgbe_media_status);
   1279 
   1280 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1281 	ixgbe_add_media_types(adapter);
   1282 
   1283 	/* Set autoselect media by default */
   1284 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1285 
   1286 	return (0);
   1287 } /* ixgbe_setup_interface */
   1288 
   1289 /************************************************************************
   1290  * ixgbe_add_media_types
   1291  ************************************************************************/
   1292 static void
   1293 ixgbe_add_media_types(struct adapter *adapter)
   1294 {
   1295 	struct ixgbe_hw *hw = &adapter->hw;
   1296 	device_t        dev = adapter->dev;
   1297 	u64             layer;
   1298 
   1299 	layer = adapter->phy_layer;
   1300 
   1301 #define	ADD(mm, dd)							\
   1302 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1303 
   1304 	/* Media types with matching NetBSD media defines */
   1305 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1306 		ADD(IFM_10G_T, 0);
   1307 		ADD(IFM_10G_T | IFM_FDX, 0);
   1308 	}
   1309 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1310 		ADD(IFM_1000_T, 0);
   1311 		ADD(IFM_1000_T | IFM_FDX, 0);
   1312 	}
   1313 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1314 		ADD(IFM_100_TX, 0);
   1315 		ADD(IFM_100_TX | IFM_FDX, 0);
   1316 	}
   1317 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1318 		ADD(IFM_10_T, 0);
   1319 		ADD(IFM_10_T | IFM_FDX, 0);
   1320 	}
   1321 
   1322 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1323 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1324 		ADD(IFM_10G_TWINAX, 0);
   1325 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1326 	}
   1327 
   1328 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1329 		ADD(IFM_10G_LR, 0);
   1330 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1331 		if (hw->phy.multispeed_fiber) {
   1332 			ADD(IFM_1000_LX, 0);
   1333 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1334 		}
   1335 	}
   1336 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1337 		ADD(IFM_10G_SR, 0);
   1338 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1339 		if (hw->phy.multispeed_fiber) {
   1340 			ADD(IFM_1000_SX, 0);
   1341 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1342 		}
   1343 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1344 		ADD(IFM_1000_SX, 0);
   1345 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1346 	}
   1347 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1348 		ADD(IFM_10G_CX4, 0);
   1349 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1350 	}
   1351 
   1352 #ifdef IFM_ETH_XTYPE
   1353 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1354 		ADD(IFM_10G_KR, 0);
   1355 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1356 	}
   1357 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1358 		ADD(IFM_10G_KX4, 0);
    1359 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1360 	}
   1361 #else
   1362 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1363 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1364 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1365 		ADD(IFM_10G_SR, 0);
   1366 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1367 	}
   1368 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1369 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1370 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1371 		ADD(IFM_10G_CX4, 0);
   1372 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1373 	}
   1374 #endif
   1375 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1376 		ADD(IFM_1000_KX, 0);
   1377 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1378 	}
   1379 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1380 		ADD(IFM_2500_KX, 0);
   1381 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1382 	}
   1383 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1384 		device_printf(dev, "Media supported: 1000baseBX\n");
   1385 	/* XXX no ifmedia_set? */
   1386 
   1387 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
   1388 		ADD(IFM_1000_T | IFM_FDX, 0);
   1389 		ADD(IFM_1000_T, 0);
   1390 	}
   1391 
   1392 	ADD(IFM_AUTO, 0);
   1393 
   1394 #undef ADD
   1395 } /* ixgbe_add_media_types */
   1396 
   1397 /************************************************************************
   1398  * ixgbe_is_sfp
   1399  ************************************************************************/
   1400 static inline bool
   1401 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1402 {
   1403 	switch (hw->mac.type) {
   1404 	case ixgbe_mac_82598EB:
   1405 		if (hw->phy.type == ixgbe_phy_nl)
   1406 			return TRUE;
   1407 		return FALSE;
   1408 	case ixgbe_mac_82599EB:
   1409 		switch (hw->mac.ops.get_media_type(hw)) {
   1410 		case ixgbe_media_type_fiber:
   1411 		case ixgbe_media_type_fiber_qsfp:
   1412 			return TRUE;
   1413 		default:
   1414 			return FALSE;
   1415 		}
   1416 	case ixgbe_mac_X550EM_x:
   1417 	case ixgbe_mac_X550EM_a:
   1418 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1419 			return TRUE;
   1420 		return FALSE;
   1421 	default:
   1422 		return FALSE;
   1423 	}
   1424 } /* ixgbe_is_sfp */
   1425 
   1426 /************************************************************************
   1427  * ixgbe_config_link
   1428  ************************************************************************/
   1429 static void
   1430 ixgbe_config_link(struct adapter *adapter)
   1431 {
   1432 	struct ixgbe_hw *hw = &adapter->hw;
   1433 	u32             autoneg, err = 0;
   1434 	bool            sfp, negotiate = false;
   1435 
   1436 	sfp = ixgbe_is_sfp(hw);
   1437 
   1438 	if (sfp) {
   1439 		if (hw->phy.multispeed_fiber) {
   1440 			hw->mac.ops.setup_sfp(hw);
   1441 			ixgbe_enable_tx_laser(hw);
   1442 			kpreempt_disable();
   1443 			softint_schedule(adapter->msf_si);
   1444 			kpreempt_enable();
   1445 		} else {
   1446 			kpreempt_disable();
   1447 			softint_schedule(adapter->mod_si);
   1448 			kpreempt_enable();
   1449 		}
   1450 	} else {
   1451 		if (hw->mac.ops.check_link)
   1452 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1453 			    &adapter->link_up, FALSE);
   1454 		if (err)
   1455 			goto out;
   1456 		autoneg = hw->phy.autoneg_advertised;
   1457 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   1458 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1459 			    &negotiate);
   1460 		if (err)
   1461 			goto out;
   1462 		if (hw->mac.ops.setup_link)
   1463 			err = hw->mac.ops.setup_link(hw, autoneg,
   1464 			    adapter->link_up);
   1465 	}
   1466 out:
   1467 
   1468 	return;
   1469 } /* ixgbe_config_link */
   1470 
   1471 /************************************************************************
   1472  * ixgbe_update_stats_counters - Update board statistics counters.
   1473  ************************************************************************/
   1474 static void
   1475 ixgbe_update_stats_counters(struct adapter *adapter)
   1476 {
   1477 	struct ifnet          *ifp = adapter->ifp;
   1478 	struct ixgbe_hw       *hw = &adapter->hw;
   1479 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1480 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1481 	u64                   total_missed_rx = 0;
   1482 	uint64_t              crcerrs, rlec;
   1483 
   1484 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1485 	stats->crcerrs.ev_count += crcerrs;
   1486 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1487 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1488 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1489 	if (hw->mac.type == ixgbe_mac_X550)
   1490 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1491 
   1492 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1493 		int j = i % adapter->num_queues;
   1494 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1495 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1496 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1497 	}
   1498 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1499 		uint32_t mp;
   1500 		int j = i % adapter->num_queues;
   1501 
   1502 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1503 		/* global total per queue */
   1504 		stats->mpc[j].ev_count += mp;
   1505 		/* running comprehensive total for stats display */
   1506 		total_missed_rx += mp;
   1507 
   1508 		if (hw->mac.type == ixgbe_mac_82598EB)
   1509 			stats->rnbc[j].ev_count
   1510 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1511 
   1512 	}
   1513 	stats->mpctotal.ev_count += total_missed_rx;
   1514 
   1515 	/* The datasheet says M[LR]FC are only valid when the link is up at 10Gb/s */
   1516 	if ((adapter->link_active == TRUE)
   1517 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1518 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1519 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1520 	}
   1521 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1522 	stats->rlec.ev_count += rlec;
   1523 
   1524 	/* Hardware workaround, gprc counts missed packets */
   1525 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1526 
   1527 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1528 	stats->lxontxc.ev_count += lxon;
   1529 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1530 	stats->lxofftxc.ev_count += lxoff;
   1531 	total = lxon + lxoff;
   1532 
   1533 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1534 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1535 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1536 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1537 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1538 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1539 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1540 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1541 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1542 	} else {
   1543 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1544 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1545 		/* 82598 only has a counter in the high register */
   1546 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1547 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1548 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1549 	}
   1550 
   1551 	/*
   1552 	 * Workaround: mprc hardware is incorrectly counting
   1553 	 * broadcasts, so for now we subtract those.
   1554 	 */
   1555 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1556 	stats->bprc.ev_count += bprc;
   1557 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1558 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1559 
   1560 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1561 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1562 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1563 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1564 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1565 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1566 
   1567 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1568 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1569 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1570 
   1571 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1572 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1573 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1574 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1575 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1576 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1577 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1578 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1579 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1580 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1581 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1582 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1583 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1584 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1585 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1586 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1587 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1588 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1589 	/* Only read FCOE on 82599 */
   1590 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1591 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1592 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1593 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1594 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1595 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1596 	}
   1597 
   1598 	/* Fill out the OS statistics structure */
   1599 	/*
   1600 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1601 	 * adapter->stats counters. This is required to make ifconfig -z
   1602 	 * (SIOCZIFDATA) work.
   1603 	 */
   1604 	ifp->if_collisions = 0;
   1605 
   1606 	/* Rx Errors */
   1607 	ifp->if_iqdrops += total_missed_rx;
   1608 	ifp->if_ierrors += crcerrs + rlec;
   1609 } /* ixgbe_update_stats_counters */
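
/*
 * A hedged sketch of the octet-counter handling above: on non-82598
 * MACs the good-octet counters are split across a low/high register
 * pair and reassembled as low + (high << 32), and the transmit count
 * is reduced by one minimum-sized frame per XON/XOFF pause frame sent,
 * since the MAC counts those frames too.  The helper name below is
 * hypothetical and the block is illustrative only.
 */
#if 0
static inline uint64_t
ixgbe_read_gotc_example(struct ixgbe_hw *hw, uint32_t pause_frames)
{
	uint64_t gotc;

	gotc = IXGBE_READ_REG(hw, IXGBE_GOTCL);
	gotc += (uint64_t)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32;
	/* Each pause frame is counted as a minimum-sized frame. */
	return gotc - (uint64_t)pause_frames * ETHER_MIN_LEN;
}
#endif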
   1610 
   1611 /************************************************************************
   1612  * ixgbe_add_hw_stats
   1613  *
   1614  *   Add sysctl variables, one per statistic, to the system.
   1615  ************************************************************************/
   1616 static void
   1617 ixgbe_add_hw_stats(struct adapter *adapter)
   1618 {
   1619 	device_t dev = adapter->dev;
   1620 	const struct sysctlnode *rnode, *cnode;
   1621 	struct sysctllog **log = &adapter->sysctllog;
   1622 	struct tx_ring *txr = adapter->tx_rings;
   1623 	struct rx_ring *rxr = adapter->rx_rings;
   1624 	struct ixgbe_hw *hw = &adapter->hw;
   1625 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1626 	const char *xname = device_xname(dev);
   1627 
   1628 	/* Driver Statistics */
   1629 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1630 	    NULL, xname, "Handled queue in softint");
   1631 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1632 	    NULL, xname, "Requeued in softint");
   1633 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1634 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1635 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1636 	    NULL, xname, "m_defrag() failed");
   1637 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1638 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1639 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1640 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1641 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1642 	    NULL, xname, "Driver tx dma hard fail other");
   1643 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1644 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1645 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1646 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1647 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1648 	    NULL, xname, "Watchdog timeouts");
   1649 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1650 	    NULL, xname, "TSO errors");
   1651 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1652 	    NULL, xname, "Link MSI-X IRQ Handled");
   1653 
   1654 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1655 		snprintf(adapter->queues[i].evnamebuf,
   1656 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1657 		    xname, i);
   1658 		snprintf(adapter->queues[i].namebuf,
   1659 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1660 
   1661 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1662 			aprint_error_dev(dev, "could not create sysctl root\n");
   1663 			break;
   1664 		}
   1665 
   1666 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1667 		    0, CTLTYPE_NODE,
   1668 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1669 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1670 			break;
   1671 
   1672 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1673 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1674 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1675 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1676 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1677 			break;
   1678 
   1679 #if 0 /* XXX msaitoh */
   1680 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1681 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1682 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1683 			NULL, 0, &(adapter->queues[i].irqs),
   1684 		    0, CTL_CREATE, CTL_EOL) != 0)
   1685 			break;
   1686 #endif
   1687 
   1688 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1689 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1690 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1691 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1692 		    0, CTL_CREATE, CTL_EOL) != 0)
   1693 			break;
   1694 
   1695 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1696 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1697 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1698 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1699 		    0, CTL_CREATE, CTL_EOL) != 0)
   1700 			break;
   1701 
   1702 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1703 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1704 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1705 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1706 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1707 		    NULL, adapter->queues[i].evnamebuf,
   1708 		    "Queue No Descriptor Available");
   1709 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1710 		    NULL, adapter->queues[i].evnamebuf,
   1711 		    "Queue Packets Transmitted");
   1712 #ifndef IXGBE_LEGACY_TX
   1713 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1714 		    NULL, adapter->queues[i].evnamebuf,
   1715 		    "Packets dropped in pcq");
   1716 #endif
   1717 
   1718 #ifdef LRO
   1719 		struct lro_ctrl *lro = &rxr->lro;
   1720 #endif /* LRO */
   1721 
   1722 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1723 		    CTLFLAG_READONLY,
   1724 		    CTLTYPE_INT,
   1725 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1726 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1727 		    CTL_CREATE, CTL_EOL) != 0)
   1728 			break;
   1729 
   1730 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1731 		    CTLFLAG_READONLY,
   1732 		    CTLTYPE_INT,
   1733 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1734 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1735 		    CTL_CREATE, CTL_EOL) != 0)
   1736 			break;
   1737 
   1738 		if (i < __arraycount(stats->mpc)) {
   1739 			evcnt_attach_dynamic(&stats->mpc[i],
   1740 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1741 			    "RX Missed Packet Count");
   1742 			if (hw->mac.type == ixgbe_mac_82598EB)
   1743 				evcnt_attach_dynamic(&stats->rnbc[i],
   1744 				    EVCNT_TYPE_MISC, NULL,
   1745 				    adapter->queues[i].evnamebuf,
   1746 				    "Receive No Buffers");
   1747 		}
   1748 		if (i < __arraycount(stats->pxontxc)) {
   1749 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1750 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1751 			    "pxontxc");
   1752 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1753 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1754 			    "pxonrxc");
   1755 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1756 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1757 			    "pxofftxc");
   1758 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1759 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1760 			    "pxoffrxc");
   1761 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1762 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1763 			    "pxon2offc");
   1764 		}
   1765 		if (i < __arraycount(stats->qprc)) {
   1766 			evcnt_attach_dynamic(&stats->qprc[i],
   1767 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1768 			    "qprc");
   1769 			evcnt_attach_dynamic(&stats->qptc[i],
   1770 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1771 			    "qptc");
   1772 			evcnt_attach_dynamic(&stats->qbrc[i],
   1773 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1774 			    "qbrc");
   1775 			evcnt_attach_dynamic(&stats->qbtc[i],
   1776 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1777 			    "qbtc");
   1778 			evcnt_attach_dynamic(&stats->qprdc[i],
   1779 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1780 			    "qprdc");
   1781 		}
   1782 
   1783 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1784 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1785 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1786 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1787 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1788 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1789 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1790 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1791 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1792 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1793 #ifdef LRO
   1794 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1795 				CTLFLAG_RD, &lro->lro_queued, 0,
   1796 				"LRO Queued");
   1797 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1798 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1799 				"LRO Flushed");
   1800 #endif /* LRO */
   1801 	}
   1802 
   1803 	/* MAC stats get their own sub node */
   1804 
   1805 	snprintf(stats->namebuf,
   1806 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1807 
   1808 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1809 	    stats->namebuf, "rx csum offload - IP");
   1810 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1811 	    stats->namebuf, "rx csum offload - L4");
   1812 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1813 	    stats->namebuf, "rx csum offload - IP bad");
   1814 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1815 	    stats->namebuf, "rx csum offload - L4 bad");
   1816 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1817 	    stats->namebuf, "Interrupt conditions zero");
   1818 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1819 	    stats->namebuf, "Legacy interrupts");
   1820 
   1821 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1822 	    stats->namebuf, "CRC Errors");
   1823 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1824 	    stats->namebuf, "Illegal Byte Errors");
   1825 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1826 	    stats->namebuf, "Byte Errors");
   1827 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1828 	    stats->namebuf, "MAC Short Packets Discarded");
   1829 	if (hw->mac.type >= ixgbe_mac_X550)
   1830 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1831 		    stats->namebuf, "Bad SFD");
   1832 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1833 	    stats->namebuf, "Total Packets Missed");
   1834 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1835 	    stats->namebuf, "MAC Local Faults");
   1836 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1837 	    stats->namebuf, "MAC Remote Faults");
   1838 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1839 	    stats->namebuf, "Receive Length Errors");
   1840 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1841 	    stats->namebuf, "Link XON Transmitted");
   1842 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1843 	    stats->namebuf, "Link XON Received");
   1844 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1845 	    stats->namebuf, "Link XOFF Transmitted");
   1846 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1847 	    stats->namebuf, "Link XOFF Received");
   1848 
   1849 	/* Packet Reception Stats */
   1850 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1851 	    stats->namebuf, "Total Octets Received");
   1852 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1853 	    stats->namebuf, "Good Octets Received");
   1854 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1855 	    stats->namebuf, "Total Packets Received");
   1856 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1857 	    stats->namebuf, "Good Packets Received");
   1858 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1859 	    stats->namebuf, "Multicast Packets Received");
   1860 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1861 	    stats->namebuf, "Broadcast Packets Received");
   1862 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1863 	    stats->namebuf, "64 byte frames received");
   1864 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1865 	    stats->namebuf, "65-127 byte frames received");
   1866 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1867 	    stats->namebuf, "128-255 byte frames received");
   1868 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1869 	    stats->namebuf, "256-511 byte frames received");
   1870 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1871 	    stats->namebuf, "512-1023 byte frames received");
   1872 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   1873 	    stats->namebuf, "1024-1522 byte frames received");
   1874 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1875 	    stats->namebuf, "Receive Undersized");
   1876 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1877 	    stats->namebuf, "Fragmented Packets Received");
   1878 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1879 	    stats->namebuf, "Oversized Packets Received");
   1880 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1881 	    stats->namebuf, "Received Jabber");
   1882 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1883 	    stats->namebuf, "Management Packets Received");
   1884 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1885 	    stats->namebuf, "Management Packets Dropped");
   1886 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1887 	    stats->namebuf, "Checksum Errors");
   1888 
   1889 	/* Packet Transmission Stats */
   1890 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1891 	    stats->namebuf, "Good Octets Transmitted");
   1892 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1893 	    stats->namebuf, "Total Packets Transmitted");
   1894 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1895 	    stats->namebuf, "Good Packets Transmitted");
   1896 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1897 	    stats->namebuf, "Broadcast Packets Transmitted");
   1898 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1899 	    stats->namebuf, "Multicast Packets Transmitted");
   1900 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1901 	    stats->namebuf, "Management Packets Transmitted");
   1902 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1903 	    stats->namebuf, "64 byte frames transmitted");
   1904 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1905 	    stats->namebuf, "65-127 byte frames transmitted");
   1906 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1907 	    stats->namebuf, "128-255 byte frames transmitted");
   1908 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1909 	    stats->namebuf, "256-511 byte frames transmitted");
   1910 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1911 	    stats->namebuf, "512-1023 byte frames transmitted");
   1912 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1913 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1914 } /* ixgbe_add_hw_stats */
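
/*
 * The counters attached above are ordinary evcnt(9) counters, so they
 * can be inspected from userland with vmstat(1)'s -e option; the event
 * names match the evnamebuf/namebuf strings built above, e.g.
 * "<xname> q0 IRQs on queue" and "<xname> MAC Statistics CRC Errors"
 * (the exact instance name depends on how the device attached).
 */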
   1915 
   1916 static void
   1917 ixgbe_clear_evcnt(struct adapter *adapter)
   1918 {
   1919 	struct tx_ring *txr = adapter->tx_rings;
   1920 	struct rx_ring *rxr = adapter->rx_rings;
   1921 	struct ixgbe_hw *hw = &adapter->hw;
   1922 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1923 
   1924 	adapter->handleq.ev_count = 0;
   1925 	adapter->req.ev_count = 0;
   1926 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1927 	adapter->mbuf_defrag_failed.ev_count = 0;
   1928 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1929 	adapter->einval_tx_dma_setup.ev_count = 0;
   1930 	adapter->other_tx_dma_setup.ev_count = 0;
   1931 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1932 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1933 	adapter->watchdog_events.ev_count = 0;
   1934 	adapter->tso_err.ev_count = 0;
   1935 	adapter->link_irq.ev_count = 0;
   1936 
   1937 	txr = adapter->tx_rings;
   1938 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1939 		adapter->queues[i].irqs.ev_count = 0;
   1940 		txr->no_desc_avail.ev_count = 0;
   1941 		txr->total_packets.ev_count = 0;
   1942 		txr->tso_tx.ev_count = 0;
   1943 #ifndef IXGBE_LEGACY_TX
   1944 		txr->pcq_drops.ev_count = 0;
   1945 #endif
   1946 
   1947 		if (i < __arraycount(stats->mpc)) {
   1948 			stats->mpc[i].ev_count = 0;
   1949 			if (hw->mac.type == ixgbe_mac_82598EB)
   1950 				stats->rnbc[i].ev_count = 0;
   1951 		}
   1952 		if (i < __arraycount(stats->pxontxc)) {
   1953 			stats->pxontxc[i].ev_count = 0;
   1954 			stats->pxonrxc[i].ev_count = 0;
   1955 			stats->pxofftxc[i].ev_count = 0;
   1956 			stats->pxoffrxc[i].ev_count = 0;
   1957 			stats->pxon2offc[i].ev_count = 0;
   1958 		}
   1959 		if (i < __arraycount(stats->qprc)) {
   1960 			stats->qprc[i].ev_count = 0;
   1961 			stats->qptc[i].ev_count = 0;
   1962 			stats->qbrc[i].ev_count = 0;
   1963 			stats->qbtc[i].ev_count = 0;
   1964 			stats->qprdc[i].ev_count = 0;
   1965 		}
   1966 
   1967 		rxr->rx_packets.ev_count = 0;
   1968 		rxr->rx_bytes.ev_count = 0;
   1969 		rxr->rx_copies.ev_count = 0;
   1970 		rxr->no_jmbuf.ev_count = 0;
   1971 		rxr->rx_discarded.ev_count = 0;
   1972 	}
   1973 	stats->ipcs.ev_count = 0;
   1974 	stats->l4cs.ev_count = 0;
   1975 	stats->ipcs_bad.ev_count = 0;
   1976 	stats->l4cs_bad.ev_count = 0;
   1977 	stats->intzero.ev_count = 0;
   1978 	stats->legint.ev_count = 0;
   1979 	stats->crcerrs.ev_count = 0;
   1980 	stats->illerrc.ev_count = 0;
   1981 	stats->errbc.ev_count = 0;
   1982 	stats->mspdc.ev_count = 0;
   1983 	stats->mbsdc.ev_count = 0;
   1984 	stats->mpctotal.ev_count = 0;
   1985 	stats->mlfc.ev_count = 0;
   1986 	stats->mrfc.ev_count = 0;
   1987 	stats->rlec.ev_count = 0;
   1988 	stats->lxontxc.ev_count = 0;
   1989 	stats->lxonrxc.ev_count = 0;
   1990 	stats->lxofftxc.ev_count = 0;
   1991 	stats->lxoffrxc.ev_count = 0;
   1992 
   1993 	/* Packet Reception Stats */
   1994 	stats->tor.ev_count = 0;
   1995 	stats->gorc.ev_count = 0;
   1996 	stats->tpr.ev_count = 0;
   1997 	stats->gprc.ev_count = 0;
   1998 	stats->mprc.ev_count = 0;
   1999 	stats->bprc.ev_count = 0;
   2000 	stats->prc64.ev_count = 0;
   2001 	stats->prc127.ev_count = 0;
   2002 	stats->prc255.ev_count = 0;
   2003 	stats->prc511.ev_count = 0;
   2004 	stats->prc1023.ev_count = 0;
   2005 	stats->prc1522.ev_count = 0;
   2006 	stats->ruc.ev_count = 0;
   2007 	stats->rfc.ev_count = 0;
   2008 	stats->roc.ev_count = 0;
   2009 	stats->rjc.ev_count = 0;
   2010 	stats->mngprc.ev_count = 0;
   2011 	stats->mngpdc.ev_count = 0;
   2012 	stats->xec.ev_count = 0;
   2013 
   2014 	/* Packet Transmission Stats */
   2015 	stats->gotc.ev_count = 0;
   2016 	stats->tpt.ev_count = 0;
   2017 	stats->gptc.ev_count = 0;
   2018 	stats->bptc.ev_count = 0;
   2019 	stats->mptc.ev_count = 0;
   2020 	stats->mngptc.ev_count = 0;
   2021 	stats->ptc64.ev_count = 0;
   2022 	stats->ptc127.ev_count = 0;
   2023 	stats->ptc255.ev_count = 0;
   2024 	stats->ptc511.ev_count = 0;
   2025 	stats->ptc1023.ev_count = 0;
   2026 	stats->ptc1522.ev_count = 0;
   2027 }
   2028 
   2029 /************************************************************************
   2030  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2031  *
   2032  *   Retrieves the TDH value from the hardware
   2033  ************************************************************************/
   2034 static int
   2035 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2036 {
   2037 	struct sysctlnode node = *rnode;
   2038 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2039 	uint32_t val;
   2040 
   2041 	if (!txr)
   2042 		return (0);
   2043 
   2044 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2045 	node.sysctl_data = &val;
   2046 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2047 } /* ixgbe_sysctl_tdh_handler */
   2048 
   2049 /************************************************************************
   2050  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2051  *
   2052  *   Retrieves the TDT value from the hardware
   2053  ************************************************************************/
   2054 static int
   2055 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2056 {
   2057 	struct sysctlnode node = *rnode;
   2058 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2059 	uint32_t val;
   2060 
   2061 	if (!txr)
   2062 		return (0);
   2063 
   2064 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2065 	node.sysctl_data = &val;
   2066 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2067 } /* ixgbe_sysctl_tdt_handler */
   2068 
   2069 /************************************************************************
   2070  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2071  *
   2072  *   Retrieves the RDH value from the hardware
   2073  ************************************************************************/
   2074 static int
   2075 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2076 {
   2077 	struct sysctlnode node = *rnode;
   2078 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2079 	uint32_t val;
   2080 
   2081 	if (!rxr)
   2082 		return (0);
   2083 
   2084 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2085 	node.sysctl_data = &val;
   2086 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2087 } /* ixgbe_sysctl_rdh_handler */
   2088 
   2089 /************************************************************************
   2090  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2091  *
   2092  *   Retrieves the RDT value from the hardware
   2093  ************************************************************************/
   2094 static int
   2095 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2096 {
   2097 	struct sysctlnode node = *rnode;
   2098 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2099 	uint32_t val;
   2100 
   2101 	if (!rxr)
   2102 		return (0);
   2103 
   2104 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2105 	node.sysctl_data = &val;
   2106 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2107 } /* ixgbe_sysctl_rdt_handler */
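
/*
 * All four descriptor-pointer handlers above follow the same NetBSD
 * sysctl pattern: copy the node, read the register into a stack
 * variable, point sysctl_data at it and let sysctl_lookup() copy the
 * value out.  A hedged userland example, assuming the instance node
 * created by ixgbe_sysctl_instance() ends up under hw.ixg0:
 *
 *	$ sysctl hw.ixg0.q0.txd_head hw.ixg0.q0.rxd_tail
 */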
   2108 
   2109 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2110 /************************************************************************
   2111  * ixgbe_register_vlan
   2112  *
   2113  *   Run via vlan config EVENT, it enables us to use the
   2114  *   HW Filter table since we can get the vlan id. This
   2115  *   just creates the entry in the soft version of the
   2116  *   VFTA, init will repopulate the real table.
   2117  ************************************************************************/
   2118 static void
   2119 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2120 {
   2121 	struct adapter	*adapter = ifp->if_softc;
   2122 	u16		index, bit;
   2123 
   2124 	if (ifp->if_softc != arg)   /* Not our event */
   2125 		return;
   2126 
   2127 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2128 		return;
   2129 
   2130 	IXGBE_CORE_LOCK(adapter);
   2131 	index = (vtag >> 5) & 0x7F;
   2132 	bit = vtag & 0x1F;
   2133 	adapter->shadow_vfta[index] |= (1 << bit);
   2134 	ixgbe_setup_vlan_hw_support(adapter);
   2135 	IXGBE_CORE_UNLOCK(adapter);
   2136 } /* ixgbe_register_vlan */
   2137 
   2138 /************************************************************************
   2139  * ixgbe_unregister_vlan
   2140  *
   2141  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2142  ************************************************************************/
   2143 static void
   2144 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2145 {
   2146 	struct adapter	*adapter = ifp->if_softc;
   2147 	u16		index, bit;
   2148 
   2149 	if (ifp->if_softc != arg)
   2150 		return;
   2151 
   2152 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2153 		return;
   2154 
   2155 	IXGBE_CORE_LOCK(adapter);
   2156 	index = (vtag >> 5) & 0x7F;
   2157 	bit = vtag & 0x1F;
   2158 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2159 	/* Re-init to load the changes */
   2160 	ixgbe_setup_vlan_hw_support(adapter);
   2161 	IXGBE_CORE_UNLOCK(adapter);
   2162 } /* ixgbe_unregister_vlan */
   2163 #endif
   2164 
   2165 static void
   2166 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2167 {
   2168 	struct ethercom *ec = &adapter->osdep.ec;
   2169 	struct ixgbe_hw *hw = &adapter->hw;
   2170 	struct rx_ring	*rxr;
   2171 	int             i;
   2172 	u32		ctrl;
   2173 
   2174 
   2175 	/*
   2176 	 * We get here through init_locked, meaning a soft
   2177 	 * reset; that has already cleared the VFTA and other
   2178 	 * state, so if no VLANs have been registered there
   2179 	 * is nothing to do.
   2180 	 */
   2181 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2182 		return;
   2183 
   2184 	/* Setup the queues for vlans */
   2185 	for (i = 0; i < adapter->num_queues; i++) {
   2186 		rxr = &adapter->rx_rings[i];
   2187 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2188 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2189 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2190 			ctrl |= IXGBE_RXDCTL_VME;
   2191 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2192 		}
   2193 		rxr->vtag_strip = TRUE;
   2194 	}
   2195 
   2196 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2197 		return;
   2198 	/*
   2199 	 * A soft reset zeroes out the VFTA, so
   2200 	 * we need to repopulate it now.
   2201 	 */
   2202 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2203 		if (adapter->shadow_vfta[i] != 0)
   2204 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2205 			    adapter->shadow_vfta[i]);
   2206 
   2207 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2208 	/* Enable the Filter Table if enabled */
   2209 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2210 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2211 		ctrl |= IXGBE_VLNCTRL_VFE;
   2212 	}
   2213 	if (hw->mac.type == ixgbe_mac_82598EB)
   2214 		ctrl |= IXGBE_VLNCTRL_VME;
   2215 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2216 } /* ixgbe_setup_vlan_hw_support */
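
/*
 * A worked example of the shadow VFTA layout used above (and by the
 * disabled ixgbe_register_vlan()/ixgbe_unregister_vlan() handlers):
 * each 32-bit VFTA word covers 32 consecutive VLAN IDs, so for
 * vtag = 100:
 *
 *	index = (100 >> 5) & 0x7F = 3
 *	bit   =  100 & 0x1F       = 4
 *
 * i.e. VLAN 100 is bit 4 of shadow_vfta[3], and the repopulation loop
 * above writes that word back into IXGBE_VFTA(3) after a soft reset.
 */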
   2217 
   2218 /************************************************************************
   2219  * ixgbe_get_slot_info
   2220  *
   2221  *   Get the width and transaction speed of
   2222  *   the slot this adapter is plugged into.
   2223  ************************************************************************/
   2224 static void
   2225 ixgbe_get_slot_info(struct adapter *adapter)
   2226 {
   2227 	device_t		dev = adapter->dev;
   2228 	struct ixgbe_hw		*hw = &adapter->hw;
   2229 	u32                   offset;
   2230 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2231 	u16			link;
   2232 	int                   bus_info_valid = TRUE;
   2233 
   2234 	/* Some devices are behind an internal bridge */
   2235 	switch (hw->device_id) {
   2236 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2237 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2238 		goto get_parent_info;
   2239 	default:
   2240 		break;
   2241 	}
   2242 
   2243 	ixgbe_get_bus_info(hw);
   2244 
   2245 	/*
   2246 	 * Some devices don't use PCI-E, so displaying "Unknown"
   2247 	 * for bus speed and width would be meaningless; skip it.
   2248 	 */
   2249 	switch (hw->mac.type) {
   2250 	case ixgbe_mac_X550EM_x:
   2251 	case ixgbe_mac_X550EM_a:
   2252 		return;
   2253 	default:
   2254 		goto display;
   2255 	}
   2256 
   2257 get_parent_info:
   2258 	/*
   2259 	 * For the Quad port adapter we need to parse back
   2260 	 * up the PCI tree to find the speed of the expansion
   2261 	 * slot into which this adapter is plugged. A bit more work.
   2262 	 */
   2263 	dev = device_parent(device_parent(dev));
   2264 #if 0
   2265 #ifdef IXGBE_DEBUG
   2266 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2267 	    pci_get_slot(dev), pci_get_function(dev));
   2268 #endif
   2269 	dev = device_parent(device_parent(dev));
   2270 #ifdef IXGBE_DEBUG
   2271 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2272 	    pci_get_slot(dev), pci_get_function(dev));
   2273 #endif
   2274 #endif
   2275 	/* Now get the PCI Express Capabilities offset */
   2276 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2277 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2278 		/*
   2279 		 * Hmm...can't get PCI-Express capabilities.
   2280 		 * Falling back to default method.
   2281 		 */
   2282 		bus_info_valid = FALSE;
   2283 		ixgbe_get_bus_info(hw);
   2284 		goto display;
   2285 	}
   2286 	/* ...and read the Link Status Register */
   2287 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2288 	    offset + PCIE_LCSR);
   2289 	ixgbe_set_pci_config_data_generic(hw, link >> 16);
   2290 
   2291 display:
   2292 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2293 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2294 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2295 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2296 	     "Unknown"),
   2297 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2298 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2299 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2300 	     "Unknown"));
   2301 
   2302 	if (bus_info_valid) {
   2303 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2304 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2305 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2306 			device_printf(dev, "PCI-Express bandwidth available"
   2307 			    " for this card\n     is not sufficient for"
   2308 			    " optimal performance.\n");
   2309 			device_printf(dev, "For optimal performance a x8 "
   2310 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2311 		}
   2312 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2313 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2314 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2315 			device_printf(dev, "PCI-Express bandwidth available"
   2316 			    " for this card\n     is not sufficient for"
   2317 			    " optimal performance.\n");
   2318 			device_printf(dev, "For optimal performance a x8 "
   2319 			    "PCIE Gen3 slot is required.\n");
   2320 		}
   2321 	} else
   2322 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2323 
   2324 	return;
   2325 } /* ixgbe_get_slot_info */
   2326 
   2327 /************************************************************************
   2328  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2329  ************************************************************************/
   2330 static inline void
   2331 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2332 {
   2333 	struct ixgbe_hw *hw = &adapter->hw;
   2334 	u64             queue = (u64)(1ULL << vector);
   2335 	u32             mask;
   2336 
   2337 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2338 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2339 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2340 	} else {
   2341 		mask = (queue & 0xFFFFFFFF);
   2342 		if (mask)
   2343 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2344 		mask = (queue >> 32);
   2345 		if (mask)
   2346 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2347 	}
   2348 } /* ixgbe_enable_queue */
   2349 
   2350 /************************************************************************
   2351  * ixgbe_disable_queue
   2352  ************************************************************************/
   2353 static inline void
   2354 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2355 {
   2356 	struct ixgbe_hw *hw = &adapter->hw;
   2357 	u64             queue = (u64)(1ULL << vector);
   2358 	u32             mask;
   2359 
   2360 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2361 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2362 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2363 	} else {
   2364 		mask = (queue & 0xFFFFFFFF);
   2365 		if (mask)
   2366 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2367 		mask = (queue >> 32);
   2368 		if (mask)
   2369 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2370 	}
   2371 } /* ixgbe_disable_queue */
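
/*
 * A worked example of the queue-vector masking above: on post-82598
 * MACs the 64-bit queue bitmap is split across two 32-bit EIMS_EX /
 * EIMC_EX registers.  For vector = 35:
 *
 *	queue      = 1ULL << 35
 *	low dword  = queue & 0xFFFFFFFF = 0          (no write)
 *	high dword = queue >> 32        = 0x00000008 (bit 3)
 *
 * so enabling that queue writes 0x8 to IXGBE_EIMS_EX(1) and disabling
 * it writes 0x8 to IXGBE_EIMC_EX(1).
 */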
   2372 
   2373 /************************************************************************
   2374  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2375  ************************************************************************/
   2376 static int
   2377 ixgbe_msix_que(void *arg)
   2378 {
   2379 	struct ix_queue	*que = arg;
   2380 	struct adapter  *adapter = que->adapter;
   2381 	struct ifnet    *ifp = adapter->ifp;
   2382 	struct tx_ring	*txr = que->txr;
   2383 	struct rx_ring	*rxr = que->rxr;
   2384 	bool		more;
   2385 	u32		newitr = 0;
   2386 
   2387 	/* Protect against spurious interrupts */
   2388 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2389 		return 0;
   2390 
   2391 	ixgbe_disable_queue(adapter, que->msix);
   2392 	++que->irqs.ev_count;
   2393 
   2394 #ifdef __NetBSD__
   2395 	/* Don't run ixgbe_rxeof in interrupt context */
   2396 	more = true;
   2397 #else
   2398 	more = ixgbe_rxeof(que);
   2399 #endif
   2400 
   2401 	IXGBE_TX_LOCK(txr);
   2402 	ixgbe_txeof(txr);
   2403 	IXGBE_TX_UNLOCK(txr);
   2404 
   2405 	/* Do AIM now? */
   2406 
   2407 	if (adapter->enable_aim == false)
   2408 		goto no_calc;
   2409 	/*
   2410 	 * Do Adaptive Interrupt Moderation:
   2411 	 *  - Write out last calculated setting
   2412 	 *  - Calculate based on average size over
   2413 	 *    the last interval.
   2414 	 */
   2415 	if (que->eitr_setting)
   2416 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2417 		    que->eitr_setting);
   2418 
   2419 	que->eitr_setting = 0;
   2420 
   2421 	/* Idle, do nothing */
   2422 	if ((txr->bytes == 0) && (rxr->bytes == 0))
   2423 		goto no_calc;
   2424 
   2425 	if ((txr->bytes) && (txr->packets))
   2426 		newitr = txr->bytes/txr->packets;
   2427 	if ((rxr->bytes) && (rxr->packets))
   2428 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2429 	newitr += 24; /* account for preamble, IFG and CRC overhead */
   2430 
   2431 	/* set an upper boundary */
   2432 	newitr = min(newitr, 3000);
   2433 
   2434 	/* Be nice to the mid range */
   2435 	if ((newitr > 300) && (newitr < 1200))
   2436 		newitr = (newitr / 3);
   2437 	else
   2438 		newitr = (newitr / 2);
   2439 
   2440 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   2441 		newitr |= newitr << 16;
   2442 	else
   2443 		newitr |= IXGBE_EITR_CNT_WDIS;
   2444 
   2445 	/* save for next interrupt */
   2446 	que->eitr_setting = newitr;
   2447 
   2448 	/* Reset state */
   2449 	txr->bytes = 0;
   2450 	txr->packets = 0;
   2451 	rxr->bytes = 0;
   2452 	rxr->packets = 0;
   2453 
   2454 no_calc:
   2455 	if (more)
   2456 		softint_schedule(que->que_si);
   2457 	else
   2458 		ixgbe_enable_queue(adapter, que->msix);
   2459 
   2460 	return 1;
   2461 } /* ixgbe_msix_que */
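
/*
 * A worked example of the AIM calculation above, assuming an interval
 * in which both rings moved mostly full-sized frames (about 1500 bytes
 * per packet on average):
 *
 *	newitr  = 1500                 (larger of tx/rx bytes-per-packet)
 *	newitr += 24  -> 1524          (frame overhead)
 *	newitr  = min(1524, 3000) = 1524
 *	1524 is not in (300, 1200)  -> newitr / 2 = 762
 *
 * That value (OR'ed with IXGBE_EITR_CNT_WDIS on non-82598 MACs) is
 * written to EITR at the start of the next interrupt.
 */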
   2462 
   2463 /************************************************************************
   2464  * ixgbe_media_status - Media Ioctl callback
   2465  *
   2466  *   Called whenever the user queries the status of
   2467  *   the interface using ifconfig.
   2468  ************************************************************************/
   2469 static void
   2470 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2471 {
   2472 	struct adapter *adapter = ifp->if_softc;
   2473 	struct ixgbe_hw *hw = &adapter->hw;
   2474 	int layer;
   2475 
   2476 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2477 	IXGBE_CORE_LOCK(adapter);
   2478 	ixgbe_update_link_status(adapter);
   2479 
   2480 	ifmr->ifm_status = IFM_AVALID;
   2481 	ifmr->ifm_active = IFM_ETHER;
   2482 
   2483 	if (!adapter->link_active) {
   2484 		ifmr->ifm_active |= IFM_NONE;
   2485 		IXGBE_CORE_UNLOCK(adapter);
   2486 		return;
   2487 	}
   2488 
   2489 	ifmr->ifm_status |= IFM_ACTIVE;
   2490 	layer = adapter->phy_layer;
   2491 
   2492 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2493 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2494 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2495 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2496 		switch (adapter->link_speed) {
   2497 		case IXGBE_LINK_SPEED_10GB_FULL:
   2498 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2499 			break;
   2500 		case IXGBE_LINK_SPEED_1GB_FULL:
   2501 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2502 			break;
   2503 		case IXGBE_LINK_SPEED_100_FULL:
   2504 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2505 			break;
   2506 		case IXGBE_LINK_SPEED_10_FULL:
   2507 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2508 			break;
   2509 		}
   2510 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2511 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2512 		switch (adapter->link_speed) {
   2513 		case IXGBE_LINK_SPEED_10GB_FULL:
   2514 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2515 			break;
   2516 		}
   2517 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2518 		switch (adapter->link_speed) {
   2519 		case IXGBE_LINK_SPEED_10GB_FULL:
   2520 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2521 			break;
   2522 		case IXGBE_LINK_SPEED_1GB_FULL:
   2523 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2524 			break;
   2525 		}
   2526 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2527 		switch (adapter->link_speed) {
   2528 		case IXGBE_LINK_SPEED_10GB_FULL:
   2529 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2530 			break;
   2531 		case IXGBE_LINK_SPEED_1GB_FULL:
   2532 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2533 			break;
   2534 		}
   2535 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2536 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2537 		switch (adapter->link_speed) {
   2538 		case IXGBE_LINK_SPEED_10GB_FULL:
   2539 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2540 			break;
   2541 		case IXGBE_LINK_SPEED_1GB_FULL:
   2542 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2543 			break;
   2544 		}
   2545 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2546 		switch (adapter->link_speed) {
   2547 		case IXGBE_LINK_SPEED_10GB_FULL:
   2548 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2549 			break;
   2550 		}
   2551 	/*
   2552 	 * XXX: These need to use the proper media types once
   2553 	 * they're added.
   2554 	 */
   2555 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2556 		switch (adapter->link_speed) {
   2557 		case IXGBE_LINK_SPEED_10GB_FULL:
   2558 #ifndef IFM_ETH_XTYPE
   2559 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2560 #else
   2561 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2562 #endif
   2563 			break;
   2564 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2565 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2566 			break;
   2567 		case IXGBE_LINK_SPEED_1GB_FULL:
   2568 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2569 			break;
   2570 		}
   2571 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2572 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2573 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2574 		switch (adapter->link_speed) {
   2575 		case IXGBE_LINK_SPEED_10GB_FULL:
   2576 #ifndef IFM_ETH_XTYPE
   2577 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2578 #else
   2579 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2580 #endif
   2581 			break;
   2582 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2583 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2584 			break;
   2585 		case IXGBE_LINK_SPEED_1GB_FULL:
   2586 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2587 			break;
   2588 		}
   2589 
   2590 	/* If nothing is recognized... */
   2591 #if 0
   2592 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2593 		ifmr->ifm_active |= IFM_UNKNOWN;
   2594 #endif
   2595 
   2596 	/* Display current flow control setting used on link */
   2597 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2598 	    hw->fc.current_mode == ixgbe_fc_full)
   2599 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2600 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2601 	    hw->fc.current_mode == ixgbe_fc_full)
   2602 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2603 
   2604 	IXGBE_CORE_UNLOCK(adapter);
   2605 
   2606 	return;
   2607 } /* ixgbe_media_status */
   2608 
   2609 /************************************************************************
   2610  * ixgbe_media_change - Media Ioctl callback
   2611  *
   2612  *   Called when the user changes speed/duplex using
   2613  *   media/mediaopt options with ifconfig.
   2614  ************************************************************************/
   2615 static int
   2616 ixgbe_media_change(struct ifnet *ifp)
   2617 {
   2618 	struct adapter   *adapter = ifp->if_softc;
   2619 	struct ifmedia   *ifm = &adapter->media;
   2620 	struct ixgbe_hw  *hw = &adapter->hw;
   2621 	ixgbe_link_speed speed = 0;
   2622 	ixgbe_link_speed link_caps = 0;
   2623 	bool negotiate = false;
   2624 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2625 
   2626 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2627 
   2628 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2629 		return (EINVAL);
   2630 
   2631 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2632 		return (ENODEV);
   2633 
   2634 	/*
   2635 	 * We don't actually need to check against the supported
   2636 	 * media types of the adapter; ifmedia will take care of
   2637 	 * that for us.
   2638 	 */
   2639 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2640 	case IFM_AUTO:
   2641 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2642 		    &negotiate);
   2643 		if (err != IXGBE_SUCCESS) {
   2644 			device_printf(adapter->dev, "Unable to determine "
   2645 			    "supported advertise speeds\n");
   2646 			return (ENODEV);
   2647 		}
   2648 		speed |= link_caps;
   2649 		break;
   2650 	case IFM_10G_T:
   2651 	case IFM_10G_LRM:
   2652 	case IFM_10G_LR:
   2653 	case IFM_10G_TWINAX:
   2654 #ifndef IFM_ETH_XTYPE
   2655 	case IFM_10G_SR: /* KR, too */
   2656 	case IFM_10G_CX4: /* KX4 */
   2657 #else
   2658 	case IFM_10G_KR:
   2659 	case IFM_10G_KX4:
   2660 #endif
   2661 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2662 		break;
   2663 	case IFM_2500_KX:
   2664 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2665 		break;
   2666 	case IFM_1000_T:
   2667 	case IFM_1000_LX:
   2668 	case IFM_1000_SX:
   2669 	case IFM_1000_KX:
   2670 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2671 		break;
   2672 	case IFM_100_TX:
   2673 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2674 		break;
   2675 	case IFM_10_T:
   2676 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2677 		break;
   2678 	default:
   2679 		goto invalid;
   2680 	}
   2681 
   2682 	hw->mac.autotry_restart = TRUE;
   2683 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2684 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
   2685 		adapter->advertise = 0;
   2686 	} else {
   2687 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2688 			adapter->advertise |= 1 << 2;
   2689 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2690 			adapter->advertise |= 1 << 1;
   2691 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2692 			adapter->advertise |= 1 << 0;
   2693 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2694 			adapter->advertise |= 1 << 3;
   2695 	}
   2696 
   2697 	return (0);
   2698 
   2699 invalid:
   2700 	device_printf(adapter->dev, "Invalid media type!\n");
   2701 
   2702 	return (EINVAL);
   2703 } /* ixgbe_media_change */
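
/*
 * The adapter->advertise bitmap built above encodes the selected
 * speeds as: bit 0 = 100M, bit 1 = 1G, bit 2 = 10G, bit 3 = 10M.
 * For example, selecting 1000baseT via ifconfig yields
 * speed = IXGBE_LINK_SPEED_1GB_FULL and adapter->advertise = 0x2,
 * while "media autoselect" clears the bitmap entirely.
 */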
   2704 
   2705 /************************************************************************
   2706  * ixgbe_set_promisc
   2707  ************************************************************************/
   2708 static void
   2709 ixgbe_set_promisc(struct adapter *adapter)
   2710 {
   2711 	struct ifnet *ifp = adapter->ifp;
   2712 	int          mcnt = 0;
   2713 	u32          rctl;
   2714 	struct ether_multi *enm;
   2715 	struct ether_multistep step;
   2716 	struct ethercom *ec = &adapter->osdep.ec;
   2717 
   2718 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2719 	rctl &= (~IXGBE_FCTRL_UPE);
   2720 	if (ifp->if_flags & IFF_ALLMULTI)
   2721 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2722 	else {
   2723 		ETHER_FIRST_MULTI(step, ec, enm);
   2724 		while (enm != NULL) {
   2725 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2726 				break;
   2727 			mcnt++;
   2728 			ETHER_NEXT_MULTI(step, enm);
   2729 		}
   2730 	}
   2731 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2732 		rctl &= (~IXGBE_FCTRL_MPE);
   2733 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2734 
   2735 	if (ifp->if_flags & IFF_PROMISC) {
   2736 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2737 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2738 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2739 		rctl |= IXGBE_FCTRL_MPE;
   2740 		rctl &= ~IXGBE_FCTRL_UPE;
   2741 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2742 	}
   2743 } /* ixgbe_set_promisc */
   2744 
   2745 /************************************************************************
   2746  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2747  ************************************************************************/
   2748 static int
   2749 ixgbe_msix_link(void *arg)
   2750 {
   2751 	struct adapter	*adapter = arg;
   2752 	struct ixgbe_hw *hw = &adapter->hw;
   2753 	u32		eicr, eicr_mask;
   2754 	s32             retval;
   2755 
   2756 	++adapter->link_irq.ev_count;
   2757 
   2758 	/* Pause other interrupts */
   2759 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2760 
   2761 	/* First get the cause */
   2762 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2763 	/* Be sure the queue bits are not cleared */
   2764 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2765 	/* Clear interrupt with write */
   2766 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2767 
   2768 	/* Link status change */
   2769 	if (eicr & IXGBE_EICR_LSC) {
   2770 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2771 		softint_schedule(adapter->link_si);
   2772 	}
   2773 
   2774 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2775 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2776 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2777 			/* This is probably overkill :) */
   2778 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   2779 				return 1;
   2780 			/* Disable the interrupt */
   2781 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2782 			softint_schedule(adapter->fdir_si);
   2783 		}
   2784 
   2785 		if (eicr & IXGBE_EICR_ECC) {
   2786 			device_printf(adapter->dev,
   2787 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2788 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2789 		}
   2790 
   2791 		/* Check for over temp condition */
   2792 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2793 			switch (adapter->hw.mac.type) {
   2794 			case ixgbe_mac_X550EM_a:
   2795 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2796 					break;
   2797 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2798 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2799 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2800 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2801 				retval = hw->phy.ops.check_overtemp(hw);
   2802 				if (retval != IXGBE_ERR_OVERTEMP)
   2803 					break;
   2804 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2805 				device_printf(adapter->dev, "System shutdown required!\n");
   2806 				break;
   2807 			default:
   2808 				if (!(eicr & IXGBE_EICR_TS))
   2809 					break;
   2810 				retval = hw->phy.ops.check_overtemp(hw);
   2811 				if (retval != IXGBE_ERR_OVERTEMP)
   2812 					break;
   2813 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2814 				device_printf(adapter->dev, "System shutdown required!\n");
   2815 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2816 				break;
   2817 			}
   2818 		}
   2819 
   2820 		/* Check for VF message */
   2821 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2822 		    (eicr & IXGBE_EICR_MAILBOX))
   2823 			softint_schedule(adapter->mbx_si);
   2824 	}
   2825 
   2826 	if (ixgbe_is_sfp(hw)) {
   2827 		/* Pluggable optics-related interrupt */
   2828 		if (hw->mac.type >= ixgbe_mac_X540)
   2829 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2830 		else
   2831 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2832 
   2833 		if (eicr & eicr_mask) {
   2834 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2835 			softint_schedule(adapter->mod_si);
   2836 		}
   2837 
   2838 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2839 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2840 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2841 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2842 			softint_schedule(adapter->msf_si);
   2843 		}
   2844 	}
   2845 
   2846 	/* Check for fan failure */
   2847 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2848 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2849 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2850 	}
   2851 
   2852 	/* External PHY interrupt */
   2853 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2854 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2855 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2856 		softint_schedule(adapter->phy_si);
    2857 	}
   2858 
   2859 	/* Re-enable other interrupts */
   2860 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2861 	return 1;
   2862 } /* ixgbe_msix_link */
   2863 
   2864 /************************************************************************
   2865  * ixgbe_sysctl_interrupt_rate_handler
   2866  ************************************************************************/
   2867 static int
   2868 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2869 {
   2870 	struct sysctlnode node = *rnode;
   2871 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2872 	uint32_t reg, usec, rate;
   2873 	int error;
   2874 
   2875 	if (que == NULL)
   2876 		return 0;
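         	/*
         	 * The EITR interval field occupies bits 3..11; the handler
         	 * reports it as an approximate interrupts-per-second rate,
         	 * i.e. 500000 / interval.
         	 */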
   2877 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2878 	usec = ((reg & 0x0FF8) >> 3);
   2879 	if (usec > 0)
   2880 		rate = 500000 / usec;
   2881 	else
   2882 		rate = 0;
   2883 	node.sysctl_data = &rate;
   2884 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2885 	if (error || newp == NULL)
   2886 		return error;
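         	/*
         	 * Clearing the low 12 bits disables moderation entirely.  For
         	 * a non-zero rate, (4000000 / rate) & 0xff8 is the new interval
         	 * already shifted into bits 3..11; clamping the rate to at
         	 * least 1000 keeps that value within the field.
         	 */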
   2887 	reg &= ~0xfff; /* default, no limitation */
   2888 	ixgbe_max_interrupt_rate = 0;
   2889 	if (rate > 0 && rate < 500000) {
   2890 		if (rate < 1000)
   2891 			rate = 1000;
   2892 		ixgbe_max_interrupt_rate = rate;
   2893 		reg |= ((4000000/rate) & 0xff8);
   2894 	}
   2895 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2896 
   2897 	return (0);
   2898 } /* ixgbe_sysctl_interrupt_rate_handler */
   2899 
   2900 const struct sysctlnode *
   2901 ixgbe_sysctl_instance(struct adapter *adapter)
   2902 {
   2903 	const char *dvname;
   2904 	struct sysctllog **log;
   2905 	int rc;
   2906 	const struct sysctlnode *rnode;
   2907 
   2908 	if (adapter->sysctltop != NULL)
   2909 		return adapter->sysctltop;
   2910 
   2911 	log = &adapter->sysctllog;
   2912 	dvname = device_xname(adapter->dev);
   2913 
   2914 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2915 	    0, CTLTYPE_NODE, dvname,
   2916 	    SYSCTL_DESCR("ixgbe information and settings"),
   2917 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2918 		goto err;
   2919 
   2920 	return rnode;
   2921 err:
   2922 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2923 	return NULL;
   2924 }
   2925 
   2926 /************************************************************************
   2927  * ixgbe_add_device_sysctls
   2928  ************************************************************************/
   2929 static void
   2930 ixgbe_add_device_sysctls(struct adapter *adapter)
   2931 {
   2932 	device_t               dev = adapter->dev;
   2933 	struct ixgbe_hw        *hw = &adapter->hw;
   2934 	struct sysctllog **log;
   2935 	const struct sysctlnode *rnode, *cnode;
   2936 
   2937 	log = &adapter->sysctllog;
   2938 
   2939 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   2940 		aprint_error_dev(dev, "could not create sysctl root\n");
   2941 		return;
   2942 	}
   2943 
   2944 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2945 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2946 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   2947 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2948 		aprint_error_dev(dev, "could not create sysctl\n");
   2949 
   2950 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2951 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2952 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   2953 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   2954 		aprint_error_dev(dev, "could not create sysctl\n");
   2955 
   2956 	/* Sysctls for all devices */
   2957 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2958 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   2959 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   2960 	    CTL_EOL) != 0)
   2961 		aprint_error_dev(dev, "could not create sysctl\n");
   2962 
   2963 	adapter->enable_aim = ixgbe_enable_aim;
   2964 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2965 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2966 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2967 		aprint_error_dev(dev, "could not create sysctl\n");
   2968 
   2969 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2970 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2971 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   2972 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   2973 	    CTL_EOL) != 0)
   2974 		aprint_error_dev(dev, "could not create sysctl\n");
   2975 
   2976 #ifdef IXGBE_DEBUG
   2977 	/* testing sysctls (for all devices) */
   2978 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2979 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   2980 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   2981 	    CTL_EOL) != 0)
   2982 		aprint_error_dev(dev, "could not create sysctl\n");
   2983 
   2984 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   2985 	    CTLTYPE_STRING, "print_rss_config",
   2986 	    SYSCTL_DESCR("Prints RSS Configuration"),
   2987 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   2988 	    CTL_EOL) != 0)
   2989 		aprint_error_dev(dev, "could not create sysctl\n");
   2990 #endif
   2991 	/* for X550 series devices */
   2992 	if (hw->mac.type >= ixgbe_mac_X550)
   2993 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2994 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   2995 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   2996 		    CTL_EOL) != 0)
   2997 			aprint_error_dev(dev, "could not create sysctl\n");
   2998 
   2999 	/* for WoL-capable devices */
   3000 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3001 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3002 		    CTLTYPE_BOOL, "wol_enable",
   3003 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3004 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3005 		    CTL_EOL) != 0)
   3006 			aprint_error_dev(dev, "could not create sysctl\n");
   3007 
   3008 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3009 		    CTLTYPE_INT, "wufc",
   3010 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3011 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3012 		    CTL_EOL) != 0)
   3013 			aprint_error_dev(dev, "could not create sysctl\n");
   3014 	}
   3015 
   3016 	/* for X552/X557-AT devices */
   3017 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3018 		const struct sysctlnode *phy_node;
   3019 
   3020 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3021 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3022 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3023 			aprint_error_dev(dev, "could not create sysctl\n");
   3024 			return;
   3025 		}
   3026 
   3027 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3028 		    CTLTYPE_INT, "temp",
   3029 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3030 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3031 		    CTL_EOL) != 0)
   3032 			aprint_error_dev(dev, "could not create sysctl\n");
   3033 
   3034 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3035 		    CTLTYPE_INT, "overtemp_occurred",
   3036 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3037 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3038 		    CTL_CREATE, CTL_EOL) != 0)
   3039 			aprint_error_dev(dev, "could not create sysctl\n");
   3040 	}
   3041 
   3042 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3043 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3044 		    CTLTYPE_INT, "eee_state",
   3045 		    SYSCTL_DESCR("EEE Power Save State"),
   3046 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3047 		    CTL_EOL) != 0)
   3048 			aprint_error_dev(dev, "could not create sysctl\n");
   3049 	}
   3050 } /* ixgbe_add_device_sysctls */
   3051 
   3052 /************************************************************************
   3053  * ixgbe_allocate_pci_resources
   3054  ************************************************************************/
   3055 static int
   3056 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3057     const struct pci_attach_args *pa)
   3058 {
   3059 	pcireg_t	memtype;
   3060 	device_t dev = adapter->dev;
   3061 	bus_addr_t addr;
   3062 	int flags;
   3063 
   3064 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3065 	switch (memtype) {
   3066 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3067 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3068 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3069 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
    3070 		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3071 			goto map_err;
   3072 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3073 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3074 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3075 		}
   3076 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3077 		     adapter->osdep.mem_size, flags,
   3078 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3079 map_err:
   3080 			adapter->osdep.mem_size = 0;
   3081 			aprint_error_dev(dev, "unable to map BAR0\n");
   3082 			return ENXIO;
   3083 		}
   3084 		break;
   3085 	default:
   3086 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3087 		return ENXIO;
   3088 	}
   3089 
   3090 	return (0);
   3091 } /* ixgbe_allocate_pci_resources */
   3092 
   3093 /************************************************************************
   3094  * ixgbe_detach - Device removal routine
   3095  *
   3096  *   Called when the driver is being removed.
   3097  *   Stops the adapter and deallocates all the resources
   3098  *   that were allocated for driver operation.
   3099  *
   3100  *   return 0 on success, positive on failure
   3101  ************************************************************************/
   3102 static int
   3103 ixgbe_detach(device_t dev, int flags)
   3104 {
   3105 	struct adapter *adapter = device_private(dev);
   3106 	struct ix_queue *que = adapter->queues;
   3107 	struct rx_ring *rxr = adapter->rx_rings;
   3108 	struct tx_ring *txr = adapter->tx_rings;
   3109 	struct ixgbe_hw *hw = &adapter->hw;
   3110 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3111 	u32	ctrl_ext;
   3112 
   3113 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3114 	if (adapter->osdep.attached == false)
   3115 		return 0;
   3116 
   3117 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3118 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3119 		return (EBUSY);
   3120 	}
   3121 
   3122 	/* Stop the interface. Callouts are stopped in it. */
   3123 	ixgbe_ifstop(adapter->ifp, 1);
   3124 #if NVLAN > 0
   3125 	/* Make sure VLANs are not using driver */
   3126 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3127 		;	/* nothing to do: no VLANs */
   3128 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3129 		vlan_ifdetach(adapter->ifp);
   3130 	else {
   3131 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3132 		return (EBUSY);
   3133 	}
   3134 #endif
   3135 
   3136 	pmf_device_deregister(dev);
   3137 
   3138 	ether_ifdetach(adapter->ifp);
   3139 	/* Stop the adapter */
   3140 	IXGBE_CORE_LOCK(adapter);
   3141 	ixgbe_setup_low_power_mode(adapter);
   3142 	IXGBE_CORE_UNLOCK(adapter);
   3143 
   3144 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3145 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3146 			softint_disestablish(txr->txr_si);
   3147 		softint_disestablish(que->que_si);
   3148 	}
   3149 
   3150 	/* Drain the Link queue */
   3151 	softint_disestablish(adapter->link_si);
   3152 	softint_disestablish(adapter->mod_si);
   3153 	softint_disestablish(adapter->msf_si);
   3154 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   3155 		softint_disestablish(adapter->mbx_si);
   3156 	softint_disestablish(adapter->phy_si);
   3157 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   3158 		softint_disestablish(adapter->fdir_si);
   3159 
   3160 	/* let hardware know driver is unloading */
   3161 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3162 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3163 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3164 
   3165 	callout_halt(&adapter->timer, NULL);
   3166 
   3167 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3168 		netmap_detach(adapter->ifp);
   3169 
   3170 	ixgbe_free_pci_resources(adapter);
   3171 #if 0	/* XXX the NetBSD port is probably missing something here */
   3172 	bus_generic_detach(dev);
   3173 #endif
   3174 	if_detach(adapter->ifp);
   3175 	if_percpuq_destroy(adapter->ipq);
   3176 
   3177 	sysctl_teardown(&adapter->sysctllog);
   3178 	evcnt_detach(&adapter->handleq);
   3179 	evcnt_detach(&adapter->req);
   3180 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3181 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3182 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3183 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3184 	evcnt_detach(&adapter->other_tx_dma_setup);
   3185 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3186 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3187 	evcnt_detach(&adapter->watchdog_events);
   3188 	evcnt_detach(&adapter->tso_err);
   3189 	evcnt_detach(&adapter->link_irq);
   3190 
   3191 	txr = adapter->tx_rings;
   3192 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3193 		evcnt_detach(&adapter->queues[i].irqs);
   3194 		evcnt_detach(&txr->no_desc_avail);
   3195 		evcnt_detach(&txr->total_packets);
   3196 		evcnt_detach(&txr->tso_tx);
   3197 #ifndef IXGBE_LEGACY_TX
   3198 		evcnt_detach(&txr->pcq_drops);
   3199 #endif
   3200 
   3201 		if (i < __arraycount(stats->mpc)) {
   3202 			evcnt_detach(&stats->mpc[i]);
   3203 			if (hw->mac.type == ixgbe_mac_82598EB)
   3204 				evcnt_detach(&stats->rnbc[i]);
   3205 		}
   3206 		if (i < __arraycount(stats->pxontxc)) {
   3207 			evcnt_detach(&stats->pxontxc[i]);
   3208 			evcnt_detach(&stats->pxonrxc[i]);
   3209 			evcnt_detach(&stats->pxofftxc[i]);
   3210 			evcnt_detach(&stats->pxoffrxc[i]);
   3211 			evcnt_detach(&stats->pxon2offc[i]);
   3212 		}
   3213 		if (i < __arraycount(stats->qprc)) {
   3214 			evcnt_detach(&stats->qprc[i]);
   3215 			evcnt_detach(&stats->qptc[i]);
   3216 			evcnt_detach(&stats->qbrc[i]);
   3217 			evcnt_detach(&stats->qbtc[i]);
   3218 			evcnt_detach(&stats->qprdc[i]);
   3219 		}
   3220 
   3221 		evcnt_detach(&rxr->rx_packets);
   3222 		evcnt_detach(&rxr->rx_bytes);
   3223 		evcnt_detach(&rxr->rx_copies);
   3224 		evcnt_detach(&rxr->no_jmbuf);
   3225 		evcnt_detach(&rxr->rx_discarded);
   3226 	}
   3227 	evcnt_detach(&stats->ipcs);
   3228 	evcnt_detach(&stats->l4cs);
   3229 	evcnt_detach(&stats->ipcs_bad);
   3230 	evcnt_detach(&stats->l4cs_bad);
   3231 	evcnt_detach(&stats->intzero);
   3232 	evcnt_detach(&stats->legint);
   3233 	evcnt_detach(&stats->crcerrs);
   3234 	evcnt_detach(&stats->illerrc);
   3235 	evcnt_detach(&stats->errbc);
   3236 	evcnt_detach(&stats->mspdc);
   3237 	if (hw->mac.type >= ixgbe_mac_X550)
   3238 		evcnt_detach(&stats->mbsdc);
   3239 	evcnt_detach(&stats->mpctotal);
   3240 	evcnt_detach(&stats->mlfc);
   3241 	evcnt_detach(&stats->mrfc);
   3242 	evcnt_detach(&stats->rlec);
   3243 	evcnt_detach(&stats->lxontxc);
   3244 	evcnt_detach(&stats->lxonrxc);
   3245 	evcnt_detach(&stats->lxofftxc);
   3246 	evcnt_detach(&stats->lxoffrxc);
   3247 
   3248 	/* Packet Reception Stats */
   3249 	evcnt_detach(&stats->tor);
   3250 	evcnt_detach(&stats->gorc);
   3251 	evcnt_detach(&stats->tpr);
   3252 	evcnt_detach(&stats->gprc);
   3253 	evcnt_detach(&stats->mprc);
   3254 	evcnt_detach(&stats->bprc);
   3255 	evcnt_detach(&stats->prc64);
   3256 	evcnt_detach(&stats->prc127);
   3257 	evcnt_detach(&stats->prc255);
   3258 	evcnt_detach(&stats->prc511);
   3259 	evcnt_detach(&stats->prc1023);
   3260 	evcnt_detach(&stats->prc1522);
   3261 	evcnt_detach(&stats->ruc);
   3262 	evcnt_detach(&stats->rfc);
   3263 	evcnt_detach(&stats->roc);
   3264 	evcnt_detach(&stats->rjc);
   3265 	evcnt_detach(&stats->mngprc);
   3266 	evcnt_detach(&stats->mngpdc);
   3267 	evcnt_detach(&stats->xec);
   3268 
   3269 	/* Packet Transmission Stats */
   3270 	evcnt_detach(&stats->gotc);
   3271 	evcnt_detach(&stats->tpt);
   3272 	evcnt_detach(&stats->gptc);
   3273 	evcnt_detach(&stats->bptc);
   3274 	evcnt_detach(&stats->mptc);
   3275 	evcnt_detach(&stats->mngptc);
   3276 	evcnt_detach(&stats->ptc64);
   3277 	evcnt_detach(&stats->ptc127);
   3278 	evcnt_detach(&stats->ptc255);
   3279 	evcnt_detach(&stats->ptc511);
   3280 	evcnt_detach(&stats->ptc1023);
   3281 	evcnt_detach(&stats->ptc1522);
   3282 
   3283 	ixgbe_free_transmit_structures(adapter);
   3284 	ixgbe_free_receive_structures(adapter);
   3285 	free(adapter->queues, M_DEVBUF);
   3286 	free(adapter->mta, M_DEVBUF);
   3287 
   3288 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3289 
   3290 	return (0);
   3291 } /* ixgbe_detach */
   3292 
   3293 /************************************************************************
   3294  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3295  *
   3296  *   Prepare the adapter/port for LPLU and/or WoL
   3297  ************************************************************************/
   3298 static int
   3299 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3300 {
   3301 	struct ixgbe_hw *hw = &adapter->hw;
   3302 	device_t        dev = adapter->dev;
   3303 	s32             error = 0;
   3304 
   3305 	KASSERT(mutex_owned(&adapter->core_mtx));
   3306 
   3307 	/* Limit power management flow to X550EM baseT */
   3308 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3309 	    hw->phy.ops.enter_lplu) {
   3310 		/* X550EM baseT adapters need a special LPLU flow */
   3311 		hw->phy.reset_disable = true;
   3312 		ixgbe_stop(adapter);
   3313 		error = hw->phy.ops.enter_lplu(hw);
   3314 		if (error)
   3315 			device_printf(dev,
   3316 			    "Error entering LPLU: %d\n", error);
   3317 		hw->phy.reset_disable = false;
   3318 	} else {
   3319 		/* Just stop for other adapters */
   3320 		ixgbe_stop(adapter);
   3321 	}
   3322 
   3323 	if (!hw->wol_enabled) {
   3324 		ixgbe_set_phy_power(hw, FALSE);
   3325 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3326 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3327 	} else {
   3328 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3329 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3330 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3331 
   3332 		/*
   3333 		 * Clear Wake Up Status register to prevent any previous wakeup
   3334 		 * events from waking us up immediately after we suspend.
   3335 		 */
   3336 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3337 
   3338 		/*
   3339 		 * Program the Wakeup Filter Control register with user filter
   3340 		 * settings
   3341 		 */
   3342 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3343 
   3344 		/* Enable wakeups and power management in Wakeup Control */
   3345 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3346 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3347 
   3348 	}
   3349 
   3350 	return error;
   3351 } /* ixgbe_setup_low_power_mode */
   3352 
   3353 /************************************************************************
   3354  * ixgbe_shutdown - Shutdown entry point
   3355  ************************************************************************/
   3356 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3357 static int
   3358 ixgbe_shutdown(device_t dev)
   3359 {
   3360 	struct adapter *adapter = device_private(dev);
   3361 	int error = 0;
   3362 
   3363 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3364 
   3365 	IXGBE_CORE_LOCK(adapter);
   3366 	error = ixgbe_setup_low_power_mode(adapter);
   3367 	IXGBE_CORE_UNLOCK(adapter);
   3368 
   3369 	return (error);
   3370 } /* ixgbe_shutdown */
   3371 #endif
   3372 
   3373 /************************************************************************
   3374  * ixgbe_suspend
   3375  *
   3376  *   From D0 to D3
   3377  ************************************************************************/
   3378 static bool
   3379 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3380 {
   3381 	struct adapter *adapter = device_private(dev);
   3382 	int            error = 0;
   3383 
   3384 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3385 
   3386 	IXGBE_CORE_LOCK(adapter);
   3387 
   3388 	error = ixgbe_setup_low_power_mode(adapter);
   3389 
   3390 	IXGBE_CORE_UNLOCK(adapter);
   3391 
    3392 	return (error == 0);
   3393 } /* ixgbe_suspend */
   3394 
   3395 /************************************************************************
   3396  * ixgbe_resume
   3397  *
   3398  *   From D3 to D0
   3399  ************************************************************************/
   3400 static bool
   3401 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3402 {
   3403 	struct adapter  *adapter = device_private(dev);
   3404 	struct ifnet    *ifp = adapter->ifp;
   3405 	struct ixgbe_hw *hw = &adapter->hw;
   3406 	u32             wus;
   3407 
   3408 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3409 
   3410 	IXGBE_CORE_LOCK(adapter);
   3411 
   3412 	/* Read & clear WUS register */
   3413 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3414 	if (wus)
   3415 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3416 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3417 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3418 	/* And clear WUFC until next low-power transition */
   3419 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3420 
   3421 	/*
   3422 	 * Required after D3->D0 transition;
   3423 	 * will re-advertise all previous advertised speeds
   3424 	 */
   3425 	if (ifp->if_flags & IFF_UP)
   3426 		ixgbe_init_locked(adapter);
   3427 
   3428 	IXGBE_CORE_UNLOCK(adapter);
   3429 
   3430 	return true;
   3431 } /* ixgbe_resume */
   3432 
   3433 /*
   3434  * Set the various hardware offload abilities.
   3435  *
   3436  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3437  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3438  * mbuf offload flags the driver will understand.
   3439  */
   3440 static void
   3441 ixgbe_set_if_hwassist(struct adapter *adapter)
   3442 {
   3443 	/* XXX */
   3444 }
   3445 
   3446 /************************************************************************
   3447  * ixgbe_init_locked - Init entry point
   3448  *
   3449  *   Used in two ways: It is used by the stack as an init
   3450  *   entry point in network interface structure. It is also
   3451  *   used by the driver as a hw/sw initialization routine to
   3452  *   get to a consistent state.
   3453  *
    3454  *   On failure, the adapter is stopped and the error is logged.
   3455  ************************************************************************/
   3456 static void
   3457 ixgbe_init_locked(struct adapter *adapter)
   3458 {
   3459 	struct ifnet   *ifp = adapter->ifp;
   3460 	device_t 	dev = adapter->dev;
   3461 	struct ixgbe_hw *hw = &adapter->hw;
   3462 	struct tx_ring  *txr;
   3463 	struct rx_ring  *rxr;
   3464 	u32		txdctl, mhadd;
   3465 	u32		rxdctl, rxctrl;
   3466 	u32             ctrl_ext;
   3467 	int             err = 0;
   3468 
   3469 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3470 
   3471 	KASSERT(mutex_owned(&adapter->core_mtx));
   3472 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3473 
   3474 	hw->adapter_stopped = FALSE;
   3475 	ixgbe_stop_adapter(hw);
    3476 	callout_stop(&adapter->timer);
   3477 
   3478 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3479 	adapter->max_frame_size =
   3480 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3481 
   3482 	/* Queue indices may change with IOV mode */
   3483 	ixgbe_align_all_queue_indices(adapter);
   3484 
   3485 	/* reprogram the RAR[0] in case user changed it. */
   3486 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3487 
   3488 	/* Get the latest mac address, User can use a LAA */
   3489 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3490 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3491 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3492 	hw->addr_ctrl.rar_used_count = 1;
   3493 
   3494 	/* Set hardware offload abilities from ifnet flags */
   3495 	ixgbe_set_if_hwassist(adapter);
   3496 
   3497 	/* Prepare transmit descriptors and buffers */
   3498 	if (ixgbe_setup_transmit_structures(adapter)) {
   3499 		device_printf(dev, "Could not setup transmit structures\n");
   3500 		ixgbe_stop(adapter);
   3501 		return;
   3502 	}
   3503 
   3504 	ixgbe_init_hw(hw);
   3505 	ixgbe_initialize_iov(adapter);
   3506 	ixgbe_initialize_transmit_units(adapter);
   3507 
   3508 	/* Setup Multicast table */
   3509 	ixgbe_set_multi(adapter);
   3510 
   3511 	/* Determine the correct mbuf pool, based on frame size */
   3512 	if (adapter->max_frame_size <= MCLBYTES)
   3513 		adapter->rx_mbuf_sz = MCLBYTES;
   3514 	else
   3515 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3516 
   3517 	/* Prepare receive descriptors and buffers */
   3518 	if (ixgbe_setup_receive_structures(adapter)) {
   3519 		device_printf(dev, "Could not setup receive structures\n");
   3520 		ixgbe_stop(adapter);
   3521 		return;
   3522 	}
   3523 
   3524 	/* Configure RX settings */
   3525 	ixgbe_initialize_receive_units(adapter);
   3526 
   3527 	/* Enable SDP & MSI-X interrupts based on adapter */
   3528 	ixgbe_config_gpie(adapter);
   3529 
   3530 	/* Set MTU size */
   3531 	if (ifp->if_mtu > ETHERMTU) {
   3532 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3533 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3534 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3535 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3536 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3537 	}
   3538 
   3539 	/* Now enable all the queues */
   3540 	for (int i = 0; i < adapter->num_queues; i++) {
   3541 		txr = &adapter->tx_rings[i];
   3542 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3543 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3544 		/* Set WTHRESH to 8, burst writeback */
   3545 		txdctl |= (8 << 16);
   3546 		/*
   3547 		 * When the internal queue falls below PTHRESH (32),
   3548 		 * start prefetching as long as there are at least
   3549 		 * HTHRESH (1) buffers ready. The values are taken
   3550 		 * from the Intel linux driver 3.8.21.
   3551 		 * Prefetching enables tx line rate even with 1 queue.
   3552 		 */
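         		/* PTHRESH is bits 0..6, HTHRESH bits 8..14, WTHRESH bits 16..22. */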
   3553 		txdctl |= (32 << 0) | (1 << 8);
   3554 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3555 	}
   3556 
   3557 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3558 		rxr = &adapter->rx_rings[i];
   3559 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3560 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3561 			/*
    3562 			 * The 0x080420 value below programs
    3563 			 * PTHRESH = 0x20, HTHRESH = 4 and
    3564 			 * WTHRESH = 8.
   3565 			 */
   3566 			rxdctl &= ~0x3FFFFF;
   3567 			rxdctl |= 0x080420;
   3568 		}
   3569 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3570 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3571 		for (; j < 10; j++) {
   3572 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3573 			    IXGBE_RXDCTL_ENABLE)
   3574 				break;
   3575 			else
   3576 				msec_delay(1);
   3577 		}
   3578 		wmb();
   3579 
   3580 		/*
   3581 		 * In netmap mode, we must preserve the buffers made
   3582 		 * available to userspace before the if_init()
   3583 		 * (this is true by default on the TX side, because
   3584 		 * init makes all buffers available to userspace).
   3585 		 *
   3586 		 * netmap_reset() and the device specific routines
   3587 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3588 		 * buffers at the end of the NIC ring, so here we
   3589 		 * must set the RDT (tail) register to make sure
   3590 		 * they are not overwritten.
   3591 		 *
   3592 		 * In this driver the NIC ring starts at RDH = 0,
   3593 		 * RDT points to the last slot available for reception (?),
   3594 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3595 		 */
   3596 #ifdef DEV_NETMAP
   3597 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3598 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3599 			struct netmap_adapter *na = NA(adapter->ifp);
   3600 			struct netmap_kring *kring = &na->rx_rings[i];
   3601 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3602 
   3603 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3604 		} else
   3605 #endif /* DEV_NETMAP */
   3606 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3607 			    adapter->num_rx_desc - 1);
   3608 	}
   3609 
   3610 	/* Enable Receive engine */
   3611 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3612 	if (hw->mac.type == ixgbe_mac_82598EB)
   3613 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3614 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3615 	ixgbe_enable_rx_dma(hw, rxctrl);
   3616 
   3617 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3618 
   3619 	/* Set up MSI-X routing */
   3620 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3621 		ixgbe_configure_ivars(adapter);
   3622 		/* Set up auto-mask */
   3623 		if (hw->mac.type == ixgbe_mac_82598EB)
   3624 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3625 		else {
   3626 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3627 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3628 		}
   3629 	} else {  /* Simple settings for Legacy/MSI */
   3630 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3631 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3632 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3633 	}
   3634 
   3635 	ixgbe_init_fdir(adapter);
   3636 
   3637 	/*
   3638 	 * Check on any SFP devices that
   3639 	 * need to be kick-started
   3640 	 */
   3641 	if (hw->phy.type == ixgbe_phy_none) {
   3642 		err = hw->phy.ops.identify(hw);
   3643 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3644 			device_printf(dev,
   3645 			    "Unsupported SFP+ module type was detected.\n");
   3646 			return;
    3647 		}
   3648 	}
   3649 
   3650 	/* Set moderation on the Link interrupt */
   3651 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3652 
   3653 	/* Config/Enable Link */
   3654 	ixgbe_config_link(adapter);
   3655 
   3656 	/* Hardware Packet Buffer & Flow Control setup */
   3657 	ixgbe_config_delay_values(adapter);
   3658 
   3659 	/* Initialize the FC settings */
   3660 	ixgbe_start_hw(hw);
   3661 
   3662 	/* Set up VLAN support and filter */
   3663 	ixgbe_setup_vlan_hw_support(adapter);
   3664 
   3665 	/* Setup DMA Coalescing */
   3666 	ixgbe_config_dmac(adapter);
   3667 
   3668 	/* And now turn on interrupts */
   3669 	ixgbe_enable_intr(adapter);
   3670 
   3671 	/* Enable the use of the MBX by the VF's */
   3672 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3673 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3674 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3675 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3676 	}
   3677 
   3678 	/* Now inform the stack we're ready */
   3679 	ifp->if_flags |= IFF_RUNNING;
   3680 
   3681 	return;
   3682 } /* ixgbe_init_locked */
   3683 
   3684 /************************************************************************
   3685  * ixgbe_init
   3686  ************************************************************************/
   3687 static int
   3688 ixgbe_init(struct ifnet *ifp)
   3689 {
   3690 	struct adapter *adapter = ifp->if_softc;
   3691 
   3692 	IXGBE_CORE_LOCK(adapter);
   3693 	ixgbe_init_locked(adapter);
   3694 	IXGBE_CORE_UNLOCK(adapter);
   3695 
   3696 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3697 } /* ixgbe_init */
   3698 
   3699 /************************************************************************
   3700  * ixgbe_set_ivar
   3701  *
   3702  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3703  *     (yes this is all very magic and confusing :)
   3704  *    - entry is the register array entry
   3705  *    - vector is the MSI-X vector for this queue
   3706  *    - type is RX/TX/MISC
   3707  ************************************************************************/
   3708 static void
   3709 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3710 {
   3711 	struct ixgbe_hw *hw = &adapter->hw;
   3712 	u32 ivar, index;
   3713 
   3714 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3715 
   3716 	switch (hw->mac.type) {
   3717 
   3718 	case ixgbe_mac_82598EB:
   3719 		if (type == -1)
   3720 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3721 		else
   3722 			entry += (type * 64);
   3723 		index = (entry >> 2) & 0x1F;
   3724 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3725 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3726 		ivar |= (vector << (8 * (entry & 0x3)));
   3727 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3728 		break;
   3729 
   3730 	case ixgbe_mac_82599EB:
   3731 	case ixgbe_mac_X540:
   3732 	case ixgbe_mac_X550:
   3733 	case ixgbe_mac_X550EM_x:
   3734 	case ixgbe_mac_X550EM_a:
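         		/*
         		 * On these MACs each IVAR register packs four 8-bit
         		 * vector entries: (entry >> 1) selects the register and
         		 * 16 * (entry & 1) + 8 * type selects the byte, so the
         		 * RX and TX entries for a queue sit in adjacent bytes.
         		 */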
   3735 		if (type == -1) { /* MISC IVAR */
   3736 			index = (entry & 1) * 8;
   3737 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3738 			ivar &= ~(0xFF << index);
   3739 			ivar |= (vector << index);
   3740 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3741 		} else {	/* RX/TX IVARS */
   3742 			index = (16 * (entry & 1)) + (8 * type);
   3743 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3744 			ivar &= ~(0xFF << index);
   3745 			ivar |= (vector << index);
   3746 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   3747 		}
   3748 
   3749 	default:
   3750 		break;
   3751 	}
   3752 } /* ixgbe_set_ivar */
   3753 
   3754 /************************************************************************
   3755  * ixgbe_configure_ivars
   3756  ************************************************************************/
   3757 static void
   3758 ixgbe_configure_ivars(struct adapter *adapter)
   3759 {
   3760 	struct ix_queue *que = adapter->queues;
   3761 	u32             newitr;
   3762 
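         	/*
         	 * Same EITR encoding as the interrupt rate sysctl handler:
         	 * (4000000 / rate) & 0x0FF8 is the interval value already
         	 * shifted into bits 3..11.
         	 */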
   3763 	if (ixgbe_max_interrupt_rate > 0)
   3764 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3765 	else {
   3766 		/*
   3767 		 * Disable DMA coalescing if interrupt moderation is
   3768 		 * disabled.
   3769 		 */
   3770 		adapter->dmac = 0;
   3771 		newitr = 0;
   3772 	}
   3773 
    3774 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3775 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3776 		struct tx_ring *txr = &adapter->tx_rings[i];
   3777 		/* First the RX queue entry */
    3778 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3779 		/* ... and the TX */
   3780 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3781 		/* Set an Initial EITR value */
   3782 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3783 	}
   3784 
   3785 	/* For the Link interrupt */
    3786 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3787 } /* ixgbe_configure_ivars */
   3788 
   3789 /************************************************************************
   3790  * ixgbe_config_gpie
   3791  ************************************************************************/
   3792 static void
   3793 ixgbe_config_gpie(struct adapter *adapter)
   3794 {
   3795 	struct ixgbe_hw *hw = &adapter->hw;
   3796 	u32             gpie;
   3797 
   3798 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3799 
   3800 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3801 		/* Enable Enhanced MSI-X mode */
   3802 		gpie |= IXGBE_GPIE_MSIX_MODE
   3803 		     |  IXGBE_GPIE_EIAME
   3804 		     |  IXGBE_GPIE_PBA_SUPPORT
   3805 		     |  IXGBE_GPIE_OCD;
   3806 	}
   3807 
   3808 	/* Fan Failure Interrupt */
   3809 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3810 		gpie |= IXGBE_SDP1_GPIEN;
   3811 
   3812 	/* Thermal Sensor Interrupt */
   3813 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3814 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3815 
   3816 	/* Link detection */
   3817 	switch (hw->mac.type) {
   3818 	case ixgbe_mac_82599EB:
   3819 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3820 		break;
   3821 	case ixgbe_mac_X550EM_x:
   3822 	case ixgbe_mac_X550EM_a:
   3823 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3824 		break;
   3825 	default:
   3826 		break;
   3827 	}
   3828 
   3829 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3830 
   3831 	return;
   3832 } /* ixgbe_config_gpie */
   3833 
   3834 /************************************************************************
   3835  * ixgbe_config_delay_values
   3836  *
   3837  *   Requires adapter->max_frame_size to be set.
   3838  ************************************************************************/
   3839 static void
   3840 ixgbe_config_delay_values(struct adapter *adapter)
   3841 {
   3842 	struct ixgbe_hw *hw = &adapter->hw;
   3843 	u32             rxpb, frame, size, tmp;
   3844 
   3845 	frame = adapter->max_frame_size;
   3846 
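         	/*
         	 * The high water mark is, roughly, the RX packet buffer size
         	 * (RXPBSIZE, converted to KB) minus the worst-case delay value
         	 * for one max-sized frame, converted from bit times to KB.
         	 */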
   3847 	/* Calculate High Water */
   3848 	switch (hw->mac.type) {
   3849 	case ixgbe_mac_X540:
   3850 	case ixgbe_mac_X550:
   3851 	case ixgbe_mac_X550EM_x:
   3852 	case ixgbe_mac_X550EM_a:
   3853 		tmp = IXGBE_DV_X540(frame, frame);
   3854 		break;
   3855 	default:
   3856 		tmp = IXGBE_DV(frame, frame);
   3857 		break;
   3858 	}
   3859 	size = IXGBE_BT2KB(tmp);
   3860 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   3861 	hw->fc.high_water[0] = rxpb - size;
   3862 
   3863 	/* Now calculate Low Water */
   3864 	switch (hw->mac.type) {
   3865 	case ixgbe_mac_X540:
   3866 	case ixgbe_mac_X550:
   3867 	case ixgbe_mac_X550EM_x:
   3868 	case ixgbe_mac_X550EM_a:
   3869 		tmp = IXGBE_LOW_DV_X540(frame);
   3870 		break;
   3871 	default:
   3872 		tmp = IXGBE_LOW_DV(frame);
   3873 		break;
   3874 	}
   3875 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3876 
   3877 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3878 	hw->fc.send_xon = TRUE;
   3879 } /* ixgbe_config_delay_values */
   3880 
   3881 /************************************************************************
   3882  * ixgbe_set_multi - Multicast Update
   3883  *
   3884  *   Called whenever multicast address list is updated.
   3885  ************************************************************************/
   3886 static void
   3887 ixgbe_set_multi(struct adapter *adapter)
   3888 {
   3889 	struct ixgbe_mc_addr	*mta;
   3890 	struct ifnet		*ifp = adapter->ifp;
   3891 	u8			*update_ptr;
   3892 	int			mcnt = 0;
   3893 	u32			fctrl;
   3894 	struct ethercom		*ec = &adapter->osdep.ec;
   3895 	struct ether_multi	*enm;
   3896 	struct ether_multistep	step;
   3897 
   3898 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   3899 
   3900 	mta = adapter->mta;
   3901 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   3902 
   3903 	ifp->if_flags &= ~IFF_ALLMULTI;
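         	/*
         	 * Walk the multicast list.  If a range was requested
         	 * (enm_addrlo != enm_addrhi) or there are more entries than
         	 * the hardware filter can hold, fall back to IFF_ALLMULTI.
         	 */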
   3904 	ETHER_FIRST_MULTI(step, ec, enm);
   3905 	while (enm != NULL) {
   3906 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   3907 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   3908 			ETHER_ADDR_LEN) != 0)) {
   3909 			ifp->if_flags |= IFF_ALLMULTI;
   3910 			break;
   3911 		}
   3912 		bcopy(enm->enm_addrlo,
   3913 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   3914 		mta[mcnt].vmdq = adapter->pool;
   3915 		mcnt++;
   3916 		ETHER_NEXT_MULTI(step, enm);
   3917 	}
   3918 
   3919 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3920 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3921 	if (ifp->if_flags & IFF_PROMISC)
   3922 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3923 	else if (ifp->if_flags & IFF_ALLMULTI) {
   3924 		fctrl |= IXGBE_FCTRL_MPE;
   3925 	}
   3926 
   3927 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   3928 
   3929 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   3930 		update_ptr = (u8 *)mta;
   3931 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   3932 		    ixgbe_mc_array_itr, TRUE);
   3933 	}
   3934 
   3935 	return;
   3936 } /* ixgbe_set_multi */
   3937 
   3938 /************************************************************************
   3939  * ixgbe_mc_array_itr
   3940  *
   3941  *   An iterator function needed by the multicast shared code.
   3942  *   It feeds the shared code routine the addresses in the
   3943  *   array of ixgbe_set_multi() one by one.
   3944  ************************************************************************/
   3945 static u8 *
   3946 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   3947 {
   3948 	struct ixgbe_mc_addr *mta;
   3949 
   3950 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   3951 	*vmdq = mta->vmdq;
   3952 
   3953 	*update_ptr = (u8*)(mta + 1);
   3954 
   3955 	return (mta->addr);
   3956 } /* ixgbe_mc_array_itr */
   3957 
   3958 /************************************************************************
   3959  * ixgbe_local_timer - Timer routine
   3960  *
   3961  *   Checks for link status, updates statistics,
   3962  *   and runs the watchdog check.
   3963  ************************************************************************/
   3964 static void
   3965 ixgbe_local_timer(void *arg)
   3966 {
   3967 	struct adapter *adapter = arg;
   3968 
   3969 	IXGBE_CORE_LOCK(adapter);
   3970 	ixgbe_local_timer1(adapter);
   3971 	IXGBE_CORE_UNLOCK(adapter);
   3972 }
   3973 
   3974 static void
   3975 ixgbe_local_timer1(void *arg)
   3976 {
   3977 	struct adapter	*adapter = arg;
   3978 	device_t	dev = adapter->dev;
   3979 	struct ix_queue *que = adapter->queues;
   3980 	u64		queues = 0;
   3981 	int		hung = 0;
   3982 
   3983 	KASSERT(mutex_owned(&adapter->core_mtx));
   3984 
   3985 	/* Check for pluggable optics */
   3986 	if (adapter->sfp_probe)
   3987 		if (!ixgbe_sfp_probe(adapter))
   3988 			goto out; /* Nothing to do */
   3989 
   3990 	ixgbe_update_link_status(adapter);
   3991 	ixgbe_update_stats_counters(adapter);
   3992 
   3993 	/*
   3994 	 * Check the TX queues status
   3995 	 *      - mark hung queues so we don't schedule on them
   3996 	 *      - watchdog only if all queues show hung
   3997 	 */
   3998 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3999 		/* Keep track of queues with work for soft irq */
   4000 		if (que->txr->busy)
   4001 			queues |= ((u64)1 << que->me);
   4002 		/*
    4003 		 * Each time txeof runs without cleaning while there
    4004 		 * are uncleaned descriptors, it increments 'busy'.  If
    4005 		 * busy reaches the MAX we declare the queue hung.
   4006 		 */
   4007 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4008 			++hung;
   4009 			/* Mark the queue as inactive */
   4010 			adapter->active_queues &= ~((u64)1 << que->me);
   4011 			continue;
   4012 		} else {
   4013 			/* Check if we've come back from hung */
   4014 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4015 				adapter->active_queues |= ((u64)1 << que->me);
   4016 		}
   4017 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4018 			device_printf(dev,
    4019 			    "Warning: queue %d appears to be hung!\n", i);
   4020 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4021 			++hung;
   4022 		}
   4023 	}
   4024 
    4025 	/* Only truly take the watchdog action if all queues show hung */
   4026 	if (hung == adapter->num_queues)
   4027 		goto watchdog;
   4028 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4029 		ixgbe_rearm_queues(adapter, queues);
   4030 	}
   4031 
   4032 out:
   4033 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4034 	return;
   4035 
   4036 watchdog:
   4037 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4038 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4039 	adapter->watchdog_events.ev_count++;
   4040 	ixgbe_init_locked(adapter);
   4041 } /* ixgbe_local_timer */
   4042 
   4043 /************************************************************************
   4044  * ixgbe_sfp_probe
   4045  *
   4046  *   Determine if a port had optics inserted.
   4047  ************************************************************************/
   4048 static bool
   4049 ixgbe_sfp_probe(struct adapter *adapter)
   4050 {
   4051 	struct ixgbe_hw	*hw = &adapter->hw;
   4052 	device_t	dev = adapter->dev;
   4053 	bool		result = FALSE;
   4054 
   4055 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4056 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4057 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4058 		if (ret)
   4059 			goto out;
   4060 		ret = hw->phy.ops.reset(hw);
   4061 		adapter->sfp_probe = FALSE;
   4062 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4063 			device_printf(dev, "Unsupported SFP+ module detected!\n");
   4064 			device_printf(dev,
   4065 			    "Reload driver with supported module.\n");
    4066 			goto out;
   4067 		} else
   4068 			device_printf(dev, "SFP+ module detected!\n");
   4069 		/* We now have supported optics */
   4070 		result = TRUE;
   4071 	}
   4072 out:
   4073 
   4074 	return (result);
   4075 } /* ixgbe_sfp_probe */
   4076 
   4077 /************************************************************************
   4078  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4079  ************************************************************************/
   4080 static void
   4081 ixgbe_handle_mod(void *context)
   4082 {
   4083 	struct adapter  *adapter = context;
   4084 	struct ixgbe_hw *hw = &adapter->hw;
   4085 	device_t	dev = adapter->dev;
   4086 	u32             err, cage_full = 0;
   4087 
   4088 	if (adapter->hw.need_crosstalk_fix) {
   4089 		switch (hw->mac.type) {
   4090 		case ixgbe_mac_82599EB:
   4091 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4092 			    IXGBE_ESDP_SDP2;
   4093 			break;
   4094 		case ixgbe_mac_X550EM_x:
   4095 		case ixgbe_mac_X550EM_a:
   4096 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4097 			    IXGBE_ESDP_SDP0;
   4098 			break;
   4099 		default:
   4100 			break;
   4101 		}
   4102 
   4103 		if (!cage_full)
   4104 			return;
   4105 	}
   4106 
   4107 	err = hw->phy.ops.identify_sfp(hw);
   4108 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4109 		device_printf(dev,
   4110 		    "Unsupported SFP+ module type was detected.\n");
   4111 		return;
   4112 	}
   4113 
   4114 	err = hw->mac.ops.setup_sfp(hw);
   4115 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4116 		device_printf(dev,
   4117 		    "Setup failure - unsupported SFP+ module type.\n");
   4118 		return;
   4119 	}
   4120 	softint_schedule(adapter->msf_si);
   4121 } /* ixgbe_handle_mod */
   4122 
   4123 
   4124 /************************************************************************
   4125  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4126  ************************************************************************/
   4127 static void
   4128 ixgbe_handle_msf(void *context)
   4129 {
   4130 	struct adapter  *adapter = context;
   4131 	struct ixgbe_hw *hw = &adapter->hw;
   4132 	u32             autoneg;
   4133 	bool            negotiate;
   4134 
   4135 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4136 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4137 
   4138 	autoneg = hw->phy.autoneg_advertised;
   4139 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4140 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4141 	else
   4142 		negotiate = 0;
   4143 	if (hw->mac.ops.setup_link)
   4144 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4145 
   4146 	/* Adjust media types shown in ifconfig */
   4147 	ifmedia_removeall(&adapter->media);
   4148 	ixgbe_add_media_types(adapter);
   4149 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4150 } /* ixgbe_handle_msf */
   4151 
   4152 /************************************************************************
   4153  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4154  ************************************************************************/
   4155 static void
   4156 ixgbe_handle_phy(void *context)
   4157 {
   4158 	struct adapter  *adapter = context;
   4159 	struct ixgbe_hw *hw = &adapter->hw;
   4160 	int error;
   4161 
   4162 	error = hw->phy.ops.handle_lasi(hw);
   4163 	if (error == IXGBE_ERR_OVERTEMP)
   4164 		device_printf(adapter->dev,
   4165 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
    4166 		    "PHY will downshift to lower power state!\n");
   4167 	else if (error)
   4168 		device_printf(adapter->dev,
   4169 		    "Error handling LASI interrupt: %d\n", error);
   4170 } /* ixgbe_handle_phy */
   4171 
   4172 static void
   4173 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4174 {
   4175 	struct adapter *adapter = ifp->if_softc;
   4176 
   4177 	IXGBE_CORE_LOCK(adapter);
   4178 	ixgbe_stop(adapter);
   4179 	IXGBE_CORE_UNLOCK(adapter);
   4180 }
   4181 
   4182 /************************************************************************
   4183  * ixgbe_stop - Stop the hardware
   4184  *
   4185  *   Disables all traffic on the adapter by issuing a
   4186  *   global reset on the MAC and deallocates TX/RX buffers.
   4187  ************************************************************************/
   4188 static void
   4189 ixgbe_stop(void *arg)
   4190 {
   4191 	struct ifnet    *ifp;
   4192 	struct adapter  *adapter = arg;
   4193 	struct ixgbe_hw *hw = &adapter->hw;
   4194 
   4195 	ifp = adapter->ifp;
   4196 
   4197 	KASSERT(mutex_owned(&adapter->core_mtx));
   4198 
   4199 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4200 	ixgbe_disable_intr(adapter);
   4201 	callout_stop(&adapter->timer);
   4202 
   4203 	/* Let the stack know...*/
   4204 	ifp->if_flags &= ~IFF_RUNNING;
   4205 
   4206 	ixgbe_reset_hw(hw);
   4207 	hw->adapter_stopped = FALSE;
   4208 	ixgbe_stop_adapter(hw);
   4209 	if (hw->mac.type == ixgbe_mac_82599EB)
   4210 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4211 	/* Turn off the laser - noop with no optics */
   4212 	ixgbe_disable_tx_laser(hw);
   4213 
   4214 	/* Update the stack */
   4215 	adapter->link_up = FALSE;
   4216 	ixgbe_update_link_status(adapter);
   4217 
   4218 	/* reprogram the RAR[0] in case user changed it. */
   4219 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4220 
   4221 	return;
   4222 } /* ixgbe_stop */
   4223 
   4224 /************************************************************************
   4225  * ixgbe_update_link_status - Update OS on link state
   4226  *
   4227  * Note: Only updates the OS on the cached link state.
   4228  *       The real check of the hardware only happens with
   4229  *       a link interrupt.
   4230  ************************************************************************/
   4231 static void
   4232 ixgbe_update_link_status(struct adapter *adapter)
   4233 {
   4234 	struct ifnet	*ifp = adapter->ifp;
   4235 	device_t        dev = adapter->dev;
   4236 	struct ixgbe_hw *hw = &adapter->hw;
   4237 
   4238 	if (adapter->link_up) {
   4239 		if (adapter->link_active == FALSE) {
    4240 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
    4241 				/*
    4242 				 * Discard the MAC Local Fault and Remote
    4243 				 * Fault counts here because those
    4244 				 * registers are valid only when the link
    4245 				 * is up at 10Gbps.
   4246 				 */
   4247 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4248 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4249 			}
   4250 
   4251 			if (bootverbose) {
   4252 				const char *bpsmsg;
   4253 
   4254 				switch (adapter->link_speed) {
   4255 				case IXGBE_LINK_SPEED_10GB_FULL:
   4256 					bpsmsg = "10 Gbps";
   4257 					break;
   4258 				case IXGBE_LINK_SPEED_5GB_FULL:
   4259 					bpsmsg = "5 Gbps";
   4260 					break;
   4261 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4262 					bpsmsg = "2.5 Gbps";
   4263 					break;
   4264 				case IXGBE_LINK_SPEED_1GB_FULL:
   4265 					bpsmsg = "1 Gbps";
   4266 					break;
   4267 				case IXGBE_LINK_SPEED_100_FULL:
   4268 					bpsmsg = "100 Mbps";
   4269 					break;
   4270 				case IXGBE_LINK_SPEED_10_FULL:
   4271 					bpsmsg = "10 Mbps";
   4272 					break;
   4273 				default:
   4274 					bpsmsg = "unknown speed";
   4275 					break;
   4276 				}
    4277 				device_printf(dev, "Link is up %s %s\n",
    4278 				    bpsmsg, "Full Duplex");
   4279 			}
   4280 			adapter->link_active = TRUE;
   4281 			/* Update any Flow Control changes */
   4282 			ixgbe_fc_enable(&adapter->hw);
   4283 			/* Update DMA coalescing config */
   4284 			ixgbe_config_dmac(adapter);
   4285 			if_link_state_change(ifp, LINK_STATE_UP);
   4286 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4287 				ixgbe_ping_all_vfs(adapter);
   4288 		}
   4289 	} else { /* Link down */
   4290 		if (adapter->link_active == TRUE) {
   4291 			if (bootverbose)
   4292 				device_printf(dev, "Link is Down\n");
   4293 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4294 			adapter->link_active = FALSE;
   4295 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4296 				ixgbe_ping_all_vfs(adapter);
   4297 		}
   4298 	}
   4299 
   4300 	return;
   4301 } /* ixgbe_update_link_status */
   4302 
   4303 /************************************************************************
   4304  * ixgbe_config_dmac - Configure DMA Coalescing
   4305  ************************************************************************/
   4306 static void
   4307 ixgbe_config_dmac(struct adapter *adapter)
   4308 {
   4309 	struct ixgbe_hw *hw = &adapter->hw;
   4310 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4311 
   4312 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4313 		return;
   4314 
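        	/*
        	 * Note: the XOR tests below are just "did it change?" checks;
        	 * a ^ b is non-zero exactly when a != b.
        	 */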
   4315 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4316 	    dcfg->link_speed ^ adapter->link_speed) {
   4317 		dcfg->watchdog_timer = adapter->dmac;
   4318 		dcfg->fcoe_en = false;
   4319 		dcfg->link_speed = adapter->link_speed;
   4320 		dcfg->num_tcs = 1;
   4321 
   4322 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4323 		    dcfg->watchdog_timer, dcfg->link_speed);
   4324 
   4325 		hw->mac.ops.dmac_config(hw);
   4326 	}
   4327 } /* ixgbe_config_dmac */
   4328 
   4329 /************************************************************************
   4330  * ixgbe_enable_intr
   4331  ************************************************************************/
   4332 static void
   4333 ixgbe_enable_intr(struct adapter *adapter)
   4334 {
   4335 	struct ixgbe_hw	*hw = &adapter->hw;
   4336 	struct ix_queue	*que = adapter->queues;
   4337 	u32		mask, fwsm;
   4338 
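        	/*
        	 * Start from the generic enable mask with the per-queue RTX
        	 * bits cleared; the queue interrupts are enabled individually
        	 * via ixgbe_enable_queue() at the end of this function.
        	 */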
   4339 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4340 
   4341 	switch (adapter->hw.mac.type) {
   4342 	case ixgbe_mac_82599EB:
   4343 		mask |= IXGBE_EIMS_ECC;
   4344 		/* Temperature sensor on some adapters */
   4345 		mask |= IXGBE_EIMS_GPI_SDP0;
   4346 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4347 		mask |= IXGBE_EIMS_GPI_SDP1;
   4348 		mask |= IXGBE_EIMS_GPI_SDP2;
   4349 		break;
   4350 	case ixgbe_mac_X540:
   4351 		/* Detect if Thermal Sensor is enabled */
   4352 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4353 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4354 			mask |= IXGBE_EIMS_TS;
   4355 		mask |= IXGBE_EIMS_ECC;
   4356 		break;
   4357 	case ixgbe_mac_X550:
   4358 		/* MAC thermal sensor is automatically enabled */
   4359 		mask |= IXGBE_EIMS_TS;
   4360 		mask |= IXGBE_EIMS_ECC;
   4361 		break;
   4362 	case ixgbe_mac_X550EM_x:
   4363 	case ixgbe_mac_X550EM_a:
   4364 		/* Some devices use SDP0 for important information */
   4365 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4366 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4367 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4368 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4369 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4370 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4371 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4372 		mask |= IXGBE_EIMS_ECC;
   4373 		break;
   4374 	default:
   4375 		break;
   4376 	}
   4377 
   4378 	/* Enable Fan Failure detection */
   4379 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4380 		mask |= IXGBE_EIMS_GPI_SDP1;
   4381 	/* Enable SR-IOV */
   4382 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4383 		mask |= IXGBE_EIMS_MAILBOX;
   4384 	/* Enable Flow Director */
   4385 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4386 		mask |= IXGBE_EIMS_FLOW_DIR;
   4387 
   4388 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4389 
   4390 	/* With MSI-X we use auto clear */
   4391 	if (adapter->msix_mem) {
   4392 		mask = IXGBE_EIMS_ENABLE_MASK;
   4393 		/* Don't autoclear Link */
   4394 		mask &= ~IXGBE_EIMS_OTHER;
   4395 		mask &= ~IXGBE_EIMS_LSC;
   4396 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4397 			mask &= ~IXGBE_EIMS_MAILBOX;
   4398 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4399 	}
   4400 
   4401 	/*
   4402 	 * Now enable all queues; this is done separately to
   4403 	 * allow for handling the extended (beyond 32) MSI-X
   4404 	 * vectors that can be used by the 82599.
   4405 	 */
   4406 	for (int i = 0; i < adapter->num_queues; i++, que++)
   4407 		ixgbe_enable_queue(adapter, que->msix);
   4408 
   4409 	IXGBE_WRITE_FLUSH(hw);
   4410 
   4411 	return;
   4412 } /* ixgbe_enable_intr */
   4413 
   4414 /************************************************************************
   4415  * ixgbe_disable_intr
   4416  ************************************************************************/
   4417 static void
   4418 ixgbe_disable_intr(struct adapter *adapter)
   4419 {
   4420 	if (adapter->msix_mem)
   4421 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4422 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4423 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4424 	} else {
   4425 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4426 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4427 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4428 	}
   4429 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4430 
   4431 	return;
   4432 } /* ixgbe_disable_intr */
   4433 
   4434 /************************************************************************
   4435  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4436  ************************************************************************/
   4437 static int
   4438 ixgbe_legacy_irq(void *arg)
   4439 {
   4440 	struct ix_queue *que = arg;
   4441 	struct adapter	*adapter = que->adapter;
   4442 	struct ixgbe_hw	*hw = &adapter->hw;
   4443 	struct ifnet    *ifp = adapter->ifp;
   4444 	struct tx_ring	*txr = adapter->tx_rings;
   4445 	bool		more = false;
   4446 	u32             eicr, eicr_mask;
   4447 
   4448 	/* Silicon errata #26 on 82598 */
   4449 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4450 
   4451 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4452 
   4453 	adapter->stats.pf.legint.ev_count++;
   4454 	++que->irqs.ev_count;
   4455 	if (eicr == 0) {
   4456 		adapter->stats.pf.intzero.ev_count++;
   4457 		if ((ifp->if_flags & IFF_UP) != 0)
   4458 			ixgbe_enable_intr(adapter);
   4459 		return 0;
   4460 	}
   4461 
   4462 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4463 #ifdef __NetBSD__
   4464 		/* Don't run ixgbe_rxeof in interrupt context */
   4465 		more = true;
   4466 #else
   4467 		more = ixgbe_rxeof(que);
   4468 #endif
   4469 
   4470 		IXGBE_TX_LOCK(txr);
   4471 		ixgbe_txeof(txr);
   4472 #ifdef notyet
   4473 		if (!ixgbe_ring_empty(ifp, txr->br))
   4474 			ixgbe_start_locked(ifp, txr);
   4475 #endif
   4476 		IXGBE_TX_UNLOCK(txr);
   4477 	}
   4478 
   4479 	/* Check for fan failure */
   4480 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4481 		ixgbe_check_fan_failure(adapter, eicr, true);
   4482 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4483 	}
   4484 
   4485 	/* Link status change */
   4486 	if (eicr & IXGBE_EICR_LSC)
   4487 		softint_schedule(adapter->link_si);
   4488 
   4489 	if (ixgbe_is_sfp(hw)) {
   4490 		/* Pluggable optics-related interrupt */
   4491 		if (hw->mac.type >= ixgbe_mac_X540)
   4492 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4493 		else
   4494 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4495 
   4496 		if (eicr & eicr_mask) {
   4497 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4498 			softint_schedule(adapter->mod_si);
   4499 		}
   4500 
   4501 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4502 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4503 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4504 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4505 			softint_schedule(adapter->msf_si);
   4506 		}
   4507 	}
   4508 
   4509 	/* External PHY interrupt */
   4510 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4511 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4512 		softint_schedule(adapter->phy_si);
   4513 
   4514 	if (more)
   4515 		softint_schedule(que->que_si);
   4516 	else
   4517 		ixgbe_enable_intr(adapter);
   4518 
   4519 	return 1;
   4520 } /* ixgbe_legacy_irq */
   4521 
   4522 /************************************************************************
   4523  * ixgbe_free_pci_resources
   4524  ************************************************************************/
   4525 static void
   4526 ixgbe_free_pci_resources(struct adapter *adapter)
   4527 {
   4528 	struct ix_queue *que = adapter->queues;
   4529 	int		rid;
   4530 
   4531 	/*
   4532 	 * Release all MSI-X queue resources.
   4533 	 */
   4534 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4535 		if (que->res != NULL)
   4536 			pci_intr_disestablish(adapter->osdep.pc,
   4537 			    adapter->osdep.ihs[i]);
   4538 	}
   4539 
   4540 	/* Clean the Legacy or Link interrupt last */
   4541 	if (adapter->vector) /* we are doing MSIX */
   4542 		rid = adapter->vector;
   4543 	else
   4544 		rid = 0;
   4545 
   4546 	if (adapter->osdep.ihs[rid] != NULL) {
   4547 		pci_intr_disestablish(adapter->osdep.pc,
   4548 		    adapter->osdep.ihs[rid]);
   4549 		adapter->osdep.ihs[rid] = NULL;
   4550 	}
   4551 
   4552 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4553 	    adapter->osdep.nintrs);
   4554 
   4555 	if (adapter->osdep.mem_size != 0) {
   4556 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4557 		    adapter->osdep.mem_bus_space_handle,
   4558 		    adapter->osdep.mem_size);
   4559 	}
   4560 
   4561 	return;
   4562 } /* ixgbe_free_pci_resources */
   4563 
   4564 /************************************************************************
   4565  * ixgbe_set_sysctl_value
   4566  ************************************************************************/
   4567 static void
   4568 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4569     const char *description, int *limit, int value)
   4570 {
   4571 	device_t dev =  adapter->dev;
   4572 	struct sysctllog **log;
   4573 	const struct sysctlnode *rnode, *cnode;
   4574 
   4575 	log = &adapter->sysctllog;
   4576 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4577 		aprint_error_dev(dev, "could not create sysctl root\n");
   4578 		return;
   4579 	}
   4580 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4581 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4582 	    name, SYSCTL_DESCR(description),
   4583 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4584 		aprint_error_dev(dev, "could not create sysctl\n");
   4585 	*limit = value;
   4586 } /* ixgbe_set_sysctl_value */
   4587 
   4588 /************************************************************************
   4589  * ixgbe_sysctl_flowcntl
   4590  *
   4591  *   SYSCTL wrapper around setting Flow Control
   4592  ************************************************************************/
   4593 static int
   4594 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4595 {
   4596 	struct sysctlnode node = *rnode;
   4597 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4598 	int error, fc;
   4599 
   4600 	fc = adapter->hw.fc.current_mode;
   4601 	node.sysctl_data = &fc;
   4602 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4603 	if (error != 0 || newp == NULL)
   4604 		return error;
   4605 
   4606 	/* Don't bother if it's not changed */
   4607 	if (fc == adapter->hw.fc.current_mode)
   4608 		return (0);
   4609 
   4610 	return ixgbe_set_flowcntl(adapter, fc);
   4611 } /* ixgbe_sysctl_flowcntl */
   4612 
   4613 /************************************************************************
   4614  * ixgbe_set_flowcntl - Set flow control
   4615  *
   4616  *   Flow control values:
   4617  *     0 - off
   4618  *     1 - rx pause
   4619  *     2 - tx pause
   4620  *     3 - full
   4621  ************************************************************************/
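        /*
         * Example: writing 3 (ixgbe_fc_full) requests both RX and TX pause
         * frames, while writing 0 turns flow control off and, when more than
         * one queue is configured, re-enables per-queue RX drop via
         * ixgbe_enable_rx_drop().
         */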
   4622 static int
   4623 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4624 {
   4625 	switch (fc) {
   4626 	case ixgbe_fc_rx_pause:
   4627 	case ixgbe_fc_tx_pause:
   4628 	case ixgbe_fc_full:
   4629 		adapter->hw.fc.requested_mode = fc;
   4630 		if (adapter->num_queues > 1)
   4631 			ixgbe_disable_rx_drop(adapter);
   4632 		break;
   4633 	case ixgbe_fc_none:
   4634 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4635 		if (adapter->num_queues > 1)
   4636 			ixgbe_enable_rx_drop(adapter);
   4637 		break;
   4638 	default:
   4639 		return (EINVAL);
   4640 	}
   4641 
   4642 #if 0 /* XXX NetBSD */
   4643 	/* Don't autoneg if forcing a value */
   4644 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4645 #endif
   4646 	ixgbe_fc_enable(&adapter->hw);
   4647 
   4648 	return (0);
   4649 } /* ixgbe_set_flowcntl */
   4650 
   4651 /************************************************************************
   4652  * ixgbe_enable_rx_drop
   4653  *
   4654  *   Enable the hardware to drop packets when the buffer is
   4655  *   full. This is useful with multiqueue, so that no single
   4656  *   queue being full stalls the entire RX engine. We only
   4657  *   enable this when Multiqueue is enabled AND Flow Control
   4658  *   is disabled.
   4659  ************************************************************************/
   4660 static void
   4661 ixgbe_enable_rx_drop(struct adapter *adapter)
   4662 {
   4663 	struct ixgbe_hw *hw = &adapter->hw;
   4664 	struct rx_ring  *rxr;
   4665 	u32             srrctl;
   4666 
   4667 	for (int i = 0; i < adapter->num_queues; i++) {
   4668 		rxr = &adapter->rx_rings[i];
   4669 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4670 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4671 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4672 	}
   4673 
   4674 	/* enable drop for each vf */
   4675 	for (int i = 0; i < adapter->num_vfs; i++) {
   4676 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4677 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4678 		    IXGBE_QDE_ENABLE));
   4679 	}
   4680 } /* ixgbe_enable_rx_drop */
   4681 
   4682 /************************************************************************
   4683  * ixgbe_disable_rx_drop
   4684  ************************************************************************/
   4685 static void
   4686 ixgbe_disable_rx_drop(struct adapter *adapter)
   4687 {
   4688 	struct ixgbe_hw *hw = &adapter->hw;
   4689 	struct rx_ring  *rxr;
   4690 	u32             srrctl;
   4691 
   4692 	for (int i = 0; i < adapter->num_queues; i++) {
   4693 		rxr = &adapter->rx_rings[i];
   4694 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4695 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   4696 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4697 	}
   4698 
   4699 	/* disable drop for each vf */
   4700 	for (int i = 0; i < adapter->num_vfs; i++) {
   4701 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4702 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4703 	}
   4704 } /* ixgbe_disable_rx_drop */
   4705 
   4706 /************************************************************************
   4707  * ixgbe_sysctl_advertise
   4708  *
   4709  *   SYSCTL wrapper around setting advertised speed
   4710  ************************************************************************/
   4711 static int
   4712 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4713 {
   4714 	struct sysctlnode node = *rnode;
   4715 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4716 	int            error = 0, advertise;
   4717 
   4718 	advertise = adapter->advertise;
   4719 	node.sysctl_data = &advertise;
   4720 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4721 	if (error != 0 || newp == NULL)
   4722 		return error;
   4723 
   4724 	return ixgbe_set_advertise(adapter, advertise);
   4725 } /* ixgbe_sysctl_advertise */
   4726 
   4727 /************************************************************************
   4728  * ixgbe_set_advertise - Control advertised link speed
   4729  *
   4730  *   Flags:
   4731  *     0x0 - Default (all capable link speed)
   4732  *     0x1 - advertise 100 Mb
   4733  *     0x2 - advertise 1G
   4734  *     0x4 - advertise 10G
   4735  *     0x8 - advertise 10 Mb (yes, Mb)
   4736  ************************************************************************/
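        /*
         * Example: a value of 0x6 (0x2 | 0x4) advertises 1G and 10G only,
         * while 0x0 re-advertises everything reported by
         * hw->mac.ops.get_link_capabilities().
         */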
   4737 static int
   4738 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4739 {
   4740 	device_t         dev;
   4741 	struct ixgbe_hw  *hw;
   4742 	ixgbe_link_speed speed = 0;
   4743 	ixgbe_link_speed link_caps = 0;
   4744 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4745 	bool             negotiate = FALSE;
   4746 
   4747 	/* Checks to validate new value */
   4748 	if (adapter->advertise == advertise) /* no change */
   4749 		return (0);
   4750 
   4751 	dev = adapter->dev;
   4752 	hw = &adapter->hw;
   4753 
   4754 	/* No speed changes for backplane media */
   4755 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4756 		return (ENODEV);
   4757 
   4758 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4759 	    (hw->phy.multispeed_fiber))) {
   4760 		device_printf(dev,
   4761 		    "Advertised speed can only be set on copper or "
   4762 		    "multispeed fiber media types.\n");
   4763 		return (EINVAL);
   4764 	}
   4765 
   4766 	if (advertise < 0x0 || advertise > 0xF) {
   4767 		device_printf(dev,
   4768 		    "Invalid advertised speed; valid modes are 0x0 through 0xF\n");
   4769 		return (EINVAL);
   4770 	}
   4771 
   4772 	if (hw->mac.ops.get_link_capabilities) {
   4773 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4774 		    &negotiate);
   4775 		if (err != IXGBE_SUCCESS) {
   4776 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   4777 			return (ENODEV);
   4778 		}
   4779 	}
   4780 
   4781 	/* Set new value and report new advertised mode */
   4782 	if (advertise & 0x1) {
   4783 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4784 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4785 			return (EINVAL);
   4786 		}
   4787 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4788 	}
   4789 	if (advertise & 0x2) {
   4790 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4791 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4792 			return (EINVAL);
   4793 		}
   4794 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4795 	}
   4796 	if (advertise & 0x4) {
   4797 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4798 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4799 			return (EINVAL);
   4800 		}
   4801 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4802 	}
   4803 	if (advertise & 0x8) {
   4804 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4805 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4806 			return (EINVAL);
   4807 		}
   4808 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4809 	}
   4810 	if (advertise == 0)
   4811 		speed = link_caps; /* All capable link speed */
   4812 
   4813 	hw->mac.autotry_restart = TRUE;
   4814 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4815 	adapter->advertise = advertise;
   4816 
   4817 	return (0);
   4818 } /* ixgbe_set_advertise */
   4819 
   4820 /************************************************************************
   4821  * ixgbe_get_advertise - Get current advertised speed settings
   4822  *
   4823  *   Formatted for sysctl usage.
   4824  *   Flags:
   4825  *     0x1 - advertise 100 Mb
   4826  *     0x2 - advertise 1G
   4827  *     0x4 - advertise 10G
   4828  *     0x8 - advertise 10 Mb (yes, Mb)
   4829  ************************************************************************/
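        /*
         * Example: a device whose link capabilities include 10G, 1G and
         * 100Mb (but not 10Mb) returns 0x7.
         */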
   4830 static int
   4831 ixgbe_get_advertise(struct adapter *adapter)
   4832 {
   4833 	struct ixgbe_hw  *hw = &adapter->hw;
   4834 	int              speed;
   4835 	ixgbe_link_speed link_caps = 0;
   4836 	s32              err;
   4837 	bool             negotiate = FALSE;
   4838 
   4839 	/*
   4840 	 * Advertised speed means nothing unless it's copper or
   4841 	 * multi-speed fiber
   4842 	 */
   4843 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4844 	    !(hw->phy.multispeed_fiber))
   4845 		return (0);
   4846 
   4847 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4848 	if (err != IXGBE_SUCCESS)
   4849 		return (0);
   4850 
   4851 	speed =
   4852 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
   4853 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
   4854 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
   4855 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
   4856 
   4857 	return speed;
   4858 } /* ixgbe_get_advertise */
   4859 
   4860 /************************************************************************
   4861  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   4862  *
   4863  *   Control values:
   4864  *     0/1 - off / on (use default value of 1000)
   4865  *
   4866  *     Legal timer values are:
   4867  *     50,100,250,500,1000,2000,5000,10000
   4868  *
   4869  *     Turning off interrupt moderation will also turn this off.
   4870  ************************************************************************/
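        /*
         * Example: writing 1 enables DMA coalescing with the default
         * watchdog value of 1000, writing 250 selects 250 directly, and any
         * other value not listed above returns EINVAL.  The new value is
         * pushed to the hardware by ixgbe_config_dmac() (X550 and newer
         * only).
         */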
   4871 static int
   4872 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   4873 {
   4874 	struct sysctlnode node = *rnode;
   4875 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4876 	struct ifnet   *ifp = adapter->ifp;
   4877 	int            error;
   4878 	int            newval;
   4879 
   4880 	newval = adapter->dmac;
   4881 	node.sysctl_data = &newval;
   4882 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4883 	if ((error) || (newp == NULL))
   4884 		return (error);
   4885 
   4886 	switch (newval) {
   4887 	case 0:
   4888 		/* Disabled */
   4889 		adapter->dmac = 0;
   4890 		break;
   4891 	case 1:
   4892 		/* Enable and use default */
   4893 		adapter->dmac = 1000;
   4894 		break;
   4895 	case 50:
   4896 	case 100:
   4897 	case 250:
   4898 	case 500:
   4899 	case 1000:
   4900 	case 2000:
   4901 	case 5000:
   4902 	case 10000:
   4903 		/* Legal values - allow */
   4904 		adapter->dmac = newval;
   4905 		break;
   4906 	default:
   4907 		/* Do nothing, illegal value */
   4908 		return (EINVAL);
   4909 	}
   4910 
   4911 	/* Re-initialize hardware if it's already running */
   4912 	if (ifp->if_flags & IFF_RUNNING)
   4913 		ixgbe_init(ifp);
   4914 
   4915 	return (0);
   4916 }
   4917 
   4918 #ifdef IXGBE_DEBUG
   4919 /************************************************************************
   4920  * ixgbe_sysctl_power_state
   4921  *
   4922  *   Sysctl to test power states
   4923  *   Values:
   4924  *     0      - set device to D0
   4925  *     3      - set device to D3
   4926  *     (none) - get current device power state
   4927  ************************************************************************/
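        /*
         * Note: on NetBSD the body of this handler is still under
         * "#ifdef notyet", so the sysctl currently has no effect and always
         * returns 0.
         */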
   4928 static int
   4929 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   4930 {
   4931 #ifdef notyet
   4932 	struct sysctlnode node = *rnode;
   4933 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4934 	device_t       dev =  adapter->dev;
   4935 	int            curr_ps, new_ps, error = 0;
   4936 
   4937 	curr_ps = new_ps = pci_get_powerstate(dev);
   4938 
   4939 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4940 	if ((error) || (req->newp == NULL))
   4941 		return (error);
   4942 
   4943 	if (new_ps == curr_ps)
   4944 		return (0);
   4945 
   4946 	if (new_ps == 3 && curr_ps == 0)
   4947 		error = DEVICE_SUSPEND(dev);
   4948 	else if (new_ps == 0 && curr_ps == 3)
   4949 		error = DEVICE_RESUME(dev);
   4950 	else
   4951 		return (EINVAL);
   4952 
   4953 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   4954 
   4955 	return (error);
   4956 #else
   4957 	return 0;
   4958 #endif
   4959 } /* ixgbe_sysctl_power_state */
   4960 #endif
   4961 
   4962 /************************************************************************
   4963  * ixgbe_sysctl_wol_enable
   4964  *
   4965  *   Sysctl to enable/disable the WoL capability,
   4966  *   if supported by the adapter.
   4967  *
   4968  *   Values:
   4969  *     0 - disabled
   4970  *     1 - enabled
   4971  ************************************************************************/
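        /*
         * Example: writing 1 succeeds only when the adapter reports WoL
         * support (adapter->wol_support); otherwise ENODEV is returned and
         * the cached hw->wol_enabled value is left unchanged.
         */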
   4972 static int
   4973 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   4974 {
   4975 	struct sysctlnode node = *rnode;
   4976 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   4977 	struct ixgbe_hw *hw = &adapter->hw;
   4978 	bool            new_wol_enabled;
   4979 	int             error = 0;
   4980 
   4981 	new_wol_enabled = hw->wol_enabled;
   4982 	node.sysctl_data = &new_wol_enabled;
   4983 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4984 	if ((error) || (newp == NULL))
   4985 		return (error);
   4986 	if (new_wol_enabled == hw->wol_enabled)
   4987 		return (0);
   4988 
   4989 	if (new_wol_enabled && !adapter->wol_support)
   4990 		return (ENODEV);
   4991 	else
   4992 		hw->wol_enabled = new_wol_enabled;
   4993 
   4994 	return (0);
   4995 } /* ixgbe_sysctl_wol_enable */
   4996 
   4997 /************************************************************************
   4998  * ixgbe_sysctl_wufc - Wake Up Filter Control
   4999  *
   5000  *   Sysctl to enable/disable the types of packets that the
   5001  *   adapter will wake up on upon receipt.
   5002  *   Flags:
   5003  *     0x1  - Link Status Change
   5004  *     0x2  - Magic Packet
   5005  *     0x4  - Direct Exact
   5006  *     0x8  - Directed Multicast
   5007  *     0x10 - Broadcast
   5008  *     0x20 - ARP/IPv4 Request Packet
   5009  *     0x40 - Direct IPv4 Packet
   5010  *     0x80 - Direct IPv6 Packet
   5011  *
   5012  *   Settings not listed above will cause the sysctl to return an error.
   5013  ************************************************************************/
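        /*
         * Example: writing 0x3 selects wake-up on Link Status Change and
         * Magic Packet; any value with bits set above 0xff is rejected with
         * EINVAL.
         */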
   5014 static int
   5015 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5016 {
   5017 	struct sysctlnode node = *rnode;
   5018 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5019 	int error = 0;
   5020 	u32 new_wufc;
   5021 
   5022 	new_wufc = adapter->wufc;
   5023 	node.sysctl_data = &new_wufc;
   5024 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5025 	if ((error) || (newp == NULL))
   5026 		return (error);
   5027 	if (new_wufc == adapter->wufc)
   5028 		return (0);
   5029 
   5030 	if (new_wufc & 0xffffff00)
   5031 		return (EINVAL);
   5032 
   5033 	new_wufc &= 0xff;
   5034 	new_wufc |= (0xffffff00 & adapter->wufc);
   5035 	adapter->wufc = new_wufc;
   5036 
   5037 	return (0);
   5038 } /* ixgbe_sysctl_wufc */
   5039 
   5040 #ifdef IXGBE_DEBUG
   5041 /************************************************************************
   5042  * ixgbe_sysctl_print_rss_config
   5043  ************************************************************************/
   5044 static int
   5045 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5046 {
   5047 #ifdef notyet
   5048 	struct sysctlnode node = *rnode;
   5049 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5050 	struct ixgbe_hw *hw = &adapter->hw;
   5051 	device_t        dev = adapter->dev;
   5052 	struct sbuf     *buf;
   5053 	int             error = 0, reta_size;
   5054 	u32             reg;
   5055 
   5056 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5057 	if (!buf) {
   5058 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5059 		return (ENOMEM);
   5060 	}
   5061 
   5062 	// TODO: use sbufs to make a string to print out
   5063 	/* Set multiplier for RETA setup and table size based on MAC */
   5064 	switch (adapter->hw.mac.type) {
   5065 	case ixgbe_mac_X550:
   5066 	case ixgbe_mac_X550EM_x:
   5067 	case ixgbe_mac_X550EM_a:
   5068 		reta_size = 128;
   5069 		break;
   5070 	default:
   5071 		reta_size = 32;
   5072 		break;
   5073 	}
   5074 
   5075 	/* Print out the redirection table */
   5076 	sbuf_cat(buf, "\n");
   5077 	for (int i = 0; i < reta_size; i++) {
   5078 		if (i < 32) {
   5079 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5080 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5081 		} else {
   5082 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5083 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5084 		}
   5085 	}
   5086 
   5087 	// TODO: print more config
   5088 
   5089 	error = sbuf_finish(buf);
   5090 	if (error)
   5091 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5092 
   5093 	sbuf_delete(buf);
   5094 #endif
   5095 	return (0);
   5096 } /* ixgbe_sysctl_print_rss_config */
   5097 #endif /* IXGBE_DEBUG */
   5098 
   5099 /************************************************************************
   5100  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5101  *
   5102  *   For X552/X557-AT devices using an external PHY
   5103  ************************************************************************/
   5104 static int
   5105 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5106 {
   5107 	struct sysctlnode node = *rnode;
   5108 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5109 	struct ixgbe_hw *hw = &adapter->hw;
   5110 	int val;
   5111 	u16 reg;
   5112 	int		error;
   5113 
   5114 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5115 		device_printf(adapter->dev,
   5116 		    "Device has no supported external thermal sensor.\n");
   5117 		return (ENODEV);
   5118 	}
   5119 
   5120 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5121 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5122 		device_printf(adapter->dev,
   5123 		    "Error reading from PHY's current temperature register\n");
   5124 		return (EAGAIN);
   5125 	}
   5126 
   5127 	node.sysctl_data = &val;
   5128 
   5129 	/* Shift temp for output */
   5130 	val = reg >> 8;
   5131 
   5132 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5133 	if ((error) || (newp == NULL))
   5134 		return (error);
   5135 
   5136 	return (0);
   5137 } /* ixgbe_sysctl_phy_temp */
   5138 
   5139 /************************************************************************
   5140  * ixgbe_sysctl_phy_overtemp_occurred
   5141  *
   5142  *   Reports (directly from the PHY) whether the current PHY
   5143  *   temperature is over the overtemp threshold.
   5144  ************************************************************************/
   5145 static int
   5146 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5147 {
   5148 	struct sysctlnode node = *rnode;
   5149 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5150 	struct ixgbe_hw *hw = &adapter->hw;
   5151 	int val, error;
   5152 	u16 reg;
   5153 
   5154 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5155 		device_printf(adapter->dev,
   5156 		    "Device has no supported external thermal sensor.\n");
   5157 		return (ENODEV);
   5158 	}
   5159 
   5160 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5161 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5162 		device_printf(adapter->dev,
   5163 		    "Error reading from PHY's temperature status register\n");
   5164 		return (EAGAIN);
   5165 	}
   5166 
   5167 	node.sysctl_data = &val;
   5168 
   5169 	/* Get occurrence bit */
   5170 	val = !!(reg & 0x4000);
   5171 
   5172 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5173 	if ((error) || (newp == NULL))
   5174 		return (error);
   5175 
   5176 	return (0);
   5177 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5178 
   5179 /************************************************************************
   5180  * ixgbe_sysctl_eee_state
   5181  *
   5182  *   Sysctl to set EEE power saving feature
   5183  *   Values:
   5184  *     0      - disable EEE
   5185  *     1      - enable EEE
   5186  *     (none) - get current device EEE state
   5187  ************************************************************************/
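        /*
         * Note: a successful change calls hw->mac.ops.setup_eee() and then
         * reinitializes the interface via ixgbe_init() to restart
         * autonegotiation.
         */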
   5188 static int
   5189 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5190 {
   5191 	struct sysctlnode node = *rnode;
   5192 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5193 	struct ifnet   *ifp = adapter->ifp;
   5194 	device_t       dev = adapter->dev;
   5195 	int            curr_eee, new_eee, error = 0;
   5196 	s32            retval;
   5197 
   5198 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5199 	node.sysctl_data = &new_eee;
   5200 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5201 	if ((error) || (newp == NULL))
   5202 		return (error);
   5203 
   5204 	/* Nothing to do */
   5205 	if (new_eee == curr_eee)
   5206 		return (0);
   5207 
   5208 	/* Not supported */
   5209 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5210 		return (EINVAL);
   5211 
   5212 	/* Bounds checking */
   5213 	if ((new_eee < 0) || (new_eee > 1))
   5214 		return (EINVAL);
   5215 
   5216 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5217 	if (retval) {
   5218 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5219 		return (EINVAL);
   5220 	}
   5221 
   5222 	/* Restart auto-neg */
   5223 	ixgbe_init(ifp);
   5224 
   5225 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5226 
   5227 	/* Cache new value */
   5228 	if (new_eee)
   5229 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5230 	else
   5231 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5232 
   5233 	return (error);
   5234 } /* ixgbe_sysctl_eee_state */
   5235 
   5236 /************************************************************************
   5237  * ixgbe_init_device_features
   5238  ************************************************************************/
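        /*
         * feat_cap records what the silicon (and build) can support, while
         * feat_en records what is actually enabled.  The code below first
         * fills in feat_cap per MAC type, then derives feat_en from the
         * defaults, the global sysctls/tunables and the dependency checks
         * (e.g. RSS and SR-IOV require MSI-X).
         */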
   5239 static void
   5240 ixgbe_init_device_features(struct adapter *adapter)
   5241 {
   5242 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5243 	                  | IXGBE_FEATURE_RSS
   5244 	                  | IXGBE_FEATURE_MSI
   5245 	                  | IXGBE_FEATURE_MSIX
   5246 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5247 	                  | IXGBE_FEATURE_LEGACY_TX;
   5248 
   5249 	/* Set capabilities first... */
   5250 	switch (adapter->hw.mac.type) {
   5251 	case ixgbe_mac_82598EB:
   5252 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5253 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5254 		break;
   5255 	case ixgbe_mac_X540:
   5256 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5257 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5258 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5259 		    (adapter->hw.bus.func == 0))
   5260 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5261 		break;
   5262 	case ixgbe_mac_X550:
   5263 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5264 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5265 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5266 		break;
   5267 	case ixgbe_mac_X550EM_x:
   5268 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5269 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5270 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5271 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5272 		break;
   5273 	case ixgbe_mac_X550EM_a:
   5274 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5275 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5276 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5277 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5278 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5279 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5280 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5281 		}
   5282 		break;
   5283 	case ixgbe_mac_82599EB:
   5284 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5285 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5286 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5287 		    (adapter->hw.bus.func == 0))
   5288 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5289 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5290 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5291 		break;
   5292 	default:
   5293 		break;
   5294 	}
   5295 
   5296 	/* Enabled by default... */
   5297 	/* Fan failure detection */
   5298 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5299 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5300 	/* Netmap */
   5301 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5302 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5303 	/* EEE */
   5304 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5305 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5306 	/* Thermal Sensor */
   5307 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5308 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5309 
   5310 	/* Enabled via global sysctl... */
   5311 	/* Flow Director */
   5312 	if (ixgbe_enable_fdir) {
   5313 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5314 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5315 		else
   5316 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5317 	}
   5318 	/* Legacy (single queue) transmit */
   5319 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5320 	    ixgbe_enable_legacy_tx)
   5321 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5322 	/*
   5323 	 * Message Signal Interrupts - Extended (MSI-X)
   5324 	 * Normal MSI is only enabled if MSI-X calls fail.
   5325 	 */
   5326 	if (!ixgbe_enable_msix)
   5327 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5328 	/* Receive-Side Scaling (RSS) */
   5329 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5330 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5331 
   5332 	/* Disable features with unmet dependencies... */
   5333 	/* No MSI-X */
   5334 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5335 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5336 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5337 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5338 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5339 	}
   5340 } /* ixgbe_init_device_features */
   5341 
   5342 /************************************************************************
   5343  * ixgbe_probe - Device identification routine
   5344  *
   5345  *   Determines if the driver should be loaded on
   5346  *   adapter based on its PCI vendor/device ID.
   5347  *
   5348  *   return BUS_PROBE_DEFAULT on success, positive on failure
   5349  ************************************************************************/
   5350 static int
   5351 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5352 {
   5353 	const struct pci_attach_args *pa = aux;
   5354 
   5355 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5356 }
   5357 
   5358 static ixgbe_vendor_info_t *
   5359 ixgbe_lookup(const struct pci_attach_args *pa)
   5360 {
   5361 	ixgbe_vendor_info_t *ent;
   5362 	pcireg_t subid;
   5363 
   5364 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5365 
   5366 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5367 		return NULL;
   5368 
   5369 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5370 
   5371 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5372 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5373 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5374 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5375 			(ent->subvendor_id == 0)) &&
   5376 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5377 			(ent->subdevice_id == 0))) {
   5378 			++ixgbe_total_ports;
   5379 			return ent;
   5380 		}
   5381 	}
   5382 	return NULL;
   5383 }
   5384 
   5385 static int
   5386 ixgbe_ifflags_cb(struct ethercom *ec)
   5387 {
   5388 	struct ifnet *ifp = &ec->ec_if;
   5389 	struct adapter *adapter = ifp->if_softc;
   5390 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5391 
   5392 	IXGBE_CORE_LOCK(adapter);
   5393 
   5394 	if (change != 0)
   5395 		adapter->if_flags = ifp->if_flags;
   5396 
   5397 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5398 		rc = ENETRESET;
   5399 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5400 		ixgbe_set_promisc(adapter);
   5401 
   5402 	/* Set up VLAN support and filter */
   5403 	ixgbe_setup_vlan_hw_support(adapter);
   5404 
   5405 	IXGBE_CORE_UNLOCK(adapter);
   5406 
   5407 	return rc;
   5408 }
   5409 
   5410 /************************************************************************
   5411  * ixgbe_ioctl - Ioctl entry point
   5412  *
   5413  *   Called when the user wants to configure the interface.
   5414  *
   5415  *   return 0 on success, positive on failure
   5416  ************************************************************************/
   5417 static int
   5418 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5419 {
   5420 	struct adapter	*adapter = ifp->if_softc;
   5421 	struct ixgbe_hw *hw = &adapter->hw;
   5422 	struct ifcapreq *ifcr = data;
   5423 	struct ifreq	*ifr = data;
   5424 	int             error = 0;
   5425 	int l4csum_en;
   5426 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5427 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5428 
   5429 	switch (command) {
   5430 	case SIOCSIFFLAGS:
   5431 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5432 		break;
   5433 	case SIOCADDMULTI:
   5434 	case SIOCDELMULTI:
   5435 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5436 		break;
   5437 	case SIOCSIFMEDIA:
   5438 	case SIOCGIFMEDIA:
   5439 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5440 		break;
   5441 	case SIOCSIFCAP:
   5442 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5443 		break;
   5444 	case SIOCSIFMTU:
   5445 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5446 		break;
   5447 #ifdef __NetBSD__
   5448 	case SIOCINITIFADDR:
   5449 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5450 		break;
   5451 	case SIOCGIFFLAGS:
   5452 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5453 		break;
   5454 	case SIOCGIFAFLAG_IN:
   5455 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5456 		break;
   5457 	case SIOCGIFADDR:
   5458 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5459 		break;
   5460 	case SIOCGIFMTU:
   5461 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5462 		break;
   5463 	case SIOCGIFCAP:
   5464 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5465 		break;
   5466 	case SIOCGETHERCAP:
   5467 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5468 		break;
   5469 	case SIOCGLIFADDR:
   5470 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5471 		break;
   5472 	case SIOCZIFDATA:
   5473 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5474 		hw->mac.ops.clear_hw_cntrs(hw);
   5475 		ixgbe_clear_evcnt(adapter);
   5476 		break;
   5477 	case SIOCAIFADDR:
   5478 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5479 		break;
   5480 #endif
   5481 	default:
   5482 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5483 		break;
   5484 	}
   5485 
   5486 	switch (command) {
   5487 	case SIOCSIFMEDIA:
   5488 	case SIOCGIFMEDIA:
   5489 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5490 	case SIOCGI2C:
   5491 	{
   5492 		struct ixgbe_i2c_req	i2c;
   5493 
   5494 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5495 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5496 		if (error != 0)
   5497 			break;
   5498 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5499 			error = EINVAL;
   5500 			break;
   5501 		}
   5502 		if (i2c.len > sizeof(i2c.data)) {
   5503 			error = EINVAL;
   5504 			break;
   5505 		}
   5506 
   5507 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5508 		    i2c.dev_addr, i2c.data);
   5509 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5510 		break;
   5511 	}
   5512 	case SIOCSIFCAP:
   5513 		/* Layer-4 Rx checksum offload has to be turned on and
   5514 		 * off as a unit.
   5515 		 */
   5516 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5517 		if (l4csum_en != l4csum && l4csum_en != 0)
   5518 			return EINVAL;
   5519 		/*FALLTHROUGH*/
   5520 	case SIOCADDMULTI:
   5521 	case SIOCDELMULTI:
   5522 	case SIOCSIFFLAGS:
   5523 	case SIOCSIFMTU:
   5524 	default:
   5525 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5526 			return error;
   5527 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5528 			;
   5529 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5530 			IXGBE_CORE_LOCK(adapter);
   5531 			ixgbe_init_locked(adapter);
   5532 			ixgbe_recalculate_max_frame(adapter);
   5533 			IXGBE_CORE_UNLOCK(adapter);
   5534 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5535 			/*
   5536 			 * Multicast list has changed; set the hardware filter
   5537 			 * accordingly.
   5538 			 */
   5539 			IXGBE_CORE_LOCK(adapter);
   5540 			ixgbe_disable_intr(adapter);
   5541 			ixgbe_set_multi(adapter);
   5542 			ixgbe_enable_intr(adapter);
   5543 			IXGBE_CORE_UNLOCK(adapter);
   5544 		}
   5545 		return 0;
   5546 	}
   5547 
   5548 	return error;
   5549 } /* ixgbe_ioctl */
   5550 
   5551 /************************************************************************
   5552  * ixgbe_check_fan_failure
   5553  ************************************************************************/
   5554 static void
   5555 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5556 {
   5557 	u32 mask;
   5558 
   5559 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5560 	    IXGBE_ESDP_SDP1;
   5561 
   5562 	if (reg & mask)
   5563 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5564 } /* ixgbe_check_fan_failure */
   5565 
   5566 /************************************************************************
   5567  * ixgbe_handle_que
   5568  ************************************************************************/
   5569 static void
   5570 ixgbe_handle_que(void *context)
   5571 {
   5572 	struct ix_queue *que = context;
   5573 	struct adapter  *adapter = que->adapter;
   5574 	struct tx_ring  *txr = que->txr;
   5575 	struct ifnet    *ifp = adapter->ifp;
   5576 
   5577 	adapter->handleq.ev_count++;
   5578 
   5579 	if (ifp->if_flags & IFF_RUNNING) {
   5580 		ixgbe_rxeof(que);
   5581 		IXGBE_TX_LOCK(txr);
   5582 		ixgbe_txeof(txr);
   5583 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5584 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5585 				ixgbe_mq_start_locked(ifp, txr);
   5586 		/* Only for queue 0 */
   5587 		/* NetBSD still needs this for CBQ */
   5588 		if ((&adapter->queues[0] == que)
   5589 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5590 			ixgbe_legacy_start_locked(ifp, txr);
   5591 		IXGBE_TX_UNLOCK(txr);
   5592 	}
   5593 
   5594 	/* Re-enable this interrupt */
   5595 	if (que->res != NULL)
   5596 		ixgbe_enable_queue(adapter, que->msix);
   5597 	else
   5598 		ixgbe_enable_intr(adapter);
   5599 
   5600 	return;
   5601 } /* ixgbe_handle_que */
   5602 
   5603 /************************************************************************
   5604  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5605  ************************************************************************/
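        /*
         * Note: MSI is preferred; if the MSI handler cannot be established,
         * the interrupt is released and the allocation is retried with INTx
         * (see the alloc_retry label below).
         */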
   5606 static int
   5607 ixgbe_allocate_legacy(struct adapter *adapter,
   5608     const struct pci_attach_args *pa)
   5609 {
   5610 	device_t	dev = adapter->dev;
   5611 	struct ix_queue *que = adapter->queues;
   5612 	struct tx_ring  *txr = adapter->tx_rings;
   5613 	int		counts[PCI_INTR_TYPE_SIZE];
   5614 	pci_intr_type_t intr_type, max_type;
   5615 	char            intrbuf[PCI_INTRSTR_LEN];
   5616 	const char	*intrstr = NULL;
   5617 
   5618 	/* We allocate a single interrupt resource */
   5619 	max_type = PCI_INTR_TYPE_MSI;
   5620 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5621 	counts[PCI_INTR_TYPE_MSI] =
   5622 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5623 	counts[PCI_INTR_TYPE_INTX] =
   5624 	    (adapter->feat_en & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5625 
   5626 alloc_retry:
   5627 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5628 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5629 		return ENXIO;
   5630 	}
   5631 	adapter->osdep.nintrs = 1;
   5632 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5633 	    intrbuf, sizeof(intrbuf));
   5634 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5635 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5636 	    device_xname(dev));
   5637 	if (adapter->osdep.ihs[0] == NULL) {
   5638 		intr_type = pci_intr_type(adapter->osdep.pc,
   5639 		    adapter->osdep.intrs[0]);
   5640 		aprint_error_dev(dev,"unable to establish %s\n",
   5641 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5642 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5643 		switch (intr_type) {
   5644 		case PCI_INTR_TYPE_MSI:
   5645 			/* The next try is for INTx: Disable MSI */
   5646 			max_type = PCI_INTR_TYPE_INTX;
   5647 			counts[PCI_INTR_TYPE_INTX] = 1;
   5648 			goto alloc_retry;
   5649 		case PCI_INTR_TYPE_INTX:
   5650 		default:
   5651 			/* See below */
   5652 			break;
   5653 		}
   5654 	}
   5655 	if (adapter->osdep.ihs[0] == NULL) {
   5656 		aprint_error_dev(dev,
   5657 		    "couldn't establish interrupt%s%s\n",
   5658 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5659 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5660 		return ENXIO;
   5661 	}
   5662 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5663 	/*
   5664 	 * Try allocating a fast interrupt and the associated deferred
   5665 	 * processing contexts.
   5666 	 */
   5667 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5668 		txr->txr_si =
   5669 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5670 			ixgbe_deferred_mq_start, txr);
   5671 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5672 	    ixgbe_handle_que, que);
   5673 
   5674 	/* Tasklets for Link, SFP and Multispeed Fiber */
   5675 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   5676 	    ixgbe_handle_link, adapter);
   5677 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5678 	    ixgbe_handle_mod, adapter);
   5679 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5680 	    ixgbe_handle_msf, adapter);
   5681 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5682 	    ixgbe_handle_phy, adapter);
   5683 
   5684 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5685 		adapter->fdir_si =
   5686 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5687 			ixgbe_reinit_fdir, adapter);
   5688 
   5689 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) &&
   5690 		(txr->txr_si == NULL)) ||
   5691 	    que->que_si == NULL ||
   5692 	    adapter->link_si == NULL ||
   5693 	    adapter->mod_si == NULL ||
   5694 	    ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   5695 		(adapter->fdir_si == NULL)) ||
   5696 	    adapter->msf_si == NULL) {
   5697 		aprint_error_dev(dev,
   5698 		    "could not establish software interrupts\n");
   5699 
   5700 		return ENXIO;
   5701 	}
   5702 	/* For simplicity in the handlers */
   5703 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5704 
   5705 	return (0);
   5706 } /* ixgbe_allocate_legacy */
   5707 
   5708 
   5709 /************************************************************************
   5710  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5711  ************************************************************************/
   5712 static int
   5713 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5714 {
   5715 	device_t        dev = adapter->dev;
   5716 	struct ix_queue	*que = adapter->queues;
   5717 	struct tx_ring	*txr = adapter->tx_rings;
   5718 	pci_chipset_tag_t pc;
   5719 	char		intrbuf[PCI_INTRSTR_LEN];
   5720 	char		intr_xname[32];
   5721 	const char	*intrstr = NULL;
   5722 	int 		error, vector = 0;
   5723 	int		cpu_id = 0;
   5724 	kcpuset_t	*affinity;
   5725 #ifdef RSS
   5726 	unsigned int    rss_buckets = 0;
   5727 	kcpuset_t	cpu_mask;
   5728 #endif
   5729 
   5730 	pc = adapter->osdep.pc;
   5731 #ifdef	RSS
   5732 	/*
   5733 	 * If we're doing RSS, the number of queues needs to
   5734 	 * match the number of RSS buckets that are configured.
   5735 	 *
   5736 	 * + If there's more queues than RSS buckets, we'll end
   5737 	 *   up with queues that get no traffic.
   5738 	 *
   5739 	 * + If there's more RSS buckets than queues, we'll end
   5740 	 *   up having multiple RSS buckets map to the same queue,
   5741 	 *   so there'll be some contention.
   5742 	 */
   5743 	rss_buckets = rss_getnumbuckets();
   5744 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5745 	    (adapter->num_queues != rss_buckets)) {
   5746 		device_printf(dev,
   5747 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5748 		    "; performance will be impacted.\n",
   5749 		    __func__, adapter->num_queues, rss_buckets);
   5750 	}
   5751 #endif
   5752 
   5753 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5754 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5755 	    adapter->osdep.nintrs) != 0) {
   5756 		aprint_error_dev(dev,
   5757 		    "failed to allocate MSI-X interrupt\n");
   5758 		return (ENXIO);
   5759 	}
   5760 
   5761 	kcpuset_create(&affinity, false);
   5762 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5763 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5764 		    device_xname(dev), i);
   5765 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5766 		    sizeof(intrbuf));
   5767 #ifdef IXGBE_MPSAFE
   5768 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5769 		    true);
   5770 #endif
   5771 		/* Set the handler function */
   5772 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5773 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5774 		    intr_xname);
   5775 		if (que->res == NULL) {
   5776 			pci_intr_release(pc, adapter->osdep.intrs,
   5777 			    adapter->osdep.nintrs);
   5778 			aprint_error_dev(dev,
   5779 			    "Failed to register QUE handler\n");
   5780 			kcpuset_destroy(affinity);
   5781 			return ENXIO;
   5782 		}
   5783 		que->msix = vector;
   5784 		adapter->active_queues |= (u64)(1 << que->msix);
   5785 
   5786 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5787 #ifdef	RSS
   5788 			/*
   5789 			 * The queue ID is used as the RSS layer bucket ID.
   5790 			 * We look up the queue ID -> RSS CPU ID and select
   5791 			 * that.
   5792 			 */
   5793 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5794 			CPU_SETOF(cpu_id, &cpu_mask);
   5795 #endif
   5796 		} else {
   5797 			/*
   5798 			 * Bind the MSI-X vector, and thus the
   5799 			 * rings to the corresponding CPU.
   5800 			 *
   5801 			 * This just happens to match the default RSS
   5802 			 * round-robin bucket -> queue -> CPU allocation.
   5803 			 */
   5804 			if (adapter->num_queues > 1)
   5805 				cpu_id = i;
   5806 		}
   5807 		/* Round-robin affinity */
   5808 		kcpuset_zero(affinity);
   5809 		kcpuset_set(affinity, cpu_id % ncpu);
   5810 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5811 		    NULL);
   5812 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5813 		    intrstr);
   5814 		if (error == 0) {
   5815 #if 1 /* def IXGBE_DEBUG */
   5816 #ifdef	RSS
   5817 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   5818 			    cpu_id % ncpu);
   5819 #else
   5820 			aprint_normal(", bound queue %d to cpu %d", i,
   5821 			    cpu_id % ncpu);
   5822 #endif
   5823 #endif /* IXGBE_DEBUG */
   5824 		}
   5825 		aprint_normal("\n");
   5826 
   5827 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5828 			txr->txr_si = softint_establish(
   5829 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5830 				ixgbe_deferred_mq_start, txr);
   5831 		que->que_si
   5832 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5833 			ixgbe_handle_que, que);
   5834 		if (que->que_si == NULL) {
   5835 			aprint_error_dev(dev,
   5836 			    "could not establish software interrupt\n");
   5837 		}
   5838 	}
   5839 
   5840 	/* and Link */
   5841 	cpu_id++;
   5842 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5843 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5844 	    sizeof(intrbuf));
   5845 #ifdef IXGBE_MPSAFE
   5846 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5847 	    true);
   5848 #endif
   5849 	/* Set the link handler function */
   5850 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5851 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   5852 	    intr_xname);
   5853 	if (adapter->osdep.ihs[vector] == NULL) {
   5854 		adapter->res = NULL;
   5855 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   5856 		kcpuset_destroy(affinity);
   5857 		return (ENXIO);
   5858 	}
   5859 	/* Round-robin affinity */
   5860 	kcpuset_zero(affinity);
   5861 	kcpuset_set(affinity, cpu_id % ncpu);
    5862 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
         	    NULL);
   5863 
   5864 	aprint_normal_dev(dev,
   5865 	    "for link, interrupting at %s", intrstr);
   5866 	if (error == 0)
   5867 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   5868 	else
   5869 		aprint_normal("\n");
   5870 
   5871 	adapter->vector = vector;
   5872 	/* Tasklets for Link, SFP and Multispeed Fiber */
    5873 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5874 	    ixgbe_handle_link, adapter);
   5875 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5876 	    ixgbe_handle_mod, adapter);
   5877 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5878 	    ixgbe_handle_msf, adapter);
   5879 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   5880 		adapter->mbx_si =
   5881 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5882 			ixgbe_handle_mbx, adapter);
   5883 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5884 		ixgbe_handle_phy, adapter);
   5885 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5886 		adapter->fdir_si =
   5887 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5888 			ixgbe_reinit_fdir, adapter);
   5889 
   5890 	kcpuset_destroy(affinity);
   5891 
   5892 	return (0);
   5893 } /* ixgbe_allocate_msix */
   5894 
   5895 /************************************************************************
   5896  * ixgbe_configure_interrupts
   5897  *
    5898  *   Set up MSI-X, MSI, or legacy interrupts (in that order).
    5899  *   The choice also depends on user-configurable settings.
   5900  ************************************************************************/
   5901 static int
   5902 ixgbe_configure_interrupts(struct adapter *adapter)
   5903 {
   5904 	device_t dev = adapter->dev;
   5905 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   5906 	int want, queues, msgs;
   5907 
   5908 	/* Default to 1 queue if MSI-X setup fails */
   5909 	adapter->num_queues = 1;
   5910 
    5911 	/* The MSI-X capability may have been overridden by a tunable */
   5912 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   5913 		goto msi;
   5914 
   5915 	/* First try MSI-X */
   5916 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   5917 	msgs = MIN(msgs, IXG_MAX_NINTR);
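         	/* Need at least one queue vector plus the link vector */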
   5918 	if (msgs < 2)
   5919 		goto msi;
   5920 
   5921 	adapter->msix_mem = (void *)1; /* XXX */
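         	/* (apparently only used as a non-NULL "MSI-X in use" marker) */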
   5922 
   5923 	/* Figure out a reasonable auto config value */
   5924 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
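         	/* (one of the msgs vectors is reserved for the link interrupt) */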
   5925 
   5926 #ifdef	RSS
   5927 	/* If we're doing RSS, clamp at the number of RSS buckets */
   5928 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   5929 		queues = min(queues, rss_getnumbuckets());
   5930 #endif
   5931 	if (ixgbe_num_queues > queues) {
    5932 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is "
         		    "too large, using reduced amount (%d).\n",
         		    ixgbe_num_queues, queues);
   5933 		ixgbe_num_queues = queues;
   5934 	}
   5935 
    5936 	if (ixgbe_num_queues != 0)
    5937 		queues = ixgbe_num_queues;
    5938 	else
    5939 		/* When autoconfiguring, clamp to the hardware queue limits */
    5940 		queues = min(queues,
    5941 		    min(mac->max_tx_queues, mac->max_rx_queues));
   5942 
   5943 	/* reflect correct sysctl value */
   5944 	ixgbe_num_queues = queues;
   5945 
   5946 	/*
   5947 	 * Want one vector (RX/TX pair) per queue
    5948 	 * plus an additional one for the link interrupt.
   5949 	 */
   5950 	want = queues + 1;
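         	/* e.g. 8 queues -> 8 RX/TX vectors + 1 link vector = 9 wanted */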
   5951 	if (msgs >= want)
   5952 		msgs = want;
    5953 	else {
    5954 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
    5955 		    "%d vectors available but %d vectors wanted!\n",
    5956 		    msgs, want);
   5957 		goto msi;
   5958 	}
   5959 	device_printf(dev,
   5960 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   5961 	adapter->num_queues = queues;
   5962 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   5963 	return (0);
   5964 
    5965 	/*
    5966 	 * MSI-X allocation failed or provided us with
    5967 	 * fewer vectors than needed.  Free the MSI-X
    5968 	 * resources and fall back to trying MSI.
    5969 	 */
   5970 msi:
   5971 	/* Without MSI-X, some features are no longer supported */
   5972 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5973 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   5974 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5975 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   5976 
    5977 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   5978 	adapter->msix_mem = NULL; /* XXX */
   5979 	if (msgs > 1)
   5980 		msgs = 1;
   5981 	if (msgs != 0) {
   5982 		msgs = 1;
   5983 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   5984 		aprint_normal_dev(dev, "Using an MSI interrupt\n");
   5985 		return (0);
   5986 	}
   5987 
   5988 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   5989 		aprint_error_dev(dev,
   5990 		    "Device does not support legacy interrupts.\n");
   5991 		return 1;
   5992 	}
   5993 
   5994 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5995 	aprint_normal_dev(dev, "Using a Legacy interrupt\n");
   5996 
   5997 	return (0);
   5998 } /* ixgbe_configure_interrupts */
   5999 
   6000 
   6001 /************************************************************************
   6002  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6003  *
   6004  *   Done outside of interrupt context since the driver might sleep
   6005  ************************************************************************/
   6006 static void
   6007 ixgbe_handle_link(void *context)
   6008 {
   6009 	struct adapter  *adapter = context;
   6010 	struct ixgbe_hw *hw = &adapter->hw;
   6011 
   6012 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6013 	ixgbe_update_link_status(adapter);
   6014 
   6015 	/* Re-enable link interrupts */
   6016 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6017 } /* ixgbe_handle_link */
   6018 
   6019 /************************************************************************
   6020  * ixgbe_rearm_queues
   6021  ************************************************************************/
   6022 static void
   6023 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6024 {
   6025 	u32 mask;
   6026 
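         	/*
         	 * Writing 1s to EICS sets the corresponding interrupt cause
         	 * bits, retriggering the queue interrupts.  82598 keeps the
         	 * queue bits in a single 32-bit EICS; later MACs split the
         	 * 64-bit queue mask across EICS_EX(0) (low 32 bits) and
         	 * EICS_EX(1) (high 32 bits).
         	 */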
   6027 	switch (adapter->hw.mac.type) {
   6028 	case ixgbe_mac_82598EB:
   6029 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6030 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6031 		break;
   6032 	case ixgbe_mac_82599EB:
   6033 	case ixgbe_mac_X540:
   6034 	case ixgbe_mac_X550:
   6035 	case ixgbe_mac_X550EM_x:
   6036 	case ixgbe_mac_X550EM_a:
   6037 		mask = (queues & 0xFFFFFFFF);
   6038 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6039 		mask = (queues >> 32);
   6040 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6041 		break;
   6042 	default:
   6043 		break;
   6044 	}
   6045 } /* ixgbe_rearm_queues */
   6046