      1 /* $NetBSD: ixgbe.c,v 1.113 2017/11/22 15:15:09 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
     88  *   Used by probe to select devices to load on
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void	ixgbe_get_slot_info(struct adapter *);
    176 static int      ixgbe_allocate_msix(struct adapter *,
    177 		    const struct pci_attach_args *);
    178 static int      ixgbe_allocate_legacy(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_configure_interrupts(struct adapter *);
    181 static void	ixgbe_free_pci_resources(struct adapter *);
    182 static void	ixgbe_local_timer(void *);
    183 static void	ixgbe_local_timer1(void *);
    184 static int	ixgbe_setup_interface(device_t, struct adapter *);
    185 static void	ixgbe_config_gpie(struct adapter *);
    186 static void	ixgbe_config_dmac(struct adapter *);
    187 static void	ixgbe_config_delay_values(struct adapter *);
    188 static void	ixgbe_config_link(struct adapter *);
    189 static void	ixgbe_check_wol_support(struct adapter *);
    190 static int	ixgbe_setup_low_power_mode(struct adapter *);
    191 static void	ixgbe_rearm_queues(struct adapter *, u64);
    192 
    193 static void     ixgbe_initialize_transmit_units(struct adapter *);
    194 static void     ixgbe_initialize_receive_units(struct adapter *);
    195 static void	ixgbe_enable_rx_drop(struct adapter *);
    196 static void	ixgbe_disable_rx_drop(struct adapter *);
    197 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    198 
    199 static void     ixgbe_enable_intr(struct adapter *);
    200 static void     ixgbe_disable_intr(struct adapter *);
    201 static void     ixgbe_update_stats_counters(struct adapter *);
    202 static void     ixgbe_set_promisc(struct adapter *);
    203 static void     ixgbe_set_multi(struct adapter *);
    204 static void     ixgbe_update_link_status(struct adapter *);
    205 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    206 static void	ixgbe_configure_ivars(struct adapter *);
    207 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    208 
    209 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    210 #if 0
    211 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    212 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    213 #endif
    214 
    215 static void	ixgbe_add_device_sysctls(struct adapter *);
    216 static void     ixgbe_add_hw_stats(struct adapter *);
    217 static void	ixgbe_clear_evcnt(struct adapter *);
    218 static int	ixgbe_set_flowcntl(struct adapter *, int);
    219 static int	ixgbe_set_advertise(struct adapter *, int);
    220 static int      ixgbe_get_advertise(struct adapter *);
    221 
    222 /* Sysctl handlers */
    223 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    224 		     const char *, int *, int);
    225 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    226 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    227 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    231 #ifdef IXGBE_DEBUG
    232 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    234 #endif
    235 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    236 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    237 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    242 
    243 /* Support for pluggable optic modules */
    244 static bool	ixgbe_sfp_probe(struct adapter *);
    245 
    246 /* Legacy (single vector) interrupt handler */
    247 static int	ixgbe_legacy_irq(void *);
    248 
    249 /* The MSI/MSI-X Interrupt handlers */
    250 static int	ixgbe_msix_que(void *);
    251 static int	ixgbe_msix_link(void *);
    252 
    253 /* Software interrupts for deferred work */
    254 static void	ixgbe_handle_que(void *);
    255 static void	ixgbe_handle_link(void *);
    256 static void	ixgbe_handle_msf(void *);
    257 static void	ixgbe_handle_mod(void *);
    258 static void	ixgbe_handle_phy(void *);
    259 
    260 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    261 
    262 /************************************************************************
    263  *  NetBSD Device Interface Entry Points
    264  ************************************************************************/
    265 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    266     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    267     DVF_DETACH_SHUTDOWN);
    268 
    269 #if 0
    270 devclass_t ix_devclass;
    271 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    272 
    273 MODULE_DEPEND(ix, pci, 1, 1, 1);
    274 MODULE_DEPEND(ix, ether, 1, 1, 1);
    275 #endif
    276 
    277 /*
    278  * TUNEABLE PARAMETERS:
    279  */
    280 
     281 /*
     282  * AIM: Adaptive Interrupt Moderation,
     283  * which means that the interrupt rate
     284  * is varied over time based on the
     285  * traffic for that interrupt vector.
     286  */
    287 static bool ixgbe_enable_aim = true;
    288 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
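         /*
          * On NetBSD the SYSCTL_INT() stub above expands to nothing, so the
          * FreeBSD-style declarations below are compiled out and the static
          * initializers act as compile-time defaults.  A few of them (e.g. the
          * rx/tx processing limits) are re-exported per device later via
          * ixgbe_set_sysctl_value().
          */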
    289 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    290     "Enable adaptive interrupt moderation");
    291 
    292 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    293 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    294     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
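         /*
          * Worked example: if IXGBE_LOW_LATENCY has its usual value of 128,
          * the default above is 4000000 / 128 = 31250 interrupts per second
          * per vector.
          */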
    295 
    296 /* How many packets rxeof tries to clean at a time */
    297 static int ixgbe_rx_process_limit = 256;
    298 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    299     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    300 
    301 /* How many packets txeof tries to clean at a time */
    302 static int ixgbe_tx_process_limit = 256;
    303 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    304     &ixgbe_tx_process_limit, 0,
    305     "Maximum number of sent packets to process at a time, -1 means unlimited");
    306 
    307 /* Flow control setting, default to full */
    308 static int ixgbe_flow_control = ixgbe_fc_full;
    309 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    310     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
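         /*
          * The ixgbe_fc_* values come from the shared Intel code: ixgbe_fc_none
          * disables pause frames, ixgbe_fc_rx_pause and ixgbe_fc_tx_pause enable
          * flow control in one direction only, and ixgbe_fc_full enables both.
          * The chosen value is copied into hw->fc.requested_mode at attach time.
          */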
    311 
     312 /*
     313  * Smart speed setting, default to on.
     314  * This currently works only as a compile-time option,
     315  * since it is applied during attach; set
     316  * this to 'ixgbe_smart_speed_off' to
     317  * disable.
     318  */
    319 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    320 
    321 /*
    322  * MSI-X should be the default for best performance,
    323  * but this allows it to be forced off for testing.
    324  */
    325 static int ixgbe_enable_msix = 1;
    326 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    327     "Enable MSI-X interrupts");
    328 
     329 /*
     330  * Number of queues. When set to 0, the driver
     331  * autoconfigures based on the number of CPUs,
     332  * with a maximum of 8. The value can be
     333  * overridden manually here.
     334  */
    335 static int ixgbe_num_queues = 0;
    336 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    337     "Number of queues to configure, 0 indicates autoconfigure");
    338 
     339 /*
     340  * Number of TX descriptors per ring.
     341  * This is set higher than RX, as that seems
     342  * to be the better-performing choice.
     343  */
    344 static int ixgbe_txd = PERFORM_TXD;
    345 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    346     "Number of transmit descriptors per queue");
    347 
    348 /* Number of RX descriptors per ring */
    349 static int ixgbe_rxd = PERFORM_RXD;
    350 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    351     "Number of receive descriptors per queue");
    352 
     353 /*
     354  * Setting this to nonzero allows the use
     355  * of unsupported SFP+ modules; note that
     356  * if you do so, you are on your own :)
     357  */
    358 static int allow_unsupported_sfp = false;
    359 #define TUNABLE_INT(__x, __y)
    360 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
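         /*
          * TUNABLE_INT() is likewise stubbed out on NetBSD, so this is a
          * compile-time default only; the value is copied into
          * hw->allow_unsupported_sfp during attach.
          */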
    361 
    362 /*
    363  * Not sure if Flow Director is fully baked,
    364  * so we'll default to turning it off.
    365  */
    366 static int ixgbe_enable_fdir = 0;
    367 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    368     "Enable Flow Director");
    369 
    370 /* Legacy Transmit (single queue) */
    371 static int ixgbe_enable_legacy_tx = 0;
    372 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    373     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    374 
    375 /* Receive-Side Scaling */
    376 static int ixgbe_enable_rss = 1;
    377 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    378     "Enable Receive-Side Scaling (RSS)");
    379 
     380 /* Keep a running count of attached ports for sanity checks */
    381 static int ixgbe_total_ports;
    382 
    383 #if 0
    384 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    385 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    386 #endif
    387 
    388 #ifdef NET_MPSAFE
    389 #define IXGBE_MPSAFE		1
    390 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    391 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    392 #else
    393 #define IXGBE_CALLOUT_FLAGS	0
    394 #define IXGBE_SOFTINFT_FLAGS	0
    395 #endif
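         /*
          * With NET_MPSAFE the callout and softints are created with the
          * CALLOUT_MPSAFE/SOFTINT_MPSAFE flags, so their handlers are expected
          * to run without the kernel lock; without it they fall back to the
          * KERNEL_LOCK-protected defaults.
          */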
    396 
    397 /************************************************************************
    398  * ixgbe_initialize_rss_mapping
    399  ************************************************************************/
    400 static void
    401 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    402 {
    403 	struct ixgbe_hw	*hw = &adapter->hw;
    404 	u32             reta = 0, mrqc, rss_key[10];
    405 	int             queue_id, table_size, index_mult;
    406 	int             i, j;
    407 	u32             rss_hash_config;
    408 
    409 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    410 		/* Fetch the configured RSS key */
    411 		rss_getkey((uint8_t *) &rss_key);
    412 	} else {
    413 		/* set up random bits */
    414 		cprng_fast(&rss_key, sizeof(rss_key));
    415 	}
    416 
    417 	/* Set multiplier for RETA setup and table size based on MAC */
    418 	index_mult = 0x1;
    419 	table_size = 128;
    420 	switch (adapter->hw.mac.type) {
    421 	case ixgbe_mac_82598EB:
    422 		index_mult = 0x11;
    423 		break;
    424 	case ixgbe_mac_X550:
    425 	case ixgbe_mac_X550EM_x:
    426 	case ixgbe_mac_X550EM_a:
    427 		table_size = 512;
    428 		break;
    429 	default:
    430 		break;
    431 	}
    432 
    433 	/* Set up the redirection table */
    434 	for (i = 0, j = 0; i < table_size; i++, j++) {
    435 		if (j == adapter->num_queues)
    436 			j = 0;
    437 
    438 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    439 			/*
    440 			 * Fetch the RSS bucket id for the given indirection
    441 			 * entry. Cap it at the number of configured buckets
    442 			 * (which is num_queues.)
    443 			 */
    444 			queue_id = rss_get_indirection_to_bucket(i);
    445 			queue_id = queue_id % adapter->num_queues;
    446 		} else
    447 			queue_id = (j * index_mult);
    448 
    449 		/*
    450 		 * The low 8 bits are for hash value (n+0);
    451 		 * The next 8 bits are for hash value (n+1), etc.
    452 		 */
    453 		reta = reta >> 8;
    454 		reta = reta | (((uint32_t) queue_id) << 24);
    455 		if ((i & 3) == 3) {
    456 			if (i < 128)
    457 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    458 			else
    459 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    460 				    reta);
    461 			reta = 0;
    462 		}
    463 	}
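         	/*
         	 * Illustrative sketch (not compiled): how four consecutive
         	 * indirection entries pack into one 32-bit RETA register,
         	 * entry n+k landing in byte k.  The queue ids used here are
         	 * hypothetical.
         	 */
         #if 0
         	{
         		uint32_t ex_reta = 0;
         		int ex_queue[4] = { 0, 1, 2, 3 };	/* hypothetical queue ids */
         
         		for (int n = 0; n < 4; n++) {
         			ex_reta >>= 8;
         			ex_reta |= (uint32_t)ex_queue[n] << 24;
         		}
         		/* ex_reta == 0x03020100: entry n ends up in byte n */
         	}
         #endif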
    464 
    465 	/* Now fill our hash function seeds */
    466 	for (i = 0; i < 10; i++)
    467 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    468 
    469 	/* Perform hash on these packet types */
    470 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    471 		rss_hash_config = rss_gethashconfig();
    472 	else {
    473 		/*
    474 		 * Disable UDP - IP fragments aren't currently being handled
    475 		 * and so we end up with a mix of 2-tuple and 4-tuple
    476 		 * traffic.
    477 		 */
    478 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    479 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    480 		                | RSS_HASHTYPE_RSS_IPV6
    481 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    482 		                | RSS_HASHTYPE_RSS_IPV6_EX
    483 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    484 	}
    485 
    486 	mrqc = IXGBE_MRQC_RSSEN;
    487 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    488 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    489 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    490 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    491 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    492 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    493 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    494 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    495 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    496 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    497 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    498 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    499 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    500 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    501 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
    502 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
    503 		    __func__);
    504 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    505 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    506 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    507 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    508 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    509 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    510 } /* ixgbe_initialize_rss_mapping */
    511 
    512 /************************************************************************
    513  * ixgbe_initialize_receive_units - Setup receive registers and features.
    514  ************************************************************************/
    515 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    516 
    517 static void
    518 ixgbe_initialize_receive_units(struct adapter *adapter)
    519 {
    520 	struct	rx_ring	*rxr = adapter->rx_rings;
    521 	struct ixgbe_hw	*hw = &adapter->hw;
    522 	struct ifnet    *ifp = adapter->ifp;
    523 	int             i, j;
    524 	u32		bufsz, fctrl, srrctl, rxcsum;
    525 	u32		hlreg;
    526 
    527 	/*
    528 	 * Make sure receives are disabled while
    529 	 * setting up the descriptor ring
    530 	 */
    531 	ixgbe_disable_rx(hw);
    532 
    533 	/* Enable broadcasts */
    534 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    535 	fctrl |= IXGBE_FCTRL_BAM;
    536 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    537 		fctrl |= IXGBE_FCTRL_DPF;
    538 		fctrl |= IXGBE_FCTRL_PMCF;
    539 	}
    540 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    541 
    542 	/* Set for Jumbo Frames? */
    543 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    544 	if (ifp->if_mtu > ETHERMTU)
    545 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    546 	else
    547 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    548 
    549 #ifdef DEV_NETMAP
    550 	/* CRC stripping is conditional in Netmap */
    551 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    552 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    553 	    !ix_crcstrip)
    554 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    555 	else
    556 #endif /* DEV_NETMAP */
    557 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    558 
    559 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    560 
    561 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    562 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
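         	/*
         	 * Worked example: with the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10
         	 * this expresses the buffer size in 1 KB units, rounded up; a
         	 * 2048-byte mbuf cluster gives bufsz = 2.
         	 */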
    563 
    564 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    565 		u64 rdba = rxr->rxdma.dma_paddr;
    566 		u32 tqsmreg, reg;
    567 		int regnum = i / 4;	/* 1 register per 4 queues */
    568 		int regshift = i % 4;	/* 4 bits per 1 queue */
    569 		j = rxr->me;
    570 
    571 		/* Setup the Base and Length of the Rx Descriptor Ring */
    572 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    573 		    (rdba & 0x00000000ffffffffULL));
    574 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    575 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    576 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    577 
    578 		/* Set up the SRRCTL register */
    579 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    580 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    581 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    582 		srrctl |= bufsz;
    583 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    584 
    585 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    586 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    587 		reg &= ~(0x000000ff << (regshift * 8));
    588 		reg |= i << (regshift * 8);
    589 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
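         		/*
         		 * Example of the mapping above: queue 5 has regnum = 1 and
         		 * regshift = 1, so it occupies bits 15:8 of RQSMR(1).
         		 */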
    590 
     591 		/*
     592 		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
     593 		 * The register locations for queues 0...7 differ between
     594 		 * 82598 and newer MACs.
     595 		 */
    596 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    597 			tqsmreg = IXGBE_TQSMR(regnum);
    598 		else
    599 			tqsmreg = IXGBE_TQSM(regnum);
    600 		reg = IXGBE_READ_REG(hw, tqsmreg);
    601 		reg &= ~(0x000000ff << (regshift * 8));
    602 		reg |= i << (regshift * 8);
    603 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    604 
    605 		/*
    606 		 * Set DROP_EN iff we have no flow control and >1 queue.
    607 		 * Note that srrctl was cleared shortly before during reset,
    608 		 * so we do not need to clear the bit, but do it just in case
    609 		 * this code is moved elsewhere.
    610 		 */
    611 		if (adapter->num_queues > 1 &&
    612 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    613 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    614 		} else {
    615 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    616 		}
    617 
    618 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    619 
    620 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    621 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    622 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    623 
    624 		/* Set the driver rx tail address */
    625 		rxr->tail =  IXGBE_RDT(rxr->me);
    626 	}
    627 
    628 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    629 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    630 		            | IXGBE_PSRTYPE_UDPHDR
    631 		            | IXGBE_PSRTYPE_IPV4HDR
    632 		            | IXGBE_PSRTYPE_IPV6HDR;
    633 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    634 	}
    635 
    636 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    637 
    638 	ixgbe_initialize_rss_mapping(adapter);
    639 
    640 	if (adapter->num_queues > 1) {
    641 		/* RSS and RX IPP Checksum are mutually exclusive */
    642 		rxcsum |= IXGBE_RXCSUM_PCSD;
    643 	}
    644 
    645 	if (ifp->if_capenable & IFCAP_RXCSUM)
    646 		rxcsum |= IXGBE_RXCSUM_PCSD;
    647 
    648 	/* This is useful for calculating UDP/IP fragment checksums */
    649 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    650 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    651 
    652 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    653 
    654 	return;
    655 } /* ixgbe_initialize_receive_units */
    656 
    657 /************************************************************************
    658  * ixgbe_initialize_transmit_units - Enable transmit units.
    659  ************************************************************************/
    660 static void
    661 ixgbe_initialize_transmit_units(struct adapter *adapter)
    662 {
    663 	struct tx_ring  *txr = adapter->tx_rings;
    664 	struct ixgbe_hw	*hw = &adapter->hw;
    665 
    666 	/* Setup the Base and Length of the Tx Descriptor Ring */
    667 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    668 		u64 tdba = txr->txdma.dma_paddr;
    669 		u32 txctrl = 0;
    670 		int j = txr->me;
    671 
    672 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    673 		    (tdba & 0x00000000ffffffffULL));
    674 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    675 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    676 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    677 
    678 		/* Setup the HW Tx Head and Tail descriptor pointers */
    679 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    681 
    682 		/* Cache the tail address */
    683 		txr->tail = IXGBE_TDT(j);
    684 
    685 		/* Disable Head Writeback */
    686 		/*
    687 		 * Note: for X550 series devices, these registers are actually
     688 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    689 		 * fields remain the same.
    690 		 */
    691 		switch (hw->mac.type) {
    692 		case ixgbe_mac_82598EB:
    693 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    694 			break;
    695 		default:
    696 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    697 			break;
    698 		}
    699 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    700 		switch (hw->mac.type) {
    701 		case ixgbe_mac_82598EB:
    702 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    703 			break;
    704 		default:
    705 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    706 			break;
    707 		}
    708 
    709 	}
    710 
    711 	if (hw->mac.type != ixgbe_mac_82598EB) {
    712 		u32 dmatxctl, rttdcs;
    713 
    714 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    715 		dmatxctl |= IXGBE_DMATXCTL_TE;
    716 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    717 		/* Disable arbiter to set MTQC */
    718 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    719 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    720 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    721 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    722 		    ixgbe_get_mtqc(adapter->iov_mode));
    723 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    724 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    725 	}
    726 
    727 	return;
    728 } /* ixgbe_initialize_transmit_units */
    729 
    730 /************************************************************************
    731  * ixgbe_attach - Device initialization routine
    732  *
    733  *   Called when the driver is being loaded.
    734  *   Identifies the type of hardware, allocates all resources
    735  *   and initializes the hardware.
     736  *
     737  *   (autoconf attach routine; does not return a value)
    738  ************************************************************************/
    739 static void
    740 ixgbe_attach(device_t parent, device_t dev, void *aux)
    741 {
    742 	struct adapter  *adapter;
    743 	struct ixgbe_hw *hw;
    744 	int             error = -1;
    745 	u32		ctrl_ext;
    746 	u16		high, low, nvmreg;
    747 	pcireg_t	id, subid;
    748 	ixgbe_vendor_info_t *ent;
    749 	struct pci_attach_args *pa = aux;
    750 	const char *str;
    751 	char buf[256];
    752 
    753 	INIT_DEBUGOUT("ixgbe_attach: begin");
    754 
    755 	/* Allocate, clear, and link in our adapter structure */
    756 	adapter = device_private(dev);
    757 	adapter->hw.back = adapter;
    758 	adapter->dev = dev;
    759 	hw = &adapter->hw;
    760 	adapter->osdep.pc = pa->pa_pc;
    761 	adapter->osdep.tag = pa->pa_tag;
    762 	if (pci_dma64_available(pa))
    763 		adapter->osdep.dmat = pa->pa_dmat64;
    764 	else
    765 		adapter->osdep.dmat = pa->pa_dmat;
    766 	adapter->osdep.attached = false;
    767 
    768 	ent = ixgbe_lookup(pa);
    769 
    770 	KASSERT(ent != NULL);
    771 
    772 	aprint_normal(": %s, Version - %s\n",
    773 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    774 
    775 	/* Core Lock Init*/
    776 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    777 
    778 	/* Set up the timer callout */
    779 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    780 
    781 	/* Determine hardware revision */
    782 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    783 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    784 
    785 	hw->vendor_id = PCI_VENDOR(id);
    786 	hw->device_id = PCI_PRODUCT(id);
    787 	hw->revision_id =
    788 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    789 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    790 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    791 
    792 	/*
    793 	 * Make sure BUSMASTER is set
    794 	 */
    795 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    796 
    797 	/* Do base PCI setup - map BAR0 */
    798 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    799 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    800 		error = ENXIO;
    801 		goto err_out;
    802 	}
    803 
    804 	/* let hardware know driver is loaded */
    805 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    806 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    807 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    808 
    809 	/*
    810 	 * Initialize the shared code
    811 	 */
    812 	if (ixgbe_init_shared_code(hw)) {
    813 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    814 		error = ENXIO;
    815 		goto err_out;
    816 	}
    817 
    818 	switch (hw->mac.type) {
    819 	case ixgbe_mac_82598EB:
    820 		str = "82598EB";
    821 		break;
    822 	case ixgbe_mac_82599EB:
    823 		str = "82599EB";
    824 		break;
    825 	case ixgbe_mac_X540:
    826 		str = "X540";
    827 		break;
    828 	case ixgbe_mac_X550:
    829 		str = "X550";
    830 		break;
    831 	case ixgbe_mac_X550EM_x:
    832 		str = "X550EM";
    833 		break;
    834 	case ixgbe_mac_X550EM_a:
    835 		str = "X550EM A";
    836 		break;
    837 	default:
    838 		str = "Unknown";
    839 		break;
    840 	}
    841 	aprint_normal_dev(dev, "device %s\n", str);
    842 
    843 	if (hw->mbx.ops.init_params)
    844 		hw->mbx.ops.init_params(hw);
    845 
    846 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    847 
    848 	/* Pick up the 82599 settings */
    849 	if (hw->mac.type != ixgbe_mac_82598EB) {
    850 		hw->phy.smart_speed = ixgbe_smart_speed;
    851 		adapter->num_segs = IXGBE_82599_SCATTER;
    852 	} else
    853 		adapter->num_segs = IXGBE_82598_SCATTER;
    854 
    855 	hw->mac.ops.set_lan_id(hw);
    856 	ixgbe_init_device_features(adapter);
    857 
    858 	if (ixgbe_configure_interrupts(adapter)) {
    859 		error = ENXIO;
    860 		goto err_out;
    861 	}
    862 
    863 	/* Allocate multicast array memory. */
    864 	adapter->mta = malloc(sizeof(*adapter->mta) *
    865 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    866 	if (adapter->mta == NULL) {
    867 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    868 		error = ENOMEM;
    869 		goto err_out;
    870 	}
    871 
    872 	/* Enable WoL (if supported) */
    873 	ixgbe_check_wol_support(adapter);
    874 
    875 	/* Verify adapter fan is still functional (if applicable) */
    876 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    877 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    878 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    879 	}
    880 
    881 	/* Ensure SW/FW semaphore is free */
    882 	ixgbe_init_swfw_semaphore(hw);
    883 
    884 	/* Enable EEE power saving */
    885 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    886 		hw->mac.ops.setup_eee(hw, TRUE);
    887 
    888 	/* Set an initial default flow control value */
    889 	hw->fc.requested_mode = ixgbe_flow_control;
    890 
    891 	/* Sysctls for limiting the amount of work done in the taskqueues */
    892 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    893 	    "max number of rx packets to process",
    894 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    895 
    896 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    897 	    "max number of tx packets to process",
    898 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    899 
    900 	/* Do descriptor calc and sanity checks */
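         	/*
         	 * The ring size in bytes must be a multiple of DBA_ALIGN.  Assuming
         	 * the usual DBA_ALIGN of 128 and 16-byte advanced descriptors, the
         	 * descriptor counts therefore have to be multiples of 8.
         	 */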
    901 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    902 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    903 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    904 		adapter->num_tx_desc = DEFAULT_TXD;
    905 	} else
    906 		adapter->num_tx_desc = ixgbe_txd;
    907 
    908 	/*
    909 	 * With many RX rings it is easy to exceed the
    910 	 * system mbuf allocation. Tuning nmbclusters
    911 	 * can alleviate this.
    912 	 */
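         	/*
         	 * For example, 8 queues with 2048 RX descriptors each pin down
         	 * 16384 clusters per port, which is what the check below estimates.
         	 */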
    913 	if (nmbclusters > 0) {
    914 		int s;
    915 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    916 		if (s > nmbclusters) {
    917 			aprint_error_dev(dev, "RX Descriptors exceed "
    918 			    "system mbuf max, using default instead!\n");
    919 			ixgbe_rxd = DEFAULT_RXD;
    920 		}
    921 	}
    922 
    923 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    924 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    925 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    926 		adapter->num_rx_desc = DEFAULT_RXD;
    927 	} else
    928 		adapter->num_rx_desc = ixgbe_rxd;
    929 
    930 	/* Allocate our TX/RX Queues */
    931 	if (ixgbe_allocate_queues(adapter)) {
    932 		error = ENOMEM;
    933 		goto err_out;
    934 	}
    935 
    936 	hw->phy.reset_if_overtemp = TRUE;
    937 	error = ixgbe_reset_hw(hw);
    938 	hw->phy.reset_if_overtemp = FALSE;
    939 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    940 		/*
    941 		 * No optics in this port, set up
    942 		 * so the timer routine will probe
    943 		 * for later insertion.
    944 		 */
    945 		adapter->sfp_probe = TRUE;
    946 		error = IXGBE_SUCCESS;
    947 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    948 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    949 		error = EIO;
    950 		goto err_late;
    951 	} else if (error) {
    952 		aprint_error_dev(dev, "Hardware initialization failed\n");
    953 		error = EIO;
    954 		goto err_late;
    955 	}
    956 
    957 	/* Make sure we have a good EEPROM before we read from it */
    958 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    959 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    960 		error = EIO;
    961 		goto err_late;
    962 	}
    963 
    964 	aprint_normal("%s:", device_xname(dev));
    965 	/* NVM Image Version */
    966 	switch (hw->mac.type) {
    967 	case ixgbe_mac_X540:
    968 	case ixgbe_mac_X550EM_a:
    969 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    970 		if (nvmreg == 0xffff)
    971 			break;
    972 		high = (nvmreg >> 12) & 0x0f;
    973 		low = (nvmreg >> 4) & 0xff;
    974 		id = nvmreg & 0x0f;
    975 		aprint_normal(" NVM Image Version %u.", high);
    976 		if (hw->mac.type == ixgbe_mac_X540)
    977 			str = "%x";
    978 		else
    979 			str = "%02x";
    980 		aprint_normal(str, low);
    981 		aprint_normal(" ID 0x%x,", id);
    982 		break;
    983 	case ixgbe_mac_X550EM_x:
    984 	case ixgbe_mac_X550:
    985 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    986 		if (nvmreg == 0xffff)
    987 			break;
    988 		high = (nvmreg >> 12) & 0x0f;
    989 		low = nvmreg & 0xff;
    990 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    991 		break;
    992 	default:
    993 		break;
    994 	}
    995 
    996 	/* PHY firmware revision */
    997 	switch (hw->mac.type) {
    998 	case ixgbe_mac_X540:
    999 	case ixgbe_mac_X550:
   1000 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1001 		if (nvmreg == 0xffff)
   1002 			break;
   1003 		high = (nvmreg >> 12) & 0x0f;
   1004 		low = (nvmreg >> 4) & 0xff;
   1005 		id = nvmreg & 0x000f;
   1006 		aprint_normal(" PHY FW Revision %u.%02x ID 0x%x,", high, low,
   1007 		    id);
   1008 		break;
   1009 	default:
   1010 		break;
   1011 	}
   1012 
   1013 	/* NVM Map version & OEM NVM Image version */
   1014 	switch (hw->mac.type) {
   1015 	case ixgbe_mac_X550:
   1016 	case ixgbe_mac_X550EM_x:
   1017 	case ixgbe_mac_X550EM_a:
   1018 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1019 		if (nvmreg != 0xffff) {
   1020 			high = (nvmreg >> 12) & 0x0f;
   1021 			low = nvmreg & 0x00ff;
   1022 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1023 		}
   1024 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1025 		if (nvmreg != 0xffff) {
   1026 			high = (nvmreg >> 12) & 0x0f;
   1027 			low = nvmreg & 0x00ff;
   1028 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1029 			    low);
   1030 		}
   1031 		break;
   1032 	default:
   1033 		break;
   1034 	}
   1035 
   1036 	/* Print the ETrackID */
   1037 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1038 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1039 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1040 
   1041 	/* Setup OS specific network interface */
   1042 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1043 		goto err_late;
   1044 
   1045 	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
   1046 		error = ixgbe_allocate_msix(adapter, pa);
   1047 	else
   1048 		error = ixgbe_allocate_legacy(adapter, pa);
   1049 	if (error)
   1050 		goto err_late;
   1051 
   1052 	error = ixgbe_start_hw(hw);
   1053 	switch (error) {
   1054 	case IXGBE_ERR_EEPROM_VERSION:
   1055 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1056 		    "LOM.  Please be aware there may be issues associated "
   1057 		    "with your hardware.\nIf you are experiencing problems "
   1058 		    "please contact your Intel or hardware representative "
   1059 		    "who provided you with this hardware.\n");
   1060 		break;
   1061 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1062 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1063 		error = EIO;
   1064 		goto err_late;
   1065 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1066 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1067 		/* falls thru */
   1068 	default:
   1069 		break;
   1070 	}
   1071 
    1072 	/*
    1073 	 * Print the PHY ID only for copper PHYs. On devices that have an SFP(+)
    1074 	 * cage with a module inserted, phy.id is not an MII PHY ID but an SFF-8024 ID.
    1075 	 */
   1076 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1077 		uint16_t id1, id2;
   1078 		int oui, model, rev;
   1079 		const char *descr;
   1080 
   1081 		id1 = hw->phy.id >> 16;
   1082 		id2 = hw->phy.id & 0xffff;
   1083 		oui = MII_OUI(id1, id2);
   1084 		model = MII_MODEL(id2);
   1085 		rev = MII_REV(id2);
   1086 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1087 			aprint_normal_dev(dev,
   1088 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1089 			    descr, oui, model, rev);
   1090 		else
   1091 			aprint_normal_dev(dev,
   1092 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1093 			    oui, model, rev);
   1094 	}
   1095 
   1096 	/* Enable the optics for 82599 SFP+ fiber */
   1097 	ixgbe_enable_tx_laser(hw);
   1098 
   1099 	/* Enable power to the phy. */
   1100 	ixgbe_set_phy_power(hw, TRUE);
   1101 
   1102 	/* Initialize statistics */
   1103 	ixgbe_update_stats_counters(adapter);
   1104 
   1105 	/* Check PCIE slot type/speed/width */
   1106 	ixgbe_get_slot_info(adapter);
   1107 
   1108 	/*
   1109 	 * Do time init and sysctl init here, but
   1110 	 * only on the first port of a bypass adapter.
   1111 	 */
   1112 	ixgbe_bypass_init(adapter);
   1113 
   1114 	/* Set an initial dmac value */
   1115 	adapter->dmac = 0;
   1116 	/* Set initial advertised speeds (if applicable) */
   1117 	adapter->advertise = ixgbe_get_advertise(adapter);
   1118 
   1119 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1120 		ixgbe_define_iov_schemas(dev, &error);
   1121 
   1122 	/* Add sysctls */
   1123 	ixgbe_add_device_sysctls(adapter);
   1124 	ixgbe_add_hw_stats(adapter);
   1125 
   1126 	/* For Netmap */
   1127 	adapter->init_locked = ixgbe_init_locked;
   1128 	adapter->stop_locked = ixgbe_stop;
   1129 
   1130 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1131 		ixgbe_netmap_attach(adapter);
   1132 
   1133 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1134 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1135 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1136 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1137 
   1138 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1139 		pmf_class_network_register(dev, adapter->ifp);
   1140 	else
   1141 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1142 
   1143 	INIT_DEBUGOUT("ixgbe_attach: end");
   1144 	adapter->osdep.attached = true;
   1145 
   1146 	return;
   1147 
   1148 err_late:
   1149 	ixgbe_free_transmit_structures(adapter);
   1150 	ixgbe_free_receive_structures(adapter);
   1151 	free(adapter->queues, M_DEVBUF);
   1152 err_out:
   1153 	if (adapter->ifp != NULL)
   1154 		if_free(adapter->ifp);
   1155 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1156 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1157 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1158 	ixgbe_free_pci_resources(adapter);
   1159 	if (adapter->mta != NULL)
   1160 		free(adapter->mta, M_DEVBUF);
   1161 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1162 
   1163 	return;
   1164 } /* ixgbe_attach */
   1165 
   1166 /************************************************************************
   1167  * ixgbe_check_wol_support
   1168  *
   1169  *   Checks whether the adapter's ports are capable of
   1170  *   Wake On LAN by reading the adapter's NVM.
   1171  *
   1172  *   Sets each port's hw->wol_enabled value depending
   1173  *   on the value read here.
   1174  ************************************************************************/
   1175 static void
   1176 ixgbe_check_wol_support(struct adapter *adapter)
   1177 {
   1178 	struct ixgbe_hw *hw = &adapter->hw;
   1179 	u16             dev_caps = 0;
   1180 
   1181 	/* Find out WoL support for port */
   1182 	adapter->wol_support = hw->wol_enabled = 0;
   1183 	ixgbe_get_device_caps(hw, &dev_caps);
   1184 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1185 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1186 	     hw->bus.func == 0))
   1187 		adapter->wol_support = hw->wol_enabled = 1;
   1188 
   1189 	/* Save initial wake up filter configuration */
   1190 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1191 
   1192 	return;
   1193 } /* ixgbe_check_wol_support */
   1194 
   1195 /************************************************************************
   1196  * ixgbe_setup_interface
   1197  *
   1198  *   Setup networking device structure and register an interface.
   1199  ************************************************************************/
   1200 static int
   1201 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1202 {
   1203 	struct ethercom *ec = &adapter->osdep.ec;
   1204 	struct ifnet   *ifp;
   1205 	int rv;
   1206 
   1207 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1208 
   1209 	ifp = adapter->ifp = &ec->ec_if;
   1210 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1211 	ifp->if_baudrate = IF_Gbps(10);
   1212 	ifp->if_init = ixgbe_init;
   1213 	ifp->if_stop = ixgbe_ifstop;
   1214 	ifp->if_softc = adapter;
   1215 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1216 #ifdef IXGBE_MPSAFE
   1217 	ifp->if_extflags = IFEF_MPSAFE;
   1218 #endif
   1219 	ifp->if_ioctl = ixgbe_ioctl;
   1220 #if __FreeBSD_version >= 1100045
   1221 	/* TSO parameters */
   1222 	ifp->if_hw_tsomax = 65518;
   1223 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1224 	ifp->if_hw_tsomaxsegsize = 2048;
   1225 #endif
   1226 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1227 #if 0
   1228 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1229 #endif
   1230 	} else {
   1231 		ifp->if_transmit = ixgbe_mq_start;
   1232 #if 0
   1233 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1234 #endif
   1235 	}
   1236 	ifp->if_start = ixgbe_legacy_start;
   1237 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1238 	IFQ_SET_READY(&ifp->if_snd);
   1239 
   1240 	rv = if_initialize(ifp);
   1241 	if (rv != 0) {
   1242 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1243 		return rv;
   1244 	}
   1245 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1246 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1247 	/*
    1248 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
   1249 	 * used.
   1250 	 */
   1251 	if_register(ifp);
   1252 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1253 
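         	/*
         	 * With the default 1500-byte MTU this works out to
         	 * 1500 + 14 + 4 = 1518 bytes.
         	 */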
   1254 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1255 
   1256 	/*
   1257 	 * Tell the upper layer(s) we support long frames.
   1258 	 */
   1259 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1260 
   1261 	/* Set capability flags */
   1262 	ifp->if_capabilities |= IFCAP_RXCSUM
   1263 			     |  IFCAP_TXCSUM
   1264 			     |  IFCAP_TSOv4
   1265 			     |  IFCAP_TSOv6
   1266 			     |  IFCAP_LRO;
   1267 	ifp->if_capenable = 0;
   1268 
   1269 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1270 	    		    |  ETHERCAP_VLAN_HWCSUM
   1271 	    		    |  ETHERCAP_JUMBO_MTU
   1272 	    		    |  ETHERCAP_VLAN_MTU;
   1273 
   1274 	/* Enable the above capabilities by default */
   1275 	ec->ec_capenable = ec->ec_capabilities;
   1276 
    1277 	/*
    1278 	 * Don't turn this on by default. If VLANs are
    1279 	 * created on another pseudo device (e.g. lagg),
    1280 	 * VLAN events are not passed through, which breaks
    1281 	 * operation, but with HW FILTER off it works. If
    1282 	 * VLANs are used directly on the ixgbe driver, you can
    1283 	 * enable this and get full hardware tag filtering.
    1284 	 */
   1285 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1286 
   1287 	/*
   1288 	 * Specify the media types supported by this adapter and register
   1289 	 * callbacks to update media and link information
   1290 	 */
   1291 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1292 	    ixgbe_media_status);
   1293 
   1294 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1295 	ixgbe_add_media_types(adapter);
   1296 
   1297 	/* Set autoselect media by default */
   1298 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1299 
   1300 	return (0);
   1301 } /* ixgbe_setup_interface */
   1302 
   1303 /************************************************************************
   1304  * ixgbe_add_media_types
   1305  ************************************************************************/
   1306 static void
   1307 ixgbe_add_media_types(struct adapter *adapter)
   1308 {
   1309 	struct ixgbe_hw *hw = &adapter->hw;
   1310 	device_t        dev = adapter->dev;
   1311 	u64             layer;
   1312 
   1313 	layer = adapter->phy_layer;
   1314 
   1315 #define	ADD(mm, dd)							\
   1316 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1317 
   1318 	/* Media types with matching NetBSD media defines */
   1319 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1320 		ADD(IFM_10G_T | IFM_FDX, 0);
   1321 	}
   1322 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1323 		ADD(IFM_1000_T | IFM_FDX, 0);
   1324 	}
   1325 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1326 		ADD(IFM_100_TX | IFM_FDX, 0);
   1327 	}
   1328 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1329 		ADD(IFM_10_T | IFM_FDX, 0);
   1330 	}
   1331 
   1332 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1333 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1334 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1335 	}
   1336 
   1337 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1338 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1339 		if (hw->phy.multispeed_fiber) {
   1340 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1341 		}
   1342 	}
   1343 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1344 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1345 		if (hw->phy.multispeed_fiber) {
   1346 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1347 		}
   1348 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1349 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1350 	}
   1351 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1352 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1353 	}
   1354 
   1355 #ifdef IFM_ETH_XTYPE
   1356 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1357 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1358 	}
   1359 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1360 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1361 	}
   1362 #else
   1363 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1364 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1365 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1366 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1367 	}
   1368 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1369 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1370 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1371 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1372 	}
   1373 #endif
   1374 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1375 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1376 	}
   1377 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1378 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1379 	}
   1380 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1381 		ADD(IFM_2500_T | IFM_FDX, 0);
   1382 	}
   1383 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1384 		ADD(IFM_5000_T | IFM_FDX, 0);
   1385 	}
   1386 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1387 		device_printf(dev, "Media supported: 1000baseBX\n");
   1388 	/* XXX no ifmedia_set? */
   1389 
   1390 	ADD(IFM_AUTO, 0);
   1391 
   1392 #undef ADD
   1393 } /* ixgbe_add_media_types */
   1394 
   1395 /************************************************************************
   1396  * ixgbe_is_sfp
   1397  ************************************************************************/
   1398 static inline bool
   1399 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1400 {
   1401 	switch (hw->mac.type) {
   1402 	case ixgbe_mac_82598EB:
   1403 		if (hw->phy.type == ixgbe_phy_nl)
   1404 			return TRUE;
   1405 		return FALSE;
   1406 	case ixgbe_mac_82599EB:
   1407 		switch (hw->mac.ops.get_media_type(hw)) {
   1408 		case ixgbe_media_type_fiber:
   1409 		case ixgbe_media_type_fiber_qsfp:
   1410 			return TRUE;
   1411 		default:
   1412 			return FALSE;
   1413 		}
   1414 	case ixgbe_mac_X550EM_x:
   1415 	case ixgbe_mac_X550EM_a:
   1416 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1417 			return TRUE;
   1418 		return FALSE;
   1419 	default:
   1420 		return FALSE;
   1421 	}
   1422 } /* ixgbe_is_sfp */
   1423 
   1424 /************************************************************************
   1425  * ixgbe_config_link
   1426  ************************************************************************/
   1427 static void
   1428 ixgbe_config_link(struct adapter *adapter)
   1429 {
   1430 	struct ixgbe_hw *hw = &adapter->hw;
   1431 	u32             autoneg, err = 0;
   1432 	bool            sfp, negotiate = false;
   1433 
   1434 	sfp = ixgbe_is_sfp(hw);
   1435 
   1436 	if (sfp) {
   1437 		if (hw->phy.multispeed_fiber) {
   1438 			hw->mac.ops.setup_sfp(hw);
   1439 			ixgbe_enable_tx_laser(hw);
   1440 			kpreempt_disable();
   1441 			softint_schedule(adapter->msf_si);
   1442 			kpreempt_enable();
   1443 		} else {
   1444 			kpreempt_disable();
   1445 			softint_schedule(adapter->mod_si);
   1446 			kpreempt_enable();
   1447 		}
   1448 	} else {
   1449 		if (hw->mac.ops.check_link)
   1450 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1451 			    &adapter->link_up, FALSE);
   1452 		if (err)
   1453 			goto out;
   1454 		autoneg = hw->phy.autoneg_advertised;
   1455 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1456 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1457 			    &negotiate);
   1458 		if (err)
   1459 			goto out;
   1460 		if (hw->mac.ops.setup_link)
    1461 			err = hw->mac.ops.setup_link(hw, autoneg,
   1462 			    adapter->link_up);
   1463 	}
   1464 out:
   1465 
   1466 	return;
   1467 } /* ixgbe_config_link */
   1468 
   1469 /************************************************************************
   1470  * ixgbe_update_stats_counters - Update board statistics counters.
   1471  ************************************************************************/
   1472 static void
   1473 ixgbe_update_stats_counters(struct adapter *adapter)
   1474 {
   1475 	struct ifnet          *ifp = adapter->ifp;
   1476 	struct ixgbe_hw       *hw = &adapter->hw;
   1477 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1478 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1479 	u64                   total_missed_rx = 0;
   1480 	uint64_t              crcerrs, rlec;
   1481 
   1482 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1483 	stats->crcerrs.ev_count += crcerrs;
   1484 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1485 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1486 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1487 	if (hw->mac.type == ixgbe_mac_X550)
   1488 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1489 
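         	/*
         	 * The hardware exposes __arraycount(stats->qprc) per-queue
         	 * counter registers, which may exceed the number of configured
         	 * queues; fold register i onto event counter i % num_queues so
         	 * no register is skipped.
         	 */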
   1490 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1491 		int j = i % adapter->num_queues;
   1492 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1493 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1494 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1495 	}
   1496 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1497 		uint32_t mp;
   1498 		int j = i % adapter->num_queues;
   1499 
   1500 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1501 		/* global total per queue */
   1502 		stats->mpc[j].ev_count += mp;
   1503 		/* running comprehensive total for stats display */
   1504 		total_missed_rx += mp;
   1505 
   1506 		if (hw->mac.type == ixgbe_mac_82598EB)
   1507 			stats->rnbc[j].ev_count
   1508 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1509 
   1510 	}
   1511 	stats->mpctotal.ev_count += total_missed_rx;
   1512 
    1513 	/* Per the datasheet, MLFC/MRFC are only valid with link up at 10Gb/s */
   1514 	if ((adapter->link_active == TRUE)
   1515 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1516 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1517 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1518 	}
   1519 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1520 	stats->rlec.ev_count += rlec;
   1521 
   1522 	/* Hardware workaround, gprc counts missed packets */
   1523 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1524 
   1525 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1526 	stats->lxontxc.ev_count += lxon;
   1527 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1528 	stats->lxofftxc.ev_count += lxoff;
   1529 	total = lxon + lxoff;
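         	/*
         	 * 'total' is subtracted from the good/multicast/64-byte transmit
         	 * counters below, presumably because the hardware also counts the
         	 * minimum-size (ETHER_MIN_LEN) XON/XOFF pause frames there.
         	 */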
   1530 
   1531 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1532 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1533 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1534 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1535 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1536 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1537 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1538 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1539 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1540 	} else {
   1541 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1542 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1543 		/* 82598 only has a counter in the high register */
   1544 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1545 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1546 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1547 	}
   1548 
   1549 	/*
   1550 	 * Workaround: mprc hardware is incorrectly counting
   1551 	 * broadcasts, so for now we subtract those.
   1552 	 */
   1553 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1554 	stats->bprc.ev_count += bprc;
   1555 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1556 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1557 
   1558 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1559 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1560 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1561 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1562 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1563 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1564 
   1565 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1566 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1567 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1568 
   1569 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1570 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1571 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1572 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1573 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1574 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1575 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1576 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1577 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1578 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1579 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1580 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1581 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1582 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1583 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1584 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1585 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1586 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1587 	/* Only read FCOE on 82599 */
   1588 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1589 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1590 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1591 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1592 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1593 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1594 	}
   1595 
   1596 	/* Fill out the OS statistics structure */
   1597 	/*
   1598 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
    1599 	 * adapter->stats counters.  This is required for ifconfig -z
    1600 	 * (SIOCZIFDATA) to work.
   1601 	 */
   1602 	ifp->if_collisions = 0;
   1603 
   1604 	/* Rx Errors */
   1605 	ifp->if_iqdrops += total_missed_rx;
   1606 	ifp->if_ierrors += crcerrs + rlec;
   1607 } /* ixgbe_update_stats_counters */
   1608 
   1609 /************************************************************************
   1610  * ixgbe_add_hw_stats
   1611  *
   1612  *   Add sysctl variables, one per statistic, to the system.
   1613  ************************************************************************/
   1614 static void
   1615 ixgbe_add_hw_stats(struct adapter *adapter)
   1616 {
   1617 	device_t dev = adapter->dev;
   1618 	const struct sysctlnode *rnode, *cnode;
   1619 	struct sysctllog **log = &adapter->sysctllog;
   1620 	struct tx_ring *txr = adapter->tx_rings;
   1621 	struct rx_ring *rxr = adapter->rx_rings;
   1622 	struct ixgbe_hw *hw = &adapter->hw;
   1623 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1624 	const char *xname = device_xname(dev);
   1625 
   1626 	/* Driver Statistics */
   1627 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1628 	    NULL, xname, "Handled queue in softint");
   1629 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1630 	    NULL, xname, "Requeued in softint");
   1631 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1632 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1633 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1634 	    NULL, xname, "m_defrag() failed");
   1635 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1636 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1637 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1638 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1639 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1640 	    NULL, xname, "Driver tx dma hard fail other");
   1641 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1642 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1643 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1644 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1645 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1646 	    NULL, xname, "Watchdog timeouts");
   1647 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1648 	    NULL, xname, "TSO errors");
   1649 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1650 	    NULL, xname, "Link MSI-X IRQ Handled");
   1651 
   1652 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1653 		snprintf(adapter->queues[i].evnamebuf,
   1654 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1655 		    xname, i);
   1656 		snprintf(adapter->queues[i].namebuf,
   1657 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1658 
   1659 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1660 			aprint_error_dev(dev, "could not create sysctl root\n");
   1661 			break;
   1662 		}
   1663 
   1664 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1665 		    0, CTLTYPE_NODE,
   1666 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1667 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1668 			break;
   1669 
   1670 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1671 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1672 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1673 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1674 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1675 			break;
   1676 
   1677 #if 0 /* XXX msaitoh */
   1678 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1679 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1680 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1681 			NULL, 0, &(adapter->queues[i].irqs),
   1682 		    0, CTL_CREATE, CTL_EOL) != 0)
   1683 			break;
   1684 #endif
   1685 
   1686 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1687 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1688 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1689 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1690 		    0, CTL_CREATE, CTL_EOL) != 0)
   1691 			break;
   1692 
   1693 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1694 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1695 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1696 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1697 		    0, CTL_CREATE, CTL_EOL) != 0)
   1698 			break;
   1699 
   1700 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1701 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1702 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1703 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1704 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1705 		    NULL, adapter->queues[i].evnamebuf,
   1706 		    "Queue No Descriptor Available");
   1707 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1708 		    NULL, adapter->queues[i].evnamebuf,
   1709 		    "Queue Packets Transmitted");
   1710 #ifndef IXGBE_LEGACY_TX
   1711 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1712 		    NULL, adapter->queues[i].evnamebuf,
   1713 		    "Packets dropped in pcq");
   1714 #endif
   1715 
   1716 #ifdef LRO
   1717 		struct lro_ctrl *lro = &rxr->lro;
   1718 #endif /* LRO */
   1719 
   1720 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1721 		    CTLFLAG_READONLY,
   1722 		    CTLTYPE_INT,
   1723 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1724 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1725 		    CTL_CREATE, CTL_EOL) != 0)
   1726 			break;
   1727 
   1728 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1729 		    CTLFLAG_READONLY,
   1730 		    CTLTYPE_INT,
   1731 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1732 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1733 		    CTL_CREATE, CTL_EOL) != 0)
   1734 			break;
   1735 
   1736 		if (i < __arraycount(stats->mpc)) {
   1737 			evcnt_attach_dynamic(&stats->mpc[i],
   1738 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1739 			    "RX Missed Packet Count");
   1740 			if (hw->mac.type == ixgbe_mac_82598EB)
   1741 				evcnt_attach_dynamic(&stats->rnbc[i],
   1742 				    EVCNT_TYPE_MISC, NULL,
   1743 				    adapter->queues[i].evnamebuf,
   1744 				    "Receive No Buffers");
   1745 		}
   1746 		if (i < __arraycount(stats->pxontxc)) {
   1747 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1748 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1749 			    "pxontxc");
   1750 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1751 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1752 			    "pxonrxc");
   1753 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1754 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1755 			    "pxofftxc");
   1756 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1757 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1758 			    "pxoffrxc");
   1759 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1760 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1761 			    "pxon2offc");
   1762 		}
   1763 		if (i < __arraycount(stats->qprc)) {
   1764 			evcnt_attach_dynamic(&stats->qprc[i],
   1765 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1766 			    "qprc");
   1767 			evcnt_attach_dynamic(&stats->qptc[i],
   1768 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1769 			    "qptc");
   1770 			evcnt_attach_dynamic(&stats->qbrc[i],
   1771 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1772 			    "qbrc");
   1773 			evcnt_attach_dynamic(&stats->qbtc[i],
   1774 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1775 			    "qbtc");
   1776 			evcnt_attach_dynamic(&stats->qprdc[i],
   1777 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1778 			    "qprdc");
   1779 		}
   1780 
   1781 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1782 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1783 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1784 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1785 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1786 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1787 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1788 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1789 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1790 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1791 #ifdef LRO
   1792 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1793 				CTLFLAG_RD, &lro->lro_queued, 0,
   1794 				"LRO Queued");
   1795 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1796 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1797 				"LRO Flushed");
   1798 #endif /* LRO */
   1799 	}
   1800 
   1801 	/* MAC stats get their own sub node */
   1802 
   1803 	snprintf(stats->namebuf,
   1804 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1805 
   1806 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1807 	    stats->namebuf, "rx csum offload - IP");
   1808 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1809 	    stats->namebuf, "rx csum offload - L4");
   1810 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1811 	    stats->namebuf, "rx csum offload - IP bad");
   1812 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1813 	    stats->namebuf, "rx csum offload - L4 bad");
   1814 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1815 	    stats->namebuf, "Interrupt conditions zero");
   1816 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1817 	    stats->namebuf, "Legacy interrupts");
   1818 
   1819 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1820 	    stats->namebuf, "CRC Errors");
   1821 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1822 	    stats->namebuf, "Illegal Byte Errors");
   1823 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1824 	    stats->namebuf, "Byte Errors");
   1825 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1826 	    stats->namebuf, "MAC Short Packets Discarded");
   1827 	if (hw->mac.type >= ixgbe_mac_X550)
   1828 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1829 		    stats->namebuf, "Bad SFD");
   1830 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1831 	    stats->namebuf, "Total Packets Missed");
   1832 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1833 	    stats->namebuf, "MAC Local Faults");
   1834 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1835 	    stats->namebuf, "MAC Remote Faults");
   1836 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1837 	    stats->namebuf, "Receive Length Errors");
   1838 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1839 	    stats->namebuf, "Link XON Transmitted");
   1840 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1841 	    stats->namebuf, "Link XON Received");
   1842 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1843 	    stats->namebuf, "Link XOFF Transmitted");
   1844 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1845 	    stats->namebuf, "Link XOFF Received");
   1846 
   1847 	/* Packet Reception Stats */
   1848 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1849 	    stats->namebuf, "Total Octets Received");
   1850 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1851 	    stats->namebuf, "Good Octets Received");
   1852 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1853 	    stats->namebuf, "Total Packets Received");
   1854 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1855 	    stats->namebuf, "Good Packets Received");
   1856 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1857 	    stats->namebuf, "Multicast Packets Received");
   1858 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1859 	    stats->namebuf, "Broadcast Packets Received");
   1860 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
    1861 	    stats->namebuf, "64 byte frames received");
   1862 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1863 	    stats->namebuf, "65-127 byte frames received");
   1864 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1865 	    stats->namebuf, "128-255 byte frames received");
   1866 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1867 	    stats->namebuf, "256-511 byte frames received");
   1868 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1869 	    stats->namebuf, "512-1023 byte frames received");
   1870 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    1871 	    stats->namebuf, "1024-1522 byte frames received");
   1872 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1873 	    stats->namebuf, "Receive Undersized");
   1874 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
    1875 	    stats->namebuf, "Fragmented Packets Received");
   1876 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1877 	    stats->namebuf, "Oversized Packets Received");
   1878 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1879 	    stats->namebuf, "Received Jabber");
   1880 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1881 	    stats->namebuf, "Management Packets Received");
   1882 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1883 	    stats->namebuf, "Management Packets Dropped");
   1884 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1885 	    stats->namebuf, "Checksum Errors");
   1886 
   1887 	/* Packet Transmission Stats */
   1888 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1889 	    stats->namebuf, "Good Octets Transmitted");
   1890 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1891 	    stats->namebuf, "Total Packets Transmitted");
   1892 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1893 	    stats->namebuf, "Good Packets Transmitted");
   1894 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1895 	    stats->namebuf, "Broadcast Packets Transmitted");
   1896 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1897 	    stats->namebuf, "Multicast Packets Transmitted");
   1898 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1899 	    stats->namebuf, "Management Packets Transmitted");
   1900 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
    1901 	    stats->namebuf, "64 byte frames transmitted");
   1902 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1903 	    stats->namebuf, "65-127 byte frames transmitted");
   1904 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1905 	    stats->namebuf, "128-255 byte frames transmitted");
   1906 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1907 	    stats->namebuf, "256-511 byte frames transmitted");
   1908 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1909 	    stats->namebuf, "512-1023 byte frames transmitted");
   1910 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1911 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1912 } /* ixgbe_add_hw_stats */
   1913 
   1914 static void
   1915 ixgbe_clear_evcnt(struct adapter *adapter)
   1916 {
   1917 	struct tx_ring *txr = adapter->tx_rings;
   1918 	struct rx_ring *rxr = adapter->rx_rings;
   1919 	struct ixgbe_hw *hw = &adapter->hw;
   1920 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1921 
   1922 	adapter->handleq.ev_count = 0;
   1923 	adapter->req.ev_count = 0;
   1924 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1925 	adapter->mbuf_defrag_failed.ev_count = 0;
   1926 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1927 	adapter->einval_tx_dma_setup.ev_count = 0;
   1928 	adapter->other_tx_dma_setup.ev_count = 0;
   1929 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1930 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1931 	adapter->watchdog_events.ev_count = 0;
   1932 	adapter->tso_err.ev_count = 0;
   1933 	adapter->link_irq.ev_count = 0;
   1934 
   1935 	txr = adapter->tx_rings;
   1936 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1937 		adapter->queues[i].irqs.ev_count = 0;
   1938 		txr->no_desc_avail.ev_count = 0;
   1939 		txr->total_packets.ev_count = 0;
   1940 		txr->tso_tx.ev_count = 0;
   1941 #ifndef IXGBE_LEGACY_TX
   1942 		txr->pcq_drops.ev_count = 0;
   1943 #endif
   1944 
   1945 		if (i < __arraycount(stats->mpc)) {
   1946 			stats->mpc[i].ev_count = 0;
   1947 			if (hw->mac.type == ixgbe_mac_82598EB)
   1948 				stats->rnbc[i].ev_count = 0;
   1949 		}
   1950 		if (i < __arraycount(stats->pxontxc)) {
   1951 			stats->pxontxc[i].ev_count = 0;
   1952 			stats->pxonrxc[i].ev_count = 0;
   1953 			stats->pxofftxc[i].ev_count = 0;
   1954 			stats->pxoffrxc[i].ev_count = 0;
   1955 			stats->pxon2offc[i].ev_count = 0;
   1956 		}
   1957 		if (i < __arraycount(stats->qprc)) {
   1958 			stats->qprc[i].ev_count = 0;
   1959 			stats->qptc[i].ev_count = 0;
   1960 			stats->qbrc[i].ev_count = 0;
   1961 			stats->qbtc[i].ev_count = 0;
   1962 			stats->qprdc[i].ev_count = 0;
   1963 		}
   1964 
   1965 		rxr->rx_packets.ev_count = 0;
   1966 		rxr->rx_bytes.ev_count = 0;
   1967 		rxr->rx_copies.ev_count = 0;
   1968 		rxr->no_jmbuf.ev_count = 0;
   1969 		rxr->rx_discarded.ev_count = 0;
   1970 	}
   1971 	stats->ipcs.ev_count = 0;
   1972 	stats->l4cs.ev_count = 0;
   1973 	stats->ipcs_bad.ev_count = 0;
   1974 	stats->l4cs_bad.ev_count = 0;
   1975 	stats->intzero.ev_count = 0;
   1976 	stats->legint.ev_count = 0;
   1977 	stats->crcerrs.ev_count = 0;
   1978 	stats->illerrc.ev_count = 0;
   1979 	stats->errbc.ev_count = 0;
   1980 	stats->mspdc.ev_count = 0;
   1981 	stats->mbsdc.ev_count = 0;
   1982 	stats->mpctotal.ev_count = 0;
   1983 	stats->mlfc.ev_count = 0;
   1984 	stats->mrfc.ev_count = 0;
   1985 	stats->rlec.ev_count = 0;
   1986 	stats->lxontxc.ev_count = 0;
   1987 	stats->lxonrxc.ev_count = 0;
   1988 	stats->lxofftxc.ev_count = 0;
   1989 	stats->lxoffrxc.ev_count = 0;
   1990 
   1991 	/* Packet Reception Stats */
   1992 	stats->tor.ev_count = 0;
   1993 	stats->gorc.ev_count = 0;
   1994 	stats->tpr.ev_count = 0;
   1995 	stats->gprc.ev_count = 0;
   1996 	stats->mprc.ev_count = 0;
   1997 	stats->bprc.ev_count = 0;
   1998 	stats->prc64.ev_count = 0;
   1999 	stats->prc127.ev_count = 0;
   2000 	stats->prc255.ev_count = 0;
   2001 	stats->prc511.ev_count = 0;
   2002 	stats->prc1023.ev_count = 0;
   2003 	stats->prc1522.ev_count = 0;
   2004 	stats->ruc.ev_count = 0;
   2005 	stats->rfc.ev_count = 0;
   2006 	stats->roc.ev_count = 0;
   2007 	stats->rjc.ev_count = 0;
   2008 	stats->mngprc.ev_count = 0;
   2009 	stats->mngpdc.ev_count = 0;
   2010 	stats->xec.ev_count = 0;
   2011 
   2012 	/* Packet Transmission Stats */
   2013 	stats->gotc.ev_count = 0;
   2014 	stats->tpt.ev_count = 0;
   2015 	stats->gptc.ev_count = 0;
   2016 	stats->bptc.ev_count = 0;
   2017 	stats->mptc.ev_count = 0;
   2018 	stats->mngptc.ev_count = 0;
   2019 	stats->ptc64.ev_count = 0;
   2020 	stats->ptc127.ev_count = 0;
   2021 	stats->ptc255.ev_count = 0;
   2022 	stats->ptc511.ev_count = 0;
   2023 	stats->ptc1023.ev_count = 0;
   2024 	stats->ptc1522.ev_count = 0;
   2025 }
   2026 
   2027 /************************************************************************
   2028  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2029  *
   2030  *   Retrieves the TDH value from the hardware
   2031  ************************************************************************/
   2032 static int
   2033 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2034 {
   2035 	struct sysctlnode node = *rnode;
   2036 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2037 	uint32_t val;
   2038 
   2039 	if (!txr)
   2040 		return (0);
   2041 
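         	/*
         	 * node.sysctl_data arrives pointing at the tx_ring; repoint it
         	 * at the freshly read register value so sysctl_lookup() copies
         	 * that value out instead.
         	 */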
   2042 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2043 	node.sysctl_data = &val;
   2044 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2045 } /* ixgbe_sysctl_tdh_handler */
   2046 
   2047 /************************************************************************
   2048  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2049  *
   2050  *   Retrieves the TDT value from the hardware
   2051  ************************************************************************/
   2052 static int
   2053 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2054 {
   2055 	struct sysctlnode node = *rnode;
   2056 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2057 	uint32_t val;
   2058 
   2059 	if (!txr)
   2060 		return (0);
   2061 
   2062 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2063 	node.sysctl_data = &val;
   2064 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2065 } /* ixgbe_sysctl_tdt_handler */
   2066 
   2067 /************************************************************************
   2068  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2069  *
   2070  *   Retrieves the RDH value from the hardware
   2071  ************************************************************************/
   2072 static int
   2073 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2074 {
   2075 	struct sysctlnode node = *rnode;
   2076 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2077 	uint32_t val;
   2078 
   2079 	if (!rxr)
   2080 		return (0);
   2081 
   2082 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2083 	node.sysctl_data = &val;
   2084 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2085 } /* ixgbe_sysctl_rdh_handler */
   2086 
   2087 /************************************************************************
   2088  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2089  *
   2090  *   Retrieves the RDT value from the hardware
   2091  ************************************************************************/
   2092 static int
   2093 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2094 {
   2095 	struct sysctlnode node = *rnode;
   2096 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2097 	uint32_t val;
   2098 
   2099 	if (!rxr)
   2100 		return (0);
   2101 
   2102 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2103 	node.sysctl_data = &val;
   2104 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2105 } /* ixgbe_sysctl_rdt_handler */
   2106 
   2107 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2108 /************************************************************************
   2109  * ixgbe_register_vlan
   2110  *
   2111  *   Run via vlan config EVENT, it enables us to use the
   2112  *   HW Filter table since we can get the vlan id. This
   2113  *   just creates the entry in the soft version of the
   2114  *   VFTA, init will repopulate the real table.
   2115  ************************************************************************/
   2116 static void
   2117 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2118 {
   2119 	struct adapter	*adapter = ifp->if_softc;
   2120 	u16		index, bit;
   2121 
   2122 	if (ifp->if_softc != arg)   /* Not our event */
   2123 		return;
   2124 
   2125 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2126 		return;
   2127 
   2128 	IXGBE_CORE_LOCK(adapter);
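         	/*
         	 * The shadow VFTA is an array of 32-bit words: VLAN ID bits
         	 * [11:5] select the word, bits [4:0] the bit within it.
         	 */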
   2129 	index = (vtag >> 5) & 0x7F;
   2130 	bit = vtag & 0x1F;
   2131 	adapter->shadow_vfta[index] |= (1 << bit);
   2132 	ixgbe_setup_vlan_hw_support(adapter);
   2133 	IXGBE_CORE_UNLOCK(adapter);
   2134 } /* ixgbe_register_vlan */
   2135 
   2136 /************************************************************************
   2137  * ixgbe_unregister_vlan
   2138  *
   2139  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2140  ************************************************************************/
   2141 static void
   2142 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2143 {
   2144 	struct adapter	*adapter = ifp->if_softc;
   2145 	u16		index, bit;
   2146 
   2147 	if (ifp->if_softc != arg)
   2148 		return;
   2149 
   2150 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2151 		return;
   2152 
   2153 	IXGBE_CORE_LOCK(adapter);
   2154 	index = (vtag >> 5) & 0x7F;
   2155 	bit = vtag & 0x1F;
   2156 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2157 	/* Re-init to load the changes */
   2158 	ixgbe_setup_vlan_hw_support(adapter);
   2159 	IXGBE_CORE_UNLOCK(adapter);
   2160 } /* ixgbe_unregister_vlan */
   2161 #endif
   2162 
   2163 static void
   2164 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2165 {
   2166 	struct ethercom *ec = &adapter->osdep.ec;
   2167 	struct ixgbe_hw *hw = &adapter->hw;
   2168 	struct rx_ring	*rxr;
   2169 	int             i;
   2170 	u32		ctrl;
   2171 
   2172 
   2173 	/*
    2174 	 * We get here through init_locked, meaning a soft
    2175 	 * reset; this has already cleared the VFTA and other
    2176 	 * state, so if no VLANs have been registered, do
    2177 	 * nothing.
   2178 	 */
   2179 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2180 		return;
   2181 
   2182 	/* Setup the queues for vlans */
   2183 	for (i = 0; i < adapter->num_queues; i++) {
   2184 		rxr = &adapter->rx_rings[i];
   2185 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2186 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2187 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2188 			ctrl |= IXGBE_RXDCTL_VME;
   2189 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2190 		}
   2191 		rxr->vtag_strip = TRUE;
   2192 	}
   2193 
   2194 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2195 		return;
   2196 	/*
    2197 	 * A soft reset zeroes out the VFTA, so
   2198 	 * we need to repopulate it now.
   2199 	 */
   2200 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2201 		if (adapter->shadow_vfta[i] != 0)
   2202 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2203 			    adapter->shadow_vfta[i]);
   2204 
   2205 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2206 	/* Enable the Filter Table if enabled */
   2207 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2208 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2209 		ctrl |= IXGBE_VLNCTRL_VFE;
   2210 	}
   2211 	if (hw->mac.type == ixgbe_mac_82598EB)
   2212 		ctrl |= IXGBE_VLNCTRL_VME;
   2213 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2214 } /* ixgbe_setup_vlan_hw_support */
   2215 
   2216 /************************************************************************
   2217  * ixgbe_get_slot_info
   2218  *
   2219  *   Get the width and transaction speed of
   2220  *   the slot this adapter is plugged into.
   2221  ************************************************************************/
   2222 static void
   2223 ixgbe_get_slot_info(struct adapter *adapter)
   2224 {
   2225 	device_t		dev = adapter->dev;
   2226 	struct ixgbe_hw		*hw = &adapter->hw;
   2227 	u32                   offset;
   2228 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2229 	u16			link;
   2230 	int                   bus_info_valid = TRUE;
   2231 
   2232 	/* Some devices are behind an internal bridge */
   2233 	switch (hw->device_id) {
   2234 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2235 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2236 		goto get_parent_info;
   2237 	default:
   2238 		break;
   2239 	}
   2240 
   2241 	ixgbe_get_bus_info(hw);
   2242 
   2243 	/*
    2244 	 * Some devices don't use PCI-E, so there is no point in
    2245 	 * displaying "Unknown" for their bus speed and width.
   2246 	 */
   2247 	switch (hw->mac.type) {
   2248 	case ixgbe_mac_X550EM_x:
   2249 	case ixgbe_mac_X550EM_a:
   2250 		return;
   2251 	default:
   2252 		goto display;
   2253 	}
   2254 
   2255 get_parent_info:
   2256 	/*
   2257 	 * For the Quad port adapter we need to parse back
   2258 	 * up the PCI tree to find the speed of the expansion
   2259 	 * slot into which this adapter is plugged. A bit more work.
   2260 	 */
   2261 	dev = device_parent(device_parent(dev));
   2262 #if 0
   2263 #ifdef IXGBE_DEBUG
   2264 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2265 	    pci_get_slot(dev), pci_get_function(dev));
   2266 #endif
   2267 	dev = device_parent(device_parent(dev));
   2268 #ifdef IXGBE_DEBUG
   2269 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2270 	    pci_get_slot(dev), pci_get_function(dev));
   2271 #endif
   2272 #endif
   2273 	/* Now get the PCI Express Capabilities offset */
   2274 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2275 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2276 		/*
   2277 		 * Hmm...can't get PCI-Express capabilities.
   2278 		 * Falling back to default method.
   2279 		 */
   2280 		bus_info_valid = FALSE;
   2281 		ixgbe_get_bus_info(hw);
   2282 		goto display;
   2283 	}
   2284 	/* ...and read the Link Status Register */
   2285 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2286 	    offset + PCIE_LCSR);
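         	/* The Link Status register occupies the upper 16 bits of LCSR. */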
   2287 	ixgbe_set_pci_config_data_generic(hw, link >> 16);
   2288 
   2289 display:
   2290 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2291 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2292 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2293 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2294 	     "Unknown"),
   2295 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2296 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2297 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2298 	     "Unknown"));
   2299 
   2300 	if (bus_info_valid) {
   2301 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2302 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2303 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2304 			device_printf(dev, "PCI-Express bandwidth available"
   2305 			    " for this card\n     is not sufficient for"
   2306 			    " optimal performance.\n");
   2307 			device_printf(dev, "For optimal performance a x8 "
   2308 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2309 		}
   2310 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2311 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2312 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2313 			device_printf(dev, "PCI-Express bandwidth available"
   2314 			    " for this card\n     is not sufficient for"
   2315 			    " optimal performance.\n");
   2316 			device_printf(dev, "For optimal performance a x8 "
   2317 			    "PCIE Gen3 slot is required.\n");
   2318 		}
   2319 	} else
   2320 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2321 
   2322 	return;
   2323 } /* ixgbe_get_slot_info */
   2324 
   2325 /************************************************************************
   2326  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2327  ************************************************************************/
   2328 static inline void
   2329 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2330 {
   2331 	struct ixgbe_hw *hw = &adapter->hw;
   2332 	u64             queue = (u64)(1ULL << vector);
   2333 	u32             mask;
   2334 
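         	/*
         	 * The 82598 keeps all queue interrupt enables in the single
         	 * EIMS register; newer MACs spread the 64 possible MSI-X
         	 * vectors across EIMS_EX(0) (vectors 0-31) and EIMS_EX(1)
         	 * (vectors 32-63), so the 64-bit queue mask is written out
         	 * in two 32-bit halves.
         	 */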
   2335 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2336 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2337 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2338 	} else {
   2339 		mask = (queue & 0xFFFFFFFF);
   2340 		if (mask)
   2341 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2342 		mask = (queue >> 32);
   2343 		if (mask)
   2344 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2345 	}
   2346 } /* ixgbe_enable_queue */
   2347 
   2348 /************************************************************************
   2349  * ixgbe_disable_queue
   2350  ************************************************************************/
   2351 static inline void
   2352 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2353 {
   2354 	struct ixgbe_hw *hw = &adapter->hw;
   2355 	u64             queue = (u64)(1ULL << vector);
   2356 	u32             mask;
   2357 
   2358 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2359 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2360 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2361 	} else {
   2362 		mask = (queue & 0xFFFFFFFF);
   2363 		if (mask)
   2364 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2365 		mask = (queue >> 32);
   2366 		if (mask)
   2367 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2368 	}
   2369 } /* ixgbe_disable_queue */
   2370 
   2371 /************************************************************************
   2372  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2373  ************************************************************************/
   2374 static int
   2375 ixgbe_msix_que(void *arg)
   2376 {
   2377 	struct ix_queue	*que = arg;
   2378 	struct adapter  *adapter = que->adapter;
   2379 	struct ifnet    *ifp = adapter->ifp;
   2380 	struct tx_ring	*txr = que->txr;
   2381 	struct rx_ring	*rxr = que->rxr;
   2382 	bool		more;
   2383 	u32		newitr = 0;
   2384 
   2385 	/* Protect against spurious interrupts */
   2386 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2387 		return 0;
   2388 
   2389 	ixgbe_disable_queue(adapter, que->msix);
   2390 	++que->irqs.ev_count;
   2391 
   2392 #ifdef __NetBSD__
   2393 	/* Don't run ixgbe_rxeof in interrupt context */
   2394 	more = true;
   2395 #else
   2396 	more = ixgbe_rxeof(que);
   2397 #endif
   2398 
   2399 	IXGBE_TX_LOCK(txr);
   2400 	ixgbe_txeof(txr);
   2401 	IXGBE_TX_UNLOCK(txr);
   2402 
   2403 	/* Do AIM now? */
   2404 
   2405 	if (adapter->enable_aim == false)
   2406 		goto no_calc;
   2407 	/*
   2408 	 * Do Adaptive Interrupt Moderation:
   2409 	 *  - Write out last calculated setting
   2410 	 *  - Calculate based on average size over
   2411 	 *    the last interval.
   2412 	 */
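         	/*
         	 * The new EITR value is derived from the average packet size
         	 * (bytes/packets) seen on this queue since the last interrupt,
         	 * plus 24 to account for hardware framing and CRC, then clamped
         	 * and scaled; it is stashed in eitr_setting and only written to
         	 * EITR at the start of the next interrupt.
         	 */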
   2413 	if (que->eitr_setting)
   2414 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2415 		    que->eitr_setting);
   2416 
   2417 	que->eitr_setting = 0;
   2418 
   2419 	/* Idle, do nothing */
    2420 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2421 		goto no_calc;
   2422 
   2423 	if ((txr->bytes) && (txr->packets))
   2424 		newitr = txr->bytes/txr->packets;
   2425 	if ((rxr->bytes) && (rxr->packets))
   2426 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2427 	newitr += 24; /* account for hardware frame, crc */
   2428 
   2429 	/* set an upper boundary */
   2430 	newitr = min(newitr, 3000);
   2431 
   2432 	/* Be nice to the mid range */
   2433 	if ((newitr > 300) && (newitr < 1200))
   2434 		newitr = (newitr / 3);
   2435 	else
   2436 		newitr = (newitr / 2);
   2437 
    2438 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2439 		newitr |= newitr << 16;
    2440 	else
    2441 		newitr |= IXGBE_EITR_CNT_WDIS;
    2442 
    2443 	/* save for next interrupt */
    2444 	que->eitr_setting = newitr;
   2445 
   2446 	/* Reset state */
   2447 	txr->bytes = 0;
   2448 	txr->packets = 0;
   2449 	rxr->bytes = 0;
   2450 	rxr->packets = 0;
   2451 
   2452 no_calc:
   2453 	if (more)
   2454 		softint_schedule(que->que_si);
   2455 	else
   2456 		ixgbe_enable_queue(adapter, que->msix);
   2457 
   2458 	return 1;
   2459 } /* ixgbe_msix_que */
   2460 
   2461 /************************************************************************
   2462  * ixgbe_media_status - Media Ioctl callback
   2463  *
   2464  *   Called whenever the user queries the status of
   2465  *   the interface using ifconfig.
   2466  ************************************************************************/
   2467 static void
   2468 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2469 {
   2470 	struct adapter *adapter = ifp->if_softc;
   2471 	struct ixgbe_hw *hw = &adapter->hw;
   2472 	int layer;
   2473 
   2474 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2475 	IXGBE_CORE_LOCK(adapter);
   2476 	ixgbe_update_link_status(adapter);
   2477 
   2478 	ifmr->ifm_status = IFM_AVALID;
   2479 	ifmr->ifm_active = IFM_ETHER;
   2480 
   2481 	if (!adapter->link_active) {
   2482 		ifmr->ifm_active |= IFM_NONE;
   2483 		IXGBE_CORE_UNLOCK(adapter);
   2484 		return;
   2485 	}
   2486 
   2487 	ifmr->ifm_status |= IFM_ACTIVE;
   2488 	layer = adapter->phy_layer;
   2489 
   2490 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2491 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2492 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2493 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2494 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2495 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2496 		switch (adapter->link_speed) {
   2497 		case IXGBE_LINK_SPEED_10GB_FULL:
   2498 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2499 			break;
   2500 		case IXGBE_LINK_SPEED_5GB_FULL:
   2501 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2502 			break;
   2503 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2504 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2505 			break;
   2506 		case IXGBE_LINK_SPEED_1GB_FULL:
   2507 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2508 			break;
   2509 		case IXGBE_LINK_SPEED_100_FULL:
   2510 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2511 			break;
   2512 		case IXGBE_LINK_SPEED_10_FULL:
   2513 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2514 			break;
   2515 		}
   2516 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2517 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2518 		switch (adapter->link_speed) {
   2519 		case IXGBE_LINK_SPEED_10GB_FULL:
   2520 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2521 			break;
   2522 		}
   2523 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2524 		switch (adapter->link_speed) {
   2525 		case IXGBE_LINK_SPEED_10GB_FULL:
   2526 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2527 			break;
   2528 		case IXGBE_LINK_SPEED_1GB_FULL:
   2529 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2530 			break;
   2531 		}
   2532 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2533 		switch (adapter->link_speed) {
   2534 		case IXGBE_LINK_SPEED_10GB_FULL:
   2535 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2536 			break;
   2537 		case IXGBE_LINK_SPEED_1GB_FULL:
   2538 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2539 			break;
   2540 		}
   2541 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2542 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2543 		switch (adapter->link_speed) {
   2544 		case IXGBE_LINK_SPEED_10GB_FULL:
   2545 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2546 			break;
   2547 		case IXGBE_LINK_SPEED_1GB_FULL:
   2548 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2549 			break;
   2550 		}
   2551 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2552 		switch (adapter->link_speed) {
   2553 		case IXGBE_LINK_SPEED_10GB_FULL:
   2554 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2555 			break;
   2556 		}
   2557 	/*
   2558 	 * XXX: These need to use the proper media types once
   2559 	 * they're added.
   2560 	 */
   2561 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2562 		switch (adapter->link_speed) {
   2563 		case IXGBE_LINK_SPEED_10GB_FULL:
   2564 #ifndef IFM_ETH_XTYPE
   2565 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2566 #else
   2567 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2568 #endif
   2569 			break;
   2570 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2571 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2572 			break;
   2573 		case IXGBE_LINK_SPEED_1GB_FULL:
   2574 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2575 			break;
   2576 		}
   2577 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2578 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2579 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2580 		switch (adapter->link_speed) {
   2581 		case IXGBE_LINK_SPEED_10GB_FULL:
   2582 #ifndef IFM_ETH_XTYPE
   2583 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2584 #else
   2585 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2586 #endif
   2587 			break;
   2588 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2589 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2590 			break;
   2591 		case IXGBE_LINK_SPEED_1GB_FULL:
   2592 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2593 			break;
   2594 		}
   2595 
   2596 	/* If nothing is recognized... */
   2597 #if 0
   2598 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2599 		ifmr->ifm_active |= IFM_UNKNOWN;
   2600 #endif
   2601 
   2602 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2603 
   2604 	/* Display current flow control setting used on link */
   2605 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2606 	    hw->fc.current_mode == ixgbe_fc_full)
   2607 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2608 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2609 	    hw->fc.current_mode == ixgbe_fc_full)
   2610 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2611 
   2612 	IXGBE_CORE_UNLOCK(adapter);
   2613 
   2614 	return;
   2615 } /* ixgbe_media_status */
   2616 
   2617 /************************************************************************
   2618  * ixgbe_media_change - Media Ioctl callback
   2619  *
   2620  *   Called when the user changes speed/duplex using
    2621  *   media/mediaopt options with ifconfig.
   2622  ************************************************************************/
   2623 static int
   2624 ixgbe_media_change(struct ifnet *ifp)
   2625 {
   2626 	struct adapter   *adapter = ifp->if_softc;
   2627 	struct ifmedia   *ifm = &adapter->media;
   2628 	struct ixgbe_hw  *hw = &adapter->hw;
   2629 	ixgbe_link_speed speed = 0;
   2630 	ixgbe_link_speed link_caps = 0;
   2631 	bool negotiate = false;
   2632 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2633 
   2634 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2635 
   2636 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2637 		return (EINVAL);
   2638 
   2639 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2640 		return (ENODEV);
   2641 
   2642 	/*
   2643 	 * We don't actually need to check against the supported
   2644 	 * media types of the adapter; ifmedia will take care of
   2645 	 * that for us.
   2646 	 */
   2647 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2648 	case IFM_AUTO:
   2649 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2650 		    &negotiate);
   2651 		if (err != IXGBE_SUCCESS) {
   2652 			device_printf(adapter->dev, "Unable to determine "
   2653 			    "supported advertise speeds\n");
   2654 			return (ENODEV);
   2655 		}
   2656 		speed |= link_caps;
   2657 		break;
   2658 	case IFM_10G_T:
   2659 	case IFM_10G_LRM:
   2660 	case IFM_10G_LR:
   2661 	case IFM_10G_TWINAX:
   2662 #ifndef IFM_ETH_XTYPE
   2663 	case IFM_10G_SR: /* KR, too */
   2664 	case IFM_10G_CX4: /* KX4 */
   2665 #else
   2666 	case IFM_10G_KR:
   2667 	case IFM_10G_KX4:
   2668 #endif
   2669 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2670 		break;
   2671 	case IFM_5000_T:
   2672 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2673 		break;
   2674 	case IFM_2500_T:
   2675 	case IFM_2500_KX:
   2676 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2677 		break;
   2678 	case IFM_1000_T:
   2679 	case IFM_1000_LX:
   2680 	case IFM_1000_SX:
   2681 	case IFM_1000_KX:
   2682 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2683 		break;
   2684 	case IFM_100_TX:
   2685 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2686 		break;
   2687 	case IFM_10_T:
   2688 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2689 		break;
   2690 	default:
   2691 		goto invalid;
   2692 	}
   2693 
   2694 	hw->mac.autotry_restart = TRUE;
   2695 	hw->mac.ops.setup_link(hw, speed, TRUE);
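         	/*
         	 * Record the requested speeds in adapter->advertise as a
         	 * bitmask: bit 0 = 100M, bit 1 = 1G, bit 2 = 10G, bit 3 = 10M,
         	 * bit 4 = 2.5G and bit 5 = 5G, matching the assignments below.
         	 */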
   2696 	adapter->advertise = 0;
   2697 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2698 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2699 			adapter->advertise |= 1 << 2;
   2700 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2701 			adapter->advertise |= 1 << 1;
   2702 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2703 			adapter->advertise |= 1 << 0;
   2704 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2705 			adapter->advertise |= 1 << 3;
   2706 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2707 			adapter->advertise |= 1 << 4;
   2708 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2709 			adapter->advertise |= 1 << 5;
   2710 	}
   2711 
   2712 	return (0);
   2713 
   2714 invalid:
   2715 	device_printf(adapter->dev, "Invalid media type!\n");
   2716 
   2717 	return (EINVAL);
   2718 } /* ixgbe_media_change */
   2719 
   2720 /************************************************************************
   2721  * ixgbe_set_promisc
   2722  ************************************************************************/
   2723 static void
   2724 ixgbe_set_promisc(struct adapter *adapter)
   2725 {
   2726 	struct ifnet *ifp = adapter->ifp;
   2727 	int          mcnt = 0;
   2728 	u32          rctl;
   2729 	struct ether_multi *enm;
   2730 	struct ether_multistep step;
   2731 	struct ethercom *ec = &adapter->osdep.ec;
   2732 
   2733 	KASSERT(mutex_owned(&adapter->core_mtx));
   2734 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2735 	rctl &= (~IXGBE_FCTRL_UPE);
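         	/*
         	 * Count the multicast addresses; if the list fits in the filter
         	 * (fewer than MAX_NUM_MULTICAST_ADDRESSES), multicast
         	 * promiscuous mode (MPE) is cleared below.
         	 */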
   2736 	if (ifp->if_flags & IFF_ALLMULTI)
   2737 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2738 	else {
   2739 		ETHER_LOCK(ec);
   2740 		ETHER_FIRST_MULTI(step, ec, enm);
   2741 		while (enm != NULL) {
   2742 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2743 				break;
   2744 			mcnt++;
   2745 			ETHER_NEXT_MULTI(step, enm);
   2746 		}
   2747 		ETHER_UNLOCK(ec);
   2748 	}
   2749 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2750 		rctl &= (~IXGBE_FCTRL_MPE);
   2751 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2752 
   2753 	if (ifp->if_flags & IFF_PROMISC) {
   2754 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2755 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2756 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2757 		rctl |= IXGBE_FCTRL_MPE;
   2758 		rctl &= ~IXGBE_FCTRL_UPE;
   2759 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2760 	}
   2761 } /* ixgbe_set_promisc */
   2762 
   2763 /************************************************************************
   2764  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2765  ************************************************************************/
   2766 static int
   2767 ixgbe_msix_link(void *arg)
   2768 {
   2769 	struct adapter	*adapter = arg;
   2770 	struct ixgbe_hw *hw = &adapter->hw;
   2771 	u32		eicr, eicr_mask;
   2772 	s32             retval;
   2773 
   2774 	++adapter->link_irq.ev_count;
   2775 
   2776 	/* Pause other interrupts */
   2777 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2778 
   2779 	/* First get the cause */
   2780 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2781 	/* Be sure the queue bits are not cleared */
   2782 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2783 	/* Clear interrupt with write */
   2784 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2785 
   2786 	/* Link status change */
   2787 	if (eicr & IXGBE_EICR_LSC) {
   2788 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2789 		softint_schedule(adapter->link_si);
   2790 	}
   2791 
   2792 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2793 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2794 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2795 			/* This is probably overkill :) */
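			/*
			 * atomic_cas_uint() returns the previous value;
			 * non-zero here means a reinit is already pending.
			 */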
   2796 			if (atomic_cas_uint(&adapter->fdir_reinit, 0, 1) != 0)
   2797 				return 1;
   2798 			/* Disable the interrupt */
   2799 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2800 			softint_schedule(adapter->fdir_si);
   2801 		}
   2802 
   2803 		if (eicr & IXGBE_EICR_ECC) {
   2804 			device_printf(adapter->dev,
   2805 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2806 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2807 		}
   2808 
   2809 		/* Check for over temp condition */
   2810 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
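			/*
			 * X550EM_a signals the thermal alarm via GPI SDP0;
			 * the other MACs handled here use the dedicated
			 * EICR_TS bit instead.
			 */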
   2811 			switch (adapter->hw.mac.type) {
   2812 			case ixgbe_mac_X550EM_a:
   2813 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2814 					break;
   2815 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2816 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2817 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2818 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2819 				retval = hw->phy.ops.check_overtemp(hw);
   2820 				if (retval != IXGBE_ERR_OVERTEMP)
   2821 					break;
   2822 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2823 				device_printf(adapter->dev, "System shutdown required!\n");
   2824 				break;
   2825 			default:
   2826 				if (!(eicr & IXGBE_EICR_TS))
   2827 					break;
   2828 				retval = hw->phy.ops.check_overtemp(hw);
   2829 				if (retval != IXGBE_ERR_OVERTEMP)
   2830 					break;
   2831 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2832 				device_printf(adapter->dev, "System shutdown required!\n");
   2833 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2834 				break;
   2835 			}
   2836 		}
   2837 
   2838 		/* Check for VF message */
   2839 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2840 		    (eicr & IXGBE_EICR_MAILBOX))
   2841 			softint_schedule(adapter->mbx_si);
   2842 	}
   2843 
   2844 	if (ixgbe_is_sfp(hw)) {
   2845 		/* Pluggable optics-related interrupt */
   2846 		if (hw->mac.type >= ixgbe_mac_X540)
   2847 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2848 		else
   2849 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2850 
   2851 		if (eicr & eicr_mask) {
   2852 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2853 			softint_schedule(adapter->mod_si);
   2854 		}
   2855 
   2856 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2857 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2858 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2859 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2860 			softint_schedule(adapter->msf_si);
   2861 		}
   2862 	}
   2863 
   2864 	/* Check for fan failure */
   2865 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2866 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2867 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2868 	}
   2869 
   2870 	/* External PHY interrupt */
   2871 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2872 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2873 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2874 		softint_schedule(adapter->phy_si);
   2875  	}
   2876 
   2877 	/* Re-enable other interrupts */
   2878 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2879 	return 1;
   2880 } /* ixgbe_msix_link */
   2881 
   2882 /************************************************************************
   2883  * ixgbe_sysctl_interrupt_rate_handler
   2884  ************************************************************************/
   2885 static int
   2886 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2887 {
   2888 	struct sysctlnode node = *rnode;
   2889 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2890 	uint32_t reg, usec, rate;
   2891 	int error;
   2892 
   2893 	if (que == NULL)
   2894 		return 0;
   2895 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
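	/*
	 * The driver treats bits 11:3 of EITR as the interval in 2us
	 * units: rate = 500000 / field.  The write path below stores
	 * (4000000 / rate), which is that same field already shifted
	 * into place.
	 */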
   2896 	usec = ((reg & 0x0FF8) >> 3);
   2897 	if (usec > 0)
   2898 		rate = 500000 / usec;
   2899 	else
   2900 		rate = 0;
   2901 	node.sysctl_data = &rate;
   2902 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2903 	if (error || newp == NULL)
   2904 		return error;
   2905 	reg &= ~0xfff; /* default, no limitation */
   2906 	ixgbe_max_interrupt_rate = 0;
   2907 	if (rate > 0 && rate < 500000) {
   2908 		if (rate < 1000)
   2909 			rate = 1000;
   2910 		ixgbe_max_interrupt_rate = rate;
   2911 		reg |= ((4000000/rate) & 0xff8);
   2912 	}
   2913 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2914 
   2915 	return (0);
   2916 } /* ixgbe_sysctl_interrupt_rate_handler */
   2917 
   2918 const struct sysctlnode *
   2919 ixgbe_sysctl_instance(struct adapter *adapter)
   2920 {
   2921 	const char *dvname;
   2922 	struct sysctllog **log;
   2923 	int rc;
   2924 	const struct sysctlnode *rnode;
   2925 
   2926 	if (adapter->sysctltop != NULL)
   2927 		return adapter->sysctltop;
   2928 
   2929 	log = &adapter->sysctllog;
   2930 	dvname = device_xname(adapter->dev);
   2931 
   2932 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2933 	    0, CTLTYPE_NODE, dvname,
   2934 	    SYSCTL_DESCR("ixgbe information and settings"),
   2935 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2936 		goto err;
   2937 
   2938 	return rnode;
   2939 err:
   2940 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2941 	return NULL;
   2942 }
   2943 
   2944 /************************************************************************
   2945  * ixgbe_add_device_sysctls
   2946  ************************************************************************/
   2947 static void
   2948 ixgbe_add_device_sysctls(struct adapter *adapter)
   2949 {
   2950 	device_t               dev = adapter->dev;
   2951 	struct ixgbe_hw        *hw = &adapter->hw;
   2952 	struct sysctllog **log;
   2953 	const struct sysctlnode *rnode, *cnode;
   2954 
   2955 	log = &adapter->sysctllog;
   2956 
   2957 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   2958 		aprint_error_dev(dev, "could not create sysctl root\n");
   2959 		return;
   2960 	}
   2961 
   2962 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2963 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2964 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   2965 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2966 		aprint_error_dev(dev, "could not create sysctl\n");
   2967 
   2968 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2969 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2970 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   2971 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   2972 		aprint_error_dev(dev, "could not create sysctl\n");
   2973 
   2974 	/* Sysctls for all devices */
   2975 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2976 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   2977 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   2978 	    CTL_EOL) != 0)
   2979 		aprint_error_dev(dev, "could not create sysctl\n");
   2980 
   2981 	adapter->enable_aim = ixgbe_enable_aim;
   2982 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2983 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2984 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2985 		aprint_error_dev(dev, "could not create sysctl\n");
   2986 
   2987 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2988 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2989 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   2990 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   2991 	    CTL_EOL) != 0)
   2992 		aprint_error_dev(dev, "could not create sysctl\n");
   2993 
   2994 #ifdef IXGBE_DEBUG
   2995 	/* testing sysctls (for all devices) */
   2996 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2997 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   2998 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   2999 	    CTL_EOL) != 0)
   3000 		aprint_error_dev(dev, "could not create sysctl\n");
   3001 
   3002 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3003 	    CTLTYPE_STRING, "print_rss_config",
   3004 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3005 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3006 	    CTL_EOL) != 0)
   3007 		aprint_error_dev(dev, "could not create sysctl\n");
   3008 #endif
   3009 	/* for X550 series devices */
   3010 	if (hw->mac.type >= ixgbe_mac_X550)
   3011 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3012 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3013 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3014 		    CTL_EOL) != 0)
   3015 			aprint_error_dev(dev, "could not create sysctl\n");
   3016 
   3017 	/* for WoL-capable devices */
   3018 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3019 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3020 		    CTLTYPE_BOOL, "wol_enable",
   3021 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3022 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3023 		    CTL_EOL) != 0)
   3024 			aprint_error_dev(dev, "could not create sysctl\n");
   3025 
   3026 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3027 		    CTLTYPE_INT, "wufc",
   3028 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3029 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3030 		    CTL_EOL) != 0)
   3031 			aprint_error_dev(dev, "could not create sysctl\n");
   3032 	}
   3033 
   3034 	/* for X552/X557-AT devices */
   3035 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3036 		const struct sysctlnode *phy_node;
   3037 
   3038 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3039 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3040 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3041 			aprint_error_dev(dev, "could not create sysctl\n");
   3042 			return;
   3043 		}
   3044 
   3045 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3046 		    CTLTYPE_INT, "temp",
   3047 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3048 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3049 		    CTL_EOL) != 0)
   3050 			aprint_error_dev(dev, "could not create sysctl\n");
   3051 
   3052 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3053 		    CTLTYPE_INT, "overtemp_occurred",
   3054 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3055 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3056 		    CTL_CREATE, CTL_EOL) != 0)
   3057 			aprint_error_dev(dev, "could not create sysctl\n");
   3058 	}
   3059 
   3060 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3061 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3062 		    CTLTYPE_INT, "eee_state",
   3063 		    SYSCTL_DESCR("EEE Power Save State"),
   3064 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3065 		    CTL_EOL) != 0)
   3066 			aprint_error_dev(dev, "could not create sysctl\n");
   3067 	}
   3068 } /* ixgbe_add_device_sysctls */
   3069 
   3070 /************************************************************************
   3071  * ixgbe_allocate_pci_resources
   3072  ************************************************************************/
   3073 static int
   3074 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3075     const struct pci_attach_args *pa)
   3076 {
   3077 	pcireg_t	memtype;
   3078 	device_t dev = adapter->dev;
   3079 	bus_addr_t addr;
   3080 	int flags;
   3081 
   3082 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3083 	switch (memtype) {
   3084 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3085 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3086 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3087 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3088 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3089 			goto map_err;
   3090 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3091 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3092 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3093 		}
   3094 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3095 		     adapter->osdep.mem_size, flags,
   3096 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3097 map_err:
   3098 			adapter->osdep.mem_size = 0;
   3099 			aprint_error_dev(dev, "unable to map BAR0\n");
   3100 			return ENXIO;
   3101 		}
   3102 		break;
   3103 	default:
   3104 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3105 		return ENXIO;
   3106 	}
   3107 
   3108 	return (0);
   3109 } /* ixgbe_allocate_pci_resources */
   3110 
   3111 /************************************************************************
   3112  * ixgbe_detach - Device removal routine
   3113  *
   3114  *   Called when the driver is being removed.
   3115  *   Stops the adapter and deallocates all the resources
   3116  *   that were allocated for driver operation.
   3117  *
   3118  *   return 0 on success, positive on failure
   3119  ************************************************************************/
   3120 static int
   3121 ixgbe_detach(device_t dev, int flags)
   3122 {
   3123 	struct adapter *adapter = device_private(dev);
   3124 	struct ix_queue *que = adapter->queues;
   3125 	struct rx_ring *rxr = adapter->rx_rings;
   3126 	struct tx_ring *txr = adapter->tx_rings;
   3127 	struct ixgbe_hw *hw = &adapter->hw;
   3128 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3129 	u32	ctrl_ext;
   3130 
   3131 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3132 	if (adapter->osdep.attached == false)
   3133 		return 0;
   3134 
   3135 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3136 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3137 		return (EBUSY);
   3138 	}
   3139 
   3140 	/* Stop the interface. Callouts are stopped in it. */
   3141 	ixgbe_ifstop(adapter->ifp, 1);
   3142 #if NVLAN > 0
   3143 	/* Make sure VLANs are not using driver */
   3144 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3145 		;	/* nothing to do: no VLANs */
   3146 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3147 		vlan_ifdetach(adapter->ifp);
   3148 	else {
   3149 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3150 		return (EBUSY);
   3151 	}
   3152 #endif
   3153 
   3154 	pmf_device_deregister(dev);
   3155 
   3156 	ether_ifdetach(adapter->ifp);
   3157 	/* Stop the adapter */
   3158 	IXGBE_CORE_LOCK(adapter);
   3159 	ixgbe_setup_low_power_mode(adapter);
   3160 	IXGBE_CORE_UNLOCK(adapter);
   3161 
   3162 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3163 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3164 			softint_disestablish(txr->txr_si);
   3165 		softint_disestablish(que->que_si);
   3166 	}
   3167 
   3168 	/* Drain the Link queue */
   3169 	softint_disestablish(adapter->link_si);
   3170 	softint_disestablish(adapter->mod_si);
   3171 	softint_disestablish(adapter->msf_si);
   3172 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   3173 		softint_disestablish(adapter->mbx_si);
   3174 	softint_disestablish(adapter->phy_si);
   3175 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   3176 		softint_disestablish(adapter->fdir_si);
   3177 
   3178 	/* let hardware know driver is unloading */
   3179 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3180 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3181 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3182 
   3183 	callout_halt(&adapter->timer, NULL);
   3184 
   3185 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3186 		netmap_detach(adapter->ifp);
   3187 
   3188 	ixgbe_free_pci_resources(adapter);
   3189 #if 0	/* XXX the NetBSD port is probably missing something here */
   3190 	bus_generic_detach(dev);
   3191 #endif
   3192 	if_detach(adapter->ifp);
   3193 	if_percpuq_destroy(adapter->ipq);
   3194 
   3195 	sysctl_teardown(&adapter->sysctllog);
   3196 	evcnt_detach(&adapter->handleq);
   3197 	evcnt_detach(&adapter->req);
   3198 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3199 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3200 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3201 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3202 	evcnt_detach(&adapter->other_tx_dma_setup);
   3203 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3204 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3205 	evcnt_detach(&adapter->watchdog_events);
   3206 	evcnt_detach(&adapter->tso_err);
   3207 	evcnt_detach(&adapter->link_irq);
   3208 
   3209 	txr = adapter->tx_rings;
   3210 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3211 		evcnt_detach(&adapter->queues[i].irqs);
   3212 		evcnt_detach(&txr->no_desc_avail);
   3213 		evcnt_detach(&txr->total_packets);
   3214 		evcnt_detach(&txr->tso_tx);
   3215 #ifndef IXGBE_LEGACY_TX
   3216 		evcnt_detach(&txr->pcq_drops);
   3217 #endif
   3218 
   3219 		if (i < __arraycount(stats->mpc)) {
   3220 			evcnt_detach(&stats->mpc[i]);
   3221 			if (hw->mac.type == ixgbe_mac_82598EB)
   3222 				evcnt_detach(&stats->rnbc[i]);
   3223 		}
   3224 		if (i < __arraycount(stats->pxontxc)) {
   3225 			evcnt_detach(&stats->pxontxc[i]);
   3226 			evcnt_detach(&stats->pxonrxc[i]);
   3227 			evcnt_detach(&stats->pxofftxc[i]);
   3228 			evcnt_detach(&stats->pxoffrxc[i]);
   3229 			evcnt_detach(&stats->pxon2offc[i]);
   3230 		}
   3231 		if (i < __arraycount(stats->qprc)) {
   3232 			evcnt_detach(&stats->qprc[i]);
   3233 			evcnt_detach(&stats->qptc[i]);
   3234 			evcnt_detach(&stats->qbrc[i]);
   3235 			evcnt_detach(&stats->qbtc[i]);
   3236 			evcnt_detach(&stats->qprdc[i]);
   3237 		}
   3238 
   3239 		evcnt_detach(&rxr->rx_packets);
   3240 		evcnt_detach(&rxr->rx_bytes);
   3241 		evcnt_detach(&rxr->rx_copies);
   3242 		evcnt_detach(&rxr->no_jmbuf);
   3243 		evcnt_detach(&rxr->rx_discarded);
   3244 	}
   3245 	evcnt_detach(&stats->ipcs);
   3246 	evcnt_detach(&stats->l4cs);
   3247 	evcnt_detach(&stats->ipcs_bad);
   3248 	evcnt_detach(&stats->l4cs_bad);
   3249 	evcnt_detach(&stats->intzero);
   3250 	evcnt_detach(&stats->legint);
   3251 	evcnt_detach(&stats->crcerrs);
   3252 	evcnt_detach(&stats->illerrc);
   3253 	evcnt_detach(&stats->errbc);
   3254 	evcnt_detach(&stats->mspdc);
   3255 	if (hw->mac.type >= ixgbe_mac_X550)
   3256 		evcnt_detach(&stats->mbsdc);
   3257 	evcnt_detach(&stats->mpctotal);
   3258 	evcnt_detach(&stats->mlfc);
   3259 	evcnt_detach(&stats->mrfc);
   3260 	evcnt_detach(&stats->rlec);
   3261 	evcnt_detach(&stats->lxontxc);
   3262 	evcnt_detach(&stats->lxonrxc);
   3263 	evcnt_detach(&stats->lxofftxc);
   3264 	evcnt_detach(&stats->lxoffrxc);
   3265 
   3266 	/* Packet Reception Stats */
   3267 	evcnt_detach(&stats->tor);
   3268 	evcnt_detach(&stats->gorc);
   3269 	evcnt_detach(&stats->tpr);
   3270 	evcnt_detach(&stats->gprc);
   3271 	evcnt_detach(&stats->mprc);
   3272 	evcnt_detach(&stats->bprc);
   3273 	evcnt_detach(&stats->prc64);
   3274 	evcnt_detach(&stats->prc127);
   3275 	evcnt_detach(&stats->prc255);
   3276 	evcnt_detach(&stats->prc511);
   3277 	evcnt_detach(&stats->prc1023);
   3278 	evcnt_detach(&stats->prc1522);
   3279 	evcnt_detach(&stats->ruc);
   3280 	evcnt_detach(&stats->rfc);
   3281 	evcnt_detach(&stats->roc);
   3282 	evcnt_detach(&stats->rjc);
   3283 	evcnt_detach(&stats->mngprc);
   3284 	evcnt_detach(&stats->mngpdc);
   3285 	evcnt_detach(&stats->xec);
   3286 
   3287 	/* Packet Transmission Stats */
   3288 	evcnt_detach(&stats->gotc);
   3289 	evcnt_detach(&stats->tpt);
   3290 	evcnt_detach(&stats->gptc);
   3291 	evcnt_detach(&stats->bptc);
   3292 	evcnt_detach(&stats->mptc);
   3293 	evcnt_detach(&stats->mngptc);
   3294 	evcnt_detach(&stats->ptc64);
   3295 	evcnt_detach(&stats->ptc127);
   3296 	evcnt_detach(&stats->ptc255);
   3297 	evcnt_detach(&stats->ptc511);
   3298 	evcnt_detach(&stats->ptc1023);
   3299 	evcnt_detach(&stats->ptc1522);
   3300 
   3301 	ixgbe_free_transmit_structures(adapter);
   3302 	ixgbe_free_receive_structures(adapter);
   3303 	free(adapter->queues, M_DEVBUF);
   3304 	free(adapter->mta, M_DEVBUF);
   3305 
   3306 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3307 
   3308 	return (0);
   3309 } /* ixgbe_detach */
   3310 
   3311 /************************************************************************
   3312  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3313  *
   3314  *   Prepare the adapter/port for LPLU and/or WoL
   3315  ************************************************************************/
   3316 static int
   3317 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3318 {
   3319 	struct ixgbe_hw *hw = &adapter->hw;
   3320 	device_t        dev = adapter->dev;
   3321 	s32             error = 0;
   3322 
   3323 	KASSERT(mutex_owned(&adapter->core_mtx));
   3324 
   3325 	/* Limit power management flow to X550EM baseT */
   3326 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3327 	    hw->phy.ops.enter_lplu) {
   3328 		/* X550EM baseT adapters need a special LPLU flow */
   3329 		hw->phy.reset_disable = true;
   3330 		ixgbe_stop(adapter);
   3331 		error = hw->phy.ops.enter_lplu(hw);
   3332 		if (error)
   3333 			device_printf(dev,
   3334 			    "Error entering LPLU: %d\n", error);
   3335 		hw->phy.reset_disable = false;
   3336 	} else {
   3337 		/* Just stop for other adapters */
   3338 		ixgbe_stop(adapter);
   3339 	}
   3340 
   3341 	if (!hw->wol_enabled) {
   3342 		ixgbe_set_phy_power(hw, FALSE);
   3343 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3344 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3345 	} else {
   3346 		/* Turn off support for APM wakeup. (Using ACPI instead) */
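		/* (~(u32)2 clears GRC bit 1, the APM enable bit) */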
   3347 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3348 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3349 
   3350 		/*
   3351 		 * Clear Wake Up Status register to prevent any previous wakeup
   3352 		 * events from waking us up immediately after we suspend.
   3353 		 */
   3354 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3355 
   3356 		/*
   3357 		 * Program the Wakeup Filter Control register with user filter
   3358 		 * settings
   3359 		 */
   3360 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3361 
   3362 		/* Enable wakeups and power management in Wakeup Control */
   3363 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3364 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3365 
   3366 	}
   3367 
   3368 	return error;
   3369 } /* ixgbe_setup_low_power_mode */
   3370 
   3371 /************************************************************************
   3372  * ixgbe_shutdown - Shutdown entry point
   3373  ************************************************************************/
   3374 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3375 static int
   3376 ixgbe_shutdown(device_t dev)
   3377 {
   3378 	struct adapter *adapter = device_private(dev);
   3379 	int error = 0;
   3380 
   3381 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3382 
   3383 	IXGBE_CORE_LOCK(adapter);
   3384 	error = ixgbe_setup_low_power_mode(adapter);
   3385 	IXGBE_CORE_UNLOCK(adapter);
   3386 
   3387 	return (error);
   3388 } /* ixgbe_shutdown */
   3389 #endif
   3390 
   3391 /************************************************************************
   3392  * ixgbe_suspend
   3393  *
   3394  *   From D0 to D3
   3395  ************************************************************************/
   3396 static bool
   3397 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3398 {
   3399 	struct adapter *adapter = device_private(dev);
   3400 	int            error = 0;
   3401 
   3402 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3403 
   3404 	IXGBE_CORE_LOCK(adapter);
   3405 
   3406 	error = ixgbe_setup_low_power_mode(adapter);
   3407 
   3408 	IXGBE_CORE_UNLOCK(adapter);
   3409 
   3410 	return (error == 0);
   3411 } /* ixgbe_suspend */
   3412 
   3413 /************************************************************************
   3414  * ixgbe_resume
   3415  *
   3416  *   From D3 to D0
   3417  ************************************************************************/
   3418 static bool
   3419 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3420 {
   3421 	struct adapter  *adapter = device_private(dev);
   3422 	struct ifnet    *ifp = adapter->ifp;
   3423 	struct ixgbe_hw *hw = &adapter->hw;
   3424 	u32             wus;
   3425 
   3426 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3427 
   3428 	IXGBE_CORE_LOCK(adapter);
   3429 
   3430 	/* Read & clear WUS register */
   3431 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3432 	if (wus)
   3433 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3434 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3435 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3436 	/* And clear WUFC until next low-power transition */
   3437 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3438 
   3439 	/*
   3440 	 * Required after D3->D0 transition;
   3441 	 * will re-advertise all previous advertised speeds
   3442 	 */
   3443 	if (ifp->if_flags & IFF_UP)
   3444 		ixgbe_init_locked(adapter);
   3445 
   3446 	IXGBE_CORE_UNLOCK(adapter);
   3447 
   3448 	return true;
   3449 } /* ixgbe_resume */
   3450 
   3451 /*
   3452  * Set the various hardware offload abilities.
   3453  *
   3454  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3455  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3456  * mbuf offload flags the driver will understand.
   3457  */
   3458 static void
   3459 ixgbe_set_if_hwassist(struct adapter *adapter)
   3460 {
   3461 	/* XXX */
   3462 }
   3463 
   3464 /************************************************************************
   3465  * ixgbe_init_locked - Init entry point
   3466  *
   3467  *   Used in two ways: It is used by the stack as an init
   3468  *   entry point in network interface structure. It is also
   3469  *   used by the driver as a hw/sw initialization routine to
   3470  *   get to a consistent state.
   3471  *
   3472  *   return 0 on success, positive on failure
   3473  ************************************************************************/
   3474 static void
   3475 ixgbe_init_locked(struct adapter *adapter)
   3476 {
   3477 	struct ifnet   *ifp = adapter->ifp;
   3478 	device_t 	dev = adapter->dev;
   3479 	struct ixgbe_hw *hw = &adapter->hw;
   3480 	struct tx_ring  *txr;
   3481 	struct rx_ring  *rxr;
   3482 	u32		txdctl, mhadd;
   3483 	u32		rxdctl, rxctrl;
   3484 	u32             ctrl_ext;
   3485 	int             err = 0;
   3486 
   3487 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3488 
   3489 	KASSERT(mutex_owned(&adapter->core_mtx));
   3490 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3491 
   3492 	hw->adapter_stopped = FALSE;
   3493 	ixgbe_stop_adapter(hw);
   3494         callout_stop(&adapter->timer);
   3495 
   3496 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3497 	adapter->max_frame_size =
   3498 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3499 
   3500 	/* Queue indices may change with IOV mode */
   3501 	ixgbe_align_all_queue_indices(adapter);
   3502 
   3503 	/* reprogram the RAR[0] in case user changed it. */
   3504 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3505 
   3506 	/* Get the latest mac address, User can use a LAA */
   3507 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3508 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3509 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3510 	hw->addr_ctrl.rar_used_count = 1;
   3511 
   3512 	/* Set hardware offload abilities from ifnet flags */
   3513 	ixgbe_set_if_hwassist(adapter);
   3514 
   3515 	/* Prepare transmit descriptors and buffers */
   3516 	if (ixgbe_setup_transmit_structures(adapter)) {
   3517 		device_printf(dev, "Could not setup transmit structures\n");
   3518 		ixgbe_stop(adapter);
   3519 		return;
   3520 	}
   3521 
   3522 	ixgbe_init_hw(hw);
   3523 	ixgbe_initialize_iov(adapter);
   3524 	ixgbe_initialize_transmit_units(adapter);
   3525 
   3526 	/* Setup Multicast table */
   3527 	ixgbe_set_multi(adapter);
   3528 
   3529 	/* Determine the correct mbuf pool, based on frame size */
   3530 	if (adapter->max_frame_size <= MCLBYTES)
   3531 		adapter->rx_mbuf_sz = MCLBYTES;
   3532 	else
   3533 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3534 
   3535 	/* Prepare receive descriptors and buffers */
   3536 	if (ixgbe_setup_receive_structures(adapter)) {
   3537 		device_printf(dev, "Could not setup receive structures\n");
   3538 		ixgbe_stop(adapter);
   3539 		return;
   3540 	}
   3541 
   3542 	/* Configure RX settings */
   3543 	ixgbe_initialize_receive_units(adapter);
   3544 
   3545 	/* Enable SDP & MSI-X interrupts based on adapter */
   3546 	ixgbe_config_gpie(adapter);
   3547 
   3548 	/* Set MTU size */
   3549 	if (ifp->if_mtu > ETHERMTU) {
   3550 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3551 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3552 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3553 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3554 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3555 	}
   3556 
   3557 	/* Now enable all the queues */
   3558 	for (int i = 0; i < adapter->num_queues; i++) {
   3559 		txr = &adapter->tx_rings[i];
   3560 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3561 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3562 		/* Set WTHRESH to 8, burst writeback */
   3563 		txdctl |= (8 << 16);
   3564 		/*
   3565 		 * When the internal queue falls below PTHRESH (32),
   3566 		 * start prefetching as long as there are at least
   3567 		 * HTHRESH (1) buffers ready. The values are taken
   3568 		 * from the Intel linux driver 3.8.21.
   3569 		 * Prefetching enables tx line rate even with 1 queue.
   3570 		 */
   3571 		txdctl |= (32 << 0) | (1 << 8);
   3572 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3573 	}
   3574 
   3575 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3576 		rxr = &adapter->rx_rings[i];
   3577 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3578 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3579 			/*
   3580 			 * PTHRESH = 21
   3581 			 * HTHRESH = 4
   3582 			 * WTHRESH = 8
   3583 			 */
   3584 			rxdctl &= ~0x3FFFFF;
   3585 			rxdctl |= 0x080420;
   3586 		}
   3587 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3588 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3589 		for (; j < 10; j++) {
   3590 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3591 			    IXGBE_RXDCTL_ENABLE)
   3592 				break;
   3593 			else
   3594 				msec_delay(1);
   3595 		}
   3596 		wmb();
   3597 
   3598 		/*
   3599 		 * In netmap mode, we must preserve the buffers made
   3600 		 * available to userspace before the if_init()
   3601 		 * (this is true by default on the TX side, because
   3602 		 * init makes all buffers available to userspace).
   3603 		 *
   3604 		 * netmap_reset() and the device specific routines
   3605 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3606 		 * buffers at the end of the NIC ring, so here we
   3607 		 * must set the RDT (tail) register to make sure
   3608 		 * they are not overwritten.
   3609 		 *
   3610 		 * In this driver the NIC ring starts at RDH = 0,
   3611 		 * RDT points to the last slot available for reception (?),
   3612 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3613 		 */
   3614 #ifdef DEV_NETMAP
   3615 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3616 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3617 			struct netmap_adapter *na = NA(adapter->ifp);
   3618 			struct netmap_kring *kring = &na->rx_rings[i];
   3619 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3620 
   3621 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3622 		} else
   3623 #endif /* DEV_NETMAP */
   3624 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3625 			    adapter->num_rx_desc - 1);
   3626 	}
   3627 
   3628 	/* Enable Receive engine */
   3629 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3630 	if (hw->mac.type == ixgbe_mac_82598EB)
   3631 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3632 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3633 	ixgbe_enable_rx_dma(hw, rxctrl);
   3634 
   3635 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3636 
   3637 	/* Set up MSI-X routing */
   3638 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3639 		ixgbe_configure_ivars(adapter);
   3640 		/* Set up auto-mask */
   3641 		if (hw->mac.type == ixgbe_mac_82598EB)
   3642 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3643 		else {
   3644 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3645 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3646 		}
   3647 	} else {  /* Simple settings for Legacy/MSI */
   3648 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3649 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3650 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3651 	}
   3652 
   3653 	ixgbe_init_fdir(adapter);
   3654 
   3655 	/*
   3656 	 * Check on any SFP devices that
   3657 	 * need to be kick-started
   3658 	 */
   3659 	if (hw->phy.type == ixgbe_phy_none) {
   3660 		err = hw->phy.ops.identify(hw);
   3661 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   3662                 	device_printf(dev,
   3663 			    "Unsupported SFP+ module type was detected.\n");
   3664 			return;
   3665         	}
   3666 	}
   3667 
   3668 	/* Set moderation on the Link interrupt */
   3669 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3670 
   3671 	/* Config/Enable Link */
   3672 	ixgbe_config_link(adapter);
   3673 
   3674 	/* Hardware Packet Buffer & Flow Control setup */
   3675 	ixgbe_config_delay_values(adapter);
   3676 
   3677 	/* Initialize the FC settings */
   3678 	ixgbe_start_hw(hw);
   3679 
   3680 	/* Set up VLAN support and filter */
   3681 	ixgbe_setup_vlan_hw_support(adapter);
   3682 
   3683 	/* Setup DMA Coalescing */
   3684 	ixgbe_config_dmac(adapter);
   3685 
   3686 	/* And now turn on interrupts */
   3687 	ixgbe_enable_intr(adapter);
   3688 
   3689 	/* Enable the use of the MBX by the VF's */
   3690 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3691 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3692 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3693 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3694 	}
   3695 
   3696 	/* Now inform the stack we're ready */
   3697 	ifp->if_flags |= IFF_RUNNING;
   3698 
   3699 	return;
   3700 } /* ixgbe_init_locked */
   3701 
   3702 /************************************************************************
   3703  * ixgbe_init
   3704  ************************************************************************/
   3705 static int
   3706 ixgbe_init(struct ifnet *ifp)
   3707 {
   3708 	struct adapter *adapter = ifp->if_softc;
   3709 
   3710 	IXGBE_CORE_LOCK(adapter);
   3711 	ixgbe_init_locked(adapter);
   3712 	IXGBE_CORE_UNLOCK(adapter);
   3713 
   3714 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3715 } /* ixgbe_init */
   3716 
   3717 /************************************************************************
   3718  * ixgbe_set_ivar
   3719  *
   3720  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3721  *     (yes this is all very magic and confusing :)
   3722  *    - entry is the register array entry
   3723  *    - vector is the MSI-X vector for this queue
   3724  *    - type is RX/TX/MISC
   3725  ************************************************************************/
   3726 static void
   3727 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3728 {
   3729 	struct ixgbe_hw *hw = &adapter->hw;
   3730 	u32 ivar, index;
   3731 
   3732 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3733 
   3734 	switch (hw->mac.type) {
   3735 
   3736 	case ixgbe_mac_82598EB:
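		/*
		 * 82598: four 8-bit vector entries per IVAR register, with
		 * TX causes offset 64 entries above the corresponding RX
		 * causes (hence type * 64 and the byte-lane shift below).
		 */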
   3737 		if (type == -1)
   3738 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3739 		else
   3740 			entry += (type * 64);
   3741 		index = (entry >> 2) & 0x1F;
   3742 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3743 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3744 		ivar |= (vector << (8 * (entry & 0x3)));
   3745 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3746 		break;
   3747 
   3748 	case ixgbe_mac_82599EB:
   3749 	case ixgbe_mac_X540:
   3750 	case ixgbe_mac_X550:
   3751 	case ixgbe_mac_X550EM_x:
   3752 	case ixgbe_mac_X550EM_a:
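		/*
		 * 82599 and later: each 32-bit IVAR register holds two
		 * queue entries (entry & 1 selects the half); within an
		 * entry the low byte carries the RX vector and the high
		 * byte the TX vector.
		 */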
   3753 		if (type == -1) { /* MISC IVAR */
   3754 			index = (entry & 1) * 8;
   3755 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3756 			ivar &= ~(0xFF << index);
   3757 			ivar |= (vector << index);
   3758 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3759 		} else {	/* RX/TX IVARS */
   3760 			index = (16 * (entry & 1)) + (8 * type);
   3761 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3762 			ivar &= ~(0xFF << index);
   3763 			ivar |= (vector << index);
   3764 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   3765 		}
		break;
   3766 
   3767 	default:
   3768 		break;
   3769 	}
   3770 } /* ixgbe_set_ivar */
   3771 
   3772 /************************************************************************
   3773  * ixgbe_configure_ivars
   3774  ************************************************************************/
   3775 static void
   3776 ixgbe_configure_ivars(struct adapter *adapter)
   3777 {
   3778 	struct ix_queue *que = adapter->queues;
   3779 	u32             newitr;
   3780 
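	/*
	 * Convert the requested maximum rate into the EITR interval
	 * format: (4000000 / rate) is the 2us-unit interval already
	 * shifted into bits 11:3, matching the sysctl handler above.
	 */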
   3781 	if (ixgbe_max_interrupt_rate > 0)
   3782 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3783 	else {
   3784 		/*
   3785 		 * Disable DMA coalescing if interrupt moderation is
   3786 		 * disabled.
   3787 		 */
   3788 		adapter->dmac = 0;
   3789 		newitr = 0;
   3790 	}
   3791 
   3792         for (int i = 0; i < adapter->num_queues; i++, que++) {
   3793 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3794 		struct tx_ring *txr = &adapter->tx_rings[i];
   3795 		/* First the RX queue entry */
   3796                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3797 		/* ... and the TX */
   3798 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3799 		/* Set an Initial EITR value */
   3800 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3801 	}
   3802 
   3803 	/* For the Link interrupt */
   3804         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3805 } /* ixgbe_configure_ivars */
   3806 
   3807 /************************************************************************
   3808  * ixgbe_config_gpie
   3809  ************************************************************************/
   3810 static void
   3811 ixgbe_config_gpie(struct adapter *adapter)
   3812 {
   3813 	struct ixgbe_hw *hw = &adapter->hw;
   3814 	u32             gpie;
   3815 
   3816 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3817 
   3818 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3819 		/* Enable Enhanced MSI-X mode */
   3820 		gpie |= IXGBE_GPIE_MSIX_MODE
   3821 		     |  IXGBE_GPIE_EIAME
   3822 		     |  IXGBE_GPIE_PBA_SUPPORT
   3823 		     |  IXGBE_GPIE_OCD;
   3824 	}
   3825 
   3826 	/* Fan Failure Interrupt */
   3827 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3828 		gpie |= IXGBE_SDP1_GPIEN;
   3829 
   3830 	/* Thermal Sensor Interrupt */
   3831 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3832 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3833 
   3834 	/* Link detection */
   3835 	switch (hw->mac.type) {
   3836 	case ixgbe_mac_82599EB:
   3837 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3838 		break;
   3839 	case ixgbe_mac_X550EM_x:
   3840 	case ixgbe_mac_X550EM_a:
   3841 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3842 		break;
   3843 	default:
   3844 		break;
   3845 	}
   3846 
   3847 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3848 
   3849 	return;
   3850 } /* ixgbe_config_gpie */
   3851 
   3852 /************************************************************************
   3853  * ixgbe_config_delay_values
   3854  *
   3855  *   Requires adapter->max_frame_size to be set.
   3856  ************************************************************************/
   3857 static void
   3858 ixgbe_config_delay_values(struct adapter *adapter)
   3859 {
   3860 	struct ixgbe_hw *hw = &adapter->hw;
   3861 	u32             rxpb, frame, size, tmp;
   3862 
   3863 	frame = adapter->max_frame_size;
   3864 
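	/*
	 * The DV macros appear to yield delay values in bits, which
	 * IXGBE_BT2KB rounds up to kilobytes so they can be subtracted
	 * from the RXPBSIZE-derived packet buffer size (also in KB).
	 */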
   3865 	/* Calculate High Water */
   3866 	switch (hw->mac.type) {
   3867 	case ixgbe_mac_X540:
   3868 	case ixgbe_mac_X550:
   3869 	case ixgbe_mac_X550EM_x:
   3870 	case ixgbe_mac_X550EM_a:
   3871 		tmp = IXGBE_DV_X540(frame, frame);
   3872 		break;
   3873 	default:
   3874 		tmp = IXGBE_DV(frame, frame);
   3875 		break;
   3876 	}
   3877 	size = IXGBE_BT2KB(tmp);
   3878 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   3879 	hw->fc.high_water[0] = rxpb - size;
   3880 
   3881 	/* Now calculate Low Water */
   3882 	switch (hw->mac.type) {
   3883 	case ixgbe_mac_X540:
   3884 	case ixgbe_mac_X550:
   3885 	case ixgbe_mac_X550EM_x:
   3886 	case ixgbe_mac_X550EM_a:
   3887 		tmp = IXGBE_LOW_DV_X540(frame);
   3888 		break;
   3889 	default:
   3890 		tmp = IXGBE_LOW_DV(frame);
   3891 		break;
   3892 	}
   3893 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3894 
   3895 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3896 	hw->fc.send_xon = TRUE;
   3897 } /* ixgbe_config_delay_values */
   3898 
   3899 /************************************************************************
   3900  * ixgbe_set_multi - Multicast Update
   3901  *
   3902  *   Called whenever multicast address list is updated.
   3903  ************************************************************************/
   3904 static void
   3905 ixgbe_set_multi(struct adapter *adapter)
   3906 {
   3907 	struct ixgbe_mc_addr	*mta;
   3908 	struct ifnet		*ifp = adapter->ifp;
   3909 	u8			*update_ptr;
   3910 	int			mcnt = 0;
   3911 	u32			fctrl;
   3912 	struct ethercom		*ec = &adapter->osdep.ec;
   3913 	struct ether_multi	*enm;
   3914 	struct ether_multistep	step;
   3915 
   3916 	KASSERT(mutex_owned(&adapter->core_mtx));
   3917 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   3918 
   3919 	mta = adapter->mta;
   3920 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   3921 
   3922 	ifp->if_flags &= ~IFF_ALLMULTI;
   3923 	ETHER_LOCK(ec);
   3924 	ETHER_FIRST_MULTI(step, ec, enm);
   3925 	while (enm != NULL) {
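		/*
		 * A multicast range (addrlo != addrhi) cannot be expressed
		 * in the MTA, so fall back to all-multicast, just as when
		 * the table would overflow.
		 */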
   3926 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   3927 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   3928 			ETHER_ADDR_LEN) != 0)) {
   3929 			ifp->if_flags |= IFF_ALLMULTI;
   3930 			break;
   3931 		}
   3932 		bcopy(enm->enm_addrlo,
   3933 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   3934 		mta[mcnt].vmdq = adapter->pool;
   3935 		mcnt++;
   3936 		ETHER_NEXT_MULTI(step, enm);
   3937 	}
   3938 	ETHER_UNLOCK(ec);
   3939 
   3940 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3941 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3942 	if (ifp->if_flags & IFF_PROMISC)
   3943 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3944 	else if (ifp->if_flags & IFF_ALLMULTI) {
   3945 		fctrl |= IXGBE_FCTRL_MPE;
   3946 	}
   3947 
   3948 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   3949 
   3950 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   3951 		update_ptr = (u8 *)mta;
   3952 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   3953 		    ixgbe_mc_array_itr, TRUE);
   3954 	}
   3955 
   3956 	return;
   3957 } /* ixgbe_set_multi */
   3958 
   3959 /************************************************************************
   3960  * ixgbe_mc_array_itr
   3961  *
   3962  *   An iterator function needed by the multicast shared code.
   3963  *   It feeds the shared code routine the addresses in the
   3964  *   array of ixgbe_set_multi() one by one.
   3965  ************************************************************************/
   3966 static u8 *
   3967 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   3968 {
   3969 	struct ixgbe_mc_addr *mta;
   3970 
   3971 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   3972 	*vmdq = mta->vmdq;
   3973 
   3974 	*update_ptr = (u8*)(mta + 1);
   3975 
   3976 	return (mta->addr);
   3977 } /* ixgbe_mc_array_itr */
   3978 
   3979 /************************************************************************
   3980  * ixgbe_local_timer - Timer routine
   3981  *
   3982  *   Checks for link status, updates statistics,
   3983  *   and runs the watchdog check.
   3984  ************************************************************************/
   3985 static void
   3986 ixgbe_local_timer(void *arg)
   3987 {
   3988 	struct adapter *adapter = arg;
   3989 
   3990 	IXGBE_CORE_LOCK(adapter);
   3991 	ixgbe_local_timer1(adapter);
   3992 	IXGBE_CORE_UNLOCK(adapter);
   3993 }
   3994 
   3995 static void
   3996 ixgbe_local_timer1(void *arg)
   3997 {
   3998 	struct adapter	*adapter = arg;
   3999 	device_t	dev = adapter->dev;
   4000 	struct ix_queue *que = adapter->queues;
   4001 	u64		queues = 0;
   4002 	int		hung = 0;
   4003 
   4004 	KASSERT(mutex_owned(&adapter->core_mtx));
   4005 
   4006 	/* Check for pluggable optics */
   4007 	if (adapter->sfp_probe)
   4008 		if (!ixgbe_sfp_probe(adapter))
   4009 			goto out; /* Nothing to do */
   4010 
   4011 	ixgbe_update_link_status(adapter);
   4012 	ixgbe_update_stats_counters(adapter);
   4013 
   4014 	/*
   4015 	 * Check the TX queues status
   4016 	 *      - mark hung queues so we don't schedule on them
   4017 	 *      - watchdog only if all queues show hung
   4018 	 */
   4019 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4020 		/* Keep track of queues with work for soft irq */
   4021 		if (que->txr->busy)
   4022 			queues |= ((u64)1 << que->me);
   4023 		/*
   4024 		 * Each time txeof runs without cleaning while there
   4025 		 * are uncleaned descriptors, it increments busy. If
   4026 		 * we reach the MAX we declare it hung.
   4027 		 */
   4028 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4029 			++hung;
   4030 			/* Mark the queue as inactive */
   4031 			adapter->active_queues &= ~((u64)1 << que->me);
   4032 			continue;
   4033 		} else {
   4034 			/* Check if we've come back from hung */
   4035 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4036 				adapter->active_queues |= ((u64)1 << que->me);
   4037 		}
   4038 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4039 			device_printf(dev,
   4040 			    "Warning: queue %d appears to be hung!\n", i);
   4041 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4042 			++hung;
   4043 		}
   4044 	}
   4045 
   4046 	/* Only truly watchdog if all queues show hung */
   4047 	if (hung == adapter->num_queues)
   4048 		goto watchdog;
   4049 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4050 		ixgbe_rearm_queues(adapter, queues);
   4051 	}
   4052 
   4053 out:
   4054 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4055 	return;
   4056 
   4057 watchdog:
   4058 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4059 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4060 	adapter->watchdog_events.ev_count++;
   4061 	ixgbe_init_locked(adapter);
   4062 } /* ixgbe_local_timer */
   4063 
   4064 /************************************************************************
   4065  * ixgbe_sfp_probe
   4066  *
   4067  *   Determine if a port had optics inserted.
   4068  ************************************************************************/
   4069 static bool
   4070 ixgbe_sfp_probe(struct adapter *adapter)
   4071 {
   4072 	struct ixgbe_hw	*hw = &adapter->hw;
   4073 	device_t	dev = adapter->dev;
   4074 	bool		result = FALSE;
   4075 
   4076 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4077 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4078 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4079 		if (ret)
   4080 			goto out;
   4081 		ret = hw->phy.ops.reset(hw);
   4082 		adapter->sfp_probe = FALSE;
   4083 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4084 			device_printf(dev, "Unsupported SFP+ module detected!\n");
   4085 			device_printf(dev,
   4086 			    "Reload driver with supported module.\n");
   4087                         goto out;
   4088 		} else
   4089 			device_printf(dev, "SFP+ module detected!\n");
   4090 		/* We now have supported optics */
   4091 		result = TRUE;
   4092 	}
   4093 out:
   4094 
   4095 	return (result);
   4096 } /* ixgbe_sfp_probe */
   4097 
   4098 /************************************************************************
   4099  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4100  ************************************************************************/
   4101 static void
   4102 ixgbe_handle_mod(void *context)
   4103 {
   4104 	struct adapter  *adapter = context;
   4105 	struct ixgbe_hw *hw = &adapter->hw;
   4106 	device_t	dev = adapter->dev;
   4107 	u32             err, cage_full = 0;
   4108 
   4109 	if (adapter->hw.need_crosstalk_fix) {
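		/*
		 * With the crosstalk errata workaround in effect, only act
		 * on the event if the SFP cage presence pin reports that a
		 * module is actually seated.
		 */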
   4110 		switch (hw->mac.type) {
   4111 		case ixgbe_mac_82599EB:
   4112 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4113 			    IXGBE_ESDP_SDP2;
   4114 			break;
   4115 		case ixgbe_mac_X550EM_x:
   4116 		case ixgbe_mac_X550EM_a:
   4117 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4118 			    IXGBE_ESDP_SDP0;
   4119 			break;
   4120 		default:
   4121 			break;
   4122 		}
   4123 
   4124 		if (!cage_full)
   4125 			return;
   4126 	}
   4127 
   4128 	err = hw->phy.ops.identify_sfp(hw);
   4129 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4130 		device_printf(dev,
   4131 		    "Unsupported SFP+ module type was detected.\n");
   4132 		return;
   4133 	}
   4134 
   4135 	err = hw->mac.ops.setup_sfp(hw);
   4136 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4137 		device_printf(dev,
   4138 		    "Setup failure - unsupported SFP+ module type.\n");
   4139 		return;
   4140 	}
   4141 	softint_schedule(adapter->msf_si);
   4142 } /* ixgbe_handle_mod */
   4143 
   4144 
   4145 /************************************************************************
   4146  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4147  ************************************************************************/
   4148 static void
   4149 ixgbe_handle_msf(void *context)
   4150 {
   4151 	struct adapter  *adapter = context;
   4152 	struct ixgbe_hw *hw = &adapter->hw;
   4153 	u32             autoneg;
   4154 	bool            negotiate;
   4155 
   4156 	/* ixgbe_get_supported_physical_layer() will call hw->phy.ops.identify_sfp() */
   4157 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4158 
   4159 	autoneg = hw->phy.autoneg_advertised;
   4160 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4161 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4162 	else
   4163 		negotiate = 0;
   4164 	if (hw->mac.ops.setup_link)
   4165 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4166 
   4167 	/* Adjust media types shown in ifconfig */
   4168 	ifmedia_removeall(&adapter->media);
   4169 	ixgbe_add_media_types(adapter);
   4170 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4171 } /* ixgbe_handle_msf */
   4172 
   4173 /************************************************************************
   4174  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4175  ************************************************************************/
   4176 static void
   4177 ixgbe_handle_phy(void *context)
   4178 {
   4179 	struct adapter  *adapter = context;
   4180 	struct ixgbe_hw *hw = &adapter->hw;
   4181 	int error;
   4182 
   4183 	error = hw->phy.ops.handle_lasi(hw);
   4184 	if (error == IXGBE_ERR_OVERTEMP)
   4185 		device_printf(adapter->dev,
   4186 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4187 		    "PHY will downshift to lower power state!\n");
   4188 	else if (error)
   4189 		device_printf(adapter->dev,
   4190 		    "Error handling LASI interrupt: %d\n", error);
   4191 } /* ixgbe_handle_phy */
   4192 
   4193 static void
   4194 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4195 {
   4196 	struct adapter *adapter = ifp->if_softc;
   4197 
   4198 	IXGBE_CORE_LOCK(adapter);
   4199 	ixgbe_stop(adapter);
   4200 	IXGBE_CORE_UNLOCK(adapter);
   4201 }
   4202 
   4203 /************************************************************************
   4204  * ixgbe_stop - Stop the hardware
   4205  *
   4206  *   Disables all traffic on the adapter by issuing a
   4207  *   global reset on the MAC and deallocates TX/RX buffers.
   4208  ************************************************************************/
   4209 static void
   4210 ixgbe_stop(void *arg)
   4211 {
   4212 	struct ifnet    *ifp;
   4213 	struct adapter  *adapter = arg;
   4214 	struct ixgbe_hw *hw = &adapter->hw;
   4215 
   4216 	ifp = adapter->ifp;
   4217 
   4218 	KASSERT(mutex_owned(&adapter->core_mtx));
   4219 
   4220 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4221 	ixgbe_disable_intr(adapter);
   4222 	callout_stop(&adapter->timer);
   4223 
   4224 	/* Let the stack know...*/
   4225 	ifp->if_flags &= ~IFF_RUNNING;
   4226 
   4227 	ixgbe_reset_hw(hw);
   4228 	hw->adapter_stopped = FALSE;
   4229 	ixgbe_stop_adapter(hw);
   4230 	if (hw->mac.type == ixgbe_mac_82599EB)
   4231 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4232 	/* Turn off the laser - noop with no optics */
   4233 	ixgbe_disable_tx_laser(hw);
   4234 
   4235 	/* Update the stack */
   4236 	adapter->link_up = FALSE;
   4237 	ixgbe_update_link_status(adapter);
   4238 
   4239 	/* reprogram the RAR[0] in case user changed it. */
   4240 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4241 
   4242 	return;
   4243 } /* ixgbe_stop */
   4244 
   4245 /************************************************************************
   4246  * ixgbe_update_link_status - Update OS on link state
   4247  *
   4248  * Note: Only updates the OS on the cached link state.
   4249  *       The real check of the hardware only happens with
   4250  *       a link interrupt.
   4251  ************************************************************************/
   4252 static void
   4253 ixgbe_update_link_status(struct adapter *adapter)
   4254 {
   4255 	struct ifnet	*ifp = adapter->ifp;
   4256 	device_t        dev = adapter->dev;
   4257 	struct ixgbe_hw *hw = &adapter->hw;
   4258 
   4259 	if (adapter->link_up) {
   4260 		if (adapter->link_active == FALSE) {
   4261 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4262 				/*
   4263 				 *  Discard count for both MAC Local Fault and
   4264 				 * Remote Fault because those registers are
   4265 				 * valid only when the link speed is up and
   4266 				 * 10Gbps.
   4267 				 */
   4268 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4269 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4270 			}
   4271 
   4272 			if (bootverbose) {
   4273 				const char *bpsmsg;
   4274 
   4275 				switch (adapter->link_speed) {
   4276 				case IXGBE_LINK_SPEED_10GB_FULL:
   4277 					bpsmsg = "10 Gbps";
   4278 					break;
   4279 				case IXGBE_LINK_SPEED_5GB_FULL:
   4280 					bpsmsg = "5 Gbps";
   4281 					break;
   4282 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4283 					bpsmsg = "2.5 Gbps";
   4284 					break;
   4285 				case IXGBE_LINK_SPEED_1GB_FULL:
   4286 					bpsmsg = "1 Gbps";
   4287 					break;
   4288 				case IXGBE_LINK_SPEED_100_FULL:
   4289 					bpsmsg = "100 Mbps";
   4290 					break;
   4291 				case IXGBE_LINK_SPEED_10_FULL:
   4292 					bpsmsg = "10 Mbps";
   4293 					break;
   4294 				default:
   4295 					bpsmsg = "unknown speed";
   4296 					break;
   4297 				}
   4298 				device_printf(dev, "Link is up %s %s\n",
   4299 				    bpsmsg, "Full Duplex");
   4300 			}
   4301 			adapter->link_active = TRUE;
   4302 			/* Update any Flow Control changes */
   4303 			ixgbe_fc_enable(&adapter->hw);
   4304 			/* Update DMA coalescing config */
   4305 			ixgbe_config_dmac(adapter);
   4306 			if_link_state_change(ifp, LINK_STATE_UP);
   4307 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4308 				ixgbe_ping_all_vfs(adapter);
   4309 		}
   4310 	} else { /* Link down */
   4311 		if (adapter->link_active == TRUE) {
   4312 			if (bootverbose)
   4313 				device_printf(dev, "Link is Down\n");
   4314 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4315 			adapter->link_active = FALSE;
   4316 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4317 				ixgbe_ping_all_vfs(adapter);
   4318 		}
   4319 	}
   4320 
   4321 	return;
   4322 } /* ixgbe_update_link_status */
   4323 
   4324 /************************************************************************
   4325  * ixgbe_config_dmac - Configure DMA Coalescing
   4326  ************************************************************************/
   4327 static void
   4328 ixgbe_config_dmac(struct adapter *adapter)
   4329 {
   4330 	struct ixgbe_hw *hw = &adapter->hw;
   4331 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4332 
   4333 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4334 		return;
   4335 
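         	/*
         	 * The XOR below is change detection: a ^ b is nonzero exactly
         	 * when a != b, so the hardware is reprogrammed only when the
         	 * watchdog value or the link speed has actually changed.
         	 */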
   4336 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4337 	    dcfg->link_speed ^ adapter->link_speed) {
   4338 		dcfg->watchdog_timer = adapter->dmac;
   4339 		dcfg->fcoe_en = false;
   4340 		dcfg->link_speed = adapter->link_speed;
   4341 		dcfg->num_tcs = 1;
   4342 
   4343 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4344 		    dcfg->watchdog_timer, dcfg->link_speed);
   4345 
   4346 		hw->mac.ops.dmac_config(hw);
   4347 	}
   4348 } /* ixgbe_config_dmac */
   4349 
   4350 /************************************************************************
   4351  * ixgbe_enable_intr
   4352  ************************************************************************/
   4353 static void
   4354 ixgbe_enable_intr(struct adapter *adapter)
   4355 {
   4356 	struct ixgbe_hw	*hw = &adapter->hw;
   4357 	struct ix_queue	*que = adapter->queues;
   4358 	u32		mask, fwsm;
   4359 
   4360 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4361 
   4362 	switch (adapter->hw.mac.type) {
   4363 	case ixgbe_mac_82599EB:
   4364 		mask |= IXGBE_EIMS_ECC;
   4365 		/* Temperature sensor on some adapters */
   4366 		mask |= IXGBE_EIMS_GPI_SDP0;
   4367 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4368 		mask |= IXGBE_EIMS_GPI_SDP1;
   4369 		mask |= IXGBE_EIMS_GPI_SDP2;
   4370 		break;
   4371 	case ixgbe_mac_X540:
   4372 		/* Detect if Thermal Sensor is enabled */
   4373 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4374 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4375 			mask |= IXGBE_EIMS_TS;
   4376 		mask |= IXGBE_EIMS_ECC;
   4377 		break;
   4378 	case ixgbe_mac_X550:
   4379 		/* MAC thermal sensor is automatically enabled */
   4380 		mask |= IXGBE_EIMS_TS;
   4381 		mask |= IXGBE_EIMS_ECC;
   4382 		break;
   4383 	case ixgbe_mac_X550EM_x:
   4384 	case ixgbe_mac_X550EM_a:
   4385 		/* Some devices use SDP0 for important information */
   4386 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4387 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4388 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4389 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4390 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4391 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4392 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4393 		mask |= IXGBE_EIMS_ECC;
   4394 		break;
   4395 	default:
   4396 		break;
   4397 	}
   4398 
   4399 	/* Enable Fan Failure detection */
   4400 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4401 		mask |= IXGBE_EIMS_GPI_SDP1;
   4402 	/* Enable SR-IOV */
   4403 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4404 		mask |= IXGBE_EIMS_MAILBOX;
   4405 	/* Enable Flow Director */
   4406 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4407 		mask |= IXGBE_EIMS_FLOW_DIR;
   4408 
   4409 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4410 
   4411 	/* With MSI-X we use auto clear */
   4412 	if (adapter->msix_mem) {
   4413 		mask = IXGBE_EIMS_ENABLE_MASK;
   4414 		/* Don't autoclear Link */
   4415 		mask &= ~IXGBE_EIMS_OTHER;
   4416 		mask &= ~IXGBE_EIMS_LSC;
   4417 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4418 			mask &= ~IXGBE_EIMS_MAILBOX;
   4419 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4420 	}
   4421 
   4422 	/*
   4423 	 * Now enable all queues, this is done separately to
   4424 	 * allow for handling the extended (beyond 32) MSI-X
   4425 	 * vectors that can be used by 82599
   4426 	 */
    4427 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4428 		ixgbe_enable_queue(adapter, que->msix);
   4429 
   4430 	IXGBE_WRITE_FLUSH(hw);
   4431 
   4432 	return;
   4433 } /* ixgbe_enable_intr */
   4434 
   4435 /************************************************************************
   4436  * ixgbe_disable_intr
   4437  ************************************************************************/
   4438 static void
   4439 ixgbe_disable_intr(struct adapter *adapter)
   4440 {
   4441 	if (adapter->msix_mem)
   4442 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4443 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4444 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4445 	} else {
   4446 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4447 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4448 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4449 	}
   4450 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4451 
   4452 	return;
   4453 } /* ixgbe_disable_intr */
   4454 
   4455 /************************************************************************
   4456  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4457  ************************************************************************/
   4458 static int
   4459 ixgbe_legacy_irq(void *arg)
   4460 {
   4461 	struct ix_queue *que = arg;
   4462 	struct adapter	*adapter = que->adapter;
   4463 	struct ixgbe_hw	*hw = &adapter->hw;
   4464 	struct ifnet    *ifp = adapter->ifp;
    4465 	struct tx_ring  *txr = adapter->tx_rings;
   4466 	bool		more = false;
   4467 	u32             eicr, eicr_mask;
   4468 
   4469 	/* Silicon errata #26 on 82598 */
   4470 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4471 
   4472 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4473 
   4474 	adapter->stats.pf.legint.ev_count++;
   4475 	++que->irqs.ev_count;
   4476 	if (eicr == 0) {
   4477 		adapter->stats.pf.intzero.ev_count++;
   4478 		if ((ifp->if_flags & IFF_UP) != 0)
   4479 			ixgbe_enable_intr(adapter);
   4480 		return 0;
   4481 	}
   4482 
   4483 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4484 #ifdef __NetBSD__
   4485 		/* Don't run ixgbe_rxeof in interrupt context */
   4486 		more = true;
   4487 #else
   4488 		more = ixgbe_rxeof(que);
   4489 #endif
   4490 
   4491 		IXGBE_TX_LOCK(txr);
   4492 		ixgbe_txeof(txr);
   4493 #ifdef notyet
   4494 		if (!ixgbe_ring_empty(ifp, txr->br))
   4495 			ixgbe_start_locked(ifp, txr);
   4496 #endif
   4497 		IXGBE_TX_UNLOCK(txr);
   4498 	}
   4499 
   4500 	/* Check for fan failure */
   4501 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4502 		ixgbe_check_fan_failure(adapter, eicr, true);
   4503 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4504 	}
   4505 
   4506 	/* Link status change */
   4507 	if (eicr & IXGBE_EICR_LSC)
   4508 		softint_schedule(adapter->link_si);
   4509 
   4510 	if (ixgbe_is_sfp(hw)) {
   4511 		/* Pluggable optics-related interrupt */
   4512 		if (hw->mac.type >= ixgbe_mac_X540)
   4513 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4514 		else
   4515 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4516 
   4517 		if (eicr & eicr_mask) {
   4518 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4519 			softint_schedule(adapter->mod_si);
   4520 		}
   4521 
   4522 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4523 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4524 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4525 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4526 			softint_schedule(adapter->msf_si);
   4527 		}
   4528 	}
   4529 
   4530 	/* External PHY interrupt */
   4531 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4532 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4533 		softint_schedule(adapter->phy_si);
   4534 
   4535 	if (more)
   4536 		softint_schedule(que->que_si);
   4537 	else
   4538 		ixgbe_enable_intr(adapter);
   4539 
   4540 	return 1;
   4541 } /* ixgbe_legacy_irq */
   4542 
   4543 /************************************************************************
   4544  * ixgbe_free_pci_resources
   4545  ************************************************************************/
   4546 static void
   4547 ixgbe_free_pci_resources(struct adapter *adapter)
   4548 {
   4549 	struct ix_queue *que = adapter->queues;
   4550 	int		rid;
   4551 
   4552 	/*
   4553 	 * Release all msix queue resources:
   4554 	 */
   4555 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4556 		if (que->res != NULL)
   4557 			pci_intr_disestablish(adapter->osdep.pc,
   4558 			    adapter->osdep.ihs[i]);
   4559 	}
   4560 
   4561 	/* Clean the Legacy or Link interrupt last */
   4562 	if (adapter->vector) /* we are doing MSIX */
   4563 		rid = adapter->vector;
   4564 	else
   4565 		rid = 0;
   4566 
   4567 	if (adapter->osdep.ihs[rid] != NULL) {
   4568 		pci_intr_disestablish(adapter->osdep.pc,
   4569 		    adapter->osdep.ihs[rid]);
   4570 		adapter->osdep.ihs[rid] = NULL;
   4571 	}
   4572 
   4573 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4574 	    adapter->osdep.nintrs);
   4575 
   4576 	if (adapter->osdep.mem_size != 0) {
   4577 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4578 		    adapter->osdep.mem_bus_space_handle,
   4579 		    adapter->osdep.mem_size);
   4580 	}
   4581 
   4582 	return;
   4583 } /* ixgbe_free_pci_resources */
   4584 
   4585 /************************************************************************
   4586  * ixgbe_set_sysctl_value
   4587  ************************************************************************/
   4588 static void
   4589 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4590     const char *description, int *limit, int value)
   4591 {
    4592 	device_t dev = adapter->dev;
   4593 	struct sysctllog **log;
   4594 	const struct sysctlnode *rnode, *cnode;
   4595 
   4596 	log = &adapter->sysctllog;
   4597 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4598 		aprint_error_dev(dev, "could not create sysctl root\n");
   4599 		return;
   4600 	}
   4601 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4602 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4603 	    name, SYSCTL_DESCR(description),
    4604 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4605 		aprint_error_dev(dev, "could not create sysctl\n");
   4606 	*limit = value;
   4607 } /* ixgbe_set_sysctl_value */
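
         /*
          * Illustrative sketch only, kept under #if 0 so it is never compiled:
          * a hypothetical read/write integer sysctl leaf could be created and
          * seeded with the helper above.  The node name "example_limit", the
          * backing variable and the wrapper function are assumptions for
          * illustration, not part of the driver.
          */
         #if 0
         static int ixgbe_example_limit;	/* hypothetical backing variable */

         static void
         ixgbe_example_sysctl(struct adapter *adapter)
         {
         	ixgbe_set_sysctl_value(adapter, "example_limit",
         	    "example tunable created via ixgbe_set_sysctl_value",
         	    &ixgbe_example_limit, 128);
         }
         #endif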
   4608 
   4609 /************************************************************************
   4610  * ixgbe_sysctl_flowcntl
   4611  *
   4612  *   SYSCTL wrapper around setting Flow Control
   4613  ************************************************************************/
   4614 static int
   4615 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4616 {
   4617 	struct sysctlnode node = *rnode;
   4618 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4619 	int error, fc;
   4620 
   4621 	fc = adapter->hw.fc.current_mode;
   4622 	node.sysctl_data = &fc;
   4623 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4624 	if (error != 0 || newp == NULL)
   4625 		return error;
   4626 
   4627 	/* Don't bother if it's not changed */
   4628 	if (fc == adapter->hw.fc.current_mode)
   4629 		return (0);
   4630 
   4631 	return ixgbe_set_flowcntl(adapter, fc);
   4632 } /* ixgbe_sysctl_flowcntl */
   4633 
   4634 /************************************************************************
   4635  * ixgbe_set_flowcntl - Set flow control
   4636  *
   4637  *   Flow control values:
   4638  *     0 - off
   4639  *     1 - rx pause
   4640  *     2 - tx pause
   4641  *     3 - full
   4642  ************************************************************************/
   4643 static int
   4644 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4645 {
    4646 	switch (fc) {
    4647 	case ixgbe_fc_rx_pause:
    4648 	case ixgbe_fc_tx_pause:
    4649 	case ixgbe_fc_full:
    4650 		adapter->hw.fc.requested_mode = fc;
    4651 		if (adapter->num_queues > 1)
    4652 			ixgbe_disable_rx_drop(adapter);
    4653 		break;
    4654 	case ixgbe_fc_none:
    4655 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
    4656 		if (adapter->num_queues > 1)
    4657 			ixgbe_enable_rx_drop(adapter);
    4658 		break;
    4659 	default:
    4660 		return (EINVAL);
    4661 	}
   4662 
   4663 #if 0 /* XXX NetBSD */
   4664 	/* Don't autoneg if forcing a value */
   4665 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4666 #endif
   4667 	ixgbe_fc_enable(&adapter->hw);
   4668 
   4669 	return (0);
   4670 } /* ixgbe_set_flowcntl */
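
         /*
          * Illustrative sketch only, kept under #if 0 so it is never compiled:
          * requesting full (rx + tx) flow control through the helper above
          * uses the enum value ixgbe_fc_full, which is what the sysctl value 3
          * maps to.  The wrapper function is an assumption for illustration.
          */
         #if 0
         static int
         ixgbe_example_full_fc(struct adapter *adapter)
         {
         	return ixgbe_set_flowcntl(adapter, ixgbe_fc_full);	/* value 3 */
         }
         #endif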
   4671 
   4672 /************************************************************************
   4673  * ixgbe_enable_rx_drop
   4674  *
   4675  *   Enable the hardware to drop packets when the buffer is
   4676  *   full. This is useful with multiqueue, so that no single
   4677  *   queue being full stalls the entire RX engine. We only
   4678  *   enable this when Multiqueue is enabled AND Flow Control
   4679  *   is disabled.
   4680  ************************************************************************/
   4681 static void
   4682 ixgbe_enable_rx_drop(struct adapter *adapter)
   4683 {
   4684 	struct ixgbe_hw *hw = &adapter->hw;
   4685 	struct rx_ring  *rxr;
   4686 	u32             srrctl;
   4687 
   4688 	for (int i = 0; i < adapter->num_queues; i++) {
   4689 		rxr = &adapter->rx_rings[i];
   4690 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4691 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4692 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4693 	}
   4694 
   4695 	/* enable drop for each vf */
   4696 	for (int i = 0; i < adapter->num_vfs; i++) {
   4697 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4698 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4699 		    IXGBE_QDE_ENABLE));
   4700 	}
   4701 } /* ixgbe_enable_rx_drop */
   4702 
   4703 /************************************************************************
   4704  * ixgbe_disable_rx_drop
   4705  ************************************************************************/
   4706 static void
   4707 ixgbe_disable_rx_drop(struct adapter *adapter)
   4708 {
   4709 	struct ixgbe_hw *hw = &adapter->hw;
   4710 	struct rx_ring  *rxr;
   4711 	u32             srrctl;
   4712 
   4713 	for (int i = 0; i < adapter->num_queues; i++) {
   4714 		rxr = &adapter->rx_rings[i];
    4715 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4716 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4717 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4718 	}
   4719 
   4720 	/* disable drop for each vf */
   4721 	for (int i = 0; i < adapter->num_vfs; i++) {
   4722 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4723 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4724 	}
   4725 } /* ixgbe_disable_rx_drop */
   4726 
   4727 /************************************************************************
   4728  * ixgbe_sysctl_advertise
   4729  *
   4730  *   SYSCTL wrapper around setting advertised speed
   4731  ************************************************************************/
   4732 static int
   4733 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4734 {
   4735 	struct sysctlnode node = *rnode;
   4736 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4737 	int            error = 0, advertise;
   4738 
   4739 	advertise = adapter->advertise;
   4740 	node.sysctl_data = &advertise;
   4741 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4742 	if (error != 0 || newp == NULL)
   4743 		return error;
   4744 
   4745 	return ixgbe_set_advertise(adapter, advertise);
   4746 } /* ixgbe_sysctl_advertise */
   4747 
   4748 /************************************************************************
   4749  * ixgbe_set_advertise - Control advertised link speed
   4750  *
   4751  *   Flags:
   4752  *     0x00 - Default (all capable link speed)
   4753  *     0x01 - advertise 100 Mb
   4754  *     0x02 - advertise 1G
   4755  *     0x04 - advertise 10G
   4756  *     0x08 - advertise 10 Mb
   4757  *     0x10 - advertise 2.5G
   4758  *     0x20 - advertise 5G
   4759  ************************************************************************/
   4760 static int
   4761 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4762 {
   4763 	device_t         dev;
   4764 	struct ixgbe_hw  *hw;
   4765 	ixgbe_link_speed speed = 0;
   4766 	ixgbe_link_speed link_caps = 0;
   4767 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4768 	bool             negotiate = FALSE;
   4769 
   4770 	/* Checks to validate new value */
   4771 	if (adapter->advertise == advertise) /* no change */
   4772 		return (0);
   4773 
   4774 	dev = adapter->dev;
   4775 	hw = &adapter->hw;
   4776 
   4777 	/* No speed changes for backplane media */
   4778 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4779 		return (ENODEV);
   4780 
   4781 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4782 	    (hw->phy.multispeed_fiber))) {
   4783 		device_printf(dev,
   4784 		    "Advertised speed can only be set on copper or "
   4785 		    "multispeed fiber media types.\n");
   4786 		return (EINVAL);
   4787 	}
   4788 
   4789 	if (advertise < 0x0 || advertise > 0x2f) {
   4790 		device_printf(dev,
    4791 		    "Invalid advertised speed; valid modes are 0x0 through 0x2f\n");
   4792 		return (EINVAL);
   4793 	}
   4794 
   4795 	if (hw->mac.ops.get_link_capabilities) {
   4796 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4797 		    &negotiate);
   4798 		if (err != IXGBE_SUCCESS) {
   4799 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   4800 			return (ENODEV);
   4801 		}
   4802 	}
   4803 
   4804 	/* Set new value and report new advertised mode */
   4805 	if (advertise & 0x1) {
   4806 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4807 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4808 			return (EINVAL);
   4809 		}
   4810 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4811 	}
   4812 	if (advertise & 0x2) {
   4813 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4814 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4815 			return (EINVAL);
   4816 		}
   4817 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4818 	}
   4819 	if (advertise & 0x4) {
   4820 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4821 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4822 			return (EINVAL);
   4823 		}
   4824 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4825 	}
   4826 	if (advertise & 0x8) {
   4827 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4828 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4829 			return (EINVAL);
   4830 		}
   4831 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4832 	}
   4833 	if (advertise & 0x10) {
   4834 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4835 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4836 			return (EINVAL);
   4837 		}
   4838 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4839 	}
   4840 	if (advertise & 0x20) {
   4841 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4842 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4843 			return (EINVAL);
   4844 		}
   4845 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4846 	}
   4847 	if (advertise == 0)
   4848 		speed = link_caps; /* All capable link speed */
   4849 
   4850 	hw->mac.autotry_restart = TRUE;
   4851 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4852 	adapter->advertise = advertise;
   4853 
   4854 	return (0);
   4855 } /* ixgbe_set_advertise */
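
         /*
          * Illustrative sketch only, kept under #if 0 so it is never compiled:
          * the advertise flags above form a bitmask, so advertising both 1G
          * and 10G is 0x02 | 0x04 == 0x06.  The wrapper function is an
          * assumption for illustration.
          */
         #if 0
         static int
         ixgbe_example_advertise_1g_10g(struct adapter *adapter)
         {
         	return ixgbe_set_advertise(adapter, 0x02 | 0x04); /* 1G + 10G */
         }
         #endif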
   4856 
   4857 /************************************************************************
   4858  * ixgbe_get_advertise - Get current advertised speed settings
   4859  *
   4860  *   Formatted for sysctl usage.
   4861  *   Flags:
   4862  *     0x01 - advertise 100 Mb
   4863  *     0x02 - advertise 1G
   4864  *     0x04 - advertise 10G
   4865  *     0x08 - advertise 10 Mb (yes, Mb)
   4866  *     0x10 - advertise 2.5G
   4867  *     0x20 - advertise 5G
   4868  ************************************************************************/
   4869 static int
   4870 ixgbe_get_advertise(struct adapter *adapter)
   4871 {
   4872 	struct ixgbe_hw  *hw = &adapter->hw;
   4873 	int              speed;
   4874 	ixgbe_link_speed link_caps = 0;
   4875 	s32              err;
   4876 	bool             negotiate = FALSE;
   4877 
   4878 	/*
   4879 	 * Advertised speed means nothing unless it's copper or
   4880 	 * multi-speed fiber
   4881 	 */
   4882 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4883 	    !(hw->phy.multispeed_fiber))
   4884 		return (0);
   4885 
   4886 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4887 	if (err != IXGBE_SUCCESS)
   4888 		return (0);
   4889 
   4890 	speed =
   4891 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   4892 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   4893 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   4894 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   4895 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   4896 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   4897 
   4898 	return speed;
   4899 } /* ixgbe_get_advertise */
   4900 
   4901 /************************************************************************
   4902  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   4903  *
   4904  *   Control values:
   4905  *     0/1 - off / on (use default value of 1000)
   4906  *
   4907  *     Legal timer values are:
   4908  *     50,100,250,500,1000,2000,5000,10000
   4909  *
   4910  *     Turning off interrupt moderation will also turn this off.
   4911  ************************************************************************/
   4912 static int
   4913 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   4914 {
   4915 	struct sysctlnode node = *rnode;
   4916 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4917 	struct ifnet   *ifp = adapter->ifp;
   4918 	int            error;
   4919 	int            newval;
   4920 
   4921 	newval = adapter->dmac;
   4922 	node.sysctl_data = &newval;
   4923 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4924 	if ((error) || (newp == NULL))
   4925 		return (error);
   4926 
   4927 	switch (newval) {
   4928 	case 0:
   4929 		/* Disabled */
   4930 		adapter->dmac = 0;
   4931 		break;
   4932 	case 1:
   4933 		/* Enable and use default */
   4934 		adapter->dmac = 1000;
   4935 		break;
   4936 	case 50:
   4937 	case 100:
   4938 	case 250:
   4939 	case 500:
   4940 	case 1000:
   4941 	case 2000:
   4942 	case 5000:
   4943 	case 10000:
   4944 		/* Legal values - allow */
   4945 		adapter->dmac = newval;
   4946 		break;
   4947 	default:
   4948 		/* Do nothing, illegal value */
   4949 		return (EINVAL);
   4950 	}
   4951 
   4952 	/* Re-initialize hardware if it's already running */
   4953 	if (ifp->if_flags & IFF_RUNNING)
   4954 		ixgbe_init(ifp);
   4955 
   4956 	return (0);
    4957 } /* ixgbe_sysctl_dmac */
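
         /*
          * Note (summary of the handler above): writing 0 disables DMA
          * coalescing, writing 1 selects the default watchdog value of 1000,
          * and any other value must be one of the legal timer values listed
          * in the comment block.
          */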
   4958 
   4959 #ifdef IXGBE_DEBUG
   4960 /************************************************************************
   4961  * ixgbe_sysctl_power_state
   4962  *
   4963  *   Sysctl to test power states
   4964  *   Values:
   4965  *     0      - set device to D0
   4966  *     3      - set device to D3
   4967  *     (none) - get current device power state
   4968  ************************************************************************/
   4969 static int
   4970 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   4971 {
   4972 #ifdef notyet
   4973 	struct sysctlnode node = *rnode;
   4974 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4975 	device_t       dev =  adapter->dev;
   4976 	int            curr_ps, new_ps, error = 0;
   4977 
   4978 	curr_ps = new_ps = pci_get_powerstate(dev);
   4979 
   4980 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4981 	if ((error) || (req->newp == NULL))
   4982 		return (error);
   4983 
   4984 	if (new_ps == curr_ps)
   4985 		return (0);
   4986 
   4987 	if (new_ps == 3 && curr_ps == 0)
   4988 		error = DEVICE_SUSPEND(dev);
   4989 	else if (new_ps == 0 && curr_ps == 3)
   4990 		error = DEVICE_RESUME(dev);
   4991 	else
   4992 		return (EINVAL);
   4993 
   4994 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   4995 
   4996 	return (error);
   4997 #else
   4998 	return 0;
   4999 #endif
   5000 } /* ixgbe_sysctl_power_state */
   5001 #endif
   5002 
   5003 /************************************************************************
   5004  * ixgbe_sysctl_wol_enable
   5005  *
   5006  *   Sysctl to enable/disable the WoL capability,
   5007  *   if supported by the adapter.
   5008  *
   5009  *   Values:
   5010  *     0 - disabled
   5011  *     1 - enabled
   5012  ************************************************************************/
   5013 static int
   5014 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5015 {
   5016 	struct sysctlnode node = *rnode;
   5017 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5018 	struct ixgbe_hw *hw = &adapter->hw;
   5019 	bool            new_wol_enabled;
   5020 	int             error = 0;
   5021 
   5022 	new_wol_enabled = hw->wol_enabled;
   5023 	node.sysctl_data = &new_wol_enabled;
   5024 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5025 	if ((error) || (newp == NULL))
   5026 		return (error);
   5027 	if (new_wol_enabled == hw->wol_enabled)
   5028 		return (0);
   5029 
   5030 	if (new_wol_enabled && !adapter->wol_support)
   5031 		return (ENODEV);
   5032 	else
   5033 		hw->wol_enabled = new_wol_enabled;
   5034 
   5035 	return (0);
   5036 } /* ixgbe_sysctl_wol_enable */
   5037 
   5038 /************************************************************************
   5039  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5040  *
    5041  *   Sysctl to select the types of received packets that
    5042  *   will wake the adapter up.
   5043  *   Flags:
   5044  *     0x1  - Link Status Change
   5045  *     0x2  - Magic Packet
   5046  *     0x4  - Direct Exact
   5047  *     0x8  - Directed Multicast
   5048  *     0x10 - Broadcast
   5049  *     0x20 - ARP/IPv4 Request Packet
   5050  *     0x40 - Direct IPv4 Packet
   5051  *     0x80 - Direct IPv6 Packet
   5052  *
   5053  *   Settings not listed above will cause the sysctl to return an error.
   5054  ************************************************************************/
   5055 static int
   5056 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5057 {
   5058 	struct sysctlnode node = *rnode;
   5059 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5060 	int error = 0;
   5061 	u32 new_wufc;
   5062 
   5063 	new_wufc = adapter->wufc;
   5064 	node.sysctl_data = &new_wufc;
   5065 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5066 	if ((error) || (newp == NULL))
   5067 		return (error);
   5068 	if (new_wufc == adapter->wufc)
   5069 		return (0);
   5070 
   5071 	if (new_wufc & 0xffffff00)
   5072 		return (EINVAL);
   5073 
   5074 	new_wufc &= 0xff;
   5075 	new_wufc |= (0xffffff & adapter->wufc);
   5076 	adapter->wufc = new_wufc;
   5077 
   5078 	return (0);
   5079 } /* ixgbe_sysctl_wufc */
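
         /*
          * Illustrative note: the WUFC flags above combine as a bitmask, so
          * waking on Link Status Change plus Magic Packet corresponds to the
          * value 0x1 | 0x2 == 0x3.
          */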
   5080 
   5081 #ifdef IXGBE_DEBUG
   5082 /************************************************************************
   5083  * ixgbe_sysctl_print_rss_config
   5084  ************************************************************************/
   5085 static int
   5086 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5087 {
   5088 #ifdef notyet
   5089 	struct sysctlnode node = *rnode;
   5090 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5091 	struct ixgbe_hw *hw = &adapter->hw;
   5092 	device_t        dev = adapter->dev;
   5093 	struct sbuf     *buf;
   5094 	int             error = 0, reta_size;
   5095 	u32             reg;
   5096 
   5097 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5098 	if (!buf) {
   5099 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5100 		return (ENOMEM);
   5101 	}
   5102 
   5103 	// TODO: use sbufs to make a string to print out
   5104 	/* Set multiplier for RETA setup and table size based on MAC */
   5105 	switch (adapter->hw.mac.type) {
   5106 	case ixgbe_mac_X550:
   5107 	case ixgbe_mac_X550EM_x:
   5108 	case ixgbe_mac_X550EM_a:
   5109 		reta_size = 128;
   5110 		break;
   5111 	default:
   5112 		reta_size = 32;
   5113 		break;
   5114 	}
   5115 
   5116 	/* Print out the redirection table */
   5117 	sbuf_cat(buf, "\n");
   5118 	for (int i = 0; i < reta_size; i++) {
   5119 		if (i < 32) {
   5120 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5121 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5122 		} else {
   5123 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5124 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5125 		}
   5126 	}
   5127 
   5128 	// TODO: print more config
   5129 
   5130 	error = sbuf_finish(buf);
   5131 	if (error)
   5132 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5133 
   5134 	sbuf_delete(buf);
   5135 #endif
   5136 	return (0);
   5137 } /* ixgbe_sysctl_print_rss_config */
   5138 #endif /* IXGBE_DEBUG */
   5139 
   5140 /************************************************************************
   5141  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5142  *
   5143  *   For X552/X557-AT devices using an external PHY
   5144  ************************************************************************/
   5145 static int
   5146 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5147 {
   5148 	struct sysctlnode node = *rnode;
   5149 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5150 	struct ixgbe_hw *hw = &adapter->hw;
    5151 	int		val;
    5152 	u16		reg;
    5153 	int		error;
   5154 
   5155 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5156 		device_printf(adapter->dev,
   5157 		    "Device has no supported external thermal sensor.\n");
   5158 		return (ENODEV);
   5159 	}
   5160 
   5161 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5162 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5163 		device_printf(adapter->dev,
   5164 		    "Error reading from PHY's current temperature register\n");
   5165 		return (EAGAIN);
   5166 	}
   5167 
   5168 	node.sysctl_data = &val;
   5169 
   5170 	/* Shift temp for output */
   5171 	val = reg >> 8;
   5172 
   5173 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5174 	if ((error) || (newp == NULL))
   5175 		return (error);
   5176 
   5177 	return (0);
   5178 } /* ixgbe_sysctl_phy_temp */
   5179 
   5180 /************************************************************************
   5181  * ixgbe_sysctl_phy_overtemp_occurred
   5182  *
   5183  *   Reports (directly from the PHY) whether the current PHY
   5184  *   temperature is over the overtemp threshold.
   5185  ************************************************************************/
   5186 static int
   5187 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5188 {
   5189 	struct sysctlnode node = *rnode;
   5190 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5191 	struct ixgbe_hw *hw = &adapter->hw;
   5192 	int val, error;
   5193 	u16 reg;
   5194 
   5195 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5196 		device_printf(adapter->dev,
   5197 		    "Device has no supported external thermal sensor.\n");
   5198 		return (ENODEV);
   5199 	}
   5200 
   5201 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5202 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5203 		device_printf(adapter->dev,
   5204 		    "Error reading from PHY's temperature status register\n");
   5205 		return (EAGAIN);
   5206 	}
   5207 
   5208 	node.sysctl_data = &val;
   5209 
   5210 	/* Get occurrence bit */
   5211 	val = !!(reg & 0x4000);
   5212 
   5213 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5214 	if ((error) || (newp == NULL))
   5215 		return (error);
   5216 
   5217 	return (0);
   5218 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5219 
   5220 /************************************************************************
   5221  * ixgbe_sysctl_eee_state
   5222  *
   5223  *   Sysctl to set EEE power saving feature
   5224  *   Values:
   5225  *     0      - disable EEE
   5226  *     1      - enable EEE
   5227  *     (none) - get current device EEE state
   5228  ************************************************************************/
   5229 static int
   5230 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5231 {
   5232 	struct sysctlnode node = *rnode;
   5233 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5234 	struct ifnet   *ifp = adapter->ifp;
   5235 	device_t       dev = adapter->dev;
   5236 	int            curr_eee, new_eee, error = 0;
   5237 	s32            retval;
   5238 
   5239 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5240 	node.sysctl_data = &new_eee;
   5241 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5242 	if ((error) || (newp == NULL))
   5243 		return (error);
   5244 
   5245 	/* Nothing to do */
   5246 	if (new_eee == curr_eee)
   5247 		return (0);
   5248 
   5249 	/* Not supported */
   5250 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5251 		return (EINVAL);
   5252 
   5253 	/* Bounds checking */
   5254 	if ((new_eee < 0) || (new_eee > 1))
   5255 		return (EINVAL);
   5256 
   5257 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5258 	if (retval) {
   5259 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5260 		return (EINVAL);
   5261 	}
   5262 
   5263 	/* Restart auto-neg */
   5264 	ixgbe_init(ifp);
   5265 
   5266 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5267 
   5268 	/* Cache new value */
   5269 	if (new_eee)
   5270 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5271 	else
   5272 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5273 
   5274 	return (error);
   5275 } /* ixgbe_sysctl_eee_state */
   5276 
   5277 /************************************************************************
   5278  * ixgbe_init_device_features
   5279  ************************************************************************/
   5280 static void
   5281 ixgbe_init_device_features(struct adapter *adapter)
   5282 {
   5283 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5284 	                  | IXGBE_FEATURE_RSS
   5285 	                  | IXGBE_FEATURE_MSI
   5286 	                  | IXGBE_FEATURE_MSIX
   5287 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5288 	                  | IXGBE_FEATURE_LEGACY_TX;
   5289 
   5290 	/* Set capabilities first... */
   5291 	switch (adapter->hw.mac.type) {
   5292 	case ixgbe_mac_82598EB:
   5293 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5294 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5295 		break;
   5296 	case ixgbe_mac_X540:
   5297 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5298 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5299 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5300 		    (adapter->hw.bus.func == 0))
   5301 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5302 		break;
   5303 	case ixgbe_mac_X550:
   5304 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5305 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5306 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5307 		break;
   5308 	case ixgbe_mac_X550EM_x:
   5309 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5310 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5311 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5312 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5313 		break;
   5314 	case ixgbe_mac_X550EM_a:
   5315 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5316 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5317 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5318 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5319 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5320 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5321 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5322 		}
   5323 		break;
   5324 	case ixgbe_mac_82599EB:
   5325 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5326 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5327 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5328 		    (adapter->hw.bus.func == 0))
   5329 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5330 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5331 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5332 		break;
   5333 	default:
   5334 		break;
   5335 	}
   5336 
   5337 	/* Enabled by default... */
   5338 	/* Fan failure detection */
   5339 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5340 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5341 	/* Netmap */
   5342 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5343 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5344 	/* EEE */
   5345 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5346 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5347 	/* Thermal Sensor */
   5348 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5349 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5350 
   5351 	/* Enabled via global sysctl... */
   5352 	/* Flow Director */
   5353 	if (ixgbe_enable_fdir) {
   5354 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5355 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5356 		else
    5357 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5358 	}
   5359 	/* Legacy (single queue) transmit */
   5360 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5361 	    ixgbe_enable_legacy_tx)
   5362 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5363 	/*
   5364 	 * Message Signal Interrupts - Extended (MSI-X)
   5365 	 * Normal MSI is only enabled if MSI-X calls fail.
   5366 	 */
   5367 	if (!ixgbe_enable_msix)
   5368 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5369 	/* Receive-Side Scaling (RSS) */
   5370 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5371 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5372 
   5373 	/* Disable features with unmet dependencies... */
   5374 	/* No MSI-X */
   5375 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5376 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5377 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5378 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5379 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5380 	}
   5381 } /* ixgbe_init_device_features */
   5382 
   5383 /************************************************************************
   5384  * ixgbe_probe - Device identification routine
   5385  *
    5386  *   Determines if the driver should be loaded on
    5387  *   the adapter based on its PCI vendor/device ID.
    5388  *
    5389  *   return 1 on a device match, 0 otherwise
   5390  ************************************************************************/
   5391 static int
   5392 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5393 {
   5394 	const struct pci_attach_args *pa = aux;
   5395 
   5396 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5397 }
   5398 
   5399 static ixgbe_vendor_info_t *
   5400 ixgbe_lookup(const struct pci_attach_args *pa)
   5401 {
   5402 	ixgbe_vendor_info_t *ent;
   5403 	pcireg_t subid;
   5404 
   5405 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5406 
   5407 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5408 		return NULL;
   5409 
   5410 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5411 
   5412 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5413 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5414 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5415 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5416 			(ent->subvendor_id == 0)) &&
   5417 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5418 			(ent->subdevice_id == 0))) {
   5419 			++ixgbe_total_ports;
   5420 			return ent;
   5421 		}
   5422 	}
   5423 	return NULL;
   5424 }
   5425 
   5426 static int
   5427 ixgbe_ifflags_cb(struct ethercom *ec)
   5428 {
   5429 	struct ifnet *ifp = &ec->ec_if;
   5430 	struct adapter *adapter = ifp->if_softc;
   5431 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5432 
   5433 	IXGBE_CORE_LOCK(adapter);
   5434 
   5435 	if (change != 0)
   5436 		adapter->if_flags = ifp->if_flags;
   5437 
   5438 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5439 		rc = ENETRESET;
   5440 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5441 		ixgbe_set_promisc(adapter);
   5442 
   5443 	/* Set up VLAN support and filter */
   5444 	ixgbe_setup_vlan_hw_support(adapter);
   5445 
   5446 	IXGBE_CORE_UNLOCK(adapter);
   5447 
   5448 	return rc;
   5449 }
   5450 
   5451 /************************************************************************
   5452  * ixgbe_ioctl - Ioctl entry point
   5453  *
   5454  *   Called when the user wants to configure the interface.
   5455  *
   5456  *   return 0 on success, positive on failure
   5457  ************************************************************************/
   5458 static int
   5459 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5460 {
   5461 	struct adapter	*adapter = ifp->if_softc;
   5462 	struct ixgbe_hw *hw = &adapter->hw;
   5463 	struct ifcapreq *ifcr = data;
   5464 	struct ifreq	*ifr = data;
   5465 	int             error = 0;
   5466 	int l4csum_en;
   5467 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5468 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5469 
   5470 	switch (command) {
   5471 	case SIOCSIFFLAGS:
   5472 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5473 		break;
   5474 	case SIOCADDMULTI:
   5475 	case SIOCDELMULTI:
   5476 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5477 		break;
   5478 	case SIOCSIFMEDIA:
   5479 	case SIOCGIFMEDIA:
   5480 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5481 		break;
   5482 	case SIOCSIFCAP:
   5483 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5484 		break;
   5485 	case SIOCSIFMTU:
   5486 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5487 		break;
   5488 #ifdef __NetBSD__
   5489 	case SIOCINITIFADDR:
   5490 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5491 		break;
   5492 	case SIOCGIFFLAGS:
   5493 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5494 		break;
   5495 	case SIOCGIFAFLAG_IN:
   5496 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5497 		break;
   5498 	case SIOCGIFADDR:
   5499 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5500 		break;
   5501 	case SIOCGIFMTU:
   5502 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5503 		break;
   5504 	case SIOCGIFCAP:
   5505 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5506 		break;
   5507 	case SIOCGETHERCAP:
   5508 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5509 		break;
   5510 	case SIOCGLIFADDR:
   5511 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5512 		break;
   5513 	case SIOCZIFDATA:
   5514 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5515 		hw->mac.ops.clear_hw_cntrs(hw);
   5516 		ixgbe_clear_evcnt(adapter);
   5517 		break;
   5518 	case SIOCAIFADDR:
   5519 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5520 		break;
   5521 #endif
   5522 	default:
   5523 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5524 		break;
   5525 	}
   5526 
   5527 	switch (command) {
   5528 	case SIOCSIFMEDIA:
   5529 	case SIOCGIFMEDIA:
   5530 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5531 	case SIOCGI2C:
   5532 	{
   5533 		struct ixgbe_i2c_req	i2c;
   5534 
   5535 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5536 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5537 		if (error != 0)
   5538 			break;
   5539 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5540 			error = EINVAL;
   5541 			break;
   5542 		}
   5543 		if (i2c.len > sizeof(i2c.data)) {
   5544 			error = EINVAL;
   5545 			break;
   5546 		}
   5547 
   5548 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5549 		    i2c.dev_addr, i2c.data);
   5550 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5551 		break;
   5552 	}
   5553 	case SIOCSIFCAP:
   5554 		/* Layer-4 Rx checksum offload has to be turned on and
   5555 		 * off as a unit.
   5556 		 */
   5557 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5558 		if (l4csum_en != l4csum && l4csum_en != 0)
   5559 			return EINVAL;
   5560 		/*FALLTHROUGH*/
   5561 	case SIOCADDMULTI:
   5562 	case SIOCDELMULTI:
   5563 	case SIOCSIFFLAGS:
   5564 	case SIOCSIFMTU:
   5565 	default:
   5566 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5567 			return error;
   5568 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5569 			;
   5570 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5571 			IXGBE_CORE_LOCK(adapter);
   5572 			ixgbe_init_locked(adapter);
   5573 			ixgbe_recalculate_max_frame(adapter);
   5574 			IXGBE_CORE_UNLOCK(adapter);
   5575 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5576 			/*
   5577 			 * Multicast list has changed; set the hardware filter
   5578 			 * accordingly.
   5579 			 */
   5580 			IXGBE_CORE_LOCK(adapter);
   5581 			ixgbe_disable_intr(adapter);
   5582 			ixgbe_set_multi(adapter);
   5583 			ixgbe_enable_intr(adapter);
   5584 			IXGBE_CORE_UNLOCK(adapter);
   5585 		}
   5586 		return 0;
   5587 	}
   5588 
   5589 	return error;
   5590 } /* ixgbe_ioctl */
   5591 
   5592 /************************************************************************
   5593  * ixgbe_check_fan_failure
   5594  ************************************************************************/
   5595 static void
   5596 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5597 {
   5598 	u32 mask;
   5599 
   5600 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5601 	    IXGBE_ESDP_SDP1;
   5602 
   5603 	if (reg & mask)
   5604 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5605 } /* ixgbe_check_fan_failure */
   5606 
   5607 /************************************************************************
   5608  * ixgbe_handle_que
   5609  ************************************************************************/
   5610 static void
   5611 ixgbe_handle_que(void *context)
   5612 {
   5613 	struct ix_queue *que = context;
   5614 	struct adapter  *adapter = que->adapter;
   5615 	struct tx_ring  *txr = que->txr;
   5616 	struct ifnet    *ifp = adapter->ifp;
   5617 
   5618 	adapter->handleq.ev_count++;
   5619 
   5620 	if (ifp->if_flags & IFF_RUNNING) {
   5621 		ixgbe_rxeof(que);
   5622 		IXGBE_TX_LOCK(txr);
   5623 		ixgbe_txeof(txr);
   5624 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5625 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5626 				ixgbe_mq_start_locked(ifp, txr);
   5627 		/* Only for queue 0 */
   5628 		/* NetBSD still needs this for CBQ */
   5629 		if ((&adapter->queues[0] == que)
   5630 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5631 			ixgbe_legacy_start_locked(ifp, txr);
   5632 		IXGBE_TX_UNLOCK(txr);
   5633 	}
   5634 
   5635 	/* Re-enable this interrupt */
   5636 	if (que->res != NULL)
   5637 		ixgbe_enable_queue(adapter, que->msix);
   5638 	else
   5639 		ixgbe_enable_intr(adapter);
   5640 
   5641 	return;
   5642 } /* ixgbe_handle_que */
   5643 
   5644 /************************************************************************
   5645  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5646  ************************************************************************/
   5647 static int
   5648 ixgbe_allocate_legacy(struct adapter *adapter,
   5649     const struct pci_attach_args *pa)
   5650 {
   5651 	device_t	dev = adapter->dev;
   5652 	struct ix_queue *que = adapter->queues;
   5653 	struct tx_ring  *txr = adapter->tx_rings;
   5654 	int		counts[PCI_INTR_TYPE_SIZE];
   5655 	pci_intr_type_t intr_type, max_type;
   5656 	char            intrbuf[PCI_INTRSTR_LEN];
   5657 	const char	*intrstr = NULL;
   5658 
   5659 	/* We allocate a single interrupt resource */
   5660 	max_type = PCI_INTR_TYPE_MSI;
   5661 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5662 	counts[PCI_INTR_TYPE_MSI] =
   5663 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5664 	counts[PCI_INTR_TYPE_INTX] =
   5665 	    (adapter->feat_en & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5666 
   5667 alloc_retry:
   5668 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5669 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5670 		return ENXIO;
   5671 	}
   5672 	adapter->osdep.nintrs = 1;
   5673 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5674 	    intrbuf, sizeof(intrbuf));
   5675 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5676 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5677 	    device_xname(dev));
   5678 	if (adapter->osdep.ihs[0] == NULL) {
   5679 		intr_type = pci_intr_type(adapter->osdep.pc,
   5680 		    adapter->osdep.intrs[0]);
    5681 		aprint_error_dev(dev, "unable to establish %s\n",
   5682 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5683 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5684 		switch (intr_type) {
   5685 		case PCI_INTR_TYPE_MSI:
   5686 			/* The next try is for INTx: Disable MSI */
   5687 			max_type = PCI_INTR_TYPE_INTX;
   5688 			counts[PCI_INTR_TYPE_INTX] = 1;
   5689 			goto alloc_retry;
   5690 		case PCI_INTR_TYPE_INTX:
   5691 		default:
   5692 			/* See below */
   5693 			break;
   5694 		}
   5695 	}
   5696 	if (adapter->osdep.ihs[0] == NULL) {
   5697 		aprint_error_dev(dev,
   5698 		    "couldn't establish interrupt%s%s\n",
   5699 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5700 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5701 		return ENXIO;
   5702 	}
   5703 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5704 	/*
   5705 	 * Try allocating a fast interrupt and the associated deferred
   5706 	 * processing contexts.
   5707 	 */
   5708 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5709 		txr->txr_si =
   5710 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5711 			ixgbe_deferred_mq_start, txr);
   5712 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5713 	    ixgbe_handle_que, que);
   5714 
   5715 	/* Tasklets for Link, SFP and Multispeed Fiber */
    5716 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5717 	    ixgbe_handle_link, adapter);
   5718 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5719 	    ixgbe_handle_mod, adapter);
   5720 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5721 	    ixgbe_handle_msf, adapter);
   5722 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5723 	    ixgbe_handle_phy, adapter);
   5724 
   5725 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5726 		adapter->fdir_si =
   5727 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5728 			ixgbe_reinit_fdir, adapter);
   5729 
    5730 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) &&
    5731 		(txr->txr_si == NULL)) ||
    5732 	    que->que_si == NULL ||
    5733 	    adapter->link_si == NULL ||
    5734 	    adapter->mod_si == NULL ||
    5735 	    ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
    5736 		(adapter->fdir_si == NULL)) ||
    5737 	    adapter->msf_si == NULL) {
   5738 		aprint_error_dev(dev,
   5739 		    "could not establish software interrupts\n");
   5740 
   5741 		return ENXIO;
   5742 	}
   5743 	/* For simplicity in the handlers */
   5744 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5745 
   5746 	return (0);
   5747 } /* ixgbe_allocate_legacy */
   5748 
   5749 
   5750 /************************************************************************
   5751  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5752  ************************************************************************/
   5753 static int
   5754 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5755 {
   5756 	device_t        dev = adapter->dev;
    5757 	struct ix_queue	*que = adapter->queues;
    5758 	struct tx_ring	*txr = adapter->tx_rings;
   5759 	pci_chipset_tag_t pc;
   5760 	char		intrbuf[PCI_INTRSTR_LEN];
   5761 	char		intr_xname[32];
   5762 	const char	*intrstr = NULL;
   5763 	int 		error, vector = 0;
   5764 	int		cpu_id = 0;
   5765 	kcpuset_t	*affinity;
   5766 #ifdef RSS
   5767 	unsigned int    rss_buckets = 0;
   5768 	kcpuset_t	cpu_mask;
   5769 #endif
   5770 
   5771 	pc = adapter->osdep.pc;
   5772 #ifdef	RSS
   5773 	/*
   5774 	 * If we're doing RSS, the number of queues needs to
   5775 	 * match the number of RSS buckets that are configured.
   5776 	 *
   5777 	 * + If there's more queues than RSS buckets, we'll end
   5778 	 *   up with queues that get no traffic.
   5779 	 *
   5780 	 * + If there's more RSS buckets than queues, we'll end
   5781 	 *   up having multiple RSS buckets map to the same queue,
   5782 	 *   so there'll be some contention.
   5783 	 */
   5784 	rss_buckets = rss_getnumbuckets();
   5785 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5786 	    (adapter->num_queues != rss_buckets)) {
   5787 		device_printf(dev,
   5788 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5789 		    "; performance will be impacted.\n",
   5790 		    __func__, adapter->num_queues, rss_buckets);
   5791 	}
   5792 #endif
   5793 
   5794 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5795 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5796 	    adapter->osdep.nintrs) != 0) {
   5797 		aprint_error_dev(dev,
   5798 		    "failed to allocate MSI-X interrupt\n");
   5799 		return (ENXIO);
   5800 	}
   5801 
   5802 	kcpuset_create(&affinity, false);
   5803 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5804 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5805 		    device_xname(dev), i);
   5806 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5807 		    sizeof(intrbuf));
   5808 #ifdef IXGBE_MPSAFE
   5809 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5810 		    true);
   5811 #endif
   5812 		/* Set the handler function */
   5813 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5814 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5815 		    intr_xname);
   5816 		if (que->res == NULL) {
   5817 			pci_intr_release(pc, adapter->osdep.intrs,
   5818 			    adapter->osdep.nintrs);
   5819 			aprint_error_dev(dev,
   5820 			    "Failed to register QUE handler\n");
   5821 			kcpuset_destroy(affinity);
   5822 			return ENXIO;
   5823 		}
   5824 		que->msix = vector;
   5825 		adapter->active_queues |= (u64)(1 << que->msix);
   5826 
   5827 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5828 #ifdef	RSS
   5829 			/*
   5830 			 * The queue ID is used as the RSS layer bucket ID.
   5831 			 * We look up the queue ID -> RSS CPU ID and select
   5832 			 * that.
   5833 			 */
   5834 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5835 			CPU_SETOF(cpu_id, &cpu_mask);
   5836 #endif
   5837 		} else {
   5838 			/*
   5839 			 * Bind the MSI-X vector, and thus the
   5840 			 * rings to the corresponding CPU.
   5841 			 *
   5842 			 * This just happens to match the default RSS
   5843 			 * round-robin bucket -> queue -> CPU allocation.
   5844 			 */
   5845 			if (adapter->num_queues > 1)
   5846 				cpu_id = i;
   5847 		}
   5848 		/* Round-robin affinity */
   5849 		kcpuset_zero(affinity);
   5850 		kcpuset_set(affinity, cpu_id % ncpu);
   5851 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5852 		    NULL);
   5853 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5854 		    intrstr);
   5855 		if (error == 0) {
   5856 #if 1 /* def IXGBE_DEBUG */
   5857 #ifdef	RSS
    5858 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
    5859 			    cpu_id % ncpu);
    5860 #else
    5861 			aprint_normal(", bound queue %d to CPU %d", i,
    5862 			    cpu_id % ncpu);
   5863 #endif
   5864 #endif /* IXGBE_DEBUG */
   5865 		}
   5866 		aprint_normal("\n");
   5867 
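         		/*
         		 * Establish softints for this queue: one to run the
         		 * deferred transmit start (unless the legacy TX path is
         		 * enabled) and one to continue RX/TX processing outside
         		 * hard interrupt context.
         		 */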
    5868 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
    5869 			txr->txr_si = softint_establish(
    5870 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
    5871 				ixgbe_deferred_mq_start, txr);
         			if (txr->txr_si == NULL)
         				aprint_error_dev(dev,
         				    "could not establish TX softint\n");
         		}
   5872 		que->que_si
   5873 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5874 			ixgbe_handle_que, que);
   5875 		if (que->que_si == NULL) {
   5876 			aprint_error_dev(dev,
   5877 			    "could not establish software interrupt\n");
   5878 		}
   5879 	}
   5880 
   5881 	/* and Link */
   5882 	cpu_id++;
   5883 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5884 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5885 	    sizeof(intrbuf));
   5886 #ifdef IXGBE_MPSAFE
   5887 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5888 	    true);
   5889 #endif
   5890 	/* Set the link handler function */
   5891 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5892 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   5893 	    intr_xname);
   5894 	if (adapter->osdep.ihs[vector] == NULL) {
   5895 		adapter->res = NULL;
   5896 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   5897 		kcpuset_destroy(affinity);
   5898 		return (ENXIO);
   5899 	}
   5900 	/* Round-robin affinity */
   5901 	kcpuset_zero(affinity);
   5902 	kcpuset_set(affinity, cpu_id % ncpu);
    5903 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
         	    NULL);
   5904 
   5905 	aprint_normal_dev(dev,
   5906 	    "for link, interrupting at %s", intrstr);
   5907 	if (error == 0)
   5908 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   5909 	else
   5910 		aprint_normal("\n");
   5911 
   5912 	adapter->vector = vector;
    5913 	/* Softints for Link, SFP, Multispeed Fiber, mailbox, PHY and FDIR */
    5914 	adapter->link_si = softint_establish(
    5915 	    SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, ixgbe_handle_link, adapter);
   5916 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5917 	    ixgbe_handle_mod, adapter);
   5918 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5919 	    ixgbe_handle_msf, adapter);
   5920 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   5921 		adapter->mbx_si =
   5922 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5923 			ixgbe_handle_mbx, adapter);
   5924 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5925 		ixgbe_handle_phy, adapter);
   5926 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5927 		adapter->fdir_si =
   5928 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5929 			ixgbe_reinit_fdir, adapter);
   5930 
   5931 	kcpuset_destroy(affinity);
   5932 
   5933 	return (0);
   5934 } /* ixgbe_allocate_msix */
   5935 
   5936 /************************************************************************
   5937  * ixgbe_configure_interrupts
   5938  *
    5939  *   Set up MSI-X, MSI, or legacy interrupts (in that order),
    5940  *   taking user settings (tunables) into account.
   5941  ************************************************************************/
   5942 static int
   5943 ixgbe_configure_interrupts(struct adapter *adapter)
   5944 {
   5945 	device_t dev = adapter->dev;
   5946 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   5947 	int want, queues, msgs;
   5948 
   5949 	/* Default to 1 queue if MSI-X setup fails */
   5950 	adapter->num_queues = 1;
   5951 
   5952 	/* Override by tuneable */
   5953 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   5954 		goto msi;
   5955 
   5956 	/* First try MSI-X */
   5957 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   5958 	msgs = MIN(msgs, IXG_MAX_NINTR);
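         	/* Need at least one vector for the queues plus one for link */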
   5959 	if (msgs < 2)
   5960 		goto msi;
   5961 
   5962 	adapter->msix_mem = (void *)1; /* XXX */
   5963 
   5964 	/* Figure out a reasonable auto config value */
   5965 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   5966 
   5967 #ifdef	RSS
   5968 	/* If we're doing RSS, clamp at the number of RSS buckets */
   5969 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   5970 		queues = min(queues, rss_getnumbuckets());
   5971 #endif
   5972 	if (ixgbe_num_queues > queues) {
    5973 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too "
         		    "large, using reduced amount (%d).\n",
         		    ixgbe_num_queues, queues);
   5974 		ixgbe_num_queues = queues;
   5975 	}
   5976 
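         	/* Honor an explicit queue count, else clamp to the HW limits */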
   5977 	if (ixgbe_num_queues != 0)
   5978 		queues = ixgbe_num_queues;
   5979 	else
   5980 		queues = min(queues,
   5981 		    min(mac->max_tx_queues, mac->max_rx_queues));
   5982 
   5983 	/* reflect correct sysctl value */
   5984 	ixgbe_num_queues = queues;
   5985 
   5986 	/*
   5987 	 * Want one vector (RX/TX pair) per queue
   5988 	 * plus an additional for Link.
   5989 	 */
   5990 	want = queues + 1;
   5991 	if (msgs >= want)
   5992 		msgs = want;
   5993 	else {
    5994 		aprint_error_dev(dev, "MSI-X configuration problem: "
    5995 		    "%d vectors available but %d wanted!\n",
    5996 		    msgs, want);
   5997 		goto msi;
   5998 	}
   5999 	device_printf(dev,
   6000 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   6001 	adapter->num_queues = queues;
   6002 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6003 	return (0);
   6004 
    6005 	/*
    6006 	 * MSI-X is either unavailable or provides fewer vectors
    6007 	 * than needed.  Disable the features that require MSI-X
    6008 	 * and fall back to MSI.
    6009 	 */
   6010 msi:
   6011 	/* Without MSI-X, some features are no longer supported */
   6012 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6013 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6014 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6015 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6016 
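         	/* Next, see whether a single MSI vector can be used */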
    6017 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6018 	adapter->msix_mem = NULL; /* XXX */
    6019 	if (msgs != 0) {
    6020 		msgs = 1;
   6023 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6024 		aprint_normal_dev(dev, "Using an MSI interrupt\n");
   6025 		return (0);
   6026 	}
   6027 
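         	/* Last resort: a single legacy (INTx) interrupt */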
   6028 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6029 		aprint_error_dev(dev,
   6030 		    "Device does not support legacy interrupts.\n");
   6031 		return 1;
   6032 	}
   6033 
   6034 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6035 	aprint_normal_dev(dev, "Using a Legacy interrupt\n");
   6036 
   6037 	return (0);
   6038 } /* ixgbe_configure_interrupts */
   6039 
   6040 
   6041 /************************************************************************
    6042  * ixgbe_handle_link - Softint for MSI-X Link interrupts
   6043  *
   6044  *   Done outside of interrupt context since the driver might sleep
   6045  ************************************************************************/
   6046 static void
   6047 ixgbe_handle_link(void *context)
   6048 {
   6049 	struct adapter  *adapter = context;
   6050 	struct ixgbe_hw *hw = &adapter->hw;
   6051 
   6052 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6053 	ixgbe_update_link_status(adapter);
   6054 
   6055 	/* Re-enable link interrupts */
   6056 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6057 } /* ixgbe_handle_link */
   6058 
   6059 /************************************************************************
   6060  * ixgbe_rearm_queues
   6061  ************************************************************************/
   6062 static void
   6063 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6064 {
   6065 	u32 mask;
   6066 
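         	/*
         	 * Writing EICS sets the corresponding interrupt cause bits and
         	 * forces the vectors for the queues in 'queues' to fire again.
         	 */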
   6067 	switch (adapter->hw.mac.type) {
   6068 	case ixgbe_mac_82598EB:
   6069 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6070 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6071 		break;
   6072 	case ixgbe_mac_82599EB:
   6073 	case ixgbe_mac_X540:
   6074 	case ixgbe_mac_X550:
   6075 	case ixgbe_mac_X550EM_x:
   6076 	case ixgbe_mac_X550EM_a:
   6077 		mask = (queues & 0xFFFFFFFF);
   6078 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6079 		mask = (queues >> 32);
   6080 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6081 		break;
   6082 	default:
   6083 		break;
   6084 	}
   6085 } /* ixgbe_rearm_queues */
   6086