      1 /* $NetBSD: ixgbe.c,v 1.100 2017/08/31 02:48:55 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
     88  *   Used by probe to select devices to load on
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void	ixgbe_get_slot_info(struct adapter *);
    176 static int      ixgbe_allocate_msix(struct adapter *,
    177 		    const struct pci_attach_args *);
    178 static int      ixgbe_allocate_legacy(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_configure_interrupts(struct adapter *);
    181 static void	ixgbe_free_pci_resources(struct adapter *);
    182 static void	ixgbe_local_timer(void *);
    183 static void	ixgbe_local_timer1(void *);
    184 static int	ixgbe_setup_interface(device_t, struct adapter *);
    185 static void	ixgbe_config_gpie(struct adapter *);
    186 static void	ixgbe_config_dmac(struct adapter *);
    187 static void	ixgbe_config_delay_values(struct adapter *);
    188 static void	ixgbe_config_link(struct adapter *);
    189 static void	ixgbe_check_wol_support(struct adapter *);
    190 static int	ixgbe_setup_low_power_mode(struct adapter *);
    191 static void	ixgbe_rearm_queues(struct adapter *, u64);
    192 
    193 static void     ixgbe_initialize_transmit_units(struct adapter *);
    194 static void     ixgbe_initialize_receive_units(struct adapter *);
    195 static void	ixgbe_enable_rx_drop(struct adapter *);
    196 static void	ixgbe_disable_rx_drop(struct adapter *);
    197 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    198 
    199 static void     ixgbe_enable_intr(struct adapter *);
    200 static void     ixgbe_disable_intr(struct adapter *);
    201 static void     ixgbe_update_stats_counters(struct adapter *);
    202 static void     ixgbe_set_promisc(struct adapter *);
    203 static void     ixgbe_set_multi(struct adapter *);
    204 static void     ixgbe_update_link_status(struct adapter *);
    205 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    206 static void	ixgbe_configure_ivars(struct adapter *);
    207 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    208 
    209 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    210 #if 0
    211 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    212 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    213 #endif
    214 
    215 static void	ixgbe_add_device_sysctls(struct adapter *);
    216 static void     ixgbe_add_hw_stats(struct adapter *);
    217 static void	ixgbe_clear_evcnt(struct adapter *);
    218 static int	ixgbe_set_flowcntl(struct adapter *, int);
    219 static int	ixgbe_set_advertise(struct adapter *, int);
    220 static int      ixgbe_get_advertise(struct adapter *);
    221 
    222 /* Sysctl handlers */
    223 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    224 		     const char *, int *, int);
    225 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    226 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    227 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    231 #ifdef IXGBE_DEBUG
    232 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    234 #endif
    235 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    236 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    237 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    242 
    243 /* Support for pluggable optic modules */
    244 static bool	ixgbe_sfp_probe(struct adapter *);
    245 
    246 /* Legacy (single vector) interrupt handler */
    247 static int	ixgbe_legacy_irq(void *);
    248 
    249 /* The MSI/MSI-X Interrupt handlers */
    250 static int	ixgbe_msix_que(void *);
    251 static int	ixgbe_msix_link(void *);
    252 
    253 /* Software interrupts for deferred work */
    254 static void	ixgbe_handle_que(void *);
    255 static void	ixgbe_handle_link(void *);
    256 static void	ixgbe_handle_msf(void *);
    257 static void	ixgbe_handle_mod(void *);
    258 static void	ixgbe_handle_phy(void *);
    259 
    260 const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
    261 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    262 
    263 /************************************************************************
    264  *  NetBSD Device Interface Entry Points
    265  ************************************************************************/
    266 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    267     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    268     DVF_DETACH_SHUTDOWN);
    269 
    270 #if 0
    271 devclass_t ix_devclass;
    272 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    273 
    274 MODULE_DEPEND(ix, pci, 1, 1, 1);
    275 MODULE_DEPEND(ix, ether, 1, 1, 1);
    276 #endif
    277 
    278 /*
    279  * TUNEABLE PARAMETERS:
    280  */
    281 
     282 /*
     283  * AIM: Adaptive Interrupt Moderation.
     284  * The interrupt rate is varied over
     285  * time based on the traffic seen on
     286  * each interrupt vector.
     287  */
    288 static bool ixgbe_enable_aim = true;
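         /*
          * NetBSD note: SYSCTL_INT() below is defined as a no-op stub so the
          * FreeBSD-style tunable declarations compile unchanged; on NetBSD
          * these statics simply hold the compile-time defaults.
          */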
    289 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    290 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    291     "Enable adaptive interrupt moderation");
    292 
    293 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    294 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    295     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    296 
    297 /* How many packets rxeof tries to clean at a time */
    298 static int ixgbe_rx_process_limit = 256;
    299 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    300     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    301 
    302 /* How many packets txeof tries to clean at a time */
    303 static int ixgbe_tx_process_limit = 256;
    304 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    305     &ixgbe_tx_process_limit, 0,
    306     "Maximum number of sent packets to process at a time, -1 means unlimited");
    307 
    308 /* Flow control setting, default to full */
    309 static int ixgbe_flow_control = ixgbe_fc_full;
    310 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    311     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    312 
     313 /*
     314  * Smart speed setting, default to on.
     315  * This only works as a compile option
     316  * right now, as it is set during attach;
     317  * set this to 'ixgbe_smart_speed_off' to
     318  * disable.
     319  */
    320 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    321 
    322 /*
    323  * MSI-X should be the default for best performance,
    324  * but this allows it to be forced off for testing.
    325  */
    326 static int ixgbe_enable_msix = 1;
    327 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    328     "Enable MSI-X interrupts");
    329 
     330 /*
     331  * Number of queues. If set to 0, the
     332  * driver autoconfigures based on the
     333  * number of CPUs, with a maximum of 8.
     334  * This can be overridden manually here.
     335  */
    336 static int ixgbe_num_queues = 0;
    337 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    338     "Number of queues to configure, 0 indicates autoconfigure");
    339 
     340 /*
     341  * Number of TX descriptors per ring,
     342  * set higher than RX as this seems to
     343  * be the better performing choice.
     344  */
    345 static int ixgbe_txd = PERFORM_TXD;
    346 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    347     "Number of transmit descriptors per queue");
    348 
    349 /* Number of RX descriptors per ring */
    350 static int ixgbe_rxd = PERFORM_RXD;
    351 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    352     "Number of receive descriptors per queue");
    353 
     354 /*
     355  * Setting this allows the use of
     356  * unsupported SFP+ modules; note that
     357  * if you do so, you are on your own :)
     358  */
    359 static int allow_unsupported_sfp = false;
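         /* As with SYSCTL_INT() above, TUNABLE_INT() is stubbed out to a no-op on NetBSD. */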
    360 #define TUNABLE_INT(__x, __y)
    361 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    362 
    363 /*
    364  * Not sure if Flow Director is fully baked,
    365  * so we'll default to turning it off.
    366  */
    367 static int ixgbe_enable_fdir = 0;
    368 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    369     "Enable Flow Director");
    370 
    371 /* Legacy Transmit (single queue) */
    372 static int ixgbe_enable_legacy_tx = 0;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    374     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    375 
    376 /* Receive-Side Scaling */
    377 static int ixgbe_enable_rss = 1;
    378 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    379     "Enable Receive-Side Scaling (RSS)");
    380 
     381 /* Keep a running tab on them for sanity checking */
    382 static int ixgbe_total_ports;
    383 
    384 #if 0
    385 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    386 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    387 #endif
    388 
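         /*
          * With NET_MPSAFE, the callout and softint handlers are established
          * MP-safe, i.e. they run without holding the kernel lock.
          */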
    389 #ifdef NET_MPSAFE
    390 #define IXGBE_MPSAFE		1
    391 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    392 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    393 #else
    394 #define IXGBE_CALLOUT_FLAGS	0
    395 #define IXGBE_SOFTINFT_FLAGS	0
    396 #endif
    397 
    398 /************************************************************************
    399  * ixgbe_initialize_rss_mapping
    400  ************************************************************************/
    401 static void
    402 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    403 {
    404 	struct ixgbe_hw	*hw = &adapter->hw;
    405 	u32             reta = 0, mrqc, rss_key[10];
    406 	int             queue_id, table_size, index_mult;
    407 	int             i, j;
    408 	u32             rss_hash_config;
    409 
    410 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    411 		/* Fetch the configured RSS key */
    412 		rss_getkey((uint8_t *) &rss_key);
    413 	} else {
    414 		/* set up random bits */
    415 		cprng_fast(&rss_key, sizeof(rss_key));
    416 	}
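         	/* The RSS key is 10 32-bit words (40 bytes); it is written to the RSSRK registers below. */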
    417 
    418 	/* Set multiplier for RETA setup and table size based on MAC */
    419 	index_mult = 0x1;
    420 	table_size = 128;
    421 	switch (adapter->hw.mac.type) {
    422 	case ixgbe_mac_82598EB:
    423 		index_mult = 0x11;
    424 		break;
    425 	case ixgbe_mac_X550:
    426 	case ixgbe_mac_X550EM_x:
    427 	case ixgbe_mac_X550EM_a:
    428 		table_size = 512;
    429 		break;
    430 	default:
    431 		break;
    432 	}
    433 
    434 	/* Set up the redirection table */
    435 	for (i = 0, j = 0; i < table_size; i++, j++) {
    436 		if (j == adapter->num_queues)
    437 			j = 0;
    438 
    439 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    440 			/*
    441 			 * Fetch the RSS bucket id for the given indirection
    442 			 * entry. Cap it at the number of configured buckets
    443 			 * (which is num_queues.)
    444 			 */
    445 			queue_id = rss_get_indirection_to_bucket(i);
    446 			queue_id = queue_id % adapter->num_queues;
    447 		} else
    448 			queue_id = (j * index_mult);
    449 
    450 		/*
    451 		 * The low 8 bits are for hash value (n+0);
    452 		 * The next 8 bits are for hash value (n+1), etc.
    453 		 */
    454 		reta = reta >> 8;
    455 		reta = reta | (((uint32_t) queue_id) << 24);
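         		/*
         		 * Every fourth entry completes a 32-bit register:
         		 * entries 0..127 are written to RETA[0..31], the
         		 * remainder (table_size == 512, X550 class) to ERETA.
         		 */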
    456 		if ((i & 3) == 3) {
    457 			if (i < 128)
    458 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    459 			else
    460 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    461 				    reta);
    462 			reta = 0;
    463 		}
    464 	}
    465 
    466 	/* Now fill our hash function seeds */
    467 	for (i = 0; i < 10; i++)
    468 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    469 
    470 	/* Perform hash on these packet types */
    471 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    472 		rss_hash_config = rss_gethashconfig();
    473 	else {
    474 		/*
    475 		 * Disable UDP - IP fragments aren't currently being handled
    476 		 * and so we end up with a mix of 2-tuple and 4-tuple
    477 		 * traffic.
    478 		 */
    479 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    480 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    481 		                | RSS_HASHTYPE_RSS_IPV6
    482 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    483 		                | RSS_HASHTYPE_RSS_IPV6_EX
    484 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    485 	}
    486 
    487 	mrqc = IXGBE_MRQC_RSSEN;
    488 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    489 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    490 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    491 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    492 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    493 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    494 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    495 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    496 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    497 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
    503 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
    504 		    __func__);
    505 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    506 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    507 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    508 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    509 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    510 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    511 } /* ixgbe_initialize_rss_mapping */
    512 
    513 /************************************************************************
    514  * ixgbe_initialize_receive_units - Setup receive registers and features.
    515  ************************************************************************/
    516 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    517 
    518 static void
    519 ixgbe_initialize_receive_units(struct adapter *adapter)
    520 {
    521 	struct	rx_ring	*rxr = adapter->rx_rings;
    522 	struct ixgbe_hw	*hw = &adapter->hw;
    523 	struct ifnet    *ifp = adapter->ifp;
    524 	int             i, j;
    525 	u32		bufsz, fctrl, srrctl, rxcsum;
    526 	u32		hlreg;
    527 
    528 	/*
    529 	 * Make sure receives are disabled while
    530 	 * setting up the descriptor ring
    531 	 */
    532 	ixgbe_disable_rx(hw);
    533 
    534 	/* Enable broadcasts */
    535 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    536 	fctrl |= IXGBE_FCTRL_BAM;
    537 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    538 		fctrl |= IXGBE_FCTRL_DPF;
    539 		fctrl |= IXGBE_FCTRL_PMCF;
    540 	}
    541 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    542 
    543 	/* Set for Jumbo Frames? */
    544 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    545 	if (ifp->if_mtu > ETHERMTU)
    546 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    547 	else
    548 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    549 
    550 #ifdef DEV_NETMAP
    551 	/* CRC stripping is conditional in Netmap */
    552 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    553 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    554 	    !ix_crcstrip)
    555 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    556 	else
    557 #endif /* DEV_NETMAP */
    558 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    559 
    560 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    561 
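         	/*
         	 * SRRCTL.BSIZEPKT is expressed in 1KB units; round the mbuf
         	 * buffer size up to the next 1KB boundary before shifting.
         	 */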
    562 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    563 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    564 
    565 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    566 		u64 rdba = rxr->rxdma.dma_paddr;
    567 		u32 tqsmreg, reg;
    568 		int regnum = i / 4;	/* 1 register per 4 queues */
     569 		int regshift = i % 4;	/* 8 bits per 1 queue */
    570 		j = rxr->me;
    571 
    572 		/* Setup the Base and Length of the Rx Descriptor Ring */
    573 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    574 		    (rdba & 0x00000000ffffffffULL));
    575 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    576 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    577 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    578 
    579 		/* Set up the SRRCTL register */
    580 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    581 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    582 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    583 		srrctl |= bufsz;
    584 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    585 
    586 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    587 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    588 		reg &= ~(0x000000ff << (regshift * 8));
    589 		reg |= i << (regshift * 8);
    590 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    591 
     592 		/*
     593 		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
     594 		 * The register location for queues 0...7 differs between
     595 		 * the 82598 and newer MACs.
     596 		 */
    597 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    598 			tqsmreg = IXGBE_TQSMR(regnum);
    599 		else
    600 			tqsmreg = IXGBE_TQSM(regnum);
    601 		reg = IXGBE_READ_REG(hw, tqsmreg);
    602 		reg &= ~(0x000000ff << (regshift * 8));
    603 		reg |= i << (regshift * 8);
    604 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    605 
    606 		/*
    607 		 * Set DROP_EN iff we have no flow control and >1 queue.
    608 		 * Note that srrctl was cleared shortly before during reset,
    609 		 * so we do not need to clear the bit, but do it just in case
    610 		 * this code is moved elsewhere.
    611 		 */
    612 		if (adapter->num_queues > 1 &&
    613 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    614 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    615 		} else {
    616 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    617 		}
    618 
    619 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    620 
    621 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    622 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    623 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    624 
    625 		/* Set the driver rx tail address */
    626 		rxr->tail =  IXGBE_RDT(rxr->me);
    627 	}
    628 
    629 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    630 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    631 		            | IXGBE_PSRTYPE_UDPHDR
    632 		            | IXGBE_PSRTYPE_IPV4HDR
    633 		            | IXGBE_PSRTYPE_IPV6HDR;
    634 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    635 	}
    636 
    637 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    638 
    639 	ixgbe_initialize_rss_mapping(adapter);
    640 
    641 	if (adapter->num_queues > 1) {
    642 		/* RSS and RX IPP Checksum are mutually exclusive */
    643 		rxcsum |= IXGBE_RXCSUM_PCSD;
    644 	}
    645 
    646 	if (ifp->if_capenable & IFCAP_RXCSUM)
    647 		rxcsum |= IXGBE_RXCSUM_PCSD;
    648 
    649 	/* This is useful for calculating UDP/IP fragment checksums */
    650 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    651 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    652 
    653 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    654 
    655 	return;
    656 } /* ixgbe_initialize_receive_units */
    657 
    658 /************************************************************************
    659  * ixgbe_initialize_transmit_units - Enable transmit units.
    660  ************************************************************************/
    661 static void
    662 ixgbe_initialize_transmit_units(struct adapter *adapter)
    663 {
    664 	struct tx_ring  *txr = adapter->tx_rings;
    665 	struct ixgbe_hw	*hw = &adapter->hw;
    666 
    667 	/* Setup the Base and Length of the Tx Descriptor Ring */
    668 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    669 		u64 tdba = txr->txdma.dma_paddr;
    670 		u32 txctrl = 0;
    671 		int j = txr->me;
    672 
    673 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    674 		    (tdba & 0x00000000ffffffffULL));
    675 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    676 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    677 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    678 
    679 		/* Setup the HW Tx Head and Tail descriptor pointers */
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    681 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    682 
    683 		/* Cache the tail address */
    684 		txr->tail = IXGBE_TDT(j);
    685 
    686 		/* Disable Head Writeback */
    687 		/*
    688 		 * Note: for X550 series devices, these registers are actually
     689 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    690 		 * fields remain the same.
    691 		 */
    692 		switch (hw->mac.type) {
    693 		case ixgbe_mac_82598EB:
    694 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    695 			break;
    696 		default:
    697 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    698 			break;
    699 		}
    700 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    701 		switch (hw->mac.type) {
    702 		case ixgbe_mac_82598EB:
    703 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    704 			break;
    705 		default:
    706 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    707 			break;
    708 		}
    709 
    710 	}
    711 
    712 	if (hw->mac.type != ixgbe_mac_82598EB) {
    713 		u32 dmatxctl, rttdcs;
    714 
    715 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    716 		dmatxctl |= IXGBE_DMATXCTL_TE;
    717 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    718 		/* Disable arbiter to set MTQC */
    719 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    720 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    721 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    722 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    723 		    ixgbe_get_mtqc(adapter->iov_mode));
    724 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    725 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    726 	}
    727 
    728 	return;
    729 } /* ixgbe_initialize_transmit_units */
    730 
    731 /************************************************************************
    732  * ixgbe_attach - Device initialization routine
    733  *
    734  *   Called when the driver is being loaded.
    735  *   Identifies the type of hardware, allocates all resources
    736  *   and initializes the hardware.
    737  *
    738  *   return 0 on success, positive on failure
    739  ************************************************************************/
    740 static void
    741 ixgbe_attach(device_t parent, device_t dev, void *aux)
    742 {
    743 	struct adapter  *adapter;
    744 	struct ixgbe_hw *hw;
    745 	int             error = -1;
    746 	u32		ctrl_ext;
    747 	u16		high, low, nvmreg;
    748 	pcireg_t	id, subid;
    749 	ixgbe_vendor_info_t *ent;
    750 	struct pci_attach_args *pa = aux;
    751 	const char *str;
    752 	char buf[256];
    753 
    754 	INIT_DEBUGOUT("ixgbe_attach: begin");
    755 
    756 	/* Allocate, clear, and link in our adapter structure */
    757 	adapter = device_private(dev);
    758 	adapter->hw.back = adapter;
    759 	adapter->dev = dev;
    760 	hw = &adapter->hw;
    761 	adapter->osdep.pc = pa->pa_pc;
    762 	adapter->osdep.tag = pa->pa_tag;
    763 	if (pci_dma64_available(pa))
    764 		adapter->osdep.dmat = pa->pa_dmat64;
    765 	else
    766 		adapter->osdep.dmat = pa->pa_dmat;
    767 	adapter->osdep.attached = false;
    768 
    769 	ent = ixgbe_lookup(pa);
    770 
    771 	KASSERT(ent != NULL);
    772 
    773 	aprint_normal(": %s, Version - %s\n",
    774 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    775 
    776 	/* Core Lock Init*/
    777 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    778 
    779 	/* Set up the timer callout */
    780 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    781 
    782 	/* Determine hardware revision */
    783 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    784 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    785 
    786 	hw->vendor_id = PCI_VENDOR(id);
    787 	hw->device_id = PCI_PRODUCT(id);
    788 	hw->revision_id =
    789 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    790 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    791 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    792 
    793 	/*
    794 	 * Make sure BUSMASTER is set
    795 	 */
    796 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    797 
    798 	/* Do base PCI setup - map BAR0 */
    799 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    800 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    801 		error = ENXIO;
    802 		goto err_out;
    803 	}
    804 
    805 	/* let hardware know driver is loaded */
    806 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    807 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    808 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    809 
    810 	/*
    811 	 * Initialize the shared code
    812 	 */
    813 	if (ixgbe_init_shared_code(hw)) {
    814 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    815 		error = ENXIO;
    816 		goto err_out;
    817 	}
    818 
    819 	switch (hw->mac.type) {
    820 	case ixgbe_mac_82598EB:
    821 		str = "82598EB";
    822 		break;
    823 	case ixgbe_mac_82599EB:
    824 		str = "82599EB";
    825 		break;
    826 	case ixgbe_mac_82599_vf:
    827 		str = "82599 VF";
    828 		break;
    829 	case ixgbe_mac_X540:
    830 		str = "X540";
    831 		break;
    832 	case ixgbe_mac_X540_vf:
    833 		str = "X540 VF";
    834 		break;
    835 	case ixgbe_mac_X550:
    836 		str = "X550";
    837 		break;
    838 	case ixgbe_mac_X550EM_x:
    839 		str = "X550EM";
    840 		break;
    841 	case ixgbe_mac_X550EM_a:
    842 		str = "X550EM A";
    843 		break;
    844 	case ixgbe_mac_X550_vf:
    845 		str = "X550 VF";
    846 		break;
    847 	case ixgbe_mac_X550EM_x_vf:
    848 		str = "X550EM X VF";
    849 		break;
    850 	case ixgbe_mac_X550EM_a_vf:
    851 		str = "X550EM A VF";
    852 		break;
    853 	default:
    854 		str = "Unknown";
    855 		break;
    856 	}
    857 	aprint_normal_dev(dev, "device %s\n", str);
    858 
    859 	if (hw->mbx.ops.init_params)
    860 		hw->mbx.ops.init_params(hw);
    861 
    862 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    863 
    864 	/* Pick up the 82599 settings */
    865 	if (hw->mac.type != ixgbe_mac_82598EB) {
    866 		hw->phy.smart_speed = ixgbe_smart_speed;
    867 		adapter->num_segs = IXGBE_82599_SCATTER;
    868 	} else
    869 		adapter->num_segs = IXGBE_82598_SCATTER;
    870 
    871 	ixgbe_init_device_features(adapter);
    872 
    873 	if (ixgbe_configure_interrupts(adapter)) {
    874 		error = ENXIO;
    875 		goto err_out;
    876 	}
    877 
    878 	/* Allocate multicast array memory. */
    879 	adapter->mta = malloc(sizeof(*adapter->mta) *
    880 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    881 	if (adapter->mta == NULL) {
    882 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    883 		error = ENOMEM;
    884 		goto err_out;
    885 	}
    886 
    887 	/* Enable WoL (if supported) */
    888 	ixgbe_check_wol_support(adapter);
    889 
    890 	/* Verify adapter fan is still functional (if applicable) */
    891 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    892 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    893 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    894 	}
    895 
    896 	/* Ensure SW/FW semaphore is free */
    897 	ixgbe_init_swfw_semaphore(hw);
    898 
    899 	/* Enable EEE power saving */
    900 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    901 		hw->mac.ops.setup_eee(hw, TRUE);
    902 
    903 	/* Set an initial default flow control value */
    904 	hw->fc.requested_mode = ixgbe_flow_control;
    905 
    906 	/* Sysctls for limiting the amount of work done in the taskqueues */
    907 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    908 	    "max number of rx packets to process",
    909 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    910 
    911 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    912 	    "max number of tx packets to process",
    913 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    914 
    915 	/* Do descriptor calc and sanity checks */
    916 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    917 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    918 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    919 		adapter->num_tx_desc = DEFAULT_TXD;
    920 	} else
    921 		adapter->num_tx_desc = ixgbe_txd;
    922 
    923 	/*
    924 	 * With many RX rings it is easy to exceed the
    925 	 * system mbuf allocation. Tuning nmbclusters
    926 	 * can alleviate this.
    927 	 */
    928 	if (nmbclusters > 0) {
    929 		int s;
    930 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    931 		if (s > nmbclusters) {
    932 			aprint_error_dev(dev, "RX Descriptors exceed "
    933 			    "system mbuf max, using default instead!\n");
    934 			ixgbe_rxd = DEFAULT_RXD;
    935 		}
    936 	}
    937 
    938 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    939 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    940 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    941 		adapter->num_rx_desc = DEFAULT_RXD;
    942 	} else
    943 		adapter->num_rx_desc = ixgbe_rxd;
    944 
    945 	/* Allocate our TX/RX Queues */
    946 	if (ixgbe_allocate_queues(adapter)) {
    947 		error = ENOMEM;
    948 		goto err_out;
    949 	}
    950 
    951 	hw->phy.reset_if_overtemp = TRUE;
    952 	error = ixgbe_reset_hw(hw);
    953 	hw->phy.reset_if_overtemp = FALSE;
    954 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    955 		/*
    956 		 * No optics in this port, set up
    957 		 * so the timer routine will probe
    958 		 * for later insertion.
    959 		 */
    960 		adapter->sfp_probe = TRUE;
    961 		error = IXGBE_SUCCESS;
    962 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    963 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    964 		error = EIO;
    965 		goto err_late;
    966 	} else if (error) {
    967 		aprint_error_dev(dev, "Hardware initialization failed\n");
    968 		error = EIO;
    969 		goto err_late;
    970 	}
    971 
    972 	/* Make sure we have a good EEPROM before we read from it */
    973 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    974 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    975 		error = EIO;
    976 		goto err_late;
    977 	}
    978 
    979 	aprint_normal("%s:", device_xname(dev));
    980 	/* NVM Image Version */
    981 	switch (hw->mac.type) {
    982 	case ixgbe_mac_X540:
    983 	case ixgbe_mac_X550EM_a:
    984 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    985 		if (nvmreg == 0xffff)
    986 			break;
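         		/* nvmreg layout: bits 15:12 = major, 11:4 = minor, 3:0 = image ID */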
    987 		high = (nvmreg >> 12) & 0x0f;
    988 		low = (nvmreg >> 4) & 0xff;
    989 		id = nvmreg & 0x0f;
    990 		aprint_normal(" NVM Image Version %u.%u ID 0x%x,", high, low,
    991 		    id);
    992 		break;
    993 	case ixgbe_mac_X550EM_x:
    994 	case ixgbe_mac_X550:
    995 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    996 		if (nvmreg == 0xffff)
    997 			break;
    998 		high = (nvmreg >> 12) & 0x0f;
    999 		low = nvmreg & 0xff;
   1000 		aprint_normal(" NVM Image Version %u.%u,", high, low);
   1001 		break;
   1002 	default:
   1003 		break;
   1004 	}
   1005 
   1006 	/* PHY firmware revision */
   1007 	switch (hw->mac.type) {
   1008 	case ixgbe_mac_X540:
   1009 	case ixgbe_mac_X550:
   1010 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1011 		if (nvmreg == 0xffff)
   1012 			break;
   1013 		high = (nvmreg >> 12) & 0x0f;
   1014 		low = (nvmreg >> 4) & 0xff;
   1015 		id = nvmreg & 0x000f;
   1016 		aprint_normal(" PHY FW Revision %u.%u ID 0x%x,", high, low,
   1017 		    id);
   1018 		break;
   1019 	default:
   1020 		break;
   1021 	}
   1022 
   1023 	/* NVM Map version & OEM NVM Image version */
   1024 	switch (hw->mac.type) {
   1025 	case ixgbe_mac_X550:
   1026 	case ixgbe_mac_X550EM_x:
   1027 	case ixgbe_mac_X550EM_a:
   1028 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1029 		if (nvmreg != 0xffff) {
   1030 			high = (nvmreg >> 12) & 0x0f;
   1031 			low = nvmreg & 0x00ff;
   1032 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1033 		}
   1034 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
    1035 		if (nvmreg != 0xffff) {
   1036 			high = (nvmreg >> 12) & 0x0f;
   1037 			low = nvmreg & 0x00ff;
   1038 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1039 			    low);
   1040 		}
   1041 		break;
   1042 	default:
   1043 		break;
   1044 	}
   1045 
   1046 	/* Print the ETrackID */
   1047 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1048 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1049 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1050 
   1051 	/* Setup OS specific network interface */
   1052 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1053 		goto err_late;
   1054 
   1055 	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
   1056 		error = ixgbe_allocate_msix(adapter, pa);
   1057 	else
   1058 		error = ixgbe_allocate_legacy(adapter, pa);
   1059 	if (error)
   1060 		goto err_late;
   1061 
   1062 	error = ixgbe_start_hw(hw);
   1063 	switch (error) {
   1064 	case IXGBE_ERR_EEPROM_VERSION:
   1065 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1066 		    "LOM.  Please be aware there may be issues associated "
   1067 		    "with your hardware.\nIf you are experiencing problems "
   1068 		    "please contact your Intel or hardware representative "
   1069 		    "who provided you with this hardware.\n");
   1070 		break;
   1071 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1072 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1073 		error = EIO;
   1074 		goto err_late;
   1075 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1076 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1077 		/* falls thru */
   1078 	default:
   1079 		break;
   1080 	}
   1081 
   1082 	if (hw->phy.id != 0) {
   1083 		uint16_t id1, id2;
   1084 		int oui, model, rev;
   1085 		const char *descr;
   1086 
   1087 		id1 = hw->phy.id >> 16;
   1088 		id2 = hw->phy.id & 0xffff;
   1089 		oui = MII_OUI(id1, id2);
   1090 		model = MII_MODEL(id2);
   1091 		rev = MII_REV(id2);
   1092 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1093 			aprint_normal_dev(dev,
   1094 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1095 			    descr, oui, model, rev);
   1096 		else
   1097 			aprint_normal_dev(dev,
   1098 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1099 			    oui, model, rev);
   1100 	}
   1101 
   1102 	/* Enable the optics for 82599 SFP+ fiber */
   1103 	ixgbe_enable_tx_laser(hw);
   1104 
   1105 	/* Enable power to the phy. */
   1106 	ixgbe_set_phy_power(hw, TRUE);
   1107 
   1108 	/* Initialize statistics */
   1109 	ixgbe_update_stats_counters(adapter);
   1110 
   1111 	/* Check PCIE slot type/speed/width */
   1112 	ixgbe_get_slot_info(adapter);
   1113 
   1114 	/*
   1115 	 * Do time init and sysctl init here, but
   1116 	 * only on the first port of a bypass adapter.
   1117 	 */
   1118 	ixgbe_bypass_init(adapter);
   1119 
   1120 	/* Set an initial dmac value */
   1121 	adapter->dmac = 0;
   1122 	/* Set initial advertised speeds (if applicable) */
   1123 	adapter->advertise = ixgbe_get_advertise(adapter);
   1124 
   1125 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1126 		ixgbe_define_iov_schemas(dev, &error);
   1127 
   1128 	/* Add sysctls */
   1129 	ixgbe_add_device_sysctls(adapter);
   1130 	ixgbe_add_hw_stats(adapter);
   1131 
   1132 	/* For Netmap */
   1133 	adapter->init_locked = ixgbe_init_locked;
   1134 	adapter->stop_locked = ixgbe_stop;
   1135 
   1136 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1137 		ixgbe_netmap_attach(adapter);
   1138 
   1139 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1140 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1141 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1142 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1143 
   1144 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1145 		pmf_class_network_register(dev, adapter->ifp);
   1146 	else
   1147 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1148 
   1149 	INIT_DEBUGOUT("ixgbe_attach: end");
   1150 	adapter->osdep.attached = true;
   1151 
   1152 	return;
   1153 
   1154 err_late:
   1155 	ixgbe_free_transmit_structures(adapter);
   1156 	ixgbe_free_receive_structures(adapter);
   1157 	free(adapter->queues, M_DEVBUF);
   1158 err_out:
   1159 	if (adapter->ifp != NULL)
   1160 		if_free(adapter->ifp);
   1161 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1162 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1163 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1164 	ixgbe_free_pci_resources(adapter);
   1165 	if (adapter->mta != NULL)
   1166 		free(adapter->mta, M_DEVBUF);
   1167 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1168 
   1169 	return;
   1170 } /* ixgbe_attach */
   1171 
   1172 /************************************************************************
   1173  * ixgbe_check_wol_support
   1174  *
   1175  *   Checks whether the adapter's ports are capable of
   1176  *   Wake On LAN by reading the adapter's NVM.
   1177  *
   1178  *   Sets each port's hw->wol_enabled value depending
   1179  *   on the value read here.
   1180  ************************************************************************/
   1181 static void
   1182 ixgbe_check_wol_support(struct adapter *adapter)
   1183 {
   1184 	struct ixgbe_hw *hw = &adapter->hw;
   1185 	u16             dev_caps = 0;
   1186 
   1187 	/* Find out WoL support for port */
   1188 	adapter->wol_support = hw->wol_enabled = 0;
   1189 	ixgbe_get_device_caps(hw, &dev_caps);
   1190 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1191 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1192 	     hw->bus.func == 0))
   1193 		adapter->wol_support = hw->wol_enabled = 1;
   1194 
   1195 	/* Save initial wake up filter configuration */
   1196 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1197 
   1198 	return;
   1199 } /* ixgbe_check_wol_support */
   1200 
   1201 /************************************************************************
   1202  * ixgbe_setup_interface
   1203  *
   1204  *   Setup networking device structure and register an interface.
   1205  ************************************************************************/
   1206 static int
   1207 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1208 {
   1209 	struct ethercom *ec = &adapter->osdep.ec;
   1210 	struct ifnet   *ifp;
   1211 
   1212 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1213 
   1214 	ifp = adapter->ifp = &ec->ec_if;
   1215 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1216 	ifp->if_baudrate = IF_Gbps(10);
   1217 	ifp->if_init = ixgbe_init;
   1218 	ifp->if_stop = ixgbe_ifstop;
   1219 	ifp->if_softc = adapter;
   1220 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1221 #ifdef IXGBE_MPSAFE
   1222 	ifp->if_extflags = IFEF_START_MPSAFE;
   1223 #endif
   1224 	ifp->if_ioctl = ixgbe_ioctl;
   1225 #if __FreeBSD_version >= 1100045
   1226 	/* TSO parameters */
   1227 	ifp->if_hw_tsomax = 65518;
   1228 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1229 	ifp->if_hw_tsomaxsegsize = 2048;
   1230 #endif
   1231 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1232 #if 0
   1233 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1234 #endif
   1235 	} else {
   1236 		ifp->if_transmit = ixgbe_mq_start;
   1237 #if 0
   1238 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1239 #endif
   1240 	}
   1241 	ifp->if_start = ixgbe_legacy_start;
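         	/*
         	 * Size the software send queue to the TX ring, less a couple of
         	 * descriptors of slack, so a full queue roughly matches a full ring.
         	 */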
   1242 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1243 	IFQ_SET_READY(&ifp->if_snd);
   1244 
   1245 	if_initialize(ifp);
   1246 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1247 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1248 	/*
   1249 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1250 	 * used.
   1251 	 */
   1252 	if_register(ifp);
   1253 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1254 
   1255 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1256 
   1257 	/*
   1258 	 * Tell the upper layer(s) we support long frames.
   1259 	 */
   1260 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1261 
   1262 	/* Set capability flags */
   1263 	ifp->if_capabilities |= IFCAP_RXCSUM
   1264 			     |  IFCAP_TXCSUM
   1265 			     |  IFCAP_TSOv4
   1266 			     |  IFCAP_TSOv6
   1267 			     |  IFCAP_LRO;
   1268 	ifp->if_capenable = 0;
   1269 
   1270 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1271 	    		    |  ETHERCAP_VLAN_HWCSUM
   1272 	    		    |  ETHERCAP_JUMBO_MTU
   1273 	    		    |  ETHERCAP_VLAN_MTU;
   1274 
   1275 	/* Enable the above capabilities by default */
   1276 	ec->ec_capenable = ec->ec_capabilities;
   1277 
    1278 	/*
    1279 	 * Don't turn this on by default. If vlans are
    1280 	 * created on another pseudo device (e.g. lagg),
    1281 	 * vlan events are not passed through, breaking
    1282 	 * operation, but with HW FILTER off it works. If
    1283 	 * you use vlans directly on the ixgbe driver, you
    1284 	 * can enable this and get full hardware tag filtering.
    1285 	 */
   1286 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1287 
   1288 	/*
   1289 	 * Specify the media types supported by this adapter and register
   1290 	 * callbacks to update media and link information
   1291 	 */
   1292 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1293 	    ixgbe_media_status);
   1294 
   1295 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1296 	ixgbe_add_media_types(adapter);
   1297 
   1298 	/* Set autoselect media by default */
   1299 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1300 
   1301 	return (0);
   1302 } /* ixgbe_setup_interface */
   1303 
   1304 /************************************************************************
   1305  * ixgbe_add_media_types
   1306  ************************************************************************/
   1307 static void
   1308 ixgbe_add_media_types(struct adapter *adapter)
   1309 {
   1310 	struct ixgbe_hw *hw = &adapter->hw;
   1311 	device_t        dev = adapter->dev;
   1312 	u64             layer;
   1313 
   1314 	layer = adapter->phy_layer;
   1315 
   1316 #define	ADD(mm, dd)							\
   1317 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1318 
   1319 	/* Media types with matching NetBSD media defines */
   1320 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1321 		ADD(IFM_10G_T, 0);
   1322 		ADD(IFM_10G_T | IFM_FDX, 0);
   1323 	}
   1324 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1325 		ADD(IFM_1000_T, 0);
   1326 		ADD(IFM_1000_T | IFM_FDX, 0);
   1327 	}
   1328 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1329 		ADD(IFM_100_TX, 0);
   1330 		ADD(IFM_100_TX | IFM_FDX, 0);
   1331 	}
   1332 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1333 		ADD(IFM_10_T, 0);
   1334 		ADD(IFM_10_T | IFM_FDX, 0);
   1335 	}
   1336 
   1337 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1338 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1339 		ADD(IFM_10G_TWINAX, 0);
   1340 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1341 	}
   1342 
   1343 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1344 		ADD(IFM_10G_LR, 0);
   1345 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1346 		if (hw->phy.multispeed_fiber) {
   1347 			ADD(IFM_1000_LX, 0);
   1348 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1349 		}
   1350 	}
   1351 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1352 		ADD(IFM_10G_SR, 0);
   1353 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1354 		if (hw->phy.multispeed_fiber) {
   1355 			ADD(IFM_1000_SX, 0);
   1356 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1357 		}
   1358 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1359 		ADD(IFM_1000_SX, 0);
   1360 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1361 	}
   1362 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1363 		ADD(IFM_10G_CX4, 0);
   1364 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1365 	}
   1366 
   1367 #ifdef IFM_ETH_XTYPE
   1368 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1369 		ADD(IFM_10G_KR, 0);
   1370 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1371 	}
   1372 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1373 		ADD(IFM_10G_KX4, 0);
    1374 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1375 	}
   1376 #else
   1377 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1378 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1379 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1380 		ADD(IFM_10G_SR, 0);
   1381 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1382 	}
   1383 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1384 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1385 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1386 		ADD(IFM_10G_CX4, 0);
   1387 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1388 	}
   1389 #endif
   1390 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1391 		ADD(IFM_1000_KX, 0);
   1392 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1393 	}
   1394 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1395 		ADD(IFM_2500_KX, 0);
   1396 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1397 	}
   1398 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1399 		device_printf(dev, "Media supported: 1000baseBX\n");
   1400 	/* XXX no ifmedia_set? */
   1401 
   1402 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
   1403 		ADD(IFM_1000_T | IFM_FDX, 0);
   1404 		ADD(IFM_1000_T, 0);
   1405 	}
   1406 
   1407 	ADD(IFM_AUTO, 0);
   1408 
   1409 #undef ADD
   1410 } /* ixgbe_add_media_types */
   1411 
   1412 /************************************************************************
   1413  * ixgbe_is_sfp
   1414  ************************************************************************/
   1415 static inline bool
   1416 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1417 {
   1418 	switch (hw->mac.type) {
   1419 	case ixgbe_mac_82598EB:
   1420 		if (hw->phy.type == ixgbe_phy_nl)
   1421 			return TRUE;
   1422 		return FALSE;
   1423 	case ixgbe_mac_82599EB:
   1424 		switch (hw->mac.ops.get_media_type(hw)) {
   1425 		case ixgbe_media_type_fiber:
   1426 		case ixgbe_media_type_fiber_qsfp:
   1427 			return TRUE;
   1428 		default:
   1429 			return FALSE;
   1430 		}
   1431 	case ixgbe_mac_X550EM_x:
   1432 	case ixgbe_mac_X550EM_a:
   1433 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1434 			return TRUE;
   1435 		return FALSE;
   1436 	default:
   1437 		return FALSE;
   1438 	}
   1439 } /* ixgbe_is_sfp */
   1440 
   1441 /************************************************************************
   1442  * ixgbe_config_link
   1443  ************************************************************************/
   1444 static void
   1445 ixgbe_config_link(struct adapter *adapter)
   1446 {
   1447 	struct ixgbe_hw *hw = &adapter->hw;
   1448 	u32             autoneg, err = 0;
   1449 	bool            sfp, negotiate = false;
   1450 
   1451 	sfp = ixgbe_is_sfp(hw);
   1452 
   1453 	if (sfp) {
   1454 		if (hw->phy.multispeed_fiber) {
   1455 			hw->mac.ops.setup_sfp(hw);
   1456 			ixgbe_enable_tx_laser(hw);
   1457 			kpreempt_disable();
   1458 			softint_schedule(adapter->msf_si);
   1459 			kpreempt_enable();
   1460 		} else {
   1461 			kpreempt_disable();
   1462 			softint_schedule(adapter->mod_si);
   1463 			kpreempt_enable();
   1464 		}
   1465 	} else {
   1466 		if (hw->mac.ops.check_link)
   1467 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1468 			    &adapter->link_up, FALSE);
   1469 		if (err)
   1470 			goto out;
   1471 		autoneg = hw->phy.autoneg_advertised;
   1472 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1473 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1474 			    &negotiate);
   1475 		if (err)
   1476 			goto out;
   1477 		if (hw->mac.ops.setup_link)
    1478 			err = hw->mac.ops.setup_link(hw, autoneg,
   1479 			    adapter->link_up);
   1480 	}
   1481 out:
   1482 
   1483 	return;
   1484 } /* ixgbe_config_link */
   1485 
   1486 /************************************************************************
   1487  * ixgbe_update_stats_counters - Update board statistics counters.
   1488  ************************************************************************/
   1489 static void
   1490 ixgbe_update_stats_counters(struct adapter *adapter)
   1491 {
   1492 	struct ifnet          *ifp = adapter->ifp;
   1493 	struct ixgbe_hw       *hw = &adapter->hw;
   1494 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1495 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1496 	u64                   total_missed_rx = 0;
   1497 	uint64_t              crcerrs, rlec;
   1498 
   1499 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1500 	stats->crcerrs.ev_count += crcerrs;
   1501 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1502 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1503 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1504 	if (hw->mac.type == ixgbe_mac_X550)
   1505 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1506 
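         	/*
         	 * The hardware exposes one QPRC/QPTC/QPRDC (and MPC/RNBC)
         	 * register per hardware queue, usually more than the driver
         	 * configures, so the loops below fold the registers onto the
         	 * active queues modulo num_queues before accumulating the
         	 * per-queue event counters.
         	 */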
   1507 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1508 		int j = i % adapter->num_queues;
   1509 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1510 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1511 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1512 	}
   1513 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1514 		uint32_t mp;
   1515 		int j = i % adapter->num_queues;
   1516 
   1517 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
    1518 		/* per-queue missed packet total */
   1519 		stats->mpc[j].ev_count += mp;
   1520 		/* running comprehensive total for stats display */
   1521 		total_missed_rx += mp;
   1522 
   1523 		if (hw->mac.type == ixgbe_mac_82598EB)
   1524 			stats->rnbc[j].ev_count
   1525 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1526 
   1527 	}
   1528 	stats->mpctotal.ev_count += total_missed_rx;
   1529 
    1530 	/* The datasheet says M[LR]FC are valid only when link is up and 10Gb/s */
   1531 	if ((adapter->link_active == TRUE)
   1532 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1533 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1534 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1535 	}
   1536 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1537 	stats->rlec.ev_count += rlec;
   1538 
   1539 	/* Hardware workaround, gprc counts missed packets */
   1540 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1541 
   1542 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1543 	stats->lxontxc.ev_count += lxon;
   1544 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1545 	stats->lxofftxc.ev_count += lxoff;
   1546 	total = lxon + lxoff;
   1547 
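         	/*
         	 * The hardware's good transmit counters apparently include the
         	 * XON/XOFF pause frames counted above, so the GOTC/GPTC (and
         	 * later MPTC/PTC64) updates subtract one minimum-length frame
         	 * per pause frame as a rough correction.
         	 */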
   1548 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1549 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1550 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1551 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1552 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1553 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1554 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1555 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1556 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1557 	} else {
   1558 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1559 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1560 		/* 82598 only has a counter in the high register */
   1561 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1562 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1563 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1564 	}
   1565 
   1566 	/*
   1567 	 * Workaround: mprc hardware is incorrectly counting
   1568 	 * broadcasts, so for now we subtract those.
   1569 	 */
   1570 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1571 	stats->bprc.ev_count += bprc;
   1572 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1573 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1574 
   1575 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1576 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1577 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1578 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1579 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1580 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1581 
   1582 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1583 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1584 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1585 
   1586 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1587 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1588 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1589 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1590 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1591 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1592 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1593 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1594 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1595 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1596 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1597 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1598 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1599 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1600 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1601 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1602 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1603 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    1604 	/* Only read FCoE counters on non-82598 MACs */
   1605 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1606 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1607 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1608 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1609 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1610 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1611 	}
   1612 
   1613 	/* Fill out the OS statistics structure */
   1614 	/*
   1615 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1616 	 * adapter->stats counters. It's required to make ifconfig -z
    1617 	 * (SIOCZIFDATA) work.
   1618 	 */
   1619 	ifp->if_collisions = 0;
   1620 
   1621 	/* Rx Errors */
   1622 	ifp->if_iqdrops += total_missed_rx;
   1623 	ifp->if_ierrors += crcerrs + rlec;
   1624 } /* ixgbe_update_stats_counters */
   1625 
   1626 /************************************************************************
   1627  * ixgbe_add_hw_stats
   1628  *
   1629  *   Add sysctl variables, one per statistic, to the system.
   1630  ************************************************************************/
   1631 static void
   1632 ixgbe_add_hw_stats(struct adapter *adapter)
   1633 {
   1634 	device_t dev = adapter->dev;
   1635 	const struct sysctlnode *rnode, *cnode;
   1636 	struct sysctllog **log = &adapter->sysctllog;
   1637 	struct tx_ring *txr = adapter->tx_rings;
   1638 	struct rx_ring *rxr = adapter->rx_rings;
   1639 	struct ixgbe_hw *hw = &adapter->hw;
   1640 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1641 	const char *xname = device_xname(dev);
   1642 
   1643 	/* Driver Statistics */
   1644 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1645 	    NULL, xname, "Handled queue in softint");
   1646 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1647 	    NULL, xname, "Requeued in softint");
   1648 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1649 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1650 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1651 	    NULL, xname, "m_defrag() failed");
   1652 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1653 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1654 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1655 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1656 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1657 	    NULL, xname, "Driver tx dma hard fail other");
   1658 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1659 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1660 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1661 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1662 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1663 	    NULL, xname, "Watchdog timeouts");
   1664 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1665 	    NULL, xname, "TSO errors");
   1666 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1667 	    NULL, xname, "Link MSI-X IRQ Handled");
   1668 
   1669 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1670 		snprintf(adapter->queues[i].evnamebuf,
   1671 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1672 		    xname, i);
   1673 		snprintf(adapter->queues[i].namebuf,
   1674 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1675 
   1676 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1677 			aprint_error_dev(dev, "could not create sysctl root\n");
   1678 			break;
   1679 		}
   1680 
   1681 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1682 		    0, CTLTYPE_NODE,
   1683 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1684 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1685 			break;
   1686 
   1687 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1688 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1689 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1690 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1691 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1692 			break;
   1693 
   1694 #if 0 /* XXX msaitoh */
   1695 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1696 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1697 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1698 			NULL, 0, &(adapter->queues[i].irqs),
   1699 		    0, CTL_CREATE, CTL_EOL) != 0)
   1700 			break;
   1701 #endif
   1702 
   1703 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1704 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1705 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1706 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1707 		    0, CTL_CREATE, CTL_EOL) != 0)
   1708 			break;
   1709 
   1710 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1711 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1712 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1713 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1714 		    0, CTL_CREATE, CTL_EOL) != 0)
   1715 			break;
   1716 
   1717 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1718 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1719 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1720 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1721 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1722 		    NULL, adapter->queues[i].evnamebuf,
   1723 		    "Queue No Descriptor Available");
   1724 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1725 		    NULL, adapter->queues[i].evnamebuf,
   1726 		    "Queue Packets Transmitted");
   1727 #ifndef IXGBE_LEGACY_TX
   1728 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1729 		    NULL, adapter->queues[i].evnamebuf,
   1730 		    "Packets dropped in pcq");
   1731 #endif
   1732 
   1733 #ifdef LRO
   1734 		struct lro_ctrl *lro = &rxr->lro;
   1735 #endif /* LRO */
   1736 
   1737 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1738 		    CTLFLAG_READONLY,
   1739 		    CTLTYPE_INT,
   1740 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1741 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1742 		    CTL_CREATE, CTL_EOL) != 0)
   1743 			break;
   1744 
   1745 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1746 		    CTLFLAG_READONLY,
   1747 		    CTLTYPE_INT,
   1748 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1749 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1750 		    CTL_CREATE, CTL_EOL) != 0)
   1751 			break;
   1752 
   1753 		if (i < __arraycount(stats->mpc)) {
   1754 			evcnt_attach_dynamic(&stats->mpc[i],
   1755 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1756 			    "RX Missed Packet Count");
   1757 			if (hw->mac.type == ixgbe_mac_82598EB)
   1758 				evcnt_attach_dynamic(&stats->rnbc[i],
   1759 				    EVCNT_TYPE_MISC, NULL,
   1760 				    adapter->queues[i].evnamebuf,
   1761 				    "Receive No Buffers");
   1762 		}
   1763 		if (i < __arraycount(stats->pxontxc)) {
   1764 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1765 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1766 			    "pxontxc");
   1767 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1768 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1769 			    "pxonrxc");
   1770 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1771 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1772 			    "pxofftxc");
   1773 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1774 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1775 			    "pxoffrxc");
   1776 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1777 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1778 			    "pxon2offc");
   1779 		}
   1780 		if (i < __arraycount(stats->qprc)) {
   1781 			evcnt_attach_dynamic(&stats->qprc[i],
   1782 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1783 			    "qprc");
   1784 			evcnt_attach_dynamic(&stats->qptc[i],
   1785 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1786 			    "qptc");
   1787 			evcnt_attach_dynamic(&stats->qbrc[i],
   1788 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1789 			    "qbrc");
   1790 			evcnt_attach_dynamic(&stats->qbtc[i],
   1791 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1792 			    "qbtc");
   1793 			evcnt_attach_dynamic(&stats->qprdc[i],
   1794 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1795 			    "qprdc");
   1796 		}
   1797 
   1798 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1799 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1800 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1801 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1802 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1803 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1804 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1805 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1806 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1807 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1808 #ifdef LRO
   1809 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1810 				CTLFLAG_RD, &lro->lro_queued, 0,
   1811 				"LRO Queued");
   1812 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1813 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1814 				"LRO Flushed");
   1815 #endif /* LRO */
   1816 	}
   1817 
   1818 	/* MAC stats get their own sub node */
   1819 
   1820 	snprintf(stats->namebuf,
   1821 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1822 
   1823 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1824 	    stats->namebuf, "rx csum offload - IP");
   1825 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1826 	    stats->namebuf, "rx csum offload - L4");
   1827 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1828 	    stats->namebuf, "rx csum offload - IP bad");
   1829 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1830 	    stats->namebuf, "rx csum offload - L4 bad");
   1831 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1832 	    stats->namebuf, "Interrupt conditions zero");
   1833 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1834 	    stats->namebuf, "Legacy interrupts");
   1835 
   1836 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1837 	    stats->namebuf, "CRC Errors");
   1838 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1839 	    stats->namebuf, "Illegal Byte Errors");
   1840 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1841 	    stats->namebuf, "Byte Errors");
   1842 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1843 	    stats->namebuf, "MAC Short Packets Discarded");
   1844 	if (hw->mac.type >= ixgbe_mac_X550)
   1845 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1846 		    stats->namebuf, "Bad SFD");
   1847 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1848 	    stats->namebuf, "Total Packets Missed");
   1849 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1850 	    stats->namebuf, "MAC Local Faults");
   1851 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1852 	    stats->namebuf, "MAC Remote Faults");
   1853 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1854 	    stats->namebuf, "Receive Length Errors");
   1855 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1856 	    stats->namebuf, "Link XON Transmitted");
   1857 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1858 	    stats->namebuf, "Link XON Received");
   1859 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1860 	    stats->namebuf, "Link XOFF Transmitted");
   1861 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1862 	    stats->namebuf, "Link XOFF Received");
   1863 
   1864 	/* Packet Reception Stats */
   1865 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1866 	    stats->namebuf, "Total Octets Received");
   1867 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1868 	    stats->namebuf, "Good Octets Received");
   1869 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1870 	    stats->namebuf, "Total Packets Received");
   1871 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1872 	    stats->namebuf, "Good Packets Received");
   1873 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1874 	    stats->namebuf, "Multicast Packets Received");
   1875 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1876 	    stats->namebuf, "Broadcast Packets Received");
   1877 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
    1878 	    stats->namebuf, "64 byte frames received");
   1879 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1880 	    stats->namebuf, "65-127 byte frames received");
   1881 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1882 	    stats->namebuf, "128-255 byte frames received");
   1883 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1884 	    stats->namebuf, "256-511 byte frames received");
   1885 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1886 	    stats->namebuf, "512-1023 byte frames received");
   1887 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    1888 	    stats->namebuf, "1024-1522 byte frames received");
   1889 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1890 	    stats->namebuf, "Receive Undersized");
   1891 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
    1892 	    stats->namebuf, "Fragmented Packets Received");
   1893 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1894 	    stats->namebuf, "Oversized Packets Received");
   1895 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "Received Jabber");
   1897 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "Management Packets Received");
   1899 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1900 	    stats->namebuf, "Management Packets Dropped");
   1901 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Checksum Errors");
   1903 
   1904 	/* Packet Transmission Stats */
   1905 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "Good Octets Transmitted");
   1907 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "Total Packets Transmitted");
   1909 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1910 	    stats->namebuf, "Good Packets Transmitted");
   1911 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1912 	    stats->namebuf, "Broadcast Packets Transmitted");
   1913 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "Multicast Packets Transmitted");
   1915 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1916 	    stats->namebuf, "Management Packets Transmitted");
   1917 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
    1918 	    stats->namebuf, "64 byte frames transmitted");
   1919 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1920 	    stats->namebuf, "65-127 byte frames transmitted");
   1921 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1922 	    stats->namebuf, "128-255 byte frames transmitted");
   1923 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1924 	    stats->namebuf, "256-511 byte frames transmitted");
   1925 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1926 	    stats->namebuf, "512-1023 byte frames transmitted");
   1927 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1928 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1929 } /* ixgbe_add_hw_stats */
   1930 
   1931 static void
   1932 ixgbe_clear_evcnt(struct adapter *adapter)
   1933 {
   1934 	struct tx_ring *txr = adapter->tx_rings;
   1935 	struct rx_ring *rxr = adapter->rx_rings;
   1936 	struct ixgbe_hw *hw = &adapter->hw;
   1937 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1938 
   1939 	adapter->handleq.ev_count = 0;
   1940 	adapter->req.ev_count = 0;
   1941 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1942 	adapter->mbuf_defrag_failed.ev_count = 0;
   1943 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1944 	adapter->einval_tx_dma_setup.ev_count = 0;
   1945 	adapter->other_tx_dma_setup.ev_count = 0;
   1946 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1947 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1948 	adapter->watchdog_events.ev_count = 0;
   1949 	adapter->tso_err.ev_count = 0;
   1950 	adapter->link_irq.ev_count = 0;
   1951 
   1952 	txr = adapter->tx_rings;
   1953 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1954 		adapter->queues[i].irqs.ev_count = 0;
   1955 		txr->no_desc_avail.ev_count = 0;
   1956 		txr->total_packets.ev_count = 0;
   1957 		txr->tso_tx.ev_count = 0;
   1958 #ifndef IXGBE_LEGACY_TX
   1959 		txr->pcq_drops.ev_count = 0;
   1960 #endif
   1961 
   1962 		if (i < __arraycount(stats->mpc)) {
   1963 			stats->mpc[i].ev_count = 0;
   1964 			if (hw->mac.type == ixgbe_mac_82598EB)
   1965 				stats->rnbc[i].ev_count = 0;
   1966 		}
   1967 		if (i < __arraycount(stats->pxontxc)) {
   1968 			stats->pxontxc[i].ev_count = 0;
   1969 			stats->pxonrxc[i].ev_count = 0;
   1970 			stats->pxofftxc[i].ev_count = 0;
   1971 			stats->pxoffrxc[i].ev_count = 0;
   1972 			stats->pxon2offc[i].ev_count = 0;
   1973 		}
   1974 		if (i < __arraycount(stats->qprc)) {
   1975 			stats->qprc[i].ev_count = 0;
   1976 			stats->qptc[i].ev_count = 0;
   1977 			stats->qbrc[i].ev_count = 0;
   1978 			stats->qbtc[i].ev_count = 0;
   1979 			stats->qprdc[i].ev_count = 0;
   1980 		}
   1981 
   1982 		rxr->rx_packets.ev_count = 0;
   1983 		rxr->rx_bytes.ev_count = 0;
   1984 		rxr->rx_copies.ev_count = 0;
   1985 		rxr->no_jmbuf.ev_count = 0;
   1986 		rxr->rx_discarded.ev_count = 0;
   1987 	}
   1988 	stats->ipcs.ev_count = 0;
   1989 	stats->l4cs.ev_count = 0;
   1990 	stats->ipcs_bad.ev_count = 0;
   1991 	stats->l4cs_bad.ev_count = 0;
   1992 	stats->intzero.ev_count = 0;
   1993 	stats->legint.ev_count = 0;
   1994 	stats->crcerrs.ev_count = 0;
   1995 	stats->illerrc.ev_count = 0;
   1996 	stats->errbc.ev_count = 0;
   1997 	stats->mspdc.ev_count = 0;
   1998 	stats->mbsdc.ev_count = 0;
   1999 	stats->mpctotal.ev_count = 0;
   2000 	stats->mlfc.ev_count = 0;
   2001 	stats->mrfc.ev_count = 0;
   2002 	stats->rlec.ev_count = 0;
   2003 	stats->lxontxc.ev_count = 0;
   2004 	stats->lxonrxc.ev_count = 0;
   2005 	stats->lxofftxc.ev_count = 0;
   2006 	stats->lxoffrxc.ev_count = 0;
   2007 
   2008 	/* Packet Reception Stats */
   2009 	stats->tor.ev_count = 0;
   2010 	stats->gorc.ev_count = 0;
   2011 	stats->tpr.ev_count = 0;
   2012 	stats->gprc.ev_count = 0;
   2013 	stats->mprc.ev_count = 0;
   2014 	stats->bprc.ev_count = 0;
   2015 	stats->prc64.ev_count = 0;
   2016 	stats->prc127.ev_count = 0;
   2017 	stats->prc255.ev_count = 0;
   2018 	stats->prc511.ev_count = 0;
   2019 	stats->prc1023.ev_count = 0;
   2020 	stats->prc1522.ev_count = 0;
   2021 	stats->ruc.ev_count = 0;
   2022 	stats->rfc.ev_count = 0;
   2023 	stats->roc.ev_count = 0;
   2024 	stats->rjc.ev_count = 0;
   2025 	stats->mngprc.ev_count = 0;
   2026 	stats->mngpdc.ev_count = 0;
   2027 	stats->xec.ev_count = 0;
   2028 
   2029 	/* Packet Transmission Stats */
   2030 	stats->gotc.ev_count = 0;
   2031 	stats->tpt.ev_count = 0;
   2032 	stats->gptc.ev_count = 0;
   2033 	stats->bptc.ev_count = 0;
   2034 	stats->mptc.ev_count = 0;
   2035 	stats->mngptc.ev_count = 0;
   2036 	stats->ptc64.ev_count = 0;
   2037 	stats->ptc127.ev_count = 0;
   2038 	stats->ptc255.ev_count = 0;
   2039 	stats->ptc511.ev_count = 0;
   2040 	stats->ptc1023.ev_count = 0;
   2041 	stats->ptc1522.ev_count = 0;
   2042 }
   2043 
   2044 /************************************************************************
   2045  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2046  *
   2047  *   Retrieves the TDH value from the hardware
   2048  ************************************************************************/
   2049 static int
   2050 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2051 {
   2052 	struct sysctlnode node = *rnode;
   2053 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2054 	uint32_t val;
   2055 
   2056 	if (!txr)
   2057 		return (0);
   2058 
   2059 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2060 	node.sysctl_data = &val;
   2061 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2062 } /* ixgbe_sysctl_tdh_handler */
   2063 
   2064 /************************************************************************
   2065  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2066  *
   2067  *   Retrieves the TDT value from the hardware
   2068  ************************************************************************/
   2069 static int
   2070 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2071 {
   2072 	struct sysctlnode node = *rnode;
   2073 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2074 	uint32_t val;
   2075 
   2076 	if (!txr)
   2077 		return (0);
   2078 
   2079 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2080 	node.sysctl_data = &val;
   2081 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2082 } /* ixgbe_sysctl_tdt_handler */
   2083 
   2084 /************************************************************************
   2085  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2086  *
   2087  *   Retrieves the RDH value from the hardware
   2088  ************************************************************************/
   2089 static int
   2090 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2091 {
   2092 	struct sysctlnode node = *rnode;
   2093 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2094 	uint32_t val;
   2095 
   2096 	if (!rxr)
   2097 		return (0);
   2098 
   2099 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2100 	node.sysctl_data = &val;
   2101 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2102 } /* ixgbe_sysctl_rdh_handler */
   2103 
   2104 /************************************************************************
   2105  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2106  *
   2107  *   Retrieves the RDT value from the hardware
   2108  ************************************************************************/
   2109 static int
   2110 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2111 {
   2112 	struct sysctlnode node = *rnode;
   2113 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2114 	uint32_t val;
   2115 
   2116 	if (!rxr)
   2117 		return (0);
   2118 
   2119 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2120 	node.sysctl_data = &val;
   2121 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2122 } /* ixgbe_sysctl_rdt_handler */
   2123 
   2124 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2125 /************************************************************************
   2126  * ixgbe_register_vlan
   2127  *
   2128  *   Run via vlan config EVENT, it enables us to use the
   2129  *   HW Filter table since we can get the vlan id. This
   2130  *   just creates the entry in the soft version of the
   2131  *   VFTA, init will repopulate the real table.
   2132  ************************************************************************/
   2133 static void
   2134 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2135 {
   2136 	struct adapter	*adapter = ifp->if_softc;
   2137 	u16		index, bit;
   2138 
   2139 	if (ifp->if_softc != arg)   /* Not our event */
   2140 		return;
   2141 
   2142 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2143 		return;
   2144 
   2145 	IXGBE_CORE_LOCK(adapter);
   2146 	index = (vtag >> 5) & 0x7F;
   2147 	bit = vtag & 0x1F;
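         	/*
         	 * The 12-bit VLAN ID selects one bit in the 4096-bit VFTA:
         	 * bits 11:5 pick the 32-bit word and bits 4:0 the bit within
         	 * it (e.g. VLAN 100 -> word 3, bit 4).
         	 */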
   2148 	adapter->shadow_vfta[index] |= (1 << bit);
   2149 	ixgbe_setup_vlan_hw_support(adapter);
   2150 	IXGBE_CORE_UNLOCK(adapter);
   2151 } /* ixgbe_register_vlan */
   2152 
   2153 /************************************************************************
   2154  * ixgbe_unregister_vlan
   2155  *
   2156  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2157  ************************************************************************/
   2158 static void
   2159 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2160 {
   2161 	struct adapter	*adapter = ifp->if_softc;
   2162 	u16		index, bit;
   2163 
   2164 	if (ifp->if_softc != arg)
   2165 		return;
   2166 
   2167 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2168 		return;
   2169 
   2170 	IXGBE_CORE_LOCK(adapter);
   2171 	index = (vtag >> 5) & 0x7F;
   2172 	bit = vtag & 0x1F;
   2173 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2174 	/* Re-init to load the changes */
   2175 	ixgbe_setup_vlan_hw_support(adapter);
   2176 	IXGBE_CORE_UNLOCK(adapter);
   2177 } /* ixgbe_unregister_vlan */
   2178 #endif
   2179 
   2180 static void
   2181 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2182 {
   2183 	struct ethercom *ec = &adapter->osdep.ec;
   2184 	struct ixgbe_hw *hw = &adapter->hw;
   2185 	struct rx_ring	*rxr;
   2186 	int             i;
   2187 	u32		ctrl;
   2188 
   2189 
   2190 	/*
    2191 	 * We get here through init_locked, meaning
    2192 	 * a soft reset; this has already cleared
    2193 	 * the VFTA and other state, so if no
    2194 	 * VLANs have been registered, do nothing.
   2195 	 */
   2196 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2197 		return;
   2198 
   2199 	/* Setup the queues for vlans */
   2200 	for (i = 0; i < adapter->num_queues; i++) {
   2201 		rxr = &adapter->rx_rings[i];
    2202 		/* On 82599 the VLAN enable is per-queue in RXDCTL */
   2203 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2204 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2205 			ctrl |= IXGBE_RXDCTL_VME;
   2206 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2207 		}
   2208 		rxr->vtag_strip = TRUE;
   2209 	}
   2210 
   2211 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2212 		return;
   2213 	/*
    2214 	 * A soft reset zeroes out the VFTA, so
   2215 	 * we need to repopulate it now.
   2216 	 */
   2217 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2218 		if (adapter->shadow_vfta[i] != 0)
   2219 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2220 			    adapter->shadow_vfta[i]);
   2221 
   2222 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2223 	/* Enable the Filter Table if enabled */
   2224 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2225 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2226 		ctrl |= IXGBE_VLNCTRL_VFE;
   2227 	}
   2228 	if (hw->mac.type == ixgbe_mac_82598EB)
   2229 		ctrl |= IXGBE_VLNCTRL_VME;
   2230 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2231 } /* ixgbe_setup_vlan_hw_support */
   2232 
   2233 /************************************************************************
   2234  * ixgbe_get_slot_info
   2235  *
   2236  *   Get the width and transaction speed of
   2237  *   the slot this adapter is plugged into.
   2238  ************************************************************************/
   2239 static void
   2240 ixgbe_get_slot_info(struct adapter *adapter)
   2241 {
   2242 	device_t		dev = adapter->dev;
   2243 	struct ixgbe_hw		*hw = &adapter->hw;
   2244 	u32                   offset;
   2245 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2246 	u16			link;
   2247 	int                   bus_info_valid = TRUE;
   2248 
   2249 	/* Some devices are behind an internal bridge */
   2250 	switch (hw->device_id) {
   2251 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2252 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2253 		goto get_parent_info;
   2254 	default:
   2255 		break;
   2256 	}
   2257 
   2258 	ixgbe_get_bus_info(hw);
   2259 
   2260 	/*
    2261 	 * Some devices don't use PCI-E, so skip the display
    2262 	 * rather than reporting "Unknown" bus speed and width.
   2263 	 */
   2264 	switch (hw->mac.type) {
   2265 	case ixgbe_mac_X550EM_x:
   2266 	case ixgbe_mac_X550EM_a:
   2267 		return;
   2268 	default:
   2269 		goto display;
   2270 	}
   2271 
   2272 get_parent_info:
   2273 	/*
   2274 	 * For the Quad port adapter we need to parse back
   2275 	 * up the PCI tree to find the speed of the expansion
   2276 	 * slot into which this adapter is plugged. A bit more work.
   2277 	 */
   2278 	dev = device_parent(device_parent(dev));
   2279 #if 0
   2280 #ifdef IXGBE_DEBUG
   2281 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2282 	    pci_get_slot(dev), pci_get_function(dev));
   2283 #endif
   2284 	dev = device_parent(device_parent(dev));
   2285 #ifdef IXGBE_DEBUG
   2286 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2287 	    pci_get_slot(dev), pci_get_function(dev));
   2288 #endif
   2289 #endif
   2290 	/* Now get the PCI Express Capabilities offset */
   2291 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2292 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2293 		/*
   2294 		 * Hmm...can't get PCI-Express capabilities.
   2295 		 * Falling back to default method.
   2296 		 */
   2297 		bus_info_valid = FALSE;
   2298 		ixgbe_get_bus_info(hw);
   2299 		goto display;
   2300 	}
   2301 	/* ...and read the Link Status Register */
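         	/*
         	 * PCIE_LCSR packs Link Control in the low 16 bits and Link
         	 * Status in the high 16 bits, so the status half is shifted
         	 * down before it is handed to the generic decode helper.
         	 */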
   2302 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2303 	    offset + PCIE_LCSR);
   2304 	ixgbe_set_pci_config_data_generic(hw, link >> 16);
   2305 
   2306 display:
   2307 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2308 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2309 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2310 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2311 	     "Unknown"),
   2312 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2313 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2314 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2315 	     "Unknown"));
   2316 
   2317 	if (bus_info_valid) {
   2318 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2319 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2320 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2321 			device_printf(dev, "PCI-Express bandwidth available"
   2322 			    " for this card\n     is not sufficient for"
   2323 			    " optimal performance.\n");
   2324 			device_printf(dev, "For optimal performance a x8 "
   2325 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2326 		}
   2327 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2328 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2329 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2330 			device_printf(dev, "PCI-Express bandwidth available"
   2331 			    " for this card\n     is not sufficient for"
   2332 			    " optimal performance.\n");
   2333 			device_printf(dev, "For optimal performance a x8 "
   2334 			    "PCIE Gen3 slot is required.\n");
   2335 		}
   2336 	} else
    2337 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
   2338 
   2339 	return;
   2340 } /* ixgbe_get_slot_info */
   2341 
   2342 /************************************************************************
   2343  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2344  ************************************************************************/
   2345 static inline void
   2346 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2347 {
   2348 	struct ixgbe_hw *hw = &adapter->hw;
   2349 	u64             queue = (u64)(1ULL << vector);
   2350 	u32             mask;
   2351 
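         	/*
         	 * 82598 keeps all queue bits in the single EIMS register;
         	 * newer MACs spread up to 64 vectors across the two 32-bit
         	 * EIMS_EX registers, so the vector's bit goes to EIMS_EX(0)
         	 * for vectors 0-31 and EIMS_EX(1) for vectors 32-63.
         	 */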
   2352 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2353 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2354 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2355 	} else {
   2356 		mask = (queue & 0xFFFFFFFF);
   2357 		if (mask)
   2358 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2359 		mask = (queue >> 32);
   2360 		if (mask)
   2361 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2362 	}
   2363 } /* ixgbe_enable_queue */
   2364 
   2365 /************************************************************************
   2366  * ixgbe_disable_queue
   2367  ************************************************************************/
   2368 static inline void
   2369 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2370 {
   2371 	struct ixgbe_hw *hw = &adapter->hw;
   2372 	u64             queue = (u64)(1ULL << vector);
   2373 	u32             mask;
   2374 
   2375 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2376 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2377 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2378 	} else {
   2379 		mask = (queue & 0xFFFFFFFF);
   2380 		if (mask)
   2381 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2382 		mask = (queue >> 32);
   2383 		if (mask)
   2384 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2385 	}
   2386 } /* ixgbe_disable_queue */
   2387 
   2388 /************************************************************************
   2389  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2390  ************************************************************************/
   2391 static int
   2392 ixgbe_msix_que(void *arg)
   2393 {
   2394 	struct ix_queue	*que = arg;
   2395 	struct adapter  *adapter = que->adapter;
   2396 	struct ifnet    *ifp = adapter->ifp;
   2397 	struct tx_ring	*txr = que->txr;
   2398 	struct rx_ring	*rxr = que->rxr;
   2399 	bool		more;
   2400 	u32		newitr = 0;
   2401 
   2402 	/* Protect against spurious interrupts */
   2403 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2404 		return 0;
   2405 
   2406 	ixgbe_disable_queue(adapter, que->msix);
   2407 	++que->irqs.ev_count;
   2408 
   2409 #ifdef __NetBSD__
   2410 	/* Don't run ixgbe_rxeof in interrupt context */
   2411 	more = true;
   2412 #else
   2413 	more = ixgbe_rxeof(que);
   2414 #endif
   2415 
   2416 	IXGBE_TX_LOCK(txr);
   2417 	ixgbe_txeof(txr);
   2418 	IXGBE_TX_UNLOCK(txr);
   2419 
   2420 	/* Do AIM now? */
   2421 
   2422 	if (adapter->enable_aim == false)
   2423 		goto no_calc;
   2424 	/*
   2425 	 * Do Adaptive Interrupt Moderation:
   2426 	 *  - Write out last calculated setting
   2427 	 *  - Calculate based on average size over
   2428 	 *    the last interval.
   2429 	 */
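         	/*
         	 * Illustrative only: an average frame of ~600 bytes gives a
         	 * raw value of 624, which falls in the 300-1200 "mid range"
         	 * and is divided by 3 (~208); a ~1500 byte average gives
         	 * 1524, which is halved instead (~762).  The constant 24 is
         	 * a rough allowance for per-frame preamble/IFG/CRC overhead.
         	 */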
   2430 	if (que->eitr_setting)
   2431 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2432 		    que->eitr_setting);
   2433 
   2434 	que->eitr_setting = 0;
   2435 
   2436 	/* Idle, do nothing */
    2437 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2438 		goto no_calc;
   2439 
   2440 	if ((txr->bytes) && (txr->packets))
   2441 		newitr = txr->bytes/txr->packets;
   2442 	if ((rxr->bytes) && (rxr->packets))
   2443 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2444 	newitr += 24; /* account for hardware frame, crc */
   2445 
   2446 	/* set an upper boundary */
   2447 	newitr = min(newitr, 3000);
   2448 
   2449 	/* Be nice to the mid range */
   2450 	if ((newitr > 300) && (newitr < 1200))
   2451 		newitr = (newitr / 3);
   2452 	else
   2453 		newitr = (newitr / 2);
   2454 
    2455 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2456 		newitr |= newitr << 16;
    2457 	else
    2458 		newitr |= IXGBE_EITR_CNT_WDIS;
    2459 
    2460 	/* save for next interrupt */
    2461 	que->eitr_setting = newitr;
   2462 
   2463 	/* Reset state */
   2464 	txr->bytes = 0;
   2465 	txr->packets = 0;
   2466 	rxr->bytes = 0;
   2467 	rxr->packets = 0;
   2468 
   2469 no_calc:
   2470 	if (more)
   2471 		softint_schedule(que->que_si);
   2472 	else
   2473 		ixgbe_enable_queue(adapter, que->msix);
   2474 
   2475 	return 1;
   2476 } /* ixgbe_msix_que */
   2477 
   2478 /************************************************************************
   2479  * ixgbe_media_status - Media Ioctl callback
   2480  *
   2481  *   Called whenever the user queries the status of
   2482  *   the interface using ifconfig.
   2483  ************************************************************************/
   2484 static void
   2485 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2486 {
   2487 	struct adapter *adapter = ifp->if_softc;
   2488 	struct ixgbe_hw *hw = &adapter->hw;
   2489 	int layer;
   2490 
   2491 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2492 	IXGBE_CORE_LOCK(adapter);
   2493 	ixgbe_update_link_status(adapter);
   2494 
   2495 	ifmr->ifm_status = IFM_AVALID;
   2496 	ifmr->ifm_active = IFM_ETHER;
   2497 
   2498 	if (!adapter->link_active) {
   2499 		ifmr->ifm_active |= IFM_NONE;
   2500 		IXGBE_CORE_UNLOCK(adapter);
   2501 		return;
   2502 	}
   2503 
   2504 	ifmr->ifm_status |= IFM_ACTIVE;
   2505 	layer = adapter->phy_layer;
   2506 
   2507 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2508 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2509 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2510 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2511 		switch (adapter->link_speed) {
   2512 		case IXGBE_LINK_SPEED_10GB_FULL:
   2513 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2514 			break;
   2515 		case IXGBE_LINK_SPEED_1GB_FULL:
   2516 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2517 			break;
   2518 		case IXGBE_LINK_SPEED_100_FULL:
   2519 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2520 			break;
   2521 		case IXGBE_LINK_SPEED_10_FULL:
   2522 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2523 			break;
   2524 		}
   2525 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2526 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2527 		switch (adapter->link_speed) {
   2528 		case IXGBE_LINK_SPEED_10GB_FULL:
   2529 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2530 			break;
   2531 		}
   2532 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2533 		switch (adapter->link_speed) {
   2534 		case IXGBE_LINK_SPEED_10GB_FULL:
   2535 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2536 			break;
   2537 		case IXGBE_LINK_SPEED_1GB_FULL:
   2538 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2539 			break;
   2540 		}
   2541 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2542 		switch (adapter->link_speed) {
   2543 		case IXGBE_LINK_SPEED_10GB_FULL:
   2544 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2545 			break;
   2546 		case IXGBE_LINK_SPEED_1GB_FULL:
   2547 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2548 			break;
   2549 		}
   2550 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2551 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2552 		switch (adapter->link_speed) {
   2553 		case IXGBE_LINK_SPEED_10GB_FULL:
   2554 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2555 			break;
   2556 		case IXGBE_LINK_SPEED_1GB_FULL:
   2557 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2558 			break;
   2559 		}
   2560 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2561 		switch (adapter->link_speed) {
   2562 		case IXGBE_LINK_SPEED_10GB_FULL:
   2563 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2564 			break;
   2565 		}
   2566 	/*
   2567 	 * XXX: These need to use the proper media types once
   2568 	 * they're added.
   2569 	 */
   2570 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2571 		switch (adapter->link_speed) {
   2572 		case IXGBE_LINK_SPEED_10GB_FULL:
   2573 #ifndef IFM_ETH_XTYPE
   2574 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2575 #else
   2576 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2577 #endif
   2578 			break;
   2579 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2580 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2581 			break;
   2582 		case IXGBE_LINK_SPEED_1GB_FULL:
   2583 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2584 			break;
   2585 		}
   2586 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2587 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2588 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2589 		switch (adapter->link_speed) {
   2590 		case IXGBE_LINK_SPEED_10GB_FULL:
   2591 #ifndef IFM_ETH_XTYPE
   2592 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2593 #else
   2594 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2595 #endif
   2596 			break;
   2597 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2598 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2599 			break;
   2600 		case IXGBE_LINK_SPEED_1GB_FULL:
   2601 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2602 			break;
   2603 		}
   2604 
   2605 	/* If nothing is recognized... */
   2606 #if 0
   2607 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2608 		ifmr->ifm_active |= IFM_UNKNOWN;
   2609 #endif
   2610 
   2611 	/* Display current flow control setting used on link */
   2612 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2613 	    hw->fc.current_mode == ixgbe_fc_full)
   2614 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2615 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2616 	    hw->fc.current_mode == ixgbe_fc_full)
   2617 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2618 
   2619 	IXGBE_CORE_UNLOCK(adapter);
   2620 
   2621 	return;
   2622 } /* ixgbe_media_status */
   2623 
   2624 /************************************************************************
   2625  * ixgbe_media_change - Media Ioctl callback
   2626  *
   2627  *   Called when the user changes speed/duplex using
    2628  *   media/mediaopt option with ifconfig.
   2629  ************************************************************************/
   2630 static int
   2631 ixgbe_media_change(struct ifnet *ifp)
   2632 {
   2633 	struct adapter   *adapter = ifp->if_softc;
   2634 	struct ifmedia   *ifm = &adapter->media;
   2635 	struct ixgbe_hw  *hw = &adapter->hw;
   2636 	ixgbe_link_speed speed = 0;
   2637 	ixgbe_link_speed link_caps = 0;
   2638 	bool negotiate = false;
   2639 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2640 
   2641 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2642 
   2643 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2644 		return (EINVAL);
   2645 
   2646 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2647 		return (ENODEV);
   2648 
   2649 	/*
   2650 	 * We don't actually need to check against the supported
   2651 	 * media types of the adapter; ifmedia will take care of
   2652 	 * that for us.
   2653 	 */
   2654 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2655 	case IFM_AUTO:
   2656 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2657 		    &negotiate);
   2658 		if (err != IXGBE_SUCCESS) {
   2659 			device_printf(adapter->dev, "Unable to determine "
   2660 			    "supported advertise speeds\n");
   2661 			return (ENODEV);
   2662 		}
   2663 		speed |= link_caps;
   2664 		break;
   2665 	case IFM_10G_T:
   2666 	case IFM_10G_LRM:
   2667 	case IFM_10G_LR:
   2668 	case IFM_10G_TWINAX:
   2669 #ifndef IFM_ETH_XTYPE
   2670 	case IFM_10G_SR: /* KR, too */
   2671 	case IFM_10G_CX4: /* KX4 */
   2672 #else
   2673 	case IFM_10G_KR:
   2674 	case IFM_10G_KX4:
   2675 #endif
   2676 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2677 		break;
   2678 	case IFM_2500_KX:
   2679 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2680 		break;
   2681 	case IFM_1000_T:
   2682 	case IFM_1000_LX:
   2683 	case IFM_1000_SX:
   2684 	case IFM_1000_KX:
   2685 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2686 		break;
   2687 	case IFM_100_TX:
   2688 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2689 		break;
   2690 	case IFM_10_T:
   2691 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2692 		break;
   2693 	default:
   2694 		goto invalid;
   2695 	}
   2696 
   2697 	hw->mac.autotry_restart = TRUE;
   2698 	hw->mac.ops.setup_link(hw, speed, TRUE);
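         	/*
         	 * Track the selection in adapter->advertise using the
         	 * driver's speed bitmap: bit 0 = 100M, bit 1 = 1G,
         	 * bit 2 = 10G, bit 4 = 10M; IFM_AUTO clears it so all
         	 * supported speeds are advertised.
         	 */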
   2699 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
   2700 		adapter->advertise = 0;
   2701 	} else {
   2702 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2703 			adapter->advertise |= 1 << 2;
   2704 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2705 			adapter->advertise |= 1 << 1;
   2706 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2707 			adapter->advertise |= 1 << 0;
   2708 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2709 			adapter->advertise |= 1 << 4;
   2710 	}
   2711 
   2712 	return (0);
   2713 
   2714 invalid:
   2715 	device_printf(adapter->dev, "Invalid media type!\n");
   2716 
   2717 	return (EINVAL);
   2718 } /* ixgbe_media_change */
   2719 
   2720 /************************************************************************
   2721  * ixgbe_set_promisc
   2722  ************************************************************************/
   2723 static void
   2724 ixgbe_set_promisc(struct adapter *adapter)
   2725 {
   2726 	struct ifnet *ifp = adapter->ifp;
   2727 	int          mcnt = 0;
   2728 	u32          rctl;
   2729 	struct ether_multi *enm;
   2730 	struct ether_multistep step;
   2731 	struct ethercom *ec = &adapter->osdep.ec;
   2732 
   2733 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2734 	rctl &= (~IXGBE_FCTRL_UPE);
   2735 	if (ifp->if_flags & IFF_ALLMULTI)
   2736 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2737 	else {
   2738 		ETHER_FIRST_MULTI(step, ec, enm);
   2739 		while (enm != NULL) {
   2740 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2741 				break;
   2742 			mcnt++;
   2743 			ETHER_NEXT_MULTI(step, enm);
   2744 		}
   2745 	}
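         	/*
         	 * Unicast promiscuous (UPE) is dropped unconditionally here;
         	 * multicast promiscuous (MPE) is kept only when the multicast
         	 * list overflows or IFF_ALLMULTI is set, and both are turned
         	 * back on below when IFF_PROMISC is requested.
         	 */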
   2746 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2747 		rctl &= (~IXGBE_FCTRL_MPE);
   2748 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2749 
   2750 	if (ifp->if_flags & IFF_PROMISC) {
   2751 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2752 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2753 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2754 		rctl |= IXGBE_FCTRL_MPE;
   2755 		rctl &= ~IXGBE_FCTRL_UPE;
   2756 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2757 	}
   2758 } /* ixgbe_set_promisc */
   2759 
   2760 /************************************************************************
   2761  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2762  ************************************************************************/
   2763 static int
   2764 ixgbe_msix_link(void *arg)
   2765 {
   2766 	struct adapter	*adapter = arg;
   2767 	struct ixgbe_hw *hw = &adapter->hw;
   2768 	u32		eicr, eicr_mask;
   2769 	s32             retval;
   2770 
   2771 	++adapter->link_irq.ev_count;
   2772 
   2773 	/* Pause other interrupts */
   2774 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2775 
   2776 	/* First get the cause */
   2777 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2778 	/* Be sure the queue bits are not cleared */
   2779 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2780 	/* Clear interrupt with write */
   2781 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2782 
   2783 	/* Link status change */
   2784 	if (eicr & IXGBE_EICR_LSC) {
   2785 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2786 		softint_schedule(adapter->link_si);
   2787 	}
   2788 
   2789 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2790 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2791 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2792 			/* This is probably overkill :) */
   2793 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   2794 				return 1;
   2795 			/* Disable the interrupt */
   2796 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2797 			softint_schedule(adapter->fdir_si);
   2798 		}
   2799 
   2800 		if (eicr & IXGBE_EICR_ECC) {
   2801 			device_printf(adapter->dev,
   2802 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2803 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2804 		}
   2805 
   2806 		/* Check for over temp condition */
   2807 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2808 			switch (adapter->hw.mac.type) {
   2809 			case ixgbe_mac_X550EM_a:
   2810 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2811 					break;
   2812 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2813 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2814 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2815 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2816 				retval = hw->phy.ops.check_overtemp(hw);
   2817 				if (retval != IXGBE_ERR_OVERTEMP)
   2818 					break;
   2819 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2820 				device_printf(adapter->dev, "System shutdown required!\n");
   2821 				break;
   2822 			default:
   2823 				if (!(eicr & IXGBE_EICR_TS))
   2824 					break;
   2825 				retval = hw->phy.ops.check_overtemp(hw);
   2826 				if (retval != IXGBE_ERR_OVERTEMP)
   2827 					break;
   2828 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2829 				device_printf(adapter->dev, "System shutdown required!\n");
   2830 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2831 				break;
   2832 			}
   2833 		}
   2834 
   2835 		/* Check for VF message */
   2836 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2837 		    (eicr & IXGBE_EICR_MAILBOX))
   2838 			softint_schedule(adapter->mbx_si);
   2839 	}
   2840 
   2841 	if (ixgbe_is_sfp(hw)) {
   2842 		/* Pluggable optics-related interrupt */
   2843 		if (hw->mac.type >= ixgbe_mac_X540)
   2844 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2845 		else
   2846 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2847 
   2848 		if (eicr & eicr_mask) {
   2849 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2850 			softint_schedule(adapter->mod_si);
   2851 		}
   2852 
   2853 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2854 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2855 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2856 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2857 			softint_schedule(adapter->msf_si);
   2858 		}
   2859 	}
   2860 
   2861 	/* Check for fan failure */
   2862 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2863 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2864 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2865 	}
   2866 
   2867 	/* External PHY interrupt */
   2868 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2869 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2870 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2871 		softint_schedule(adapter->phy_si);
   2872  	}
   2873 
   2874 	/* Re-enable other interrupts */
   2875 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2876 	return 1;
   2877 } /* ixgbe_msix_link */
   2878 
   2879 /************************************************************************
   2880  * ixgbe_sysctl_interrupt_rate_handler
   2881  ************************************************************************/
   2882 static int
   2883 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2884 {
   2885 	struct sysctlnode node = *rnode;
   2886 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2887 	uint32_t reg, usec, rate;
   2888 	int error;
   2889 
   2890 	if (que == NULL)
   2891 		return 0;
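         	/*
         	 * The EITR interval lives in bits 3:11 and is treated here as
         	 * a count of 2us units, so the effective interrupt rate is
         	 * roughly 500000 / interval per second (e.g. an interval of
         	 * 62 corresponds to about 8000 interrupts/s).
         	 */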
   2892 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2893 	usec = ((reg & 0x0FF8) >> 3);
   2894 	if (usec > 0)
   2895 		rate = 500000 / usec;
   2896 	else
   2897 		rate = 0;
   2898 	node.sysctl_data = &rate;
   2899 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2900 	if (error || newp == NULL)
   2901 		return error;
   2902 	reg &= ~0xfff; /* default, no limitation */
   2903 	ixgbe_max_interrupt_rate = 0;
   2904 	if (rate > 0 && rate < 500000) {
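         		/*
         		 * 4000000/rate equals (500000/rate) << 3, i.e. the 2us
         		 * interval already shifted into bits 3:11, so only the
         		 * mask is applied before writing it back.
         		 */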
   2905 		if (rate < 1000)
   2906 			rate = 1000;
   2907 		ixgbe_max_interrupt_rate = rate;
   2908 		reg |= ((4000000/rate) & 0xff8);
   2909 	}
   2910 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2911 
   2912 	return (0);
   2913 } /* ixgbe_sysctl_interrupt_rate_handler */
   2914 
   2915 const struct sysctlnode *
   2916 ixgbe_sysctl_instance(struct adapter *adapter)
   2917 {
   2918 	const char *dvname;
   2919 	struct sysctllog **log;
   2920 	int rc;
   2921 	const struct sysctlnode *rnode;
   2922 
   2923 	if (adapter->sysctltop != NULL)
   2924 		return adapter->sysctltop;
   2925 
   2926 	log = &adapter->sysctllog;
   2927 	dvname = device_xname(adapter->dev);
   2928 
   2929 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2930 	    0, CTLTYPE_NODE, dvname,
   2931 	    SYSCTL_DESCR("ixgbe information and settings"),
   2932 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2933 		goto err;
   2934 
   2935 	return rnode;
   2936 err:
   2937 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2938 	return NULL;
   2939 }
   2940 
   2941 /************************************************************************
   2942  * ixgbe_add_device_sysctls
   2943  ************************************************************************/
   2944 static void
   2945 ixgbe_add_device_sysctls(struct adapter *adapter)
   2946 {
   2947 	device_t               dev = adapter->dev;
   2948 	struct ixgbe_hw        *hw = &adapter->hw;
   2949 	struct sysctllog **log;
   2950 	const struct sysctlnode *rnode, *cnode;
   2951 
   2952 	log = &adapter->sysctllog;
   2953 
   2954 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   2955 		aprint_error_dev(dev, "could not create sysctl root\n");
   2956 		return;
   2957 	}
   2958 
   2959 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2960 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2961 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   2962 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2963 		aprint_error_dev(dev, "could not create sysctl\n");
   2964 
   2965 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2966 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2967 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   2968 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   2969 		aprint_error_dev(dev, "could not create sysctl\n");
   2970 
   2971 	/* Sysctls for all devices */
   2972 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2973 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   2974 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   2975 	    CTL_EOL) != 0)
   2976 		aprint_error_dev(dev, "could not create sysctl\n");
   2977 
   2978 	adapter->enable_aim = ixgbe_enable_aim;
   2979 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2980 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2981 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2982 		aprint_error_dev(dev, "could not create sysctl\n");
   2983 
   2984 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2985 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2986 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   2987 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   2988 	    CTL_EOL) != 0)
   2989 		aprint_error_dev(dev, "could not create sysctl\n");
   2990 
   2991 #ifdef IXGBE_DEBUG
   2992 	/* testing sysctls (for all devices) */
   2993 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2994 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   2995 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   2996 	    CTL_EOL) != 0)
   2997 		aprint_error_dev(dev, "could not create sysctl\n");
   2998 
   2999 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3000 	    CTLTYPE_STRING, "print_rss_config",
   3001 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3002 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3003 	    CTL_EOL) != 0)
   3004 		aprint_error_dev(dev, "could not create sysctl\n");
   3005 #endif
   3006 	/* for X550 series devices */
   3007 	if (hw->mac.type >= ixgbe_mac_X550)
   3008 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3009 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3010 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3011 		    CTL_EOL) != 0)
   3012 			aprint_error_dev(dev, "could not create sysctl\n");
   3013 
   3014 	/* for WoL-capable devices */
   3015 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3016 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3017 		    CTLTYPE_BOOL, "wol_enable",
   3018 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3019 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3020 		    CTL_EOL) != 0)
   3021 			aprint_error_dev(dev, "could not create sysctl\n");
   3022 
   3023 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3024 		    CTLTYPE_INT, "wufc",
   3025 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3026 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3027 		    CTL_EOL) != 0)
   3028 			aprint_error_dev(dev, "could not create sysctl\n");
   3029 	}
   3030 
   3031 	/* for X552/X557-AT devices */
   3032 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3033 		const struct sysctlnode *phy_node;
   3034 
   3035 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3036 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3037 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3038 			aprint_error_dev(dev, "could not create sysctl\n");
   3039 			return;
   3040 		}
   3041 
   3042 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3043 		    CTLTYPE_INT, "temp",
   3044 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3045 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3046 		    CTL_EOL) != 0)
   3047 			aprint_error_dev(dev, "could not create sysctl\n");
   3048 
   3049 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3050 		    CTLTYPE_INT, "overtemp_occurred",
   3051 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3052 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3053 		    CTL_CREATE, CTL_EOL) != 0)
   3054 			aprint_error_dev(dev, "could not create sysctl\n");
   3055 	}
   3056 
   3057 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3058 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3059 		    CTLTYPE_INT, "eee_state",
   3060 		    SYSCTL_DESCR("EEE Power Save State"),
   3061 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3062 		    CTL_EOL) != 0)
   3063 			aprint_error_dev(dev, "could not create sysctl\n");
   3064 	}
   3065 } /* ixgbe_add_device_sysctls */
   3066 
   3067 /************************************************************************
   3068  * ixgbe_allocate_pci_resources
   3069  ************************************************************************/
   3070 static int
   3071 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3072     const struct pci_attach_args *pa)
   3073 {
   3074 	pcireg_t	memtype;
   3075 	device_t dev = adapter->dev;
   3076 	bus_addr_t addr;
   3077 	int flags;
   3078 
   3079 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3080 	switch (memtype) {
   3081 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3082 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3083 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3084 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3085 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3086 			goto map_err;
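         		/*
         		 * BAR0 holds device registers whose accesses have side
         		 * effects, so force a non-prefetchable mapping to keep
         		 * them from being merged or speculated.
         		 */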
   3087 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3088 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3089 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3090 		}
   3091 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3092 		     adapter->osdep.mem_size, flags,
   3093 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3094 map_err:
   3095 			adapter->osdep.mem_size = 0;
   3096 			aprint_error_dev(dev, "unable to map BAR0\n");
   3097 			return ENXIO;
   3098 		}
   3099 		break;
   3100 	default:
   3101 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3102 		return ENXIO;
   3103 	}
   3104 
   3105 	return (0);
   3106 } /* ixgbe_allocate_pci_resources */
   3107 
   3108 /************************************************************************
   3109  * ixgbe_detach - Device removal routine
   3110  *
   3111  *   Called when the driver is being removed.
   3112  *   Stops the adapter and deallocates all the resources
   3113  *   that were allocated for driver operation.
   3114  *
   3115  *   return 0 on success, positive on failure
   3116  ************************************************************************/
   3117 static int
   3118 ixgbe_detach(device_t dev, int flags)
   3119 {
   3120 	struct adapter *adapter = device_private(dev);
   3121 	struct ix_queue *que = adapter->queues;
   3122 	struct rx_ring *rxr = adapter->rx_rings;
   3123 	struct tx_ring *txr = adapter->tx_rings;
   3124 	struct ixgbe_hw *hw = &adapter->hw;
   3125 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3126 	u32	ctrl_ext;
   3127 
   3128 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3129 	if (adapter->osdep.attached == false)
   3130 		return 0;
   3131 
   3132 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3133 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3134 		return (EBUSY);
   3135 	}
   3136 
   3137 	/* Stop the interface. Callouts are stopped in it. */
   3138 	ixgbe_ifstop(adapter->ifp, 1);
   3139 #if NVLAN > 0
   3140 	/* Make sure VLANs are not using driver */
   3141 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3142 		;	/* nothing to do: no VLANs */
   3143 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3144 		vlan_ifdetach(adapter->ifp);
   3145 	else {
   3146 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3147 		return (EBUSY);
   3148 	}
   3149 #endif
   3150 
   3151 	pmf_device_deregister(dev);
   3152 
   3153 	ether_ifdetach(adapter->ifp);
   3154 	/* Stop the adapter */
   3155 	IXGBE_CORE_LOCK(adapter);
   3156 	ixgbe_setup_low_power_mode(adapter);
   3157 	IXGBE_CORE_UNLOCK(adapter);
   3158 
   3159 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3160 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3161 			softint_disestablish(txr->txr_si);
   3162 		softint_disestablish(que->que_si);
   3163 	}
   3164 
   3165 	/* Drain the Link queue */
   3166 	softint_disestablish(adapter->link_si);
   3167 	softint_disestablish(adapter->mod_si);
   3168 	softint_disestablish(adapter->msf_si);
   3169 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   3170 		softint_disestablish(adapter->mbx_si);
   3171 	softint_disestablish(adapter->phy_si);
   3172 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   3173 		softint_disestablish(adapter->fdir_si);
   3174 
   3175 	/* let hardware know driver is unloading */
   3176 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3177 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3178 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3179 
   3180 	callout_halt(&adapter->timer, NULL);
   3181 
   3182 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3183 		netmap_detach(adapter->ifp);
   3184 
   3185 	ixgbe_free_pci_resources(adapter);
   3186 #if 0	/* XXX the NetBSD port is probably missing something here */
   3187 	bus_generic_detach(dev);
   3188 #endif
   3189 	if_detach(adapter->ifp);
   3190 	if_percpuq_destroy(adapter->ipq);
   3191 
   3192 	sysctl_teardown(&adapter->sysctllog);
   3193 	evcnt_detach(&adapter->handleq);
   3194 	evcnt_detach(&adapter->req);
   3195 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3196 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3197 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3198 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3199 	evcnt_detach(&adapter->other_tx_dma_setup);
   3200 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3201 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3202 	evcnt_detach(&adapter->watchdog_events);
   3203 	evcnt_detach(&adapter->tso_err);
   3204 	evcnt_detach(&adapter->link_irq);
   3205 
   3206 	txr = adapter->tx_rings;
   3207 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3208 		evcnt_detach(&adapter->queues[i].irqs);
   3209 		evcnt_detach(&txr->no_desc_avail);
   3210 		evcnt_detach(&txr->total_packets);
   3211 		evcnt_detach(&txr->tso_tx);
   3212 #ifndef IXGBE_LEGACY_TX
   3213 		evcnt_detach(&txr->pcq_drops);
   3214 #endif
   3215 
   3216 		if (i < __arraycount(stats->mpc)) {
   3217 			evcnt_detach(&stats->mpc[i]);
   3218 			if (hw->mac.type == ixgbe_mac_82598EB)
   3219 				evcnt_detach(&stats->rnbc[i]);
   3220 		}
   3221 		if (i < __arraycount(stats->pxontxc)) {
   3222 			evcnt_detach(&stats->pxontxc[i]);
   3223 			evcnt_detach(&stats->pxonrxc[i]);
   3224 			evcnt_detach(&stats->pxofftxc[i]);
   3225 			evcnt_detach(&stats->pxoffrxc[i]);
   3226 			evcnt_detach(&stats->pxon2offc[i]);
   3227 		}
   3228 		if (i < __arraycount(stats->qprc)) {
   3229 			evcnt_detach(&stats->qprc[i]);
   3230 			evcnt_detach(&stats->qptc[i]);
   3231 			evcnt_detach(&stats->qbrc[i]);
   3232 			evcnt_detach(&stats->qbtc[i]);
   3233 			evcnt_detach(&stats->qprdc[i]);
   3234 		}
   3235 
   3236 		evcnt_detach(&rxr->rx_packets);
   3237 		evcnt_detach(&rxr->rx_bytes);
   3238 		evcnt_detach(&rxr->rx_copies);
   3239 		evcnt_detach(&rxr->no_jmbuf);
   3240 		evcnt_detach(&rxr->rx_discarded);
   3241 	}
   3242 	evcnt_detach(&stats->ipcs);
   3243 	evcnt_detach(&stats->l4cs);
   3244 	evcnt_detach(&stats->ipcs_bad);
   3245 	evcnt_detach(&stats->l4cs_bad);
   3246 	evcnt_detach(&stats->intzero);
   3247 	evcnt_detach(&stats->legint);
   3248 	evcnt_detach(&stats->crcerrs);
   3249 	evcnt_detach(&stats->illerrc);
   3250 	evcnt_detach(&stats->errbc);
   3251 	evcnt_detach(&stats->mspdc);
   3252 	if (hw->mac.type >= ixgbe_mac_X550)
   3253 		evcnt_detach(&stats->mbsdc);
   3254 	evcnt_detach(&stats->mpctotal);
   3255 	evcnt_detach(&stats->mlfc);
   3256 	evcnt_detach(&stats->mrfc);
   3257 	evcnt_detach(&stats->rlec);
   3258 	evcnt_detach(&stats->lxontxc);
   3259 	evcnt_detach(&stats->lxonrxc);
   3260 	evcnt_detach(&stats->lxofftxc);
   3261 	evcnt_detach(&stats->lxoffrxc);
   3262 
   3263 	/* Packet Reception Stats */
   3264 	evcnt_detach(&stats->tor);
   3265 	evcnt_detach(&stats->gorc);
   3266 	evcnt_detach(&stats->tpr);
   3267 	evcnt_detach(&stats->gprc);
   3268 	evcnt_detach(&stats->mprc);
   3269 	evcnt_detach(&stats->bprc);
   3270 	evcnt_detach(&stats->prc64);
   3271 	evcnt_detach(&stats->prc127);
   3272 	evcnt_detach(&stats->prc255);
   3273 	evcnt_detach(&stats->prc511);
   3274 	evcnt_detach(&stats->prc1023);
   3275 	evcnt_detach(&stats->prc1522);
   3276 	evcnt_detach(&stats->ruc);
   3277 	evcnt_detach(&stats->rfc);
   3278 	evcnt_detach(&stats->roc);
   3279 	evcnt_detach(&stats->rjc);
   3280 	evcnt_detach(&stats->mngprc);
   3281 	evcnt_detach(&stats->mngpdc);
   3282 	evcnt_detach(&stats->xec);
   3283 
   3284 	/* Packet Transmission Stats */
   3285 	evcnt_detach(&stats->gotc);
   3286 	evcnt_detach(&stats->tpt);
   3287 	evcnt_detach(&stats->gptc);
   3288 	evcnt_detach(&stats->bptc);
   3289 	evcnt_detach(&stats->mptc);
   3290 	evcnt_detach(&stats->mngptc);
   3291 	evcnt_detach(&stats->ptc64);
   3292 	evcnt_detach(&stats->ptc127);
   3293 	evcnt_detach(&stats->ptc255);
   3294 	evcnt_detach(&stats->ptc511);
   3295 	evcnt_detach(&stats->ptc1023);
   3296 	evcnt_detach(&stats->ptc1522);
   3297 
   3298 	ixgbe_free_transmit_structures(adapter);
   3299 	ixgbe_free_receive_structures(adapter);
   3300 	free(adapter->queues, M_DEVBUF);
   3301 	free(adapter->mta, M_DEVBUF);
   3302 
   3303 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3304 
   3305 	return (0);
   3306 } /* ixgbe_detach */
   3307 
   3308 /************************************************************************
   3309  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3310  *
   3311  *   Prepare the adapter/port for LPLU and/or WoL
   3312  ************************************************************************/
   3313 static int
   3314 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3315 {
   3316 	struct ixgbe_hw *hw = &adapter->hw;
   3317 	device_t        dev = adapter->dev;
   3318 	s32             error = 0;
   3319 
   3320 	KASSERT(mutex_owned(&adapter->core_mtx));
   3321 
   3322 	/* Limit power management flow to X550EM baseT */
   3323 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3324 	    hw->phy.ops.enter_lplu) {
   3325 		/* X550EM baseT adapters need a special LPLU flow */
   3326 		hw->phy.reset_disable = true;
   3327 		ixgbe_stop(adapter);
   3328 		error = hw->phy.ops.enter_lplu(hw);
   3329 		if (error)
   3330 			device_printf(dev,
   3331 			    "Error entering LPLU: %d\n", error);
   3332 		hw->phy.reset_disable = false;
   3333 	} else {
   3334 		/* Just stop for other adapters */
   3335 		ixgbe_stop(adapter);
   3336 	}
   3337 
   3338 	if (!hw->wol_enabled) {
   3339 		ixgbe_set_phy_power(hw, FALSE);
   3340 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3341 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3342 	} else {
   3343 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3344 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3345 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3346 
   3347 		/*
   3348 		 * Clear Wake Up Status register to prevent any previous wakeup
   3349 		 * events from waking us up immediately after we suspend.
   3350 		 */
   3351 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3352 
   3353 		/*
   3354 		 * Program the Wakeup Filter Control register with user filter
   3355 		 * settings
   3356 		 */
   3357 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3358 
   3359 		/* Enable wakeups and power management in Wakeup Control */
   3360 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3361 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3362 
   3363 	}
   3364 
   3365 	return error;
   3366 } /* ixgbe_setup_low_power_mode */
   3367 
   3368 /************************************************************************
   3369  * ixgbe_shutdown - Shutdown entry point
   3370  ************************************************************************/
   3371 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3372 static int
   3373 ixgbe_shutdown(device_t dev)
   3374 {
   3375 	struct adapter *adapter = device_private(dev);
   3376 	int error = 0;
   3377 
   3378 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3379 
   3380 	IXGBE_CORE_LOCK(adapter);
   3381 	error = ixgbe_setup_low_power_mode(adapter);
   3382 	IXGBE_CORE_UNLOCK(adapter);
   3383 
   3384 	return (error);
   3385 } /* ixgbe_shutdown */
   3386 #endif
   3387 
   3388 /************************************************************************
   3389  * ixgbe_suspend
   3390  *
   3391  *   From D0 to D3
   3392  ************************************************************************/
   3393 static bool
   3394 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3395 {
   3396 	struct adapter *adapter = device_private(dev);
   3397 	int            error = 0;
   3398 
   3399 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3400 
   3401 	IXGBE_CORE_LOCK(adapter);
   3402 
   3403 	error = ixgbe_setup_low_power_mode(adapter);
   3404 
   3405 	IXGBE_CORE_UNLOCK(adapter);
   3406 
    3407 	return (error == 0);	/* pmf expects true on success */
   3408 } /* ixgbe_suspend */
   3409 
   3410 /************************************************************************
   3411  * ixgbe_resume
   3412  *
   3413  *   From D3 to D0
   3414  ************************************************************************/
   3415 static bool
   3416 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3417 {
   3418 	struct adapter  *adapter = device_private(dev);
   3419 	struct ifnet    *ifp = adapter->ifp;
   3420 	struct ixgbe_hw *hw = &adapter->hw;
   3421 	u32             wus;
   3422 
   3423 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3424 
   3425 	IXGBE_CORE_LOCK(adapter);
   3426 
   3427 	/* Read & clear WUS register */
   3428 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3429 	if (wus)
   3430 		device_printf(dev, "Woken up by (WUS): %#010x\n",
    3431 		    wus);
   3432 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3433 	/* And clear WUFC until next low-power transition */
   3434 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3435 
   3436 	/*
   3437 	 * Required after D3->D0 transition;
   3438 	 * will re-advertise all previous advertised speeds
   3439 	 */
   3440 	if (ifp->if_flags & IFF_UP)
   3441 		ixgbe_init_locked(adapter);
   3442 
   3443 	IXGBE_CORE_UNLOCK(adapter);
   3444 
   3445 	return true;
   3446 } /* ixgbe_resume */
   3447 
   3448 /*
   3449  * Set the various hardware offload abilities.
   3450  *
   3451  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3452  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3453  * mbuf offload flags the driver will understand.
   3454  */
   3455 static void
   3456 ixgbe_set_if_hwassist(struct adapter *adapter)
   3457 {
   3458 	/* XXX */
   3459 }
   3460 
   3461 /************************************************************************
   3462  * ixgbe_init_locked - Init entry point
   3463  *
   3464  *   Used in two ways: It is used by the stack as an init
   3465  *   entry point in network interface structure. It is also
   3466  *   used by the driver as a hw/sw initialization routine to
   3467  *   get to a consistent state.
   3468  *
    3469  *   Returns nothing; on failure the error is logged and the adapter is stopped.
   3470  ************************************************************************/
   3471 static void
   3472 ixgbe_init_locked(struct adapter *adapter)
   3473 {
   3474 	struct ifnet   *ifp = adapter->ifp;
   3475 	device_t 	dev = adapter->dev;
   3476 	struct ixgbe_hw *hw = &adapter->hw;
   3477 	struct tx_ring  *txr;
   3478 	struct rx_ring  *rxr;
   3479 	u32		txdctl, mhadd;
   3480 	u32		rxdctl, rxctrl;
   3481 	u32             ctrl_ext;
   3482 	int             err = 0;
   3483 
   3484 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3485 
   3486 	KASSERT(mutex_owned(&adapter->core_mtx));
   3487 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3488 
   3489 	hw->adapter_stopped = FALSE;
   3490 	ixgbe_stop_adapter(hw);
    3491 	callout_stop(&adapter->timer);
   3492 
   3493 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3494 	adapter->max_frame_size =
   3495 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3496 
   3497 	/* Queue indices may change with IOV mode */
   3498 	ixgbe_align_all_queue_indices(adapter);
   3499 
   3500 	/* reprogram the RAR[0] in case user changed it. */
   3501 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3502 
   3503 	/* Get the latest mac address, User can use a LAA */
   3504 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3505 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3506 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3507 	hw->addr_ctrl.rar_used_count = 1;
   3508 
   3509 	/* Set hardware offload abilities from ifnet flags */
   3510 	ixgbe_set_if_hwassist(adapter);
   3511 
   3512 	/* Prepare transmit descriptors and buffers */
   3513 	if (ixgbe_setup_transmit_structures(adapter)) {
   3514 		device_printf(dev, "Could not setup transmit structures\n");
   3515 		ixgbe_stop(adapter);
   3516 		return;
   3517 	}
   3518 
   3519 	ixgbe_init_hw(hw);
   3520 	ixgbe_initialize_iov(adapter);
   3521 	ixgbe_initialize_transmit_units(adapter);
   3522 
   3523 	/* Setup Multicast table */
   3524 	ixgbe_set_multi(adapter);
   3525 
   3526 	/* Determine the correct mbuf pool, based on frame size */
   3527 	if (adapter->max_frame_size <= MCLBYTES)
   3528 		adapter->rx_mbuf_sz = MCLBYTES;
   3529 	else
   3530 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3531 
   3532 	/* Prepare receive descriptors and buffers */
   3533 	if (ixgbe_setup_receive_structures(adapter)) {
   3534 		device_printf(dev, "Could not setup receive structures\n");
   3535 		ixgbe_stop(adapter);
   3536 		return;
   3537 	}
   3538 
   3539 	/* Configure RX settings */
   3540 	ixgbe_initialize_receive_units(adapter);
   3541 
   3542 	/* Enable SDP & MSI-X interrupts based on adapter */
   3543 	ixgbe_config_gpie(adapter);
   3544 
   3545 	/* Set MTU size */
   3546 	if (ifp->if_mtu > ETHERMTU) {
   3547 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3548 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3549 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3550 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3551 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3552 	}
   3553 
   3554 	/* Now enable all the queues */
   3555 	for (int i = 0; i < adapter->num_queues; i++) {
   3556 		txr = &adapter->tx_rings[i];
   3557 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3558 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3559 		/* Set WTHRESH to 8, burst writeback */
   3560 		txdctl |= (8 << 16);
   3561 		/*
   3562 		 * When the internal queue falls below PTHRESH (32),
   3563 		 * start prefetching as long as there are at least
   3564 		 * HTHRESH (1) buffers ready. The values are taken
   3565 		 * from the Intel linux driver 3.8.21.
   3566 		 * Prefetching enables tx line rate even with 1 queue.
   3567 		 */
   3568 		txdctl |= (32 << 0) | (1 << 8);
   3569 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3570 	}
   3571 
   3572 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3573 		rxr = &adapter->rx_rings[i];
   3574 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3575 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3576 			/*
   3577 			 * PTHRESH = 21
   3578 			 * HTHRESH = 4
   3579 			 * WTHRESH = 8
   3580 			 */
   3581 			rxdctl &= ~0x3FFFFF;
   3582 			rxdctl |= 0x080420;
   3583 		}
   3584 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3585 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3586 		for (; j < 10; j++) {
   3587 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3588 			    IXGBE_RXDCTL_ENABLE)
   3589 				break;
   3590 			else
   3591 				msec_delay(1);
   3592 		}
   3593 		wmb();
   3594 
   3595 		/*
   3596 		 * In netmap mode, we must preserve the buffers made
   3597 		 * available to userspace before the if_init()
   3598 		 * (this is true by default on the TX side, because
   3599 		 * init makes all buffers available to userspace).
   3600 		 *
   3601 		 * netmap_reset() and the device specific routines
   3602 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3603 		 * buffers at the end of the NIC ring, so here we
   3604 		 * must set the RDT (tail) register to make sure
   3605 		 * they are not overwritten.
   3606 		 *
   3607 		 * In this driver the NIC ring starts at RDH = 0,
   3608 		 * RDT points to the last slot available for reception (?),
   3609 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3610 		 */
   3611 #ifdef DEV_NETMAP
   3612 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3613 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3614 			struct netmap_adapter *na = NA(adapter->ifp);
   3615 			struct netmap_kring *kring = &na->rx_rings[i];
   3616 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3617 
   3618 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3619 		} else
   3620 #endif /* DEV_NETMAP */
   3621 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3622 			    adapter->num_rx_desc - 1);
   3623 	}
   3624 
   3625 	/* Enable Receive engine */
   3626 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3627 	if (hw->mac.type == ixgbe_mac_82598EB)
   3628 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3629 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3630 	ixgbe_enable_rx_dma(hw, rxctrl);
   3631 
   3632 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3633 
   3634 	/* Set up MSI-X routing */
   3635 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3636 		ixgbe_configure_ivars(adapter);
   3637 		/* Set up auto-mask */
   3638 		if (hw->mac.type == ixgbe_mac_82598EB)
   3639 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3640 		else {
   3641 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3642 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3643 		}
   3644 	} else {  /* Simple settings for Legacy/MSI */
   3645 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3646 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3647 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3648 	}
   3649 
   3650 	ixgbe_init_fdir(adapter);
   3651 
   3652 	/*
   3653 	 * Check on any SFP devices that
   3654 	 * need to be kick-started
   3655 	 */
   3656 	if (hw->phy.type == ixgbe_phy_none) {
   3657 		err = hw->phy.ops.identify(hw);
   3658 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3659 			device_printf(dev,
    3660 			    "Unsupported SFP+ module type was detected.\n");
    3661 			return;
    3662 		}
   3663 	}
   3664 
   3665 	/* Set moderation on the Link interrupt */
   3666 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3667 
   3668 	/* Config/Enable Link */
   3669 	ixgbe_config_link(adapter);
   3670 
   3671 	/* Hardware Packet Buffer & Flow Control setup */
   3672 	ixgbe_config_delay_values(adapter);
   3673 
   3674 	/* Initialize the FC settings */
   3675 	ixgbe_start_hw(hw);
   3676 
   3677 	/* Set up VLAN support and filter */
   3678 	ixgbe_setup_vlan_hw_support(adapter);
   3679 
   3680 	/* Setup DMA Coalescing */
   3681 	ixgbe_config_dmac(adapter);
   3682 
   3683 	/* And now turn on interrupts */
   3684 	ixgbe_enable_intr(adapter);
   3685 
   3686 	/* Enable the use of the MBX by the VF's */
   3687 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3688 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3689 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3690 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3691 	}
   3692 
   3693 	/* Now inform the stack we're ready */
   3694 	ifp->if_flags |= IFF_RUNNING;
   3695 
   3696 	return;
   3697 } /* ixgbe_init_locked */
   3698 
   3699 /************************************************************************
   3700  * ixgbe_init
   3701  ************************************************************************/
   3702 static int
   3703 ixgbe_init(struct ifnet *ifp)
   3704 {
   3705 	struct adapter *adapter = ifp->if_softc;
   3706 
   3707 	IXGBE_CORE_LOCK(adapter);
   3708 	ixgbe_init_locked(adapter);
   3709 	IXGBE_CORE_UNLOCK(adapter);
   3710 
   3711 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3712 } /* ixgbe_init */
   3713 
   3714 /************************************************************************
   3715  * ixgbe_set_ivar
   3716  *
   3717  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3718  *     (yes this is all very magic and confusing :)
   3719  *    - entry is the register array entry
   3720  *    - vector is the MSI-X vector for this queue
   3721  *    - type is RX/TX/MISC
   3722  ************************************************************************/
   3723 static void
   3724 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3725 {
   3726 	struct ixgbe_hw *hw = &adapter->hw;
   3727 	u32 ivar, index;
   3728 
   3729 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3730 
   3731 	switch (hw->mac.type) {
   3732 
   3733 	case ixgbe_mac_82598EB:
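         		/*
         		 * 82598: four 8-bit cause entries per 32-bit IVAR
         		 * register; RX queues are causes 0-63 and TX queues are
         		 * offset by 64.  E.g. RX queue 5 uses byte 1 of IVAR(1).
         		 */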
   3734 		if (type == -1)
   3735 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3736 		else
   3737 			entry += (type * 64);
   3738 		index = (entry >> 2) & 0x1F;
   3739 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3740 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3741 		ivar |= (vector << (8 * (entry & 0x3)));
   3742 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3743 		break;
   3744 
   3745 	case ixgbe_mac_82599EB:
   3746 	case ixgbe_mac_X540:
   3747 	case ixgbe_mac_X550:
   3748 	case ixgbe_mac_X550EM_x:
   3749 	case ixgbe_mac_X550EM_a:
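         		/*
         		 * 82599 and newer: each IVAR register covers two queues,
         		 * with one 8-bit RX entry and one 8-bit TX entry per
         		 * queue (queue N uses IVAR(N >> 1); odd queues use the
         		 * upper 16 bits).  The "other causes" vector is written
         		 * to IVAR_MISC instead.
         		 */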
   3750 		if (type == -1) { /* MISC IVAR */
   3751 			index = (entry & 1) * 8;
   3752 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3753 			ivar &= ~(0xFF << index);
   3754 			ivar |= (vector << index);
   3755 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3756 		} else {	/* RX/TX IVARS */
   3757 			index = (16 * (entry & 1)) + (8 * type);
   3758 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3759 			ivar &= ~(0xFF << index);
   3760 			ivar |= (vector << index);
   3761 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    3762 		}
         		break;
   3763 
   3764 	default:
   3765 		break;
   3766 	}
   3767 } /* ixgbe_set_ivar */
   3768 
   3769 /************************************************************************
   3770  * ixgbe_configure_ivars
   3771  ************************************************************************/
   3772 static void
   3773 ixgbe_configure_ivars(struct adapter *adapter)
   3774 {
   3775 	struct ix_queue *que = adapter->queues;
   3776 	u32             newitr;
   3777 
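         	/*
         	 * Convert the requested rate to an EITR value: 4000000/rate
         	 * equals (500000/rate) << 3, i.e. a 2us-unit interval already
         	 * shifted into bits 3:11 of the register.
         	 */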
   3778 	if (ixgbe_max_interrupt_rate > 0)
   3779 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3780 	else {
   3781 		/*
   3782 		 * Disable DMA coalescing if interrupt moderation is
   3783 		 * disabled.
   3784 		 */
   3785 		adapter->dmac = 0;
   3786 		newitr = 0;
   3787 	}
   3788 
    3789 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3790 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3791 		struct tx_ring *txr = &adapter->tx_rings[i];
   3792 		/* First the RX queue entry */
    3793 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3794 		/* ... and the TX */
   3795 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3796 		/* Set an Initial EITR value */
   3797 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3798 	}
   3799 
   3800 	/* For the Link interrupt */
    3801 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3802 } /* ixgbe_configure_ivars */
   3803 
   3804 /************************************************************************
   3805  * ixgbe_config_gpie
   3806  ************************************************************************/
   3807 static void
   3808 ixgbe_config_gpie(struct adapter *adapter)
   3809 {
   3810 	struct ixgbe_hw *hw = &adapter->hw;
   3811 	u32             gpie;
   3812 
   3813 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3814 
   3815 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3816 		/* Enable Enhanced MSI-X mode */
   3817 		gpie |= IXGBE_GPIE_MSIX_MODE
   3818 		     |  IXGBE_GPIE_EIAME
   3819 		     |  IXGBE_GPIE_PBA_SUPPORT
   3820 		     |  IXGBE_GPIE_OCD;
   3821 	}
   3822 
   3823 	/* Fan Failure Interrupt */
   3824 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3825 		gpie |= IXGBE_SDP1_GPIEN;
   3826 
   3827 	/* Thermal Sensor Interrupt */
   3828 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3829 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3830 
   3831 	/* Link detection */
   3832 	switch (hw->mac.type) {
   3833 	case ixgbe_mac_82599EB:
   3834 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3835 		break;
   3836 	case ixgbe_mac_X550EM_x:
   3837 	case ixgbe_mac_X550EM_a:
   3838 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3839 		break;
   3840 	default:
   3841 		break;
   3842 	}
   3843 
   3844 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3845 
   3846 	return;
   3847 } /* ixgbe_config_gpie */
   3848 
   3849 /************************************************************************
   3850  * ixgbe_config_delay_values
   3851  *
   3852  *   Requires adapter->max_frame_size to be set.
   3853  ************************************************************************/
   3854 static void
   3855 ixgbe_config_delay_values(struct adapter *adapter)
   3856 {
   3857 	struct ixgbe_hw *hw = &adapter->hw;
   3858 	u32             rxpb, frame, size, tmp;
   3859 
   3860 	frame = adapter->max_frame_size;
   3861 
   3862 	/* Calculate High Water */
   3863 	switch (hw->mac.type) {
   3864 	case ixgbe_mac_X540:
   3865 	case ixgbe_mac_X550:
   3866 	case ixgbe_mac_X550EM_x:
   3867 	case ixgbe_mac_X550EM_a:
   3868 		tmp = IXGBE_DV_X540(frame, frame);
   3869 		break;
   3870 	default:
   3871 		tmp = IXGBE_DV(frame, frame);
   3872 		break;
   3873 	}
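         	/*
         	 * The delay value approximates the worst-case amount of data
         	 * still arriving after XOFF is sent; converting it to KB and
         	 * subtracting it from the size of packet buffer 0 gives the
         	 * high water mark.
         	 */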
   3874 	size = IXGBE_BT2KB(tmp);
   3875 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   3876 	hw->fc.high_water[0] = rxpb - size;
   3877 
   3878 	/* Now calculate Low Water */
   3879 	switch (hw->mac.type) {
   3880 	case ixgbe_mac_X540:
   3881 	case ixgbe_mac_X550:
   3882 	case ixgbe_mac_X550EM_x:
   3883 	case ixgbe_mac_X550EM_a:
   3884 		tmp = IXGBE_LOW_DV_X540(frame);
   3885 		break;
   3886 	default:
   3887 		tmp = IXGBE_LOW_DV(frame);
   3888 		break;
   3889 	}
   3890 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3891 
   3892 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3893 	hw->fc.send_xon = TRUE;
   3894 } /* ixgbe_config_delay_values */
   3895 
   3896 /************************************************************************
   3897  * ixgbe_set_multi - Multicast Update
   3898  *
   3899  *   Called whenever multicast address list is updated.
   3900  ************************************************************************/
   3901 static void
   3902 ixgbe_set_multi(struct adapter *adapter)
   3903 {
   3904 	struct ixgbe_mc_addr	*mta;
   3905 	struct ifnet		*ifp = adapter->ifp;
   3906 	u8			*update_ptr;
   3907 	int			mcnt = 0;
   3908 	u32			fctrl;
   3909 	struct ethercom		*ec = &adapter->osdep.ec;
   3910 	struct ether_multi	*enm;
   3911 	struct ether_multistep	step;
   3912 
   3913 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   3914 
   3915 	mta = adapter->mta;
   3916 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   3917 
   3918 	ifp->if_flags &= ~IFF_ALLMULTI;
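         	/*
         	 * A multicast range (enm_addrlo != enm_addrhi) cannot be
         	 * represented in the hardware filter table, so fall back to
         	 * ALLMULTI, just as when the list overflows
         	 * MAX_NUM_MULTICAST_ADDRESSES.
         	 */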
   3919 	ETHER_FIRST_MULTI(step, ec, enm);
   3920 	while (enm != NULL) {
   3921 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   3922 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   3923 			ETHER_ADDR_LEN) != 0)) {
   3924 			ifp->if_flags |= IFF_ALLMULTI;
   3925 			break;
   3926 		}
   3927 		bcopy(enm->enm_addrlo,
   3928 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   3929 		mta[mcnt].vmdq = adapter->pool;
   3930 		mcnt++;
   3931 		ETHER_NEXT_MULTI(step, enm);
   3932 	}
   3933 
   3934 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3935 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3936 	if (ifp->if_flags & IFF_PROMISC)
   3937 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3938 	else if (ifp->if_flags & IFF_ALLMULTI) {
   3939 		fctrl |= IXGBE_FCTRL_MPE;
   3940 	}
   3941 
   3942 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   3943 
   3944 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   3945 		update_ptr = (u8 *)mta;
   3946 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   3947 		    ixgbe_mc_array_itr, TRUE);
   3948 	}
   3949 
   3950 	return;
   3951 } /* ixgbe_set_multi */
   3952 
   3953 /************************************************************************
   3954  * ixgbe_mc_array_itr
   3955  *
   3956  *   An iterator function needed by the multicast shared code.
   3957  *   It feeds the shared code routine the addresses in the
   3958  *   array of ixgbe_set_multi() one by one.
   3959  ************************************************************************/
   3960 static u8 *
   3961 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   3962 {
   3963 	struct ixgbe_mc_addr *mta;
   3964 
   3965 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   3966 	*vmdq = mta->vmdq;
   3967 
   3968 	*update_ptr = (u8*)(mta + 1);
   3969 
   3970 	return (mta->addr);
   3971 } /* ixgbe_mc_array_itr */
   3972 
   3973 /************************************************************************
   3974  * ixgbe_local_timer - Timer routine
   3975  *
   3976  *   Checks for link status, updates statistics,
   3977  *   and runs the watchdog check.
   3978  ************************************************************************/
   3979 static void
   3980 ixgbe_local_timer(void *arg)
   3981 {
   3982 	struct adapter *adapter = arg;
   3983 
   3984 	IXGBE_CORE_LOCK(adapter);
   3985 	ixgbe_local_timer1(adapter);
   3986 	IXGBE_CORE_UNLOCK(adapter);
   3987 }
   3988 
   3989 static void
   3990 ixgbe_local_timer1(void *arg)
   3991 {
   3992 	struct adapter	*adapter = arg;
   3993 	device_t	dev = adapter->dev;
   3994 	struct ix_queue *que = adapter->queues;
   3995 	u64		queues = 0;
   3996 	int		hung = 0;
   3997 
   3998 	KASSERT(mutex_owned(&adapter->core_mtx));
   3999 
   4000 	/* Check for pluggable optics */
   4001 	if (adapter->sfp_probe)
   4002 		if (!ixgbe_sfp_probe(adapter))
   4003 			goto out; /* Nothing to do */
   4004 
   4005 	ixgbe_update_link_status(adapter);
   4006 	ixgbe_update_stats_counters(adapter);
   4007 
   4008 	/*
   4009 	 * Check the TX queues status
   4010 	 *      - mark hung queues so we don't schedule on them
   4011 	 *      - watchdog only if all queues show hung
   4012 	 */
   4013 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4014 		/* Keep track of queues with work for soft irq */
   4015 		if (que->txr->busy)
   4016 			queues |= ((u64)1 << que->me);
   4017 		/*
    4018 		 * Each time txeof runs without cleaning while there
    4019 		 * are uncleaned descriptors, it increments busy.  If
   4020 		 * we get to the MAX we declare it hung.
   4021 		 */
   4022 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4023 			++hung;
   4024 			/* Mark the queue as inactive */
   4025 			adapter->active_queues &= ~((u64)1 << que->me);
   4026 			continue;
   4027 		} else {
   4028 			/* Check if we've come back from hung */
   4029 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4030 				adapter->active_queues |= ((u64)1 << que->me);
   4031 		}
   4032 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4033 			device_printf(dev,
   4034 			    "Warning queue %d appears to be hung!\n", i);
   4035 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4036 			++hung;
   4037 		}
   4038 	}
   4039 
    4040 	/* Only truly watchdog if all queues show hung */
   4041 	if (hung == adapter->num_queues)
   4042 		goto watchdog;
   4043 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4044 		ixgbe_rearm_queues(adapter, queues);
   4045 	}
   4046 
   4047 out:
   4048 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4049 	return;
   4050 
   4051 watchdog:
   4052 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4053 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4054 	adapter->watchdog_events.ev_count++;
   4055 	ixgbe_init_locked(adapter);
   4056 } /* ixgbe_local_timer */
   4057 
   4058 /************************************************************************
   4059  * ixgbe_sfp_probe
   4060  *
   4061  *   Determine if a port had optics inserted.
   4062  ************************************************************************/
   4063 static bool
   4064 ixgbe_sfp_probe(struct adapter *adapter)
   4065 {
   4066 	struct ixgbe_hw	*hw = &adapter->hw;
   4067 	device_t	dev = adapter->dev;
   4068 	bool		result = FALSE;
   4069 
   4070 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4071 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4072 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4073 		if (ret)
   4074 			goto out;
   4075 		ret = hw->phy.ops.reset(hw);
   4076 		adapter->sfp_probe = FALSE;
   4077 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4078 			device_printf(dev,"Unsupported SFP+ module detected!");
   4079 			device_printf(dev,
   4080 			    "Reload driver with supported module.\n");
   4081                         goto out;
   4082 		} else
   4083 			device_printf(dev, "SFP+ module detected!\n");
   4084 		/* We now have supported optics */
   4085 		result = TRUE;
   4086 	}
   4087 out:
   4088 
   4089 	return (result);
   4090 } /* ixgbe_sfp_probe */
   4091 
   4092 /************************************************************************
   4093  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4094  ************************************************************************/
   4095 static void
   4096 ixgbe_handle_mod(void *context)
   4097 {
   4098 	struct adapter  *adapter = context;
   4099 	struct ixgbe_hw *hw = &adapter->hw;
   4100 	device_t	dev = adapter->dev;
   4101 	u32             err, cage_full = 0;
   4102 
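         	/*
         	 * On adapters needing the SFP crosstalk fix, confirm that a
         	 * module is actually present (cage full) before probing it,
         	 * since the presence interrupt may be spurious on these parts.
         	 */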
   4103 	if (adapter->hw.need_crosstalk_fix) {
   4104 		switch (hw->mac.type) {
   4105 		case ixgbe_mac_82599EB:
   4106 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4107 			    IXGBE_ESDP_SDP2;
   4108 			break;
   4109 		case ixgbe_mac_X550EM_x:
   4110 		case ixgbe_mac_X550EM_a:
   4111 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4112 			    IXGBE_ESDP_SDP0;
   4113 			break;
   4114 		default:
   4115 			break;
   4116 		}
   4117 
   4118 		if (!cage_full)
   4119 			return;
   4120 	}
   4121 
   4122 	err = hw->phy.ops.identify_sfp(hw);
   4123 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4124 		device_printf(dev,
   4125 		    "Unsupported SFP+ module type was detected.\n");
   4126 		return;
   4127 	}
   4128 
   4129 	err = hw->mac.ops.setup_sfp(hw);
   4130 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4131 		device_printf(dev,
   4132 		    "Setup failure - unsupported SFP+ module type.\n");
   4133 		return;
   4134 	}
   4135 	softint_schedule(adapter->msf_si);
   4136 } /* ixgbe_handle_mod */
   4137 
   4138 
   4139 /************************************************************************
   4140  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4141  ************************************************************************/
   4142 static void
   4143 ixgbe_handle_msf(void *context)
   4144 {
   4145 	struct adapter  *adapter = context;
   4146 	struct ixgbe_hw *hw = &adapter->hw;
   4147 	u32             autoneg;
   4148 	bool            negotiate;
   4149 
    4150 	/* get_supported_physical_layer will call hw->phy.ops.identify_sfp() */
   4151 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4152 
   4153 	autoneg = hw->phy.autoneg_advertised;
   4154 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4155 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4156 	else
   4157 		negotiate = 0;
   4158 	if (hw->mac.ops.setup_link)
   4159 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4160 
   4161 	/* Adjust media types shown in ifconfig */
   4162 	ifmedia_removeall(&adapter->media);
   4163 	ixgbe_add_media_types(adapter);
   4164 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4165 } /* ixgbe_handle_msf */
   4166 
   4167 /************************************************************************
   4168  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4169  ************************************************************************/
   4170 static void
   4171 ixgbe_handle_phy(void *context)
   4172 {
   4173 	struct adapter  *adapter = context;
   4174 	struct ixgbe_hw *hw = &adapter->hw;
   4175 	int error;
   4176 
   4177 	error = hw->phy.ops.handle_lasi(hw);
   4178 	if (error == IXGBE_ERR_OVERTEMP)
   4179 		device_printf(adapter->dev,
   4180 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4181 		    " PHY will downshift to lower power state!\n");
   4182 	else if (error)
   4183 		device_printf(adapter->dev,
   4184 		    "Error handling LASI interrupt: %d\n", error);
   4185 } /* ixgbe_handle_phy */
   4186 
   4187 static void
   4188 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4189 {
   4190 	struct adapter *adapter = ifp->if_softc;
   4191 
   4192 	IXGBE_CORE_LOCK(adapter);
   4193 	ixgbe_stop(adapter);
   4194 	IXGBE_CORE_UNLOCK(adapter);
   4195 }
   4196 
   4197 /************************************************************************
   4198  * ixgbe_stop - Stop the hardware
   4199  *
   4200  *   Disables all traffic on the adapter by issuing a
   4201  *   global reset on the MAC and deallocates TX/RX buffers.
   4202  ************************************************************************/
   4203 static void
   4204 ixgbe_stop(void *arg)
   4205 {
   4206 	struct ifnet    *ifp;
   4207 	struct adapter  *adapter = arg;
   4208 	struct ixgbe_hw *hw = &adapter->hw;
   4209 
   4210 	ifp = adapter->ifp;
   4211 
   4212 	KASSERT(mutex_owned(&adapter->core_mtx));
   4213 
   4214 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4215 	ixgbe_disable_intr(adapter);
   4216 	callout_stop(&adapter->timer);
   4217 
   4218 	/* Let the stack know...*/
   4219 	ifp->if_flags &= ~IFF_RUNNING;
   4220 
   4221 	ixgbe_reset_hw(hw);
   4222 	hw->adapter_stopped = FALSE;
   4223 	ixgbe_stop_adapter(hw);
   4224 	if (hw->mac.type == ixgbe_mac_82599EB)
   4225 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4226 	/* Turn off the laser - noop with no optics */
   4227 	ixgbe_disable_tx_laser(hw);
   4228 
   4229 	/* Update the stack */
   4230 	adapter->link_up = FALSE;
   4231 	ixgbe_update_link_status(adapter);
   4232 
   4233 	/* reprogram the RAR[0] in case user changed it. */
   4234 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4235 
   4236 	return;
   4237 } /* ixgbe_stop */
   4238 
   4239 /************************************************************************
   4240  * ixgbe_update_link_status - Update OS on link state
   4241  *
   4242  * Note: Only updates the OS on the cached link state.
   4243  *       The real check of the hardware only happens with
   4244  *       a link interrupt.
   4245  ************************************************************************/
   4246 static void
   4247 ixgbe_update_link_status(struct adapter *adapter)
   4248 {
   4249 	struct ifnet	*ifp = adapter->ifp;
   4250 	device_t        dev = adapter->dev;
   4251 	struct ixgbe_hw *hw = &adapter->hw;
   4252 
   4253 	if (adapter->link_up) {
   4254 		if (adapter->link_active == FALSE) {
   4255 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4256 				/*
    4257 				 * Discard the counts for both MAC Local Fault
    4258 				 * and Remote Fault, because those registers
    4259 				 * are valid only while the link is up and
    4260 				 * running at 10Gbps.
   4261 				 */
   4262 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4263 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4264 			}
   4265 
   4266 			if (bootverbose) {
   4267 				const char *bpsmsg;
   4268 
   4269 				switch (adapter->link_speed) {
   4270 				case IXGBE_LINK_SPEED_10GB_FULL:
   4271 					bpsmsg = "10 Gbps";
   4272 					break;
   4273 				case IXGBE_LINK_SPEED_5GB_FULL:
   4274 					bpsmsg = "5 Gbps";
   4275 					break;
   4276 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4277 					bpsmsg = "2.5 Gbps";
   4278 					break;
   4279 				case IXGBE_LINK_SPEED_1GB_FULL:
   4280 					bpsmsg = "1 Gbps";
   4281 					break;
   4282 				case IXGBE_LINK_SPEED_100_FULL:
   4283 					bpsmsg = "100 Mbps";
   4284 					break;
   4285 				case IXGBE_LINK_SPEED_10_FULL:
   4286 					bpsmsg = "10 Mbps";
   4287 					break;
   4288 				default:
   4289 					bpsmsg = "unknown speed";
   4290 					break;
   4291 				}
   4292 				device_printf(dev, "Link is up %s %s \n",
   4293 				    bpsmsg, "Full Duplex");
   4294 			}
   4295 			adapter->link_active = TRUE;
   4296 			/* Update any Flow Control changes */
   4297 			ixgbe_fc_enable(&adapter->hw);
   4298 			/* Update DMA coalescing config */
   4299 			ixgbe_config_dmac(adapter);
   4300 			if_link_state_change(ifp, LINK_STATE_UP);
   4301 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4302 				ixgbe_ping_all_vfs(adapter);
   4303 		}
   4304 	} else { /* Link down */
   4305 		if (adapter->link_active == TRUE) {
   4306 			if (bootverbose)
   4307 				device_printf(dev, "Link is Down\n");
   4308 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4309 			adapter->link_active = FALSE;
   4310 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4311 				ixgbe_ping_all_vfs(adapter);
   4312 		}
   4313 	}
   4314 
   4315 	return;
   4316 } /* ixgbe_update_link_status */
   4317 
   4318 /************************************************************************
   4319  * ixgbe_config_dmac - Configure DMA Coalescing
   4320  ************************************************************************/
   4321 static void
   4322 ixgbe_config_dmac(struct adapter *adapter)
   4323 {
   4324 	struct ixgbe_hw *hw = &adapter->hw;
   4325 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4326 
   4327 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4328 		return;
   4329 
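         	/* Reprogram only if the watchdog value or the link speed changed */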
   4330 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4331 	    dcfg->link_speed ^ adapter->link_speed) {
   4332 		dcfg->watchdog_timer = adapter->dmac;
   4333 		dcfg->fcoe_en = false;
   4334 		dcfg->link_speed = adapter->link_speed;
   4335 		dcfg->num_tcs = 1;
   4336 
   4337 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4338 		    dcfg->watchdog_timer, dcfg->link_speed);
   4339 
   4340 		hw->mac.ops.dmac_config(hw);
   4341 	}
   4342 } /* ixgbe_config_dmac */
   4343 
   4344 /************************************************************************
   4345  * ixgbe_enable_intr
   4346  ************************************************************************/
   4347 static void
   4348 ixgbe_enable_intr(struct adapter *adapter)
   4349 {
   4350 	struct ixgbe_hw	*hw = &adapter->hw;
   4351 	struct ix_queue	*que = adapter->queues;
   4352 	u32		mask, fwsm;
   4353 
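         	/*
         	 * The per-queue bits are masked off here; they are enabled
         	 * individually via ixgbe_enable_queue() at the end of this
         	 * function.
         	 */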
   4354 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4355 
   4356 	switch (adapter->hw.mac.type) {
   4357 	case ixgbe_mac_82599EB:
   4358 		mask |= IXGBE_EIMS_ECC;
   4359 		/* Temperature sensor on some adapters */
   4360 		mask |= IXGBE_EIMS_GPI_SDP0;
   4361 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4362 		mask |= IXGBE_EIMS_GPI_SDP1;
   4363 		mask |= IXGBE_EIMS_GPI_SDP2;
   4364 		break;
   4365 	case ixgbe_mac_X540:
   4366 		/* Detect if Thermal Sensor is enabled */
   4367 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4368 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4369 			mask |= IXGBE_EIMS_TS;
   4370 		mask |= IXGBE_EIMS_ECC;
   4371 		break;
   4372 	case ixgbe_mac_X550:
   4373 		/* MAC thermal sensor is automatically enabled */
   4374 		mask |= IXGBE_EIMS_TS;
   4375 		mask |= IXGBE_EIMS_ECC;
   4376 		break;
   4377 	case ixgbe_mac_X550EM_x:
   4378 	case ixgbe_mac_X550EM_a:
   4379 		/* Some devices use SDP0 for important information */
   4380 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4381 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4382 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4383 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4384 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4385 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4386 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4387 		mask |= IXGBE_EIMS_ECC;
   4388 		break;
   4389 	default:
   4390 		break;
   4391 	}
   4392 
   4393 	/* Enable Fan Failure detection */
   4394 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4395 		mask |= IXGBE_EIMS_GPI_SDP1;
   4396 	/* Enable SR-IOV */
   4397 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4398 		mask |= IXGBE_EIMS_MAILBOX;
   4399 	/* Enable Flow Director */
   4400 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4401 		mask |= IXGBE_EIMS_FLOW_DIR;
   4402 
   4403 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4404 
   4405 	/* With MSI-X we use auto clear */
   4406 	if (adapter->msix_mem) {
   4407 		mask = IXGBE_EIMS_ENABLE_MASK;
   4408 		/* Don't autoclear Link */
   4409 		mask &= ~IXGBE_EIMS_OTHER;
   4410 		mask &= ~IXGBE_EIMS_LSC;
   4411 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4412 			mask &= ~IXGBE_EIMS_MAILBOX;
   4413 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4414 	}
   4415 
   4416 	/*
   4417 	 * Now enable all queues, this is done separately to
   4418 	 * allow for handling the extended (beyond 32) MSI-X
   4419 	 * vectors that can be used by 82599
   4420 	 */
    4421 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4422 		ixgbe_enable_queue(adapter, que->msix);
   4423 
   4424 	IXGBE_WRITE_FLUSH(hw);
   4425 
   4426 	return;
   4427 } /* ixgbe_enable_intr */
   4428 
   4429 /************************************************************************
   4430  * ixgbe_disable_intr
   4431  ************************************************************************/
   4432 static void
   4433 ixgbe_disable_intr(struct adapter *adapter)
   4434 {
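         	/* Stop MSI-X auto-clear (EIAC) first, then mask all causes */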
   4435 	if (adapter->msix_mem)
   4436 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4437 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4438 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4439 	} else {
   4440 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4441 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4442 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4443 	}
   4444 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4445 
   4446 	return;
   4447 } /* ixgbe_disable_intr */
   4448 
   4449 /************************************************************************
   4450  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4451  ************************************************************************/
   4452 static int
   4453 ixgbe_legacy_irq(void *arg)
   4454 {
   4455 	struct ix_queue *que = arg;
   4456 	struct adapter	*adapter = que->adapter;
   4457 	struct ixgbe_hw	*hw = &adapter->hw;
   4458 	struct ifnet    *ifp = adapter->ifp;
    4459 	struct tx_ring	*txr = adapter->tx_rings;
   4460 	bool		more = false;
   4461 	u32             eicr, eicr_mask;
   4462 
   4463 	/* Silicon errata #26 on 82598 */
   4464 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4465 
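         	/* EICR is clear-on-read; this read also acks the pending causes */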
   4466 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4467 
   4468 	adapter->stats.pf.legint.ev_count++;
   4469 	++que->irqs.ev_count;
   4470 	if (eicr == 0) {
   4471 		adapter->stats.pf.intzero.ev_count++;
   4472 		if ((ifp->if_flags & IFF_UP) != 0)
   4473 			ixgbe_enable_intr(adapter);
   4474 		return 0;
   4475 	}
   4476 
   4477 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4478 #ifdef __NetBSD__
   4479 		/* Don't run ixgbe_rxeof in interrupt context */
   4480 		more = true;
   4481 #else
   4482 		more = ixgbe_rxeof(que);
   4483 #endif
   4484 
   4485 		IXGBE_TX_LOCK(txr);
   4486 		ixgbe_txeof(txr);
   4487 #ifdef notyet
   4488 		if (!ixgbe_ring_empty(ifp, txr->br))
   4489 			ixgbe_start_locked(ifp, txr);
   4490 #endif
   4491 		IXGBE_TX_UNLOCK(txr);
   4492 	}
   4493 
   4494 	/* Check for fan failure */
   4495 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4496 		ixgbe_check_fan_failure(adapter, eicr, true);
   4497 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4498 	}
   4499 
   4500 	/* Link status change */
   4501 	if (eicr & IXGBE_EICR_LSC)
   4502 		softint_schedule(adapter->link_si);
   4503 
   4504 	if (ixgbe_is_sfp(hw)) {
   4505 		/* Pluggable optics-related interrupt */
   4506 		if (hw->mac.type >= ixgbe_mac_X540)
   4507 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4508 		else
   4509 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4510 
   4511 		if (eicr & eicr_mask) {
   4512 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4513 			softint_schedule(adapter->mod_si);
   4514 		}
   4515 
   4516 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4517 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4518 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4519 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4520 			softint_schedule(adapter->msf_si);
   4521 		}
   4522 	}
   4523 
   4524 	/* External PHY interrupt */
   4525 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4526 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4527 		softint_schedule(adapter->phy_si);
   4528 
   4529 	if (more)
   4530 		softint_schedule(que->que_si);
   4531 	else
   4532 		ixgbe_enable_intr(adapter);
   4533 
   4534 	return 1;
   4535 } /* ixgbe_legacy_irq */
   4536 
   4537 /************************************************************************
   4538  * ixgbe_free_pci_resources
   4539  ************************************************************************/
   4540 static void
   4541 ixgbe_free_pci_resources(struct adapter *adapter)
   4542 {
   4543 	struct ix_queue *que = adapter->queues;
   4544 	int		rid;
   4545 
   4546 	/*
   4547 	 * Release all msix queue resources:
   4548 	 */
   4549 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4550 		if (que->res != NULL)
   4551 			pci_intr_disestablish(adapter->osdep.pc,
   4552 			    adapter->osdep.ihs[i]);
   4553 	}
   4554 
   4555 	/* Clean the Legacy or Link interrupt last */
   4556 	if (adapter->vector) /* we are doing MSIX */
   4557 		rid = adapter->vector;
   4558 	else
   4559 		rid = 0;
   4560 
   4561 	if (adapter->osdep.ihs[rid] != NULL) {
   4562 		pci_intr_disestablish(adapter->osdep.pc,
   4563 		    adapter->osdep.ihs[rid]);
   4564 		adapter->osdep.ihs[rid] = NULL;
   4565 	}
   4566 
   4567 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4568 	    adapter->osdep.nintrs);
   4569 
   4570 	if (adapter->osdep.mem_size != 0) {
   4571 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4572 		    adapter->osdep.mem_bus_space_handle,
   4573 		    adapter->osdep.mem_size);
   4574 	}
   4575 
   4576 	return;
   4577 } /* ixgbe_free_pci_resources */
   4578 
   4579 /************************************************************************
   4580  * ixgbe_set_sysctl_value
   4581  ************************************************************************/
   4582 static void
   4583 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4584     const char *description, int *limit, int value)
   4585 {
   4586 	device_t dev =  adapter->dev;
   4587 	struct sysctllog **log;
   4588 	const struct sysctlnode *rnode, *cnode;
   4589 
   4590 	log = &adapter->sysctllog;
   4591 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4592 		aprint_error_dev(dev, "could not create sysctl root\n");
   4593 		return;
   4594 	}
   4595 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4596 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4597 	    name, SYSCTL_DESCR(description),
    4598 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4599 		aprint_error_dev(dev, "could not create sysctl\n");
   4600 	*limit = value;
   4601 } /* ixgbe_set_sysctl_value */
   4602 
   4603 /************************************************************************
   4604  * ixgbe_sysctl_flowcntl
   4605  *
   4606  *   SYSCTL wrapper around setting Flow Control
   4607  ************************************************************************/
   4608 static int
   4609 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4610 {
   4611 	struct sysctlnode node = *rnode;
   4612 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4613 	int error, fc;
   4614 
   4615 	fc = adapter->hw.fc.current_mode;
   4616 	node.sysctl_data = &fc;
   4617 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4618 	if (error != 0 || newp == NULL)
   4619 		return error;
   4620 
   4621 	/* Don't bother if it's not changed */
   4622 	if (fc == adapter->hw.fc.current_mode)
   4623 		return (0);
   4624 
   4625 	return ixgbe_set_flowcntl(adapter, fc);
   4626 } /* ixgbe_sysctl_flowcntl */
   4627 
   4628 /************************************************************************
   4629  * ixgbe_set_flowcntl - Set flow control
   4630  *
   4631  *   Flow control values:
   4632  *     0 - off
   4633  *     1 - rx pause
   4634  *     2 - tx pause
   4635  *     3 - full
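          *
          *   These values correspond one-to-one with the ixgbe_fc_* enum
          *   used by the shared code (ixgbe_fc_none through ixgbe_fc_full).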
   4636  ************************************************************************/
   4637 static int
   4638 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4639 {
    4640 	switch (fc) {
    4641 	case ixgbe_fc_rx_pause:
    4642 	case ixgbe_fc_tx_pause:
    4643 	case ixgbe_fc_full:
    4644 		adapter->hw.fc.requested_mode = fc;
    4645 		if (adapter->num_queues > 1)
    4646 			ixgbe_disable_rx_drop(adapter);
    4647 		break;
    4648 	case ixgbe_fc_none:
    4649 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
    4650 		if (adapter->num_queues > 1)
    4651 			ixgbe_enable_rx_drop(adapter);
    4652 		break;
    4653 	default:
    4654 		return (EINVAL);
    4655 	}
   4656 
   4657 #if 0 /* XXX NetBSD */
   4658 	/* Don't autoneg if forcing a value */
   4659 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4660 #endif
   4661 	ixgbe_fc_enable(&adapter->hw);
   4662 
   4663 	return (0);
   4664 } /* ixgbe_set_flowcntl */
   4665 
   4666 /************************************************************************
   4667  * ixgbe_enable_rx_drop
   4668  *
   4669  *   Enable the hardware to drop packets when the buffer is
   4670  *   full. This is useful with multiqueue, so that no single
   4671  *   queue being full stalls the entire RX engine. We only
   4672  *   enable this when Multiqueue is enabled AND Flow Control
   4673  *   is disabled.
   4674  ************************************************************************/
   4675 static void
   4676 ixgbe_enable_rx_drop(struct adapter *adapter)
   4677 {
   4678 	struct ixgbe_hw *hw = &adapter->hw;
   4679 	struct rx_ring  *rxr;
   4680 	u32             srrctl;
   4681 
   4682 	for (int i = 0; i < adapter->num_queues; i++) {
   4683 		rxr = &adapter->rx_rings[i];
   4684 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4685 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4686 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4687 	}
   4688 
   4689 	/* enable drop for each vf */
   4690 	for (int i = 0; i < adapter->num_vfs; i++) {
   4691 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4692 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4693 		    IXGBE_QDE_ENABLE));
   4694 	}
   4695 } /* ixgbe_enable_rx_drop */
   4696 
   4697 /************************************************************************
   4698  * ixgbe_disable_rx_drop
   4699  ************************************************************************/
   4700 static void
   4701 ixgbe_disable_rx_drop(struct adapter *adapter)
   4702 {
   4703 	struct ixgbe_hw *hw = &adapter->hw;
   4704 	struct rx_ring  *rxr;
   4705 	u32             srrctl;
   4706 
   4707 	for (int i = 0; i < adapter->num_queues; i++) {
   4708 		rxr = &adapter->rx_rings[i];
    4709 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4710 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4711 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4712 	}
   4713 
   4714 	/* disable drop for each vf */
   4715 	for (int i = 0; i < adapter->num_vfs; i++) {
   4716 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4717 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4718 	}
   4719 } /* ixgbe_disable_rx_drop */
   4720 
   4721 /************************************************************************
   4722  * ixgbe_sysctl_advertise
   4723  *
   4724  *   SYSCTL wrapper around setting advertised speed
   4725  ************************************************************************/
   4726 static int
   4727 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4728 {
   4729 	struct sysctlnode node = *rnode;
   4730 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4731 	int            error = 0, advertise;
   4732 
   4733 	advertise = adapter->advertise;
   4734 	node.sysctl_data = &advertise;
   4735 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4736 	if (error != 0 || newp == NULL)
   4737 		return error;
   4738 
   4739 	return ixgbe_set_advertise(adapter, advertise);
   4740 } /* ixgbe_sysctl_advertise */
   4741 
   4742 /************************************************************************
   4743  * ixgbe_set_advertise - Control advertised link speed
   4744  *
   4745  *   Flags:
   4746  *     0x0 - Default (all capable link speed)
   4747  *     0x1 - advertise 100 Mb
   4748  *     0x2 - advertise 1G
   4749  *     0x4 - advertise 10G
   4750  *     0x8 - advertise 10 Mb (yes, Mb)
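          *
          *   The flags may be combined; e.g. 0x6 advertises both 1G and 10G.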
   4751  ************************************************************************/
   4752 static int
   4753 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4754 {
   4755 	device_t         dev;
   4756 	struct ixgbe_hw  *hw;
   4757 	ixgbe_link_speed speed = 0;
   4758 	ixgbe_link_speed link_caps = 0;
   4759 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4760 	bool             negotiate = FALSE;
   4761 
   4762 	/* Checks to validate new value */
   4763 	if (adapter->advertise == advertise) /* no change */
   4764 		return (0);
   4765 
   4766 	dev = adapter->dev;
   4767 	hw = &adapter->hw;
   4768 
   4769 	/* No speed changes for backplane media */
   4770 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4771 		return (ENODEV);
   4772 
   4773 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4774 	    (hw->phy.multispeed_fiber))) {
   4775 		device_printf(dev,
   4776 		    "Advertised speed can only be set on copper or "
   4777 		    "multispeed fiber media types.\n");
   4778 		return (EINVAL);
   4779 	}
   4780 
   4781 	if (advertise < 0x0 || advertise > 0xF) {
   4782 		device_printf(dev,
    4783 		    "Invalid advertised speed; valid modes are 0x0 through 0xF\n");
   4784 		return (EINVAL);
   4785 	}
   4786 
   4787 	if (hw->mac.ops.get_link_capabilities) {
   4788 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4789 		    &negotiate);
   4790 		if (err != IXGBE_SUCCESS) {
   4791 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   4792 			return (ENODEV);
   4793 		}
   4794 	}
   4795 
   4796 	/* Set new value and report new advertised mode */
   4797 	if (advertise & 0x1) {
   4798 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4799 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4800 			return (EINVAL);
   4801 		}
   4802 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4803 	}
   4804 	if (advertise & 0x2) {
   4805 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4806 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4807 			return (EINVAL);
   4808 		}
   4809 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4810 	}
   4811 	if (advertise & 0x4) {
   4812 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4813 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4814 			return (EINVAL);
   4815 		}
   4816 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4817 	}
   4818 	if (advertise & 0x8) {
   4819 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4820 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4821 			return (EINVAL);
   4822 		}
   4823 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4824 	}
   4825 	if (advertise == 0)
   4826 		speed = link_caps; /* All capable link speed */
   4827 
   4828 	hw->mac.autotry_restart = TRUE;
   4829 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4830 	adapter->advertise = advertise;
   4831 
   4832 	return (0);
   4833 } /* ixgbe_set_advertise */
   4834 
   4835 /************************************************************************
   4836  * ixgbe_get_advertise - Get current advertised speed settings
   4837  *
   4838  *   Formatted for sysctl usage.
   4839  *   Flags:
   4840  *     0x1 - advertise 100 Mb
   4841  *     0x2 - advertise 1G
   4842  *     0x4 - advertise 10G
   4843  *     0x8 - advertise 10 Mb (yes, Mb)
   4844  ************************************************************************/
   4845 static int
   4846 ixgbe_get_advertise(struct adapter *adapter)
   4847 {
   4848 	struct ixgbe_hw  *hw = &adapter->hw;
   4849 	int              speed;
   4850 	ixgbe_link_speed link_caps = 0;
   4851 	s32              err;
   4852 	bool             negotiate = FALSE;
   4853 
   4854 	/*
   4855 	 * Advertised speed means nothing unless it's copper or
   4856 	 * multi-speed fiber
   4857 	 */
   4858 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4859 	    !(hw->phy.multispeed_fiber))
   4860 		return (0);
   4861 
   4862 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4863 	if (err != IXGBE_SUCCESS)
   4864 		return (0);
   4865 
   4866 	speed =
   4867 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
   4868 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
   4869 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
   4870 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
   4871 
   4872 	return speed;
   4873 } /* ixgbe_get_advertise */
   4874 
   4875 /************************************************************************
   4876  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   4877  *
   4878  *   Control values:
   4879  *     0/1 - off / on (use default value of 1000)
   4880  *
   4881  *     Legal timer values are:
   4882  *     50,100,250,500,1000,2000,5000,10000
   4883  *
   4884  *     Turning off interrupt moderation will also turn this off.
   4885  ************************************************************************/
   4886 static int
   4887 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   4888 {
   4889 	struct sysctlnode node = *rnode;
   4890 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4891 	struct ifnet   *ifp = adapter->ifp;
   4892 	int            error;
   4893 	int            newval;
   4894 
   4895 	newval = adapter->dmac;
   4896 	node.sysctl_data = &newval;
   4897 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4898 	if ((error) || (newp == NULL))
   4899 		return (error);
   4900 
   4901 	switch (newval) {
   4902 	case 0:
   4903 		/* Disabled */
   4904 		adapter->dmac = 0;
   4905 		break;
   4906 	case 1:
   4907 		/* Enable and use default */
   4908 		adapter->dmac = 1000;
   4909 		break;
   4910 	case 50:
   4911 	case 100:
   4912 	case 250:
   4913 	case 500:
   4914 	case 1000:
   4915 	case 2000:
   4916 	case 5000:
   4917 	case 10000:
   4918 		/* Legal values - allow */
   4919 		adapter->dmac = newval;
   4920 		break;
   4921 	default:
   4922 		/* Do nothing, illegal value */
   4923 		return (EINVAL);
   4924 	}
   4925 
   4926 	/* Re-initialize hardware if it's already running */
   4927 	if (ifp->if_flags & IFF_RUNNING)
   4928 		ixgbe_init(ifp);
   4929 
   4930 	return (0);
    4931 } /* ixgbe_sysctl_dmac */
   4932 
   4933 #ifdef IXGBE_DEBUG
   4934 /************************************************************************
   4935  * ixgbe_sysctl_power_state
   4936  *
   4937  *   Sysctl to test power states
   4938  *   Values:
   4939  *     0      - set device to D0
   4940  *     3      - set device to D3
   4941  *     (none) - get current device power state
   4942  ************************************************************************/
   4943 static int
   4944 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   4945 {
   4946 #ifdef notyet
   4947 	struct sysctlnode node = *rnode;
   4948 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4949 	device_t       dev =  adapter->dev;
   4950 	int            curr_ps, new_ps, error = 0;
   4951 
   4952 	curr_ps = new_ps = pci_get_powerstate(dev);
   4953 
   4954 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    4955 	if ((error) || (newp == NULL))
   4956 		return (error);
   4957 
   4958 	if (new_ps == curr_ps)
   4959 		return (0);
   4960 
   4961 	if (new_ps == 3 && curr_ps == 0)
   4962 		error = DEVICE_SUSPEND(dev);
   4963 	else if (new_ps == 0 && curr_ps == 3)
   4964 		error = DEVICE_RESUME(dev);
   4965 	else
   4966 		return (EINVAL);
   4967 
   4968 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   4969 
   4970 	return (error);
   4971 #else
   4972 	return 0;
   4973 #endif
   4974 } /* ixgbe_sysctl_power_state */
   4975 #endif
   4976 
   4977 /************************************************************************
   4978  * ixgbe_sysctl_wol_enable
   4979  *
   4980  *   Sysctl to enable/disable the WoL capability,
   4981  *   if supported by the adapter.
   4982  *
   4983  *   Values:
   4984  *     0 - disabled
   4985  *     1 - enabled
   4986  ************************************************************************/
   4987 static int
   4988 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   4989 {
   4990 	struct sysctlnode node = *rnode;
   4991 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   4992 	struct ixgbe_hw *hw = &adapter->hw;
   4993 	bool            new_wol_enabled;
   4994 	int             error = 0;
   4995 
   4996 	new_wol_enabled = hw->wol_enabled;
   4997 	node.sysctl_data = &new_wol_enabled;
   4998 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4999 	if ((error) || (newp == NULL))
   5000 		return (error);
   5001 	if (new_wol_enabled == hw->wol_enabled)
   5002 		return (0);
   5003 
   5004 	if (new_wol_enabled && !adapter->wol_support)
   5005 		return (ENODEV);
   5006 	else
   5007 		hw->wol_enabled = new_wol_enabled;
   5008 
   5009 	return (0);
   5010 } /* ixgbe_sysctl_wol_enable */
   5011 
   5012 /************************************************************************
   5013  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5014  *
   5015  *   Sysctl to enable/disable the types of packets that the
   5016  *   adapter will wake up on upon receipt.
   5017  *   Flags:
   5018  *     0x1  - Link Status Change
   5019  *     0x2  - Magic Packet
   5020  *     0x4  - Direct Exact
   5021  *     0x8  - Directed Multicast
   5022  *     0x10 - Broadcast
   5023  *     0x20 - ARP/IPv4 Request Packet
   5024  *     0x40 - Direct IPv4 Packet
   5025  *     0x80 - Direct IPv6 Packet
   5026  *
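          *   The flags above may be OR'd together to wake on several packet types.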
   5027  *   Settings not listed above will cause the sysctl to return an error.
   5028  ************************************************************************/
   5029 static int
   5030 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5031 {
   5032 	struct sysctlnode node = *rnode;
   5033 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5034 	int error = 0;
   5035 	u32 new_wufc;
   5036 
   5037 	new_wufc = adapter->wufc;
   5038 	node.sysctl_data = &new_wufc;
   5039 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5040 	if ((error) || (newp == NULL))
   5041 		return (error);
   5042 	if (new_wufc == adapter->wufc)
   5043 		return (0);
   5044 
   5045 	if (new_wufc & 0xffffff00)
   5046 		return (EINVAL);
   5047 
    5048 	new_wufc &= 0xff;
    5049 	new_wufc |= (0xffffff00 & adapter->wufc); /* keep the reserved bits */
   5050 	adapter->wufc = new_wufc;
   5051 
   5052 	return (0);
   5053 } /* ixgbe_sysctl_wufc */
   5054 
   5055 #ifdef IXGBE_DEBUG
   5056 /************************************************************************
   5057  * ixgbe_sysctl_print_rss_config
   5058  ************************************************************************/
   5059 static int
   5060 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5061 {
   5062 #ifdef notyet
   5063 	struct sysctlnode node = *rnode;
   5064 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5065 	struct ixgbe_hw *hw = &adapter->hw;
   5066 	device_t        dev = adapter->dev;
   5067 	struct sbuf     *buf;
   5068 	int             error = 0, reta_size;
   5069 	u32             reg;
   5070 
   5071 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5072 	if (!buf) {
   5073 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5074 		return (ENOMEM);
   5075 	}
   5076 
   5077 	// TODO: use sbufs to make a string to print out
   5078 	/* Set multiplier for RETA setup and table size based on MAC */
   5079 	switch (adapter->hw.mac.type) {
   5080 	case ixgbe_mac_X550:
   5081 	case ixgbe_mac_X550EM_x:
   5082 	case ixgbe_mac_X550EM_a:
   5083 		reta_size = 128;
   5084 		break;
   5085 	default:
   5086 		reta_size = 32;
   5087 		break;
   5088 	}
   5089 
   5090 	/* Print out the redirection table */
   5091 	sbuf_cat(buf, "\n");
   5092 	for (int i = 0; i < reta_size; i++) {
   5093 		if (i < 32) {
   5094 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5095 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5096 		} else {
   5097 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5098 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5099 		}
   5100 	}
   5101 
   5102 	// TODO: print more config
   5103 
   5104 	error = sbuf_finish(buf);
   5105 	if (error)
   5106 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5107 
   5108 	sbuf_delete(buf);
   5109 #endif
   5110 	return (0);
   5111 } /* ixgbe_sysctl_print_rss_config */
   5112 #endif /* IXGBE_DEBUG */
   5113 
   5114 /************************************************************************
   5115  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5116  *
   5117  *   For X552/X557-AT devices using an external PHY
   5118  ************************************************************************/
   5119 static int
   5120 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5121 {
   5122 	struct sysctlnode node = *rnode;
   5123 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5124 	struct ixgbe_hw *hw = &adapter->hw;
   5125 	int val;
   5126 	u16 reg;
   5127 	int		error;
   5128 
   5129 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5130 		device_printf(adapter->dev,
   5131 		    "Device has no supported external thermal sensor.\n");
   5132 		return (ENODEV);
   5133 	}
   5134 
   5135 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5136 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5137 		device_printf(adapter->dev,
   5138 		    "Error reading from PHY's current temperature register\n");
   5139 		return (EAGAIN);
   5140 	}
   5141 
   5142 	node.sysctl_data = &val;
   5143 
   5144 	/* Shift temp for output */
   5145 	val = reg >> 8;
   5146 
   5147 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5148 	if ((error) || (newp == NULL))
   5149 		return (error);
   5150 
   5151 	return (0);
   5152 } /* ixgbe_sysctl_phy_temp */
   5153 
   5154 /************************************************************************
   5155  * ixgbe_sysctl_phy_overtemp_occurred
   5156  *
   5157  *   Reports (directly from the PHY) whether the current PHY
   5158  *   temperature is over the overtemp threshold.
   5159  ************************************************************************/
   5160 static int
   5161 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5162 {
   5163 	struct sysctlnode node = *rnode;
   5164 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5165 	struct ixgbe_hw *hw = &adapter->hw;
   5166 	int val, error;
   5167 	u16 reg;
   5168 
   5169 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5170 		device_printf(adapter->dev,
   5171 		    "Device has no supported external thermal sensor.\n");
   5172 		return (ENODEV);
   5173 	}
   5174 
   5175 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5176 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5177 		device_printf(adapter->dev,
   5178 		    "Error reading from PHY's temperature status register\n");
   5179 		return (EAGAIN);
   5180 	}
   5181 
   5182 	node.sysctl_data = &val;
   5183 
   5184 	/* Get occurrence bit */
   5185 	val = !!(reg & 0x4000);
   5186 
   5187 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5188 	if ((error) || (newp == NULL))
   5189 		return (error);
   5190 
   5191 	return (0);
   5192 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5193 
   5194 /************************************************************************
   5195  * ixgbe_sysctl_eee_state
   5196  *
   5197  *   Sysctl to set EEE power saving feature
   5198  *   Values:
   5199  *     0      - disable EEE
   5200  *     1      - enable EEE
   5201  *     (none) - get current device EEE state
   5202  ************************************************************************/
   5203 static int
   5204 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5205 {
   5206 	struct sysctlnode node = *rnode;
   5207 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5208 	struct ifnet   *ifp = adapter->ifp;
   5209 	device_t       dev = adapter->dev;
   5210 	int            curr_eee, new_eee, error = 0;
   5211 	s32            retval;
   5212 
   5213 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5214 	node.sysctl_data = &new_eee;
   5215 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5216 	if ((error) || (newp == NULL))
   5217 		return (error);
   5218 
   5219 	/* Nothing to do */
   5220 	if (new_eee == curr_eee)
   5221 		return (0);
   5222 
   5223 	/* Not supported */
   5224 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5225 		return (EINVAL);
   5226 
   5227 	/* Bounds checking */
   5228 	if ((new_eee < 0) || (new_eee > 1))
   5229 		return (EINVAL);
   5230 
   5231 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5232 	if (retval) {
   5233 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5234 		return (EINVAL);
   5235 	}
   5236 
   5237 	/* Restart auto-neg */
   5238 	ixgbe_init(ifp);
   5239 
   5240 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5241 
   5242 	/* Cache new value */
   5243 	if (new_eee)
   5244 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5245 	else
   5246 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5247 
   5248 	return (error);
   5249 } /* ixgbe_sysctl_eee_state */
   5250 
   5251 /************************************************************************
   5252  * ixgbe_init_device_features
   5253  ************************************************************************/
   5254 static void
   5255 ixgbe_init_device_features(struct adapter *adapter)
   5256 {
   5257 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5258 	                  | IXGBE_FEATURE_RSS
   5259 	                  | IXGBE_FEATURE_MSI
   5260 	                  | IXGBE_FEATURE_MSIX
   5261 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5262 	                  | IXGBE_FEATURE_LEGACY_TX;
   5263 
   5264 	/* Set capabilities first... */
   5265 	switch (adapter->hw.mac.type) {
   5266 	case ixgbe_mac_82598EB:
   5267 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5268 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5269 		break;
   5270 	case ixgbe_mac_X540:
   5271 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5272 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5273 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5274 		    (adapter->hw.bus.func == 0))
   5275 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5276 		break;
   5277 	case ixgbe_mac_X550:
   5278 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5279 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5280 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5281 		break;
   5282 	case ixgbe_mac_X550EM_x:
   5283 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5284 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5285 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5286 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5287 		break;
   5288 	case ixgbe_mac_X550EM_a:
   5289 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5290 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5291 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5292 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5293 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5294 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5295 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5296 		}
   5297 		break;
   5298 	case ixgbe_mac_82599EB:
   5299 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5300 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5301 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5302 		    (adapter->hw.bus.func == 0))
   5303 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5304 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5305 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5306 		break;
   5307 	default:
   5308 		break;
   5309 	}
   5310 
   5311 	/* Enabled by default... */
   5312 	/* Fan failure detection */
   5313 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5314 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5315 	/* Netmap */
   5316 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5317 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5318 	/* EEE */
   5319 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5320 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5321 	/* Thermal Sensor */
   5322 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5323 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5324 
   5325 	/* Enabled via global sysctl... */
   5326 	/* Flow Director */
   5327 	if (ixgbe_enable_fdir) {
   5328 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5329 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5330 		else
    5331 			device_printf(adapter->dev, "Device does not support "
         			    "Flow Director. Leaving disabled.\n");
   5332 	}
   5333 	/* Legacy (single queue) transmit */
   5334 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5335 	    ixgbe_enable_legacy_tx)
   5336 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5337 	/*
   5338 	 * Message Signal Interrupts - Extended (MSI-X)
   5339 	 * Normal MSI is only enabled if MSI-X calls fail.
   5340 	 */
   5341 	if (!ixgbe_enable_msix)
   5342 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5343 	/* Receive-Side Scaling (RSS) */
   5344 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5345 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5346 
   5347 	/* Disable features with unmet dependencies... */
   5348 	/* No MSI-X */
   5349 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5350 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5351 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5352 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5353 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5354 	}
   5355 } /* ixgbe_init_device_features */
   5356 
   5357 /************************************************************************
   5358  * ixgbe_probe - Device identification routine
   5359  *
   5360  *   Determines if the driver should be loaded on
   5361  *   adapter based on its PCI vendor/device ID.
   5362  *
    5363  *   return 1 on a supported device, 0 otherwise (autoconf match value)
   5364  ************************************************************************/
   5365 static int
   5366 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5367 {
   5368 	const struct pci_attach_args *pa = aux;
   5369 
   5370 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5371 }
   5372 
   5373 static ixgbe_vendor_info_t *
   5374 ixgbe_lookup(const struct pci_attach_args *pa)
   5375 {
   5376 	ixgbe_vendor_info_t *ent;
   5377 	pcireg_t subid;
   5378 
   5379 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5380 
   5381 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5382 		return NULL;
   5383 
   5384 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5385 
   5386 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5387 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5388 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5389 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5390 			(ent->subvendor_id == 0)) &&
   5391 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5392 			(ent->subdevice_id == 0))) {
   5393 			++ixgbe_total_ports;
   5394 			return ent;
   5395 		}
   5396 	}
   5397 	return NULL;
   5398 }
   5399 
   5400 static int
   5401 ixgbe_ifflags_cb(struct ethercom *ec)
   5402 {
   5403 	struct ifnet *ifp = &ec->ec_if;
   5404 	struct adapter *adapter = ifp->if_softc;
   5405 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5406 
   5407 	IXGBE_CORE_LOCK(adapter);
   5408 
   5409 	if (change != 0)
   5410 		adapter->if_flags = ifp->if_flags;
   5411 
   5412 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5413 		rc = ENETRESET;
   5414 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5415 		ixgbe_set_promisc(adapter);
   5416 
   5417 	/* Set up VLAN support and filter */
   5418 	ixgbe_setup_vlan_hw_support(adapter);
   5419 
   5420 	IXGBE_CORE_UNLOCK(adapter);
   5421 
   5422 	return rc;
   5423 }
   5424 
   5425 /************************************************************************
   5426  * ixgbe_ioctl - Ioctl entry point
   5427  *
   5428  *   Called when the user wants to configure the interface.
   5429  *
   5430  *   return 0 on success, positive on failure
   5431  ************************************************************************/
   5432 static int
   5433 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5434 {
   5435 	struct adapter	*adapter = ifp->if_softc;
   5436 	struct ixgbe_hw *hw = &adapter->hw;
   5437 	struct ifcapreq *ifcr = data;
   5438 	struct ifreq	*ifr = data;
   5439 	int             error = 0;
   5440 	int l4csum_en;
   5441 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5442 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5443 
   5444 	switch (command) {
   5445 	case SIOCSIFFLAGS:
   5446 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5447 		break;
   5448 	case SIOCADDMULTI:
   5449 	case SIOCDELMULTI:
   5450 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5451 		break;
   5452 	case SIOCSIFMEDIA:
   5453 	case SIOCGIFMEDIA:
   5454 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5455 		break;
   5456 	case SIOCSIFCAP:
   5457 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5458 		break;
   5459 	case SIOCSIFMTU:
   5460 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5461 		break;
   5462 #ifdef __NetBSD__
   5463 	case SIOCINITIFADDR:
   5464 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5465 		break;
   5466 	case SIOCGIFFLAGS:
   5467 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5468 		break;
   5469 	case SIOCGIFAFLAG_IN:
   5470 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5471 		break;
   5472 	case SIOCGIFADDR:
   5473 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5474 		break;
   5475 	case SIOCGIFMTU:
   5476 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5477 		break;
   5478 	case SIOCGIFCAP:
   5479 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5480 		break;
   5481 	case SIOCGETHERCAP:
   5482 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5483 		break;
   5484 	case SIOCGLIFADDR:
   5485 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5486 		break;
   5487 	case SIOCZIFDATA:
   5488 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5489 		hw->mac.ops.clear_hw_cntrs(hw);
   5490 		ixgbe_clear_evcnt(adapter);
   5491 		break;
   5492 	case SIOCAIFADDR:
   5493 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5494 		break;
   5495 #endif
   5496 	default:
   5497 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5498 		break;
   5499 	}
   5500 
   5501 	switch (command) {
   5502 	case SIOCSIFMEDIA:
   5503 	case SIOCGIFMEDIA:
   5504 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5505 	case SIOCGI2C:
   5506 	{
   5507 		struct ixgbe_i2c_req	i2c;
   5508 
   5509 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5510 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5511 		if (error != 0)
   5512 			break;
   5513 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5514 			error = EINVAL;
   5515 			break;
   5516 		}
   5517 		if (i2c.len > sizeof(i2c.data)) {
   5518 			error = EINVAL;
   5519 			break;
   5520 		}
   5521 
    5522 		for (int i = 0; i < i2c.len; i++)
    5523 			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
         			    i2c.dev_addr, &i2c.data[i]);
   5524 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5525 		break;
   5526 	}
   5527 	case SIOCSIFCAP:
   5528 		/* Layer-4 Rx checksum offload has to be turned on and
   5529 		 * off as a unit.
   5530 		 */
   5531 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5532 		if (l4csum_en != l4csum && l4csum_en != 0)
   5533 			return EINVAL;
   5534 		/*FALLTHROUGH*/
   5535 	case SIOCADDMULTI:
   5536 	case SIOCDELMULTI:
   5537 	case SIOCSIFFLAGS:
   5538 	case SIOCSIFMTU:
   5539 	default:
   5540 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5541 			return error;
   5542 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5543 			;
   5544 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5545 			IXGBE_CORE_LOCK(adapter);
   5546 			ixgbe_init_locked(adapter);
   5547 			ixgbe_recalculate_max_frame(adapter);
   5548 			IXGBE_CORE_UNLOCK(adapter);
   5549 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5550 			/*
   5551 			 * Multicast list has changed; set the hardware filter
   5552 			 * accordingly.
   5553 			 */
   5554 			IXGBE_CORE_LOCK(adapter);
   5555 			ixgbe_disable_intr(adapter);
   5556 			ixgbe_set_multi(adapter);
   5557 			ixgbe_enable_intr(adapter);
   5558 			IXGBE_CORE_UNLOCK(adapter);
   5559 		}
   5560 		return 0;
   5561 	}
   5562 
   5563 	return error;
   5564 } /* ixgbe_ioctl */
   5565 
   5566 /************************************************************************
   5567  * ixgbe_check_fan_failure
   5568  ************************************************************************/
   5569 static void
   5570 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5571 {
   5572 	u32 mask;
   5573 
   5574 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5575 	    IXGBE_ESDP_SDP1;
   5576 
   5577 	if (reg & mask)
   5578 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5579 } /* ixgbe_check_fan_failure */
   5580 
   5581 /************************************************************************
   5582  * ixgbe_handle_que
   5583  ************************************************************************/
   5584 static void
   5585 ixgbe_handle_que(void *context)
   5586 {
   5587 	struct ix_queue *que = context;
   5588 	struct adapter  *adapter = que->adapter;
   5589 	struct tx_ring  *txr = que->txr;
   5590 	struct ifnet    *ifp = adapter->ifp;
   5591 
   5592 	adapter->handleq.ev_count++;
   5593 
   5594 	if (ifp->if_flags & IFF_RUNNING) {
   5595 		ixgbe_rxeof(que);
   5596 		IXGBE_TX_LOCK(txr);
   5597 		ixgbe_txeof(txr);
   5598 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5599 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5600 				ixgbe_mq_start_locked(ifp, txr);
   5601 		/* Only for queue 0 */
   5602 		/* NetBSD still needs this for CBQ */
   5603 		if ((&adapter->queues[0] == que)
   5604 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5605 			ixgbe_legacy_start_locked(ifp, txr);
   5606 		IXGBE_TX_UNLOCK(txr);
   5607 	}
   5608 
   5609 	/* Re-enable this interrupt */
   5610 	if (que->res != NULL)
   5611 		ixgbe_enable_queue(adapter, que->msix);
   5612 	else
   5613 		ixgbe_enable_intr(adapter);
   5614 
   5615 	return;
   5616 } /* ixgbe_handle_que */
   5617 
   5618 /************************************************************************
   5619  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5620  ************************************************************************/
   5621 static int
   5622 ixgbe_allocate_legacy(struct adapter *adapter,
   5623     const struct pci_attach_args *pa)
   5624 {
   5625 	device_t	dev = adapter->dev;
   5626 	struct ix_queue *que = adapter->queues;
   5627 	struct tx_ring  *txr = adapter->tx_rings;
   5628 	int		counts[PCI_INTR_TYPE_SIZE];
   5629 	pci_intr_type_t intr_type, max_type;
   5630 	char            intrbuf[PCI_INTRSTR_LEN];
   5631 	const char	*intrstr = NULL;
   5632 
   5633 	/* We allocate a single interrupt resource */
   5634 	max_type = PCI_INTR_TYPE_MSI;
   5635 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5636 	counts[PCI_INTR_TYPE_MSI] =
   5637 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5638 	counts[PCI_INTR_TYPE_INTX] =
   5639 	    (adapter->feat_en & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5640 
   5641 alloc_retry:
   5642 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5643 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5644 		return ENXIO;
   5645 	}
   5646 	adapter->osdep.nintrs = 1;
   5647 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5648 	    intrbuf, sizeof(intrbuf));
   5649 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5650 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5651 	    device_xname(dev));
   5652 	if (adapter->osdep.ihs[0] == NULL) {
   5653 		intr_type = pci_intr_type(adapter->osdep.pc,
   5654 		    adapter->osdep.intrs[0]);
   5655 		aprint_error_dev(dev,"unable to establish %s\n",
   5656 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5657 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5658 		switch (intr_type) {
   5659 		case PCI_INTR_TYPE_MSI:
   5660 			/* The next try is for INTx: Disable MSI */
   5661 			max_type = PCI_INTR_TYPE_INTX;
   5662 			counts[PCI_INTR_TYPE_INTX] = 1;
   5663 			goto alloc_retry;
   5664 		case PCI_INTR_TYPE_INTX:
   5665 		default:
   5666 			/* See below */
   5667 			break;
   5668 		}
   5669 	}
   5670 	if (adapter->osdep.ihs[0] == NULL) {
   5671 		aprint_error_dev(dev,
   5672 		    "couldn't establish interrupt%s%s\n",
   5673 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5674 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5675 		return ENXIO;
   5676 	}
   5677 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5678 	/*
   5679 	 * Try allocating a fast interrupt and the associated deferred
   5680 	 * processing contexts.
   5681 	 */
   5682 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5683 		txr->txr_si =
   5684 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5685 			ixgbe_deferred_mq_start, txr);
   5686 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5687 	    ixgbe_handle_que, que);
   5688 
   5689 	/* Tasklets for Link, SFP and Multispeed Fiber */
   5690 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   5691 	    ixgbe_handle_link, adapter);
   5692 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5693 	    ixgbe_handle_mod, adapter);
   5694 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5695 	    ixgbe_handle_msf, adapter);
   5696 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5697 	    ixgbe_handle_phy, adapter);
   5698 
   5699 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5700 		adapter->fdir_si =
   5701 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5702 			ixgbe_reinit_fdir, adapter);
   5703 
    5704 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) &&
    5705 		(txr->txr_si == NULL)) ||
    5706 	    que->que_si == NULL ||
    5707 	    adapter->link_si == NULL ||
    5708 	    adapter->mod_si == NULL ||
    5709 	    ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
    5710 		(adapter->fdir_si == NULL)) ||
    5711 	    adapter->msf_si == NULL) {
   5712 		aprint_error_dev(dev,
   5713 		    "could not establish software interrupts\n");
   5714 
   5715 		return ENXIO;
   5716 	}
   5717 	/* For simplicity in the handlers */
   5718 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5719 
   5720 	return (0);
   5721 } /* ixgbe_allocate_legacy */
   5722 
   5723 
   5724 /************************************************************************
   5725  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5726  ************************************************************************/
   5727 static int
   5728 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5729 {
   5730 	device_t        dev = adapter->dev;
   5731 	struct 		ix_queue *que = adapter->queues;
   5732 	struct  	tx_ring *txr = adapter->tx_rings;
   5733 	pci_chipset_tag_t pc;
   5734 	char		intrbuf[PCI_INTRSTR_LEN];
   5735 	char		intr_xname[32];
   5736 	const char	*intrstr = NULL;
   5737 	int 		error, vector = 0;
   5738 	int		cpu_id = 0;
   5739 	kcpuset_t	*affinity;
   5740 #ifdef RSS
   5741 	unsigned int    rss_buckets = 0;
   5742 	kcpuset_t	cpu_mask;
   5743 #endif
   5744 
   5745 	pc = adapter->osdep.pc;
   5746 #ifdef	RSS
   5747 	/*
   5748 	 * If we're doing RSS, the number of queues needs to
   5749 	 * match the number of RSS buckets that are configured.
   5750 	 *
   5751 	 * + If there's more queues than RSS buckets, we'll end
   5752 	 *   up with queues that get no traffic.
   5753 	 *
   5754 	 * + If there's more RSS buckets than queues, we'll end
   5755 	 *   up having multiple RSS buckets map to the same queue,
   5756 	 *   so there'll be some contention.
   5757 	 */
   5758 	rss_buckets = rss_getnumbuckets();
   5759 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5760 	    (adapter->num_queues != rss_buckets)) {
   5761 		device_printf(dev,
   5762 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5763 		    "; performance will be impacted.\n",
   5764 		    __func__, adapter->num_queues, rss_buckets);
   5765 	}
   5766 #endif
   5767 
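         	/* One vector per queue pair, plus one for link/admin events */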
   5768 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5769 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5770 	    adapter->osdep.nintrs) != 0) {
   5771 		aprint_error_dev(dev,
   5772 		    "failed to allocate MSI-X interrupt\n");
   5773 		return (ENXIO);
   5774 	}
   5775 
   5776 	kcpuset_create(&affinity, false);
   5777 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5778 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5779 		    device_xname(dev), i);
   5780 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5781 		    sizeof(intrbuf));
   5782 #ifdef IXGBE_MPSAFE
   5783 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5784 		    true);
   5785 #endif
   5786 		/* Set the handler function */
   5787 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5788 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5789 		    intr_xname);
   5790 		if (que->res == NULL) {
   5791 			pci_intr_release(pc, adapter->osdep.intrs,
   5792 			    adapter->osdep.nintrs);
   5793 			aprint_error_dev(dev,
   5794 			    "Failed to register QUE handler\n");
   5795 			kcpuset_destroy(affinity);
   5796 			return ENXIO;
   5797 		}
   5798 		que->msix = vector;
   5799 		adapter->active_queues |= (u64)(1 << que->msix);
   5800 
   5801 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5802 #ifdef	RSS
   5803 			/*
   5804 			 * The queue ID is used as the RSS layer bucket ID.
   5805 			 * We look up the queue ID -> RSS CPU ID and select
   5806 			 * that.
   5807 			 */
   5808 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5809 			CPU_SETOF(cpu_id, &cpu_mask);
   5810 #endif
   5811 		} else {
   5812 			/*
   5813 			 * Bind the MSI-X vector, and thus the
   5814 			 * rings to the corresponding CPU.
   5815 			 *
   5816 			 * This just happens to match the default RSS
   5817 			 * round-robin bucket -> queue -> CPU allocation.
   5818 			 */
   5819 			if (adapter->num_queues > 1)
   5820 				cpu_id = i;
   5821 		}
   5822 		/* Round-robin affinity */
   5823 		kcpuset_zero(affinity);
   5824 		kcpuset_set(affinity, cpu_id % ncpu);
   5825 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5826 		    NULL);
   5827 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5828 		    intrstr);
   5829 		if (error == 0) {
   5830 #if 1 /* def IXGBE_DEBUG */
   5831 #ifdef	RSS
    5832 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   5833 			    cpu_id % ncpu);
   5834 #else
   5835 			aprint_normal(", bound queue %d to cpu %d", i,
   5836 			    cpu_id % ncpu);
   5837 #endif
   5838 #endif /* IXGBE_DEBUG */
   5839 		}
   5840 		aprint_normal("\n");
   5841 
   5842 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5843 			txr->txr_si = softint_establish(
   5844 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5845 				ixgbe_deferred_mq_start, txr);
   5846 		que->que_si
   5847 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5848 			ixgbe_handle_que, que);
   5849 		if (que->que_si == NULL) {
   5850 			aprint_error_dev(dev,
   5851 			    "could not establish software interrupt\n");
   5852 		}
   5853 	}
   5854 
   5855 	/* and Link */
   5856 	cpu_id++;
   5857 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5858 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5859 	    sizeof(intrbuf));
   5860 #ifdef IXGBE_MPSAFE
   5861 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5862 	    true);
   5863 #endif
   5864 	/* Set the link handler function */
   5865 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5866 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   5867 	    intr_xname);
   5868 	if (adapter->osdep.ihs[vector] == NULL) {
   5869 		adapter->res = NULL;
   5870 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   5871 		kcpuset_destroy(affinity);
   5872 		return (ENXIO);
   5873 	}
   5874 	/* Round-robin affinity */
   5875 	kcpuset_zero(affinity);
   5876 	kcpuset_set(affinity, cpu_id % ncpu);
    5877 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   5878 
   5879 	aprint_normal_dev(dev,
   5880 	    "for link, interrupting at %s", intrstr);
   5881 	if (error == 0)
   5882 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   5883 	else
   5884 		aprint_normal("\n");
   5885 
   5886 	adapter->vector = vector;
   5887 	/* Tasklets for Link, SFP and Multispeed Fiber */
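	/*
	 * These softints are scheduled from ixgbe_msix_link() when the
	 * corresponding cause bits show up in EICR, deferring the heavier
	 * work out of hard interrupt context.
	 */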
    5888 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5889 	    ixgbe_handle_link, adapter);
   5890 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5891 	    ixgbe_handle_mod, adapter);
   5892 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5893 	    ixgbe_handle_msf, adapter);
   5894 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   5895 		adapter->mbx_si =
   5896 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5897 			ixgbe_handle_mbx, adapter);
   5898 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5899 		ixgbe_handle_phy, adapter);
   5900 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5901 		adapter->fdir_si =
   5902 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5903 			ixgbe_reinit_fdir, adapter);
   5904 
   5905 	kcpuset_destroy(affinity);
   5906 
   5907 	return (0);
   5908 } /* ixgbe_allocate_msix */
   5909 
   5910 /************************************************************************
   5911  * ixgbe_configure_interrupts
   5912  *
    5913  *   Set up MSI-X, MSI, or legacy interrupts (in that order).
    5914  *   The choice also depends on user-configurable settings.
   5915  ************************************************************************/
   5916 static int
   5917 ixgbe_configure_interrupts(struct adapter *adapter)
   5918 {
   5919 	device_t dev = adapter->dev;
   5920 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   5921 	int want, queues, msgs;
   5922 
   5923 	/* Default to 1 queue if MSI-X setup fails */
   5924 	adapter->num_queues = 1;
   5925 
   5926 	/* Override by tuneable */
   5927 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   5928 		goto msi;
   5929 
   5930 	/* First try MSI-X */
   5931 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   5932 	msgs = MIN(msgs, IXG_MAX_NINTR);
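	/* Need at least one queue vector plus the link vector. */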
   5933 	if (msgs < 2)
   5934 		goto msi;
   5935 
   5936 	adapter->msix_mem = (void *)1; /* XXX */
   5937 
   5938 	/* Figure out a reasonable auto config value */
   5939 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   5940 
   5941 #ifdef	RSS
   5942 	/* If we're doing RSS, clamp at the number of RSS buckets */
   5943 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   5944 		queues = min(queues, rss_getnumbuckets());
   5945 #endif
   5946 	if (ixgbe_num_queues > queues) {
    5947 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too "
		    "large, using reduced amount (%d).\n", ixgbe_num_queues,
		    queues);
   5948 		ixgbe_num_queues = queues;
   5949 	}
   5950 
   5951 	if (ixgbe_num_queues != 0)
   5952 		queues = ixgbe_num_queues;
    5953 	else
    5954 		/* When autoconfiguring, clamp to the hardware queue limits */
   5955 		queues = min(queues,
   5956 		    min(mac->max_tx_queues, mac->max_rx_queues));
   5957 
   5958 	/* reflect correct sysctl value */
   5959 	ixgbe_num_queues = queues;
   5960 
   5961 	/*
   5962 	 * Want one vector (RX/TX pair) per queue
   5963 	 * plus an additional for Link.
   5964 	 */
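	/*
	 * Illustrative example (assuming the ixgbe_num_queues tunable is 0
	 * and the MAC supports at least ncpu queues): with 8 CPUs and 16
	 * MSI-X messages available, queues = 8, want = 9, and msgs is
	 * trimmed down to 9.
	 */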
   5965 	want = queues + 1;
   5966 	if (msgs >= want)
   5967 		msgs = want;
   5968 	else {
    5969 		aprint_error_dev(dev, "MSI-X configuration problem: "
    5970 		    "%d vectors available but %d wanted!\n",
    5971 		    msgs, want);
   5972 		goto msi;
   5973 	}
   5974 	device_printf(dev,
   5975 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   5976 	adapter->num_queues = queues;
   5977 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   5978 	return (0);
   5979 
   5980 	/*
   5981 	 * MSI-X allocation failed or provided us with
    5982 	 * fewer vectors than needed. Free MSI-X resources
    5983 	 * and try enabling MSI instead.
   5984 	 */
   5985 msi:
   5986 	/* Without MSI-X, some features are no longer supported */
   5987 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5988 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   5989 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5990 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   5991 
    5992 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   5993 	adapter->msix_mem = NULL; /* XXX */
   5994 	if (msgs > 1)
   5995 		msgs = 1;
   5996 	if (msgs != 0) {
   5997 		msgs = 1;
   5998 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   5999 		aprint_normal_dev(dev, "Using an MSI interrupt\n");
   6000 		return (0);
   6001 	}
   6002 
   6003 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6004 		aprint_error_dev(dev,
   6005 		    "Device does not support legacy interrupts.\n");
   6006 		return 1;
   6007 	}
   6008 
   6009 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6010 	aprint_normal_dev(dev, "Using a Legacy interrupt\n");
   6011 
   6012 	return (0);
   6013 } /* ixgbe_configure_interrupts */
   6014 
   6015 
   6016 /************************************************************************
   6017  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6018  *
   6019  *   Done outside of interrupt context since the driver might sleep
   6020  ************************************************************************/
   6021 static void
   6022 ixgbe_handle_link(void *context)
   6023 {
   6024 	struct adapter  *adapter = context;
   6025 	struct ixgbe_hw *hw = &adapter->hw;
   6026 
   6027 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6028 	ixgbe_update_link_status(adapter);
   6029 
   6030 	/* Re-enable link interrupts */
   6031 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6032 } /* ixgbe_handle_link */
   6033 
   6034 /************************************************************************
   6035  * ixgbe_rearm_queues
   6036  ************************************************************************/
   6037 static void
   6038 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6039 {
   6040 	u32 mask;
   6041 
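	/*
	 * The 82598 exposes a single 32-bit EICS register, while newer
	 * MACs split the per-queue cause bits across two EICS_EX
	 * registers, so the 64-bit queue mask is written low word first
	 * and high word second.
	 */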
   6042 	switch (adapter->hw.mac.type) {
   6043 	case ixgbe_mac_82598EB:
   6044 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6045 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6046 		break;
   6047 	case ixgbe_mac_82599EB:
   6048 	case ixgbe_mac_X540:
   6049 	case ixgbe_mac_X550:
   6050 	case ixgbe_mac_X550EM_x:
   6051 	case ixgbe_mac_X550EM_a:
   6052 		mask = (queues & 0xFFFFFFFF);
   6053 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6054 		mask = (queues >> 32);
   6055 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6056 		break;
   6057 	default:
   6058 		break;
   6059 	}
   6060 } /* ixgbe_rearm_queues */
   6061