ixgbe.c revision 1.116
      1 /* $NetBSD: ixgbe.c,v 1.116 2017/12/20 08:51:42 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
     88  *   Used by probe to select devices to load on
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void	ixgbe_get_slot_info(struct adapter *);
    176 static int      ixgbe_allocate_msix(struct adapter *,
    177 		    const struct pci_attach_args *);
    178 static int      ixgbe_allocate_legacy(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_configure_interrupts(struct adapter *);
    181 static void	ixgbe_free_pci_resources(struct adapter *);
    182 static void	ixgbe_local_timer(void *);
    183 static void	ixgbe_local_timer1(void *);
    184 static int	ixgbe_setup_interface(device_t, struct adapter *);
    185 static void	ixgbe_config_gpie(struct adapter *);
    186 static void	ixgbe_config_dmac(struct adapter *);
    187 static void	ixgbe_config_delay_values(struct adapter *);
    188 static void	ixgbe_config_link(struct adapter *);
    189 static void	ixgbe_check_wol_support(struct adapter *);
    190 static int	ixgbe_setup_low_power_mode(struct adapter *);
    191 static void	ixgbe_rearm_queues(struct adapter *, u64);
    192 
    193 static void     ixgbe_initialize_transmit_units(struct adapter *);
    194 static void     ixgbe_initialize_receive_units(struct adapter *);
    195 static void	ixgbe_enable_rx_drop(struct adapter *);
    196 static void	ixgbe_disable_rx_drop(struct adapter *);
    197 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    198 
    199 static void     ixgbe_enable_intr(struct adapter *);
    200 static void     ixgbe_disable_intr(struct adapter *);
    201 static void     ixgbe_update_stats_counters(struct adapter *);
    202 static void     ixgbe_set_promisc(struct adapter *);
    203 static void     ixgbe_set_multi(struct adapter *);
    204 static void     ixgbe_update_link_status(struct adapter *);
    205 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    206 static void	ixgbe_configure_ivars(struct adapter *);
    207 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    208 
    209 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    210 #if 0
    211 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    212 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    213 #endif
    214 
    215 static void	ixgbe_add_device_sysctls(struct adapter *);
    216 static void     ixgbe_add_hw_stats(struct adapter *);
    217 static void	ixgbe_clear_evcnt(struct adapter *);
    218 static int	ixgbe_set_flowcntl(struct adapter *, int);
    219 static int	ixgbe_set_advertise(struct adapter *, int);
    220 static int      ixgbe_get_advertise(struct adapter *);
    221 
    222 /* Sysctl handlers */
    223 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    224 		     const char *, int *, int);
    225 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    226 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    227 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    231 #ifdef IXGBE_DEBUG
    232 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    234 #endif
    235 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    236 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    237 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    242 
    243 /* Support for pluggable optic modules */
    244 static bool	ixgbe_sfp_probe(struct adapter *);
    245 
    246 /* Legacy (single vector) interrupt handler */
    247 static int	ixgbe_legacy_irq(void *);
    248 
    249 /* The MSI/MSI-X Interrupt handlers */
    250 static int	ixgbe_msix_que(void *);
    251 static int	ixgbe_msix_link(void *);
    252 
    253 /* Software interrupts for deferred work */
    254 static void	ixgbe_handle_que(void *);
    255 static void	ixgbe_handle_link(void *);
    256 static void	ixgbe_handle_msf(void *);
    257 static void	ixgbe_handle_mod(void *);
    258 static void	ixgbe_handle_phy(void *);
    259 
    260 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    261 
    262 /************************************************************************
    263  *  NetBSD Device Interface Entry Points
    264  ************************************************************************/
    265 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    266     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    267     DVF_DETACH_SHUTDOWN);
    268 
    269 #if 0
    270 devclass_t ix_devclass;
    271 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    272 
    273 MODULE_DEPEND(ix, pci, 1, 1, 1);
    274 MODULE_DEPEND(ix, ether, 1, 1, 1);
    275 #ifdef DEV_NETMAP
    276 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    277 #endif
    278 #endif
    279 
    280 /*
    281  * TUNEABLE PARAMETERS:
    282  */
    283 
    284 /*
    285  * AIM: Adaptive Interrupt Moderation
    286  * which means that the interrupt rate
    287  * is varied over time based on the
    288  * traffic for that interrupt vector
    289  */
    290 static bool ixgbe_enable_aim = true;
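         /*
          * Note (added): the empty SYSCTL_INT() macro defined on the next
          * line compiles the FreeBSD-style sysctl declarations below out on
          * NetBSD; where an equivalent per-device knob exists it is created
          * later, e.g. in ixgbe_add_device_sysctls().
          */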
    291 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    292 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    293     "Enable adaptive interrupt moderation");
    294 
    295 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    296 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    297     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    298 
    299 /* How many packets rxeof tries to clean at a time */
    300 static int ixgbe_rx_process_limit = 256;
    301 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    302     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    303 
    304 /* How many packets txeof tries to clean at a time */
    305 static int ixgbe_tx_process_limit = 256;
    306 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    307     &ixgbe_tx_process_limit, 0,
    308     "Maximum number of sent packets to process at a time, -1 means unlimited");
    309 
    310 /* Flow control setting, default to full */
    311 static int ixgbe_flow_control = ixgbe_fc_full;
    312 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    313     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    314 
    315 /*
     316  * Smart speed setting, default to on.
     317  * This only works as a compile-time option
     318  * right now because it is set during attach;
     319  * set this to 'ixgbe_smart_speed_off' to
     320  * disable.
    321  */
    322 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    323 
    324 /*
    325  * MSI-X should be the default for best performance,
    326  * but this allows it to be forced off for testing.
    327  */
    328 static int ixgbe_enable_msix = 1;
    329 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    330     "Enable MSI-X interrupts");
    331 
    332 /*
     333  * Number of queues; if set to 0 it
     334  * autoconfigures based on the number
     335  * of cpus with a max of 8. This can
     336  * be overridden manually here.
    337  */
    338 static int ixgbe_num_queues = 0;
    339 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    340     "Number of queues to configure, 0 indicates autoconfigure");
    341 
    342 /*
    343  * Number of TX descriptors per ring,
    344  * setting higher than RX as this seems
    345  * the better performing choice.
    346  */
    347 static int ixgbe_txd = PERFORM_TXD;
    348 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    349     "Number of transmit descriptors per queue");
    350 
    351 /* Number of RX descriptors per ring */
    352 static int ixgbe_rxd = PERFORM_RXD;
    353 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    354     "Number of receive descriptors per queue");
    355 
    356 /*
     357  * Turning this on will allow the use
     358  * of unsupported SFP+ modules; note that
     359  * in doing so you are on your own :)
    360  */
    361 static int allow_unsupported_sfp = false;
    362 #define TUNABLE_INT(__x, __y)
    363 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    364 
    365 /*
    366  * Not sure if Flow Director is fully baked,
    367  * so we'll default to turning it off.
    368  */
    369 static int ixgbe_enable_fdir = 0;
    370 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    371     "Enable Flow Director");
    372 
    373 /* Legacy Transmit (single queue) */
    374 static int ixgbe_enable_legacy_tx = 0;
    375 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    376     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    377 
    378 /* Receive-Side Scaling */
    379 static int ixgbe_enable_rss = 1;
    380 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    381     "Enable Receive-Side Scaling (RSS)");
    382 
    383 /* Keep running tab on them for sanity check */
    384 static int ixgbe_total_ports;
    385 
    386 #if 0
    387 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    388 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    389 #endif
    390 
    391 #ifdef NET_MPSAFE
    392 #define IXGBE_MPSAFE		1
    393 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    394 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    395 #else
    396 #define IXGBE_CALLOUT_FLAGS	0
    397 #define IXGBE_SOFTINFT_FLAGS	0
    398 #endif
    399 
    400 /************************************************************************
    401  * ixgbe_initialize_rss_mapping
    402  ************************************************************************/
    403 static void
    404 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    405 {
    406 	struct ixgbe_hw	*hw = &adapter->hw;
    407 	u32             reta = 0, mrqc, rss_key[10];
    408 	int             queue_id, table_size, index_mult;
    409 	int             i, j;
    410 	u32             rss_hash_config;
    411 
    412 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    413 		/* Fetch the configured RSS key */
    414 		rss_getkey((uint8_t *) &rss_key);
    415 	} else {
    416 		/* set up random bits */
    417 		cprng_fast(&rss_key, sizeof(rss_key));
    418 	}
    419 
    420 	/* Set multiplier for RETA setup and table size based on MAC */
    421 	index_mult = 0x1;
    422 	table_size = 128;
    423 	switch (adapter->hw.mac.type) {
    424 	case ixgbe_mac_82598EB:
    425 		index_mult = 0x11;
    426 		break;
    427 	case ixgbe_mac_X550:
    428 	case ixgbe_mac_X550EM_x:
    429 	case ixgbe_mac_X550EM_a:
    430 		table_size = 512;
    431 		break;
    432 	default:
    433 		break;
    434 	}
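         	/*
         	 * Note (added): X550-class MACs have a 512-entry redirection
         	 * table; entries beyond the first 128 are written through the
         	 * ERETA registers in the loop below.
         	 */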
    435 
    436 	/* Set up the redirection table */
    437 	for (i = 0, j = 0; i < table_size; i++, j++) {
    438 		if (j == adapter->num_queues)
    439 			j = 0;
    440 
    441 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    442 			/*
    443 			 * Fetch the RSS bucket id for the given indirection
    444 			 * entry. Cap it at the number of configured buckets
    445 			 * (which is num_queues.)
    446 			 */
    447 			queue_id = rss_get_indirection_to_bucket(i);
    448 			queue_id = queue_id % adapter->num_queues;
    449 		} else
    450 			queue_id = (j * index_mult);
    451 
    452 		/*
    453 		 * The low 8 bits are for hash value (n+0);
    454 		 * The next 8 bits are for hash value (n+1), etc.
    455 		 */
    456 		reta = reta >> 8;
    457 		reta = reta | (((uint32_t) queue_id) << 24);
    458 		if ((i & 3) == 3) {
    459 			if (i < 128)
    460 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    461 			else
    462 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    463 				    reta);
    464 			reta = 0;
    465 		}
    466 	}
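         	/*
         	 * Illustration (added, not in the original source): each
         	 * RETA/ERETA register holds four 8-bit entries, filled low byte
         	 * first by the shift-and-or sequence above.  For example, with
         	 * four queues, IXGBE_FEATURE_RSS off and index_mult == 1,
         	 * indirection entries 0..3 yield RETA(0) == 0x03020100, i.e.
         	 * byte n selects the queue for entry n.
         	 */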
    467 
    468 	/* Now fill our hash function seeds */
    469 	for (i = 0; i < 10; i++)
    470 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    471 
    472 	/* Perform hash on these packet types */
    473 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    474 		rss_hash_config = rss_gethashconfig();
    475 	else {
    476 		/*
    477 		 * Disable UDP - IP fragments aren't currently being handled
    478 		 * and so we end up with a mix of 2-tuple and 4-tuple
    479 		 * traffic.
    480 		 */
    481 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    482 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    483 		                | RSS_HASHTYPE_RSS_IPV6
    484 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    485 		                | RSS_HASHTYPE_RSS_IPV6_EX
    486 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    487 	}
    488 
    489 	mrqc = IXGBE_MRQC_RSSEN;
    490 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    491 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    492 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    493 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    494 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    495 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    496 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    497 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    503 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    504 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    505 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    506 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    507 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    508 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    509 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    510 } /* ixgbe_initialize_rss_mapping */
    511 
    512 /************************************************************************
    513  * ixgbe_initialize_receive_units - Setup receive registers and features.
    514  ************************************************************************/
    515 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    516 
    517 static void
    518 ixgbe_initialize_receive_units(struct adapter *adapter)
    519 {
    520 	struct	rx_ring	*rxr = adapter->rx_rings;
    521 	struct ixgbe_hw	*hw = &adapter->hw;
    522 	struct ifnet    *ifp = adapter->ifp;
    523 	int             i, j;
    524 	u32		bufsz, fctrl, srrctl, rxcsum;
    525 	u32		hlreg;
    526 
    527 	/*
    528 	 * Make sure receives are disabled while
    529 	 * setting up the descriptor ring
    530 	 */
    531 	ixgbe_disable_rx(hw);
    532 
    533 	/* Enable broadcasts */
    534 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    535 	fctrl |= IXGBE_FCTRL_BAM;
    536 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    537 		fctrl |= IXGBE_FCTRL_DPF;
    538 		fctrl |= IXGBE_FCTRL_PMCF;
    539 	}
    540 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    541 
    542 	/* Set for Jumbo Frames? */
    543 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    544 	if (ifp->if_mtu > ETHERMTU)
    545 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    546 	else
    547 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    548 
    549 #ifdef DEV_NETMAP
    550 	/* CRC stripping is conditional in Netmap */
    551 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    552 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    553 	    !ix_crcstrip)
    554 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    555 	else
    556 #endif /* DEV_NETMAP */
    557 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    558 
    559 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    560 
    561 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    562 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
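         	/*
         	 * bufsz is now the receive buffer size in units of
         	 * (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) bytes, rounded up, which
         	 * is the granularity the SRRCTL packet buffer size field
         	 * expects when it is OR'd into srrctl below.
         	 */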
    563 
    564 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    565 		u64 rdba = rxr->rxdma.dma_paddr;
    566 		u32 tqsmreg, reg;
    567 		int regnum = i / 4;	/* 1 register per 4 queues */
     568 		int regshift = i % 4;	/* one byte-wide field per queue */
    569 		j = rxr->me;
    570 
    571 		/* Setup the Base and Length of the Rx Descriptor Ring */
    572 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    573 		    (rdba & 0x00000000ffffffffULL));
    574 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    575 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    576 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    577 
    578 		/* Set up the SRRCTL register */
    579 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    580 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    581 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    582 		srrctl |= bufsz;
    583 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    584 
    585 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    586 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    587 		reg &= ~(0x000000ff << (regshift * 8));
    588 		reg |= i << (regshift * 8);
    589 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    590 
    591 		/*
     592 		 * Set TQSMR/TQSM (Transmit Queue Statistic Mapping) register.
     593 		 * The register location for queues 0...7 differs between
     594 		 * 82598 and newer MACs.
    595 		 */
    596 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    597 			tqsmreg = IXGBE_TQSMR(regnum);
    598 		else
    599 			tqsmreg = IXGBE_TQSM(regnum);
    600 		reg = IXGBE_READ_REG(hw, tqsmreg);
    601 		reg &= ~(0x000000ff << (regshift * 8));
    602 		reg |= i << (regshift * 8);
    603 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
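         		/*
         		 * Example (added): queue 5 has regnum == 1 and
         		 * regshift == 1, so its index is written into byte 1 of
         		 * RQSMR(1) above and of TQSM(1) (TQSMR(1) on 82598) here.
         		 */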
    604 
    605 		/*
    606 		 * Set DROP_EN iff we have no flow control and >1 queue.
     607 		 * Note that srrctl was already cleared by the earlier reset,
    608 		 * so we do not need to clear the bit, but do it just in case
    609 		 * this code is moved elsewhere.
    610 		 */
    611 		if (adapter->num_queues > 1 &&
    612 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    613 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    614 		} else {
    615 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    616 		}
    617 
    618 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    619 
    620 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    621 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    622 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    623 
    624 		/* Set the driver rx tail address */
    625 		rxr->tail =  IXGBE_RDT(rxr->me);
    626 	}
    627 
    628 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    629 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    630 		            | IXGBE_PSRTYPE_UDPHDR
    631 		            | IXGBE_PSRTYPE_IPV4HDR
    632 		            | IXGBE_PSRTYPE_IPV6HDR;
    633 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    634 	}
    635 
    636 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    637 
    638 	ixgbe_initialize_rss_mapping(adapter);
    639 
    640 	if (adapter->num_queues > 1) {
    641 		/* RSS and RX IPP Checksum are mutually exclusive */
    642 		rxcsum |= IXGBE_RXCSUM_PCSD;
    643 	}
    644 
    645 	if (ifp->if_capenable & IFCAP_RXCSUM)
    646 		rxcsum |= IXGBE_RXCSUM_PCSD;
    647 
    648 	/* This is useful for calculating UDP/IP fragment checksums */
    649 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    650 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    651 
    652 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    653 
    654 	return;
    655 } /* ixgbe_initialize_receive_units */
    656 
    657 /************************************************************************
    658  * ixgbe_initialize_transmit_units - Enable transmit units.
    659  ************************************************************************/
    660 static void
    661 ixgbe_initialize_transmit_units(struct adapter *adapter)
    662 {
    663 	struct tx_ring  *txr = adapter->tx_rings;
    664 	struct ixgbe_hw	*hw = &adapter->hw;
    665 
    666 	/* Setup the Base and Length of the Tx Descriptor Ring */
    667 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    668 		u64 tdba = txr->txdma.dma_paddr;
    669 		u32 txctrl = 0;
    670 		int j = txr->me;
    671 
    672 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    673 		    (tdba & 0x00000000ffffffffULL));
    674 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    675 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    676 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    677 
    678 		/* Setup the HW Tx Head and Tail descriptor pointers */
    679 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    681 
    682 		/* Cache the tail address */
    683 		txr->tail = IXGBE_TDT(j);
    684 
    685 		/* Disable Head Writeback */
    686 		/*
    687 		 * Note: for X550 series devices, these registers are actually
     688 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    689 		 * fields remain the same.
    690 		 */
    691 		switch (hw->mac.type) {
    692 		case ixgbe_mac_82598EB:
    693 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    694 			break;
    695 		default:
    696 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    697 			break;
    698 		}
    699 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    700 		switch (hw->mac.type) {
    701 		case ixgbe_mac_82598EB:
    702 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    703 			break;
    704 		default:
    705 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    706 			break;
    707 		}
    708 
    709 	}
    710 
    711 	if (hw->mac.type != ixgbe_mac_82598EB) {
    712 		u32 dmatxctl, rttdcs;
    713 
    714 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    715 		dmatxctl |= IXGBE_DMATXCTL_TE;
    716 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    717 		/* Disable arbiter to set MTQC */
    718 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    719 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    720 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    721 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    722 		    ixgbe_get_mtqc(adapter->iov_mode));
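         		/* Re-enable the arbiter now that MTQC is programmed */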
    723 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    724 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    725 	}
    726 
    727 	return;
    728 } /* ixgbe_initialize_transmit_units */
    729 
    730 /************************************************************************
    731  * ixgbe_attach - Device initialization routine
    732  *
    733  *   Called when the driver is being loaded.
    734  *   Identifies the type of hardware, allocates all resources
    735  *   and initializes the hardware.
    736  *
    737  *   return 0 on success, positive on failure
    738  ************************************************************************/
    739 static void
    740 ixgbe_attach(device_t parent, device_t dev, void *aux)
    741 {
    742 	struct adapter  *adapter;
    743 	struct ixgbe_hw *hw;
    744 	int             error = -1;
    745 	u32		ctrl_ext;
    746 	u16		high, low, nvmreg;
    747 	pcireg_t	id, subid;
    748 	ixgbe_vendor_info_t *ent;
    749 	struct pci_attach_args *pa = aux;
    750 	const char *str;
    751 	char buf[256];
    752 
    753 	INIT_DEBUGOUT("ixgbe_attach: begin");
    754 
    755 	/* Allocate, clear, and link in our adapter structure */
    756 	adapter = device_private(dev);
    757 	adapter->hw.back = adapter;
    758 	adapter->dev = dev;
    759 	hw = &adapter->hw;
    760 	adapter->osdep.pc = pa->pa_pc;
    761 	adapter->osdep.tag = pa->pa_tag;
    762 	if (pci_dma64_available(pa))
    763 		adapter->osdep.dmat = pa->pa_dmat64;
    764 	else
    765 		adapter->osdep.dmat = pa->pa_dmat;
    766 	adapter->osdep.attached = false;
    767 
    768 	ent = ixgbe_lookup(pa);
    769 
    770 	KASSERT(ent != NULL);
    771 
    772 	aprint_normal(": %s, Version - %s\n",
    773 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    774 
    775 	/* Core Lock Init*/
    776 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    777 
    778 	/* Set up the timer callout */
    779 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    780 
    781 	/* Determine hardware revision */
    782 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    783 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    784 
    785 	hw->vendor_id = PCI_VENDOR(id);
    786 	hw->device_id = PCI_PRODUCT(id);
    787 	hw->revision_id =
    788 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    789 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    790 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    791 
    792 	/*
    793 	 * Make sure BUSMASTER is set
    794 	 */
    795 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    796 
    797 	/* Do base PCI setup - map BAR0 */
    798 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    799 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    800 		error = ENXIO;
    801 		goto err_out;
    802 	}
    803 
    804 	/* let hardware know driver is loaded */
    805 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    806 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    807 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    808 
    809 	/*
    810 	 * Initialize the shared code
    811 	 */
    812 	if (ixgbe_init_shared_code(hw)) {
    813 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    814 		error = ENXIO;
    815 		goto err_out;
    816 	}
    817 
    818 	switch (hw->mac.type) {
    819 	case ixgbe_mac_82598EB:
    820 		str = "82598EB";
    821 		break;
    822 	case ixgbe_mac_82599EB:
    823 		str = "82599EB";
    824 		break;
    825 	case ixgbe_mac_X540:
    826 		str = "X540";
    827 		break;
    828 	case ixgbe_mac_X550:
    829 		str = "X550";
    830 		break;
    831 	case ixgbe_mac_X550EM_x:
    832 		str = "X550EM";
    833 		break;
    834 	case ixgbe_mac_X550EM_a:
    835 		str = "X550EM A";
    836 		break;
    837 	default:
    838 		str = "Unknown";
    839 		break;
    840 	}
    841 	aprint_normal_dev(dev, "device %s\n", str);
    842 
    843 	if (hw->mbx.ops.init_params)
    844 		hw->mbx.ops.init_params(hw);
    845 
    846 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    847 
    848 	/* Pick up the 82599 settings */
    849 	if (hw->mac.type != ixgbe_mac_82598EB) {
    850 		hw->phy.smart_speed = ixgbe_smart_speed;
    851 		adapter->num_segs = IXGBE_82599_SCATTER;
    852 	} else
    853 		adapter->num_segs = IXGBE_82598_SCATTER;
    854 
    855 	hw->mac.ops.set_lan_id(hw);
    856 	ixgbe_init_device_features(adapter);
    857 
    858 	if (ixgbe_configure_interrupts(adapter)) {
    859 		error = ENXIO;
    860 		goto err_out;
    861 	}
    862 
    863 	/* Allocate multicast array memory. */
    864 	adapter->mta = malloc(sizeof(*adapter->mta) *
    865 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    866 	if (adapter->mta == NULL) {
    867 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    868 		error = ENOMEM;
    869 		goto err_out;
    870 	}
    871 
    872 	/* Enable WoL (if supported) */
    873 	ixgbe_check_wol_support(adapter);
    874 
    875 	/* Verify adapter fan is still functional (if applicable) */
    876 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    877 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    878 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    879 	}
    880 
    881 	/* Ensure SW/FW semaphore is free */
    882 	ixgbe_init_swfw_semaphore(hw);
    883 
    884 	/* Enable EEE power saving */
    885 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    886 		hw->mac.ops.setup_eee(hw, TRUE);
    887 
    888 	/* Set an initial default flow control value */
    889 	hw->fc.requested_mode = ixgbe_flow_control;
    890 
    891 	/* Sysctls for limiting the amount of work done in the taskqueues */
    892 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    893 	    "max number of rx packets to process",
    894 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    895 
    896 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    897 	    "max number of tx packets to process",
    898 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    899 
    900 	/* Do descriptor calc and sanity checks */
    901 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    902 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    903 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    904 		adapter->num_tx_desc = DEFAULT_TXD;
    905 	} else
    906 		adapter->num_tx_desc = ixgbe_txd;
    907 
    908 	/*
    909 	 * With many RX rings it is easy to exceed the
    910 	 * system mbuf allocation. Tuning nmbclusters
    911 	 * can alleviate this.
    912 	 */
    913 	if (nmbclusters > 0) {
    914 		int s;
    915 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    916 		if (s > nmbclusters) {
    917 			aprint_error_dev(dev, "RX Descriptors exceed "
    918 			    "system mbuf max, using default instead!\n");
    919 			ixgbe_rxd = DEFAULT_RXD;
    920 		}
    921 	}
    922 
    923 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    924 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    925 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    926 		adapter->num_rx_desc = DEFAULT_RXD;
    927 	} else
    928 		adapter->num_rx_desc = ixgbe_rxd;
    929 
    930 	/* Allocate our TX/RX Queues */
    931 	if (ixgbe_allocate_queues(adapter)) {
    932 		error = ENOMEM;
    933 		goto err_out;
    934 	}
    935 
    936 	hw->phy.reset_if_overtemp = TRUE;
    937 	error = ixgbe_reset_hw(hw);
    938 	hw->phy.reset_if_overtemp = FALSE;
    939 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    940 		/*
    941 		 * No optics in this port, set up
    942 		 * so the timer routine will probe
    943 		 * for later insertion.
    944 		 */
    945 		adapter->sfp_probe = TRUE;
    946 		error = IXGBE_SUCCESS;
    947 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    948 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    949 		error = EIO;
    950 		goto err_late;
    951 	} else if (error) {
    952 		aprint_error_dev(dev, "Hardware initialization failed\n");
    953 		error = EIO;
    954 		goto err_late;
    955 	}
    956 
    957 	/* Make sure we have a good EEPROM before we read from it */
    958 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    959 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    960 		error = EIO;
    961 		goto err_late;
    962 	}
    963 
    964 	aprint_normal("%s:", device_xname(dev));
    965 	/* NVM Image Version */
    966 	switch (hw->mac.type) {
    967 	case ixgbe_mac_X540:
    968 	case ixgbe_mac_X550EM_a:
    969 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    970 		if (nvmreg == 0xffff)
    971 			break;
    972 		high = (nvmreg >> 12) & 0x0f;
    973 		low = (nvmreg >> 4) & 0xff;
    974 		id = nvmreg & 0x0f;
    975 		aprint_normal(" NVM Image Version %u.", high);
    976 		if (hw->mac.type == ixgbe_mac_X540)
    977 			str = "%x";
    978 		else
    979 			str = "%02x";
    980 		aprint_normal(str, low);
    981 		aprint_normal(" ID 0x%x,", id);
    982 		break;
    983 	case ixgbe_mac_X550EM_x:
    984 	case ixgbe_mac_X550:
    985 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    986 		if (nvmreg == 0xffff)
    987 			break;
    988 		high = (nvmreg >> 12) & 0x0f;
    989 		low = nvmreg & 0xff;
    990 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    991 		break;
    992 	default:
    993 		break;
    994 	}
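         	/*
         	 * Example decode (added, value is illustrative): nvmreg ==
         	 * 0x2056 gives high == 2, low == 0x05 and id == 6, printed as
         	 * "NVM Image Version 2.5 ID 0x6" on X540 ("%x" for low) and
         	 * "NVM Image Version 2.05 ID 0x6" on X550EM A ("%02x").
         	 */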
    995 
    996 	/* PHY firmware revision */
    997 	switch (hw->mac.type) {
    998 	case ixgbe_mac_X540:
    999 	case ixgbe_mac_X550:
   1000 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1001 		if (nvmreg == 0xffff)
   1002 			break;
   1003 		high = (nvmreg >> 12) & 0x0f;
   1004 		low = (nvmreg >> 4) & 0xff;
   1005 		id = nvmreg & 0x000f;
   1006 		aprint_normal(" PHY FW Revision %u.", high);
   1007 		if (hw->mac.type == ixgbe_mac_X540)
   1008 			str = "%x";
   1009 		else
   1010 			str = "%02x";
   1011 		aprint_normal(str, low);
   1012 		aprint_normal(" ID 0x%x,", id);
   1013 		break;
   1014 	default:
   1015 		break;
   1016 	}
   1017 
   1018 	/* NVM Map version & OEM NVM Image version */
   1019 	switch (hw->mac.type) {
   1020 	case ixgbe_mac_X550:
   1021 	case ixgbe_mac_X550EM_x:
   1022 	case ixgbe_mac_X550EM_a:
   1023 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1024 		if (nvmreg != 0xffff) {
   1025 			high = (nvmreg >> 12) & 0x0f;
   1026 			low = nvmreg & 0x00ff;
   1027 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1028 		}
   1029 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1030 		if (nvmreg != 0xffff) {
   1031 			high = (nvmreg >> 12) & 0x0f;
   1032 			low = nvmreg & 0x00ff;
   1033 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1034 			    low);
   1035 		}
   1036 		break;
   1037 	default:
   1038 		break;
   1039 	}
   1040 
   1041 	/* Print the ETrackID */
   1042 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1043 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1044 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1045 
   1046 	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
   1047 		error = ixgbe_allocate_msix(adapter, pa);
   1048 	else
   1049 		error = ixgbe_allocate_legacy(adapter, pa);
   1050 	if (error)
   1051 		goto err_late;
   1052 
   1053 	error = ixgbe_start_hw(hw);
   1054 	switch (error) {
   1055 	case IXGBE_ERR_EEPROM_VERSION:
   1056 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1057 		    "LOM.  Please be aware there may be issues associated "
   1058 		    "with your hardware.\nIf you are experiencing problems "
   1059 		    "please contact your Intel or hardware representative "
   1060 		    "who provided you with this hardware.\n");
   1061 		break;
   1062 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1063 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1064 		error = EIO;
   1065 		goto err_late;
   1066 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1067 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1068 		/* falls thru */
   1069 	default:
   1070 		break;
   1071 	}
   1072 
   1073 	/* Setup OS specific network interface */
   1074 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1075 		goto err_late;
   1076 
   1077 	/*
    1078 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+)
    1079 	 * cage and a module inserted, phy.id is an SFF-8024 ID, not an MII PHY id.
   1080 	 */
   1081 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1082 		uint16_t id1, id2;
   1083 		int oui, model, rev;
   1084 		const char *descr;
   1085 
   1086 		id1 = hw->phy.id >> 16;
   1087 		id2 = hw->phy.id & 0xffff;
   1088 		oui = MII_OUI(id1, id2);
   1089 		model = MII_MODEL(id2);
   1090 		rev = MII_REV(id2);
   1091 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1092 			aprint_normal_dev(dev,
   1093 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1094 			    descr, oui, model, rev);
   1095 		else
   1096 			aprint_normal_dev(dev,
   1097 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1098 			    oui, model, rev);
   1099 	}
   1100 
   1101 	/* Enable the optics for 82599 SFP+ fiber */
   1102 	ixgbe_enable_tx_laser(hw);
   1103 
   1104 	/* Enable power to the phy. */
   1105 	ixgbe_set_phy_power(hw, TRUE);
   1106 
   1107 	/* Initialize statistics */
   1108 	ixgbe_update_stats_counters(adapter);
   1109 
   1110 	/* Check PCIE slot type/speed/width */
   1111 	ixgbe_get_slot_info(adapter);
   1112 
   1113 	/*
   1114 	 * Do time init and sysctl init here, but
   1115 	 * only on the first port of a bypass adapter.
   1116 	 */
   1117 	ixgbe_bypass_init(adapter);
   1118 
   1119 	/* Set an initial dmac value */
   1120 	adapter->dmac = 0;
   1121 	/* Set initial advertised speeds (if applicable) */
   1122 	adapter->advertise = ixgbe_get_advertise(adapter);
   1123 
   1124 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1125 		ixgbe_define_iov_schemas(dev, &error);
   1126 
   1127 	/* Add sysctls */
   1128 	ixgbe_add_device_sysctls(adapter);
   1129 	ixgbe_add_hw_stats(adapter);
   1130 
   1131 	/* For Netmap */
   1132 	adapter->init_locked = ixgbe_init_locked;
   1133 	adapter->stop_locked = ixgbe_stop;
   1134 
   1135 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1136 		ixgbe_netmap_attach(adapter);
   1137 
   1138 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1139 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1140 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1141 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1142 
   1143 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1144 		pmf_class_network_register(dev, adapter->ifp);
   1145 	else
   1146 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1147 
   1148 	INIT_DEBUGOUT("ixgbe_attach: end");
   1149 	adapter->osdep.attached = true;
   1150 
   1151 	return;
   1152 
   1153 err_late:
   1154 	ixgbe_free_transmit_structures(adapter);
   1155 	ixgbe_free_receive_structures(adapter);
   1156 	free(adapter->queues, M_DEVBUF);
   1157 err_out:
   1158 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1159 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1160 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1161 	ixgbe_free_pci_resources(adapter);
   1162 	if (adapter->mta != NULL)
   1163 		free(adapter->mta, M_DEVBUF);
   1164 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1165 
   1166 	return;
   1167 } /* ixgbe_attach */
   1168 
   1169 /************************************************************************
   1170  * ixgbe_check_wol_support
   1171  *
   1172  *   Checks whether the adapter's ports are capable of
   1173  *   Wake On LAN by reading the adapter's NVM.
   1174  *
   1175  *   Sets each port's hw->wol_enabled value depending
   1176  *   on the value read here.
   1177  ************************************************************************/
   1178 static void
   1179 ixgbe_check_wol_support(struct adapter *adapter)
   1180 {
   1181 	struct ixgbe_hw *hw = &adapter->hw;
   1182 	u16             dev_caps = 0;
   1183 
   1184 	/* Find out WoL support for port */
   1185 	adapter->wol_support = hw->wol_enabled = 0;
   1186 	ixgbe_get_device_caps(hw, &dev_caps);
   1187 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1188 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1189 	     hw->bus.func == 0))
   1190 		adapter->wol_support = hw->wol_enabled = 1;
   1191 
   1192 	/* Save initial wake up filter configuration */
   1193 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1194 
   1195 	return;
   1196 } /* ixgbe_check_wol_support */
   1197 
   1198 /************************************************************************
   1199  * ixgbe_setup_interface
   1200  *
   1201  *   Setup networking device structure and register an interface.
   1202  ************************************************************************/
   1203 static int
   1204 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1205 {
   1206 	struct ethercom *ec = &adapter->osdep.ec;
   1207 	struct ifnet   *ifp;
   1208 	int rv;
   1209 
   1210 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1211 
   1212 	ifp = adapter->ifp = &ec->ec_if;
   1213 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1214 	ifp->if_baudrate = IF_Gbps(10);
   1215 	ifp->if_init = ixgbe_init;
   1216 	ifp->if_stop = ixgbe_ifstop;
   1217 	ifp->if_softc = adapter;
   1218 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1219 #ifdef IXGBE_MPSAFE
   1220 	ifp->if_extflags = IFEF_MPSAFE;
   1221 #endif
   1222 	ifp->if_ioctl = ixgbe_ioctl;
   1223 #if __FreeBSD_version >= 1100045
   1224 	/* TSO parameters */
   1225 	ifp->if_hw_tsomax = 65518;
   1226 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1227 	ifp->if_hw_tsomaxsegsize = 2048;
   1228 #endif
   1229 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1230 #if 0
   1231 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1232 #endif
   1233 	} else {
   1234 		ifp->if_transmit = ixgbe_mq_start;
   1235 #if 0
   1236 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1237 #endif
   1238 	}
   1239 	ifp->if_start = ixgbe_legacy_start;
   1240 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1241 	IFQ_SET_READY(&ifp->if_snd);
   1242 
   1243 	rv = if_initialize(ifp);
   1244 	if (rv != 0) {
   1245 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1246 		return rv;
   1247 	}
   1248 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1249 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1250 	/*
   1251 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1252 	 * used.
   1253 	 */
   1254 	if_register(ifp);
   1255 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1256 
   1257 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1258 
   1259 	/*
   1260 	 * Tell the upper layer(s) we support long frames.
   1261 	 */
   1262 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1263 
   1264 	/* Set capability flags */
   1265 	ifp->if_capabilities |= IFCAP_RXCSUM
   1266 			     |  IFCAP_TXCSUM
   1267 			     |  IFCAP_TSOv4
   1268 			     |  IFCAP_TSOv6
   1269 			     |  IFCAP_LRO;
   1270 	ifp->if_capenable = 0;
   1271 
   1272 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1273 	    		    |  ETHERCAP_VLAN_HWCSUM
   1274 	    		    |  ETHERCAP_JUMBO_MTU
   1275 	    		    |  ETHERCAP_VLAN_MTU;
   1276 
   1277 	/* Enable the above capabilities by default */
   1278 	ec->ec_capenable = ec->ec_capabilities;
   1279 
   1280 	/*
    1281 	 * Don't turn this on by default. If vlans are
    1282 	 * created on another pseudo device (e.g. lagg)
    1283 	 * then vlan events are not passed through, breaking
    1284 	 * operation, but with HW FILTER off it works. If
    1285 	 * you use vlans directly on the ixgbe driver you can
    1286 	 * enable this and get full hardware tag filtering.
   1287 	 */
   1288 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1289 
   1290 	/*
   1291 	 * Specify the media types supported by this adapter and register
   1292 	 * callbacks to update media and link information
   1293 	 */
   1294 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1295 	    ixgbe_media_status);
   1296 
   1297 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1298 	ixgbe_add_media_types(adapter);
   1299 
   1300 	/* Set autoselect media by default */
   1301 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1302 
   1303 	return (0);
   1304 } /* ixgbe_setup_interface */
   1305 
   1306 /************************************************************************
   1307  * ixgbe_add_media_types
   1308  ************************************************************************/
   1309 static void
   1310 ixgbe_add_media_types(struct adapter *adapter)
   1311 {
   1312 	struct ixgbe_hw *hw = &adapter->hw;
   1313 	device_t        dev = adapter->dev;
   1314 	u64             layer;
   1315 
   1316 	layer = adapter->phy_layer;
   1317 
   1318 #define	ADD(mm, dd)							\
   1319 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1320 
   1321 	/* Media types with matching NetBSD media defines */
   1322 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1323 		ADD(IFM_10G_T | IFM_FDX, 0);
   1324 	}
   1325 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1326 		ADD(IFM_1000_T | IFM_FDX, 0);
   1327 	}
   1328 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1329 		ADD(IFM_100_TX | IFM_FDX, 0);
   1330 	}
   1331 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1332 		ADD(IFM_10_T | IFM_FDX, 0);
   1333 	}
   1334 
   1335 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1336 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1337 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1338 	}
   1339 
   1340 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1341 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1342 		if (hw->phy.multispeed_fiber) {
   1343 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1344 		}
   1345 	}
   1346 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1347 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1348 		if (hw->phy.multispeed_fiber) {
   1349 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1350 		}
   1351 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1352 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1353 	}
   1354 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1355 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1356 	}
   1357 
   1358 #ifdef IFM_ETH_XTYPE
   1359 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1360 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1361 	}
   1362 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1363 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1364 	}
   1365 #else
   1366 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1367 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1368 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1369 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1370 	}
   1371 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1372 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1373 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1374 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1375 	}
   1376 #endif
   1377 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1378 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1379 	}
   1380 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1381 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1382 	}
   1383 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1384 		ADD(IFM_2500_T | IFM_FDX, 0);
   1385 	}
   1386 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1387 		ADD(IFM_5000_T | IFM_FDX, 0);
   1388 	}
   1389 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1390 		device_printf(dev, "Media supported: 1000baseBX\n");
   1391 	/* XXX no ifmedia_set? */
   1392 
   1393 	ADD(IFM_AUTO, 0);
   1394 
   1395 #undef ADD
   1396 } /* ixgbe_add_media_types */
   1397 
   1398 /************************************************************************
   1399  * ixgbe_is_sfp
   1400  ************************************************************************/
   1401 static inline bool
   1402 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1403 {
   1404 	switch (hw->mac.type) {
   1405 	case ixgbe_mac_82598EB:
   1406 		if (hw->phy.type == ixgbe_phy_nl)
   1407 			return TRUE;
   1408 		return FALSE;
   1409 	case ixgbe_mac_82599EB:
   1410 		switch (hw->mac.ops.get_media_type(hw)) {
   1411 		case ixgbe_media_type_fiber:
   1412 		case ixgbe_media_type_fiber_qsfp:
   1413 			return TRUE;
   1414 		default:
   1415 			return FALSE;
   1416 		}
   1417 	case ixgbe_mac_X550EM_x:
   1418 	case ixgbe_mac_X550EM_a:
   1419 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1420 			return TRUE;
   1421 		return FALSE;
   1422 	default:
   1423 		return FALSE;
   1424 	}
   1425 } /* ixgbe_is_sfp */
   1426 
   1427 /************************************************************************
   1428  * ixgbe_config_link
   1429  ************************************************************************/
   1430 static void
   1431 ixgbe_config_link(struct adapter *adapter)
   1432 {
   1433 	struct ixgbe_hw *hw = &adapter->hw;
   1434 	u32             autoneg, err = 0;
   1435 	bool            sfp, negotiate = false;
   1436 
   1437 	sfp = ixgbe_is_sfp(hw);
   1438 
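	/*
	 * For SFP+ ports, module setup (and, for multispeed fiber, speed
	 * selection) is deferred to the mod/msf softint handlers; for all
	 * other media the link is checked and set up directly here.
	 */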
   1439 	if (sfp) {
   1440 		if (hw->phy.multispeed_fiber) {
   1441 			hw->mac.ops.setup_sfp(hw);
   1442 			ixgbe_enable_tx_laser(hw);
   1443 			kpreempt_disable();
   1444 			softint_schedule(adapter->msf_si);
   1445 			kpreempt_enable();
   1446 		} else {
   1447 			kpreempt_disable();
   1448 			softint_schedule(adapter->mod_si);
   1449 			kpreempt_enable();
   1450 		}
   1451 	} else {
   1452 		if (hw->mac.ops.check_link)
   1453 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1454 			    &adapter->link_up, FALSE);
   1455 		if (err)
   1456 			goto out;
   1457 		autoneg = hw->phy.autoneg_advertised;
   1458 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1459 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1460 			    &negotiate);
   1461 		if (err)
   1462 			goto out;
   1463 		if (hw->mac.ops.setup_link)
    1464 			err = hw->mac.ops.setup_link(hw, autoneg,
   1465 			    adapter->link_up);
   1466 	}
   1467 out:
   1468 
   1469 	return;
   1470 } /* ixgbe_config_link */
   1471 
   1472 /************************************************************************
   1473  * ixgbe_update_stats_counters - Update board statistics counters.
   1474  ************************************************************************/
   1475 static void
   1476 ixgbe_update_stats_counters(struct adapter *adapter)
   1477 {
   1478 	struct ifnet          *ifp = adapter->ifp;
   1479 	struct ixgbe_hw       *hw = &adapter->hw;
   1480 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1481 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1482 	u64                   total_missed_rx = 0;
   1483 	uint64_t              crcerrs, rlec;
   1484 
   1485 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1486 	stats->crcerrs.ev_count += crcerrs;
   1487 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1488 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1489 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1490 	if (hw->mac.type == ixgbe_mac_X550)
   1491 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1492 
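	/*
	 * The two loops below read every per-queue hardware register and
	 * fold registers beyond the number of active queues back into the
	 * active counters (j = i % num_queues), so no hardware count is
	 * dropped.
	 */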
   1493 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1494 		int j = i % adapter->num_queues;
   1495 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1496 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1497 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1498 	}
   1499 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1500 		uint32_t mp;
   1501 		int j = i % adapter->num_queues;
   1502 
   1503 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
    1504 		/* per-queue missed-packet total */
   1505 		stats->mpc[j].ev_count += mp;
   1506 		/* running comprehensive total for stats display */
   1507 		total_missed_rx += mp;
   1508 
   1509 		if (hw->mac.type == ixgbe_mac_82598EB)
   1510 			stats->rnbc[j].ev_count
   1511 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1512 
   1513 	}
   1514 	stats->mpctotal.ev_count += total_missed_rx;
   1515 
    1516 	/* Per the datasheet, MLFC and MRFC are valid only when the link is up at 10Gb/s */
   1517 	if ((adapter->link_active == TRUE)
   1518 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1519 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1520 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1521 	}
   1522 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1523 	stats->rlec.ev_count += rlec;
   1524 
   1525 	/* Hardware workaround, gprc counts missed packets */
   1526 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1527 
   1528 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1529 	stats->lxontxc.ev_count += lxon;
   1530 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1531 	stats->lxofftxc.ev_count += lxoff;
   1532 	total = lxon + lxoff;
   1533 
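	/*
	 * The transmit octet/packet counters appear to include XON/XOFF
	 * pause frames, so "total" frames of ETHER_MIN_LEN bytes each are
	 * subtracted here and below (gptc/mptc/ptc64).
	 */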
   1534 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1535 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1536 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1537 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1538 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1539 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1540 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1541 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1542 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1543 	} else {
   1544 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1545 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1546 		/* 82598 only has a counter in the high register */
   1547 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1548 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1549 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1550 	}
   1551 
   1552 	/*
   1553 	 * Workaround: mprc hardware is incorrectly counting
   1554 	 * broadcasts, so for now we subtract those.
   1555 	 */
   1556 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1557 	stats->bprc.ev_count += bprc;
   1558 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1559 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1560 
   1561 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1562 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1563 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1564 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1565 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1566 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1567 
   1568 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1569 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1570 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1571 
   1572 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1573 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1574 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1575 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1576 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1577 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1578 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1579 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1580 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1581 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1582 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1583 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1584 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1585 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1586 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1587 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1588 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1589 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    1590 	/* Only read FCoE counters on MACs that have them (82599 and later) */
   1591 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1592 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1593 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1594 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1595 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1596 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1597 	}
   1598 
   1599 	/* Fill out the OS statistics structure */
   1600 	/*
   1601 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
    1602 	 * adapter->stats counters. This is required to make ifconfig -z
    1603 	 * (SIOCZIFDATA) work.
   1604 	 */
   1605 	ifp->if_collisions = 0;
   1606 
   1607 	/* Rx Errors */
   1608 	ifp->if_iqdrops += total_missed_rx;
   1609 	ifp->if_ierrors += crcerrs + rlec;
   1610 } /* ixgbe_update_stats_counters */
   1611 
   1612 /************************************************************************
   1613  * ixgbe_add_hw_stats
   1614  *
   1615  *   Add sysctl variables, one per statistic, to the system.
   1616  ************************************************************************/
   1617 static void
   1618 ixgbe_add_hw_stats(struct adapter *adapter)
   1619 {
   1620 	device_t dev = adapter->dev;
   1621 	const struct sysctlnode *rnode, *cnode;
   1622 	struct sysctllog **log = &adapter->sysctllog;
   1623 	struct tx_ring *txr = adapter->tx_rings;
   1624 	struct rx_ring *rxr = adapter->rx_rings;
   1625 	struct ixgbe_hw *hw = &adapter->hw;
   1626 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1627 	const char *xname = device_xname(dev);
   1628 
   1629 	/* Driver Statistics */
   1630 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1631 	    NULL, xname, "Handled queue in softint");
   1632 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1633 	    NULL, xname, "Requeued in softint");
   1634 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1635 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1636 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1637 	    NULL, xname, "m_defrag() failed");
   1638 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1639 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1640 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1641 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1642 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1643 	    NULL, xname, "Driver tx dma hard fail other");
   1644 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1645 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1646 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1647 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1648 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1649 	    NULL, xname, "Watchdog timeouts");
   1650 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1651 	    NULL, xname, "TSO errors");
   1652 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1653 	    NULL, xname, "Link MSI-X IRQ Handled");
   1654 
   1655 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1656 		snprintf(adapter->queues[i].evnamebuf,
   1657 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1658 		    xname, i);
   1659 		snprintf(adapter->queues[i].namebuf,
   1660 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1661 
   1662 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1663 			aprint_error_dev(dev, "could not create sysctl root\n");
   1664 			break;
   1665 		}
   1666 
   1667 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1668 		    0, CTLTYPE_NODE,
   1669 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1670 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1671 			break;
   1672 
   1673 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1674 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1675 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1676 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1677 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1678 			break;
   1679 
   1680 #if 0 /* XXX msaitoh */
   1681 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1682 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1683 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1684 			NULL, 0, &(adapter->queues[i].irqs),
   1685 		    0, CTL_CREATE, CTL_EOL) != 0)
   1686 			break;
   1687 #endif
   1688 
   1689 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1690 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1691 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1692 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1693 		    0, CTL_CREATE, CTL_EOL) != 0)
   1694 			break;
   1695 
   1696 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1697 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1698 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1699 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1700 		    0, CTL_CREATE, CTL_EOL) != 0)
   1701 			break;
   1702 
   1703 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1704 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1705 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1706 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1707 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1708 		    NULL, adapter->queues[i].evnamebuf,
   1709 		    "Queue No Descriptor Available");
   1710 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1711 		    NULL, adapter->queues[i].evnamebuf,
   1712 		    "Queue Packets Transmitted");
   1713 #ifndef IXGBE_LEGACY_TX
   1714 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1715 		    NULL, adapter->queues[i].evnamebuf,
   1716 		    "Packets dropped in pcq");
   1717 #endif
   1718 
   1719 #ifdef LRO
   1720 		struct lro_ctrl *lro = &rxr->lro;
   1721 #endif /* LRO */
   1722 
   1723 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1724 		    CTLFLAG_READONLY,
   1725 		    CTLTYPE_INT,
   1726 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1727 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1728 		    CTL_CREATE, CTL_EOL) != 0)
   1729 			break;
   1730 
   1731 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1732 		    CTLFLAG_READONLY,
   1733 		    CTLTYPE_INT,
   1734 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1735 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1736 		    CTL_CREATE, CTL_EOL) != 0)
   1737 			break;
   1738 
   1739 		if (i < __arraycount(stats->mpc)) {
   1740 			evcnt_attach_dynamic(&stats->mpc[i],
   1741 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1742 			    "RX Missed Packet Count");
   1743 			if (hw->mac.type == ixgbe_mac_82598EB)
   1744 				evcnt_attach_dynamic(&stats->rnbc[i],
   1745 				    EVCNT_TYPE_MISC, NULL,
   1746 				    adapter->queues[i].evnamebuf,
   1747 				    "Receive No Buffers");
   1748 		}
   1749 		if (i < __arraycount(stats->pxontxc)) {
   1750 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1751 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1752 			    "pxontxc");
   1753 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1754 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1755 			    "pxonrxc");
   1756 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1757 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1758 			    "pxofftxc");
   1759 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1760 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1761 			    "pxoffrxc");
   1762 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1763 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1764 			    "pxon2offc");
   1765 		}
   1766 		if (i < __arraycount(stats->qprc)) {
   1767 			evcnt_attach_dynamic(&stats->qprc[i],
   1768 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1769 			    "qprc");
   1770 			evcnt_attach_dynamic(&stats->qptc[i],
   1771 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1772 			    "qptc");
   1773 			evcnt_attach_dynamic(&stats->qbrc[i],
   1774 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1775 			    "qbrc");
   1776 			evcnt_attach_dynamic(&stats->qbtc[i],
   1777 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1778 			    "qbtc");
   1779 			evcnt_attach_dynamic(&stats->qprdc[i],
   1780 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1781 			    "qprdc");
   1782 		}
   1783 
   1784 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1785 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1786 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1787 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1788 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1789 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1790 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1791 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1792 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1793 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1794 #ifdef LRO
   1795 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1796 				CTLFLAG_RD, &lro->lro_queued, 0,
   1797 				"LRO Queued");
   1798 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1799 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1800 				"LRO Flushed");
   1801 #endif /* LRO */
   1802 	}
   1803 
   1804 	/* MAC stats get their own sub node */
   1805 
   1806 	snprintf(stats->namebuf,
   1807 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1808 
   1809 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1810 	    stats->namebuf, "rx csum offload - IP");
   1811 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1812 	    stats->namebuf, "rx csum offload - L4");
   1813 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1814 	    stats->namebuf, "rx csum offload - IP bad");
   1815 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1816 	    stats->namebuf, "rx csum offload - L4 bad");
   1817 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1818 	    stats->namebuf, "Interrupt conditions zero");
   1819 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1820 	    stats->namebuf, "Legacy interrupts");
   1821 
   1822 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1823 	    stats->namebuf, "CRC Errors");
   1824 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1825 	    stats->namebuf, "Illegal Byte Errors");
   1826 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1827 	    stats->namebuf, "Byte Errors");
   1828 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1829 	    stats->namebuf, "MAC Short Packets Discarded");
   1830 	if (hw->mac.type >= ixgbe_mac_X550)
   1831 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1832 		    stats->namebuf, "Bad SFD");
   1833 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1834 	    stats->namebuf, "Total Packets Missed");
   1835 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1836 	    stats->namebuf, "MAC Local Faults");
   1837 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1838 	    stats->namebuf, "MAC Remote Faults");
   1839 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1840 	    stats->namebuf, "Receive Length Errors");
   1841 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1842 	    stats->namebuf, "Link XON Transmitted");
   1843 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1844 	    stats->namebuf, "Link XON Received");
   1845 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1846 	    stats->namebuf, "Link XOFF Transmitted");
   1847 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1848 	    stats->namebuf, "Link XOFF Received");
   1849 
   1850 	/* Packet Reception Stats */
   1851 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1852 	    stats->namebuf, "Total Octets Received");
   1853 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1854 	    stats->namebuf, "Good Octets Received");
   1855 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1856 	    stats->namebuf, "Total Packets Received");
   1857 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1858 	    stats->namebuf, "Good Packets Received");
   1859 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1860 	    stats->namebuf, "Multicast Packets Received");
   1861 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1862 	    stats->namebuf, "Broadcast Packets Received");
   1863 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1864 	    stats->namebuf, "64 byte frames received ");
   1865 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1866 	    stats->namebuf, "65-127 byte frames received");
   1867 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1868 	    stats->namebuf, "128-255 byte frames received");
   1869 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1870 	    stats->namebuf, "256-511 byte frames received");
   1871 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1872 	    stats->namebuf, "512-1023 byte frames received");
   1873 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    1874 	    stats->namebuf, "1024-1522 byte frames received");
   1875 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1876 	    stats->namebuf, "Receive Undersized");
   1877 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1878 	    stats->namebuf, "Fragmented Packets Received ");
   1879 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1880 	    stats->namebuf, "Oversized Packets Received");
   1881 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1882 	    stats->namebuf, "Received Jabber");
   1883 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1884 	    stats->namebuf, "Management Packets Received");
   1885 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1886 	    stats->namebuf, "Management Packets Dropped");
   1887 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1888 	    stats->namebuf, "Checksum Errors");
   1889 
   1890 	/* Packet Transmission Stats */
   1891 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1892 	    stats->namebuf, "Good Octets Transmitted");
   1893 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1894 	    stats->namebuf, "Total Packets Transmitted");
   1895 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "Good Packets Transmitted");
   1897 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "Broadcast Packets Transmitted");
   1899 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1900 	    stats->namebuf, "Multicast Packets Transmitted");
   1901 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Management Packets Transmitted");
   1903 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "64 byte frames transmitted ");
   1905 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "65-127 byte frames transmitted");
   1907 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "128-255 byte frames transmitted");
   1909 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1910 	    stats->namebuf, "256-511 byte frames transmitted");
   1911 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1912 	    stats->namebuf, "512-1023 byte frames transmitted");
   1913 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1915 } /* ixgbe_add_hw_stats */
   1916 
   1917 static void
   1918 ixgbe_clear_evcnt(struct adapter *adapter)
   1919 {
   1920 	struct tx_ring *txr = adapter->tx_rings;
   1921 	struct rx_ring *rxr = adapter->rx_rings;
   1922 	struct ixgbe_hw *hw = &adapter->hw;
   1923 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1924 
   1925 	adapter->handleq.ev_count = 0;
   1926 	adapter->req.ev_count = 0;
   1927 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1928 	adapter->mbuf_defrag_failed.ev_count = 0;
   1929 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1930 	adapter->einval_tx_dma_setup.ev_count = 0;
   1931 	adapter->other_tx_dma_setup.ev_count = 0;
   1932 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1933 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1934 	adapter->watchdog_events.ev_count = 0;
   1935 	adapter->tso_err.ev_count = 0;
   1936 	adapter->link_irq.ev_count = 0;
   1937 
   1938 	txr = adapter->tx_rings;
   1939 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1940 		adapter->queues[i].irqs.ev_count = 0;
   1941 		txr->no_desc_avail.ev_count = 0;
   1942 		txr->total_packets.ev_count = 0;
   1943 		txr->tso_tx.ev_count = 0;
   1944 #ifndef IXGBE_LEGACY_TX
   1945 		txr->pcq_drops.ev_count = 0;
   1946 #endif
   1947 
   1948 		if (i < __arraycount(stats->mpc)) {
   1949 			stats->mpc[i].ev_count = 0;
   1950 			if (hw->mac.type == ixgbe_mac_82598EB)
   1951 				stats->rnbc[i].ev_count = 0;
   1952 		}
   1953 		if (i < __arraycount(stats->pxontxc)) {
   1954 			stats->pxontxc[i].ev_count = 0;
   1955 			stats->pxonrxc[i].ev_count = 0;
   1956 			stats->pxofftxc[i].ev_count = 0;
   1957 			stats->pxoffrxc[i].ev_count = 0;
   1958 			stats->pxon2offc[i].ev_count = 0;
   1959 		}
   1960 		if (i < __arraycount(stats->qprc)) {
   1961 			stats->qprc[i].ev_count = 0;
   1962 			stats->qptc[i].ev_count = 0;
   1963 			stats->qbrc[i].ev_count = 0;
   1964 			stats->qbtc[i].ev_count = 0;
   1965 			stats->qprdc[i].ev_count = 0;
   1966 		}
   1967 
   1968 		rxr->rx_packets.ev_count = 0;
   1969 		rxr->rx_bytes.ev_count = 0;
   1970 		rxr->rx_copies.ev_count = 0;
   1971 		rxr->no_jmbuf.ev_count = 0;
   1972 		rxr->rx_discarded.ev_count = 0;
   1973 	}
   1974 	stats->ipcs.ev_count = 0;
   1975 	stats->l4cs.ev_count = 0;
   1976 	stats->ipcs_bad.ev_count = 0;
   1977 	stats->l4cs_bad.ev_count = 0;
   1978 	stats->intzero.ev_count = 0;
   1979 	stats->legint.ev_count = 0;
   1980 	stats->crcerrs.ev_count = 0;
   1981 	stats->illerrc.ev_count = 0;
   1982 	stats->errbc.ev_count = 0;
   1983 	stats->mspdc.ev_count = 0;
   1984 	stats->mbsdc.ev_count = 0;
   1985 	stats->mpctotal.ev_count = 0;
   1986 	stats->mlfc.ev_count = 0;
   1987 	stats->mrfc.ev_count = 0;
   1988 	stats->rlec.ev_count = 0;
   1989 	stats->lxontxc.ev_count = 0;
   1990 	stats->lxonrxc.ev_count = 0;
   1991 	stats->lxofftxc.ev_count = 0;
   1992 	stats->lxoffrxc.ev_count = 0;
   1993 
   1994 	/* Packet Reception Stats */
   1995 	stats->tor.ev_count = 0;
   1996 	stats->gorc.ev_count = 0;
   1997 	stats->tpr.ev_count = 0;
   1998 	stats->gprc.ev_count = 0;
   1999 	stats->mprc.ev_count = 0;
   2000 	stats->bprc.ev_count = 0;
   2001 	stats->prc64.ev_count = 0;
   2002 	stats->prc127.ev_count = 0;
   2003 	stats->prc255.ev_count = 0;
   2004 	stats->prc511.ev_count = 0;
   2005 	stats->prc1023.ev_count = 0;
   2006 	stats->prc1522.ev_count = 0;
   2007 	stats->ruc.ev_count = 0;
   2008 	stats->rfc.ev_count = 0;
   2009 	stats->roc.ev_count = 0;
   2010 	stats->rjc.ev_count = 0;
   2011 	stats->mngprc.ev_count = 0;
   2012 	stats->mngpdc.ev_count = 0;
   2013 	stats->xec.ev_count = 0;
   2014 
   2015 	/* Packet Transmission Stats */
   2016 	stats->gotc.ev_count = 0;
   2017 	stats->tpt.ev_count = 0;
   2018 	stats->gptc.ev_count = 0;
   2019 	stats->bptc.ev_count = 0;
   2020 	stats->mptc.ev_count = 0;
   2021 	stats->mngptc.ev_count = 0;
   2022 	stats->ptc64.ev_count = 0;
   2023 	stats->ptc127.ev_count = 0;
   2024 	stats->ptc255.ev_count = 0;
   2025 	stats->ptc511.ev_count = 0;
   2026 	stats->ptc1023.ev_count = 0;
   2027 	stats->ptc1522.ev_count = 0;
   2028 }
   2029 
   2030 /************************************************************************
   2031  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2032  *
   2033  *   Retrieves the TDH value from the hardware
   2034  ************************************************************************/
   2035 static int
   2036 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2037 {
   2038 	struct sysctlnode node = *rnode;
   2039 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2040 	uint32_t val;
   2041 
   2042 	if (!txr)
   2043 		return (0);
   2044 
   2045 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2046 	node.sysctl_data = &val;
   2047 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2048 } /* ixgbe_sysctl_tdh_handler */
   2049 
   2050 /************************************************************************
   2051  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2052  *
   2053  *   Retrieves the TDT value from the hardware
   2054  ************************************************************************/
   2055 static int
   2056 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2057 {
   2058 	struct sysctlnode node = *rnode;
   2059 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2060 	uint32_t val;
   2061 
   2062 	if (!txr)
   2063 		return (0);
   2064 
   2065 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2066 	node.sysctl_data = &val;
   2067 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2068 } /* ixgbe_sysctl_tdt_handler */
   2069 
   2070 /************************************************************************
   2071  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2072  *
   2073  *   Retrieves the RDH value from the hardware
   2074  ************************************************************************/
   2075 static int
   2076 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2077 {
   2078 	struct sysctlnode node = *rnode;
   2079 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2080 	uint32_t val;
   2081 
   2082 	if (!rxr)
   2083 		return (0);
   2084 
   2085 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2086 	node.sysctl_data = &val;
   2087 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2088 } /* ixgbe_sysctl_rdh_handler */
   2089 
   2090 /************************************************************************
   2091  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2092  *
   2093  *   Retrieves the RDT value from the hardware
   2094  ************************************************************************/
   2095 static int
   2096 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2097 {
   2098 	struct sysctlnode node = *rnode;
   2099 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2100 	uint32_t val;
   2101 
   2102 	if (!rxr)
   2103 		return (0);
   2104 
   2105 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2106 	node.sysctl_data = &val;
   2107 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2108 } /* ixgbe_sysctl_rdt_handler */
   2109 
   2110 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2111 /************************************************************************
   2112  * ixgbe_register_vlan
   2113  *
    2114  *   Run via the vlan config EVENT; it enables us to use the
    2115  *   HW filter table since we can get the vlan id. This
    2116  *   just creates the entry in the soft version of the
    2117  *   VFTA; init will repopulate the real table.
   2118  ************************************************************************/
   2119 static void
   2120 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2121 {
   2122 	struct adapter	*adapter = ifp->if_softc;
   2123 	u16		index, bit;
   2124 
   2125 	if (ifp->if_softc != arg)   /* Not our event */
   2126 		return;
   2127 
   2128 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2129 		return;
   2130 
   2131 	IXGBE_CORE_LOCK(adapter);
   2132 	index = (vtag >> 5) & 0x7F;
   2133 	bit = vtag & 0x1F;
   2134 	adapter->shadow_vfta[index] |= (1 << bit);
   2135 	ixgbe_setup_vlan_hw_support(adapter);
   2136 	IXGBE_CORE_UNLOCK(adapter);
   2137 } /* ixgbe_register_vlan */
   2138 
   2139 /************************************************************************
   2140  * ixgbe_unregister_vlan
   2141  *
   2142  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2143  ************************************************************************/
   2144 static void
   2145 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2146 {
   2147 	struct adapter	*adapter = ifp->if_softc;
   2148 	u16		index, bit;
   2149 
   2150 	if (ifp->if_softc != arg)
   2151 		return;
   2152 
   2153 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2154 		return;
   2155 
   2156 	IXGBE_CORE_LOCK(adapter);
   2157 	index = (vtag >> 5) & 0x7F;
   2158 	bit = vtag & 0x1F;
   2159 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2160 	/* Re-init to load the changes */
   2161 	ixgbe_setup_vlan_hw_support(adapter);
   2162 	IXGBE_CORE_UNLOCK(adapter);
   2163 } /* ixgbe_unregister_vlan */
   2164 #endif
   2165 
   2166 static void
   2167 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2168 {
   2169 	struct ethercom *ec = &adapter->osdep.ec;
   2170 	struct ixgbe_hw *hw = &adapter->hw;
   2171 	struct rx_ring	*rxr;
   2172 	int             i;
   2173 	u32		ctrl;
   2174 
   2175 
   2176 	/*
    2177 	 * We get here via init_locked, meaning a soft reset;
    2178 	 * that has already cleared the VFTA and other state,
    2179 	 * so if no VLANs have been registered there is
    2180 	 * nothing to do.
   2181 	 */
   2182 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2183 		return;
   2184 
   2185 	/* Setup the queues for vlans */
   2186 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2187 		for (i = 0; i < adapter->num_queues; i++) {
   2188 			rxr = &adapter->rx_rings[i];
   2189 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
    2190 			/* On 82599 and later the VLAN strip enable is per-queue in RXDCTL */
   2191 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2192 				ctrl |= IXGBE_RXDCTL_VME;
   2193 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2194 			}
   2195 			rxr->vtag_strip = TRUE;
   2196 		}
   2197 	}
   2198 
   2199 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2200 		return;
   2201 	/*
    2202 	 * A soft reset zeroes out the VFTA, so
   2203 	 * we need to repopulate it now.
   2204 	 */
   2205 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2206 		if (adapter->shadow_vfta[i] != 0)
   2207 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2208 			    adapter->shadow_vfta[i]);
   2209 
   2210 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2211 	/* Enable the Filter Table if enabled */
   2212 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2213 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2214 		ctrl |= IXGBE_VLNCTRL_VFE;
   2215 	}
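	/*
	 * On 82598 VLAN tag stripping is enabled globally via VLNCTRL_VME
	 * below, rather than per-queue in RXDCTL as on later MACs.
	 */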
   2216 	if (hw->mac.type == ixgbe_mac_82598EB)
   2217 		ctrl |= IXGBE_VLNCTRL_VME;
   2218 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2219 } /* ixgbe_setup_vlan_hw_support */
   2220 
   2221 /************************************************************************
   2222  * ixgbe_get_slot_info
   2223  *
   2224  *   Get the width and transaction speed of
   2225  *   the slot this adapter is plugged into.
   2226  ************************************************************************/
   2227 static void
   2228 ixgbe_get_slot_info(struct adapter *adapter)
   2229 {
   2230 	device_t		dev = adapter->dev;
   2231 	struct ixgbe_hw		*hw = &adapter->hw;
    2232 	u32			offset;
    2233 //	struct ixgbe_mac_info	*mac = &hw->mac;
    2234 	u32			link;
    2235 	int			bus_info_valid = TRUE;
   2236 
   2237 	/* Some devices are behind an internal bridge */
   2238 	switch (hw->device_id) {
   2239 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2240 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2241 		goto get_parent_info;
   2242 	default:
   2243 		break;
   2244 	}
   2245 
   2246 	ixgbe_get_bus_info(hw);
   2247 
   2248 	/*
    2249 	 * Some devices don't use PCI-E; for those there is nothing
    2250 	 * useful to report, so skip the "Unknown" bus speed and width.
   2251 	 */
   2252 	switch (hw->mac.type) {
   2253 	case ixgbe_mac_X550EM_x:
   2254 	case ixgbe_mac_X550EM_a:
   2255 		return;
   2256 	default:
   2257 		goto display;
   2258 	}
   2259 
   2260 get_parent_info:
   2261 	/*
   2262 	 * For the Quad port adapter we need to parse back
   2263 	 * up the PCI tree to find the speed of the expansion
   2264 	 * slot into which this adapter is plugged. A bit more work.
   2265 	 */
   2266 	dev = device_parent(device_parent(dev));
   2267 #if 0
   2268 #ifdef IXGBE_DEBUG
   2269 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2270 	    pci_get_slot(dev), pci_get_function(dev));
   2271 #endif
   2272 	dev = device_parent(device_parent(dev));
   2273 #ifdef IXGBE_DEBUG
   2274 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2275 	    pci_get_slot(dev), pci_get_function(dev));
   2276 #endif
   2277 #endif
   2278 	/* Now get the PCI Express Capabilities offset */
   2279 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
    2280 	    PCI_CAP_PCIEXPRESS, &offset, NULL) == 0) {
   2281 		/*
   2282 		 * Hmm...can't get PCI-Express capabilities.
   2283 		 * Falling back to default method.
   2284 		 */
   2285 		bus_info_valid = FALSE;
   2286 		ixgbe_get_bus_info(hw);
   2287 		goto display;
   2288 	}
    2289 	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
   2290 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2291 	    offset + PCIE_LCSR);
   2292 	ixgbe_set_pci_config_data_generic(hw, link >> 16);
   2293 
   2294 display:
   2295 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2296 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2297 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2298 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2299 	     "Unknown"),
   2300 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2301 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2302 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2303 	     "Unknown"));
   2304 
   2305 	if (bus_info_valid) {
   2306 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2307 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2308 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2309 			device_printf(dev, "PCI-Express bandwidth available"
   2310 			    " for this card\n     is not sufficient for"
   2311 			    " optimal performance.\n");
   2312 			device_printf(dev, "For optimal performance a x8 "
   2313 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2314 		}
   2315 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2316 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2317 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2318 			device_printf(dev, "PCI-Express bandwidth available"
   2319 			    " for this card\n     is not sufficient for"
   2320 			    " optimal performance.\n");
   2321 			device_printf(dev, "For optimal performance a x8 "
   2322 			    "PCIE Gen3 slot is required.\n");
   2323 		}
   2324 	} else
    2325 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
   2326 
   2327 	return;
   2328 } /* ixgbe_get_slot_info */
   2329 
   2330 /************************************************************************
   2331  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2332  ************************************************************************/
   2333 static inline void
   2334 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2335 {
   2336 	struct ixgbe_hw *hw = &adapter->hw;
   2337 	u64             queue = (u64)(1ULL << vector);
   2338 	u32             mask;
   2339 
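	/*
	 * 82598 has a single 32-bit EIMS register; later MACs spread the
	 * per-queue enable bits across EIMS_EX(0) (vectors 0-31) and
	 * EIMS_EX(1) (vectors 32-63), so split the 64-bit mask.
	 */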
   2340 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2341 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2342 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2343 	} else {
   2344 		mask = (queue & 0xFFFFFFFF);
   2345 		if (mask)
   2346 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2347 		mask = (queue >> 32);
   2348 		if (mask)
   2349 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2350 	}
   2351 } /* ixgbe_enable_queue */
   2352 
   2353 /************************************************************************
   2354  * ixgbe_disable_queue
   2355  ************************************************************************/
   2356 static inline void
   2357 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2358 {
   2359 	struct ixgbe_hw *hw = &adapter->hw;
   2360 	u64             queue = (u64)(1ULL << vector);
   2361 	u32             mask;
   2362 
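	/*
	 * Mirror of ixgbe_enable_queue(): clear the same per-queue bits
	 * via EIMC / EIMC_EX instead of setting them.
	 */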
   2363 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2364 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2365 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2366 	} else {
   2367 		mask = (queue & 0xFFFFFFFF);
   2368 		if (mask)
   2369 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2370 		mask = (queue >> 32);
   2371 		if (mask)
   2372 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2373 	}
   2374 } /* ixgbe_disable_queue */
   2375 
   2376 /************************************************************************
   2377  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2378  ************************************************************************/
   2379 static int
   2380 ixgbe_msix_que(void *arg)
   2381 {
   2382 	struct ix_queue	*que = arg;
   2383 	struct adapter  *adapter = que->adapter;
   2384 	struct ifnet    *ifp = adapter->ifp;
   2385 	struct tx_ring	*txr = que->txr;
   2386 	struct rx_ring	*rxr = que->rxr;
   2387 	bool		more;
   2388 	u32		newitr = 0;
   2389 
   2390 	/* Protect against spurious interrupts */
   2391 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2392 		return 0;
   2393 
   2394 	ixgbe_disable_queue(adapter, que->msix);
   2395 	++que->irqs.ev_count;
   2396 
   2397 #ifdef __NetBSD__
   2398 	/* Don't run ixgbe_rxeof in interrupt context */
   2399 	more = true;
   2400 #else
   2401 	more = ixgbe_rxeof(que);
   2402 #endif
   2403 
   2404 	IXGBE_TX_LOCK(txr);
   2405 	ixgbe_txeof(txr);
   2406 	IXGBE_TX_UNLOCK(txr);
   2407 
   2408 	/* Do AIM now? */
   2409 
   2410 	if (adapter->enable_aim == false)
   2411 		goto no_calc;
   2412 	/*
   2413 	 * Do Adaptive Interrupt Moderation:
   2414 	 *  - Write out last calculated setting
   2415 	 *  - Calculate based on average size over
   2416 	 *    the last interval.
   2417 	 */
   2418 	if (que->eitr_setting)
   2419 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2420 		    que->eitr_setting);
   2421 
   2422 	que->eitr_setting = 0;
   2423 
   2424 	/* Idle, do nothing */
    2425 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2426 		goto no_calc;
   2427 
   2428 	if ((txr->bytes) && (txr->packets))
   2429 		newitr = txr->bytes/txr->packets;
   2430 	if ((rxr->bytes) && (rxr->packets))
   2431 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2432 	newitr += 24; /* account for hardware frame, crc */
   2433 
   2434 	/* set an upper boundary */
   2435 	newitr = min(newitr, 3000);
   2436 
   2437 	/* Be nice to the mid range */
   2438 	if ((newitr > 300) && (newitr < 1200))
   2439 		newitr = (newitr / 3);
   2440 	else
   2441 		newitr = (newitr / 2);
   2442 
    2443 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2444 		newitr |= newitr << 16;
    2445 	else
    2446 		newitr |= IXGBE_EITR_CNT_WDIS;
    2447 
    2448 	/* save for next interrupt */
    2449 	que->eitr_setting = newitr;
   2450 
   2451 	/* Reset state */
   2452 	txr->bytes = 0;
   2453 	txr->packets = 0;
   2454 	rxr->bytes = 0;
   2455 	rxr->packets = 0;
   2456 
   2457 no_calc:
   2458 	if (more)
   2459 		softint_schedule(que->que_si);
   2460 	else
   2461 		ixgbe_enable_queue(adapter, que->msix);
   2462 
   2463 	return 1;
   2464 } /* ixgbe_msix_que */
   2465 
   2466 /************************************************************************
   2467  * ixgbe_media_status - Media Ioctl callback
   2468  *
   2469  *   Called whenever the user queries the status of
   2470  *   the interface using ifconfig.
   2471  ************************************************************************/
   2472 static void
   2473 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2474 {
   2475 	struct adapter *adapter = ifp->if_softc;
   2476 	struct ixgbe_hw *hw = &adapter->hw;
   2477 	int layer;
   2478 
   2479 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2480 	IXGBE_CORE_LOCK(adapter);
   2481 	ixgbe_update_link_status(adapter);
   2482 
   2483 	ifmr->ifm_status = IFM_AVALID;
   2484 	ifmr->ifm_active = IFM_ETHER;
   2485 
   2486 	if (!adapter->link_active) {
   2487 		ifmr->ifm_active |= IFM_NONE;
   2488 		IXGBE_CORE_UNLOCK(adapter);
   2489 		return;
   2490 	}
   2491 
   2492 	ifmr->ifm_status |= IFM_ACTIVE;
   2493 	layer = adapter->phy_layer;
   2494 
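	/*
	 * Map the active physical layer and the negotiated link speed to
	 * an ifmedia subtype; each block below covers one family of
	 * physical layers.
	 */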
   2495 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2496 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2497 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2498 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2499 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2500 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2501 		switch (adapter->link_speed) {
   2502 		case IXGBE_LINK_SPEED_10GB_FULL:
   2503 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2504 			break;
   2505 		case IXGBE_LINK_SPEED_5GB_FULL:
   2506 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2507 			break;
   2508 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2509 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2510 			break;
   2511 		case IXGBE_LINK_SPEED_1GB_FULL:
   2512 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2513 			break;
   2514 		case IXGBE_LINK_SPEED_100_FULL:
   2515 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2516 			break;
   2517 		case IXGBE_LINK_SPEED_10_FULL:
   2518 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2519 			break;
   2520 		}
   2521 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2522 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2523 		switch (adapter->link_speed) {
   2524 		case IXGBE_LINK_SPEED_10GB_FULL:
   2525 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2526 			break;
   2527 		}
   2528 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2529 		switch (adapter->link_speed) {
   2530 		case IXGBE_LINK_SPEED_10GB_FULL:
   2531 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2532 			break;
   2533 		case IXGBE_LINK_SPEED_1GB_FULL:
   2534 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2535 			break;
   2536 		}
   2537 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2538 		switch (adapter->link_speed) {
   2539 		case IXGBE_LINK_SPEED_10GB_FULL:
   2540 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2541 			break;
   2542 		case IXGBE_LINK_SPEED_1GB_FULL:
   2543 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2544 			break;
   2545 		}
   2546 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2547 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2548 		switch (adapter->link_speed) {
   2549 		case IXGBE_LINK_SPEED_10GB_FULL:
   2550 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2551 			break;
   2552 		case IXGBE_LINK_SPEED_1GB_FULL:
   2553 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2554 			break;
   2555 		}
   2556 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2557 		switch (adapter->link_speed) {
   2558 		case IXGBE_LINK_SPEED_10GB_FULL:
   2559 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2560 			break;
   2561 		}
   2562 	/*
   2563 	 * XXX: These need to use the proper media types once
   2564 	 * they're added.
   2565 	 */
   2566 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2567 		switch (adapter->link_speed) {
   2568 		case IXGBE_LINK_SPEED_10GB_FULL:
   2569 #ifndef IFM_ETH_XTYPE
   2570 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2571 #else
   2572 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2573 #endif
   2574 			break;
   2575 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2576 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2577 			break;
   2578 		case IXGBE_LINK_SPEED_1GB_FULL:
   2579 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2580 			break;
   2581 		}
   2582 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2583 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2584 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2585 		switch (adapter->link_speed) {
   2586 		case IXGBE_LINK_SPEED_10GB_FULL:
   2587 #ifndef IFM_ETH_XTYPE
   2588 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2589 #else
   2590 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2591 #endif
   2592 			break;
   2593 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2594 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2595 			break;
   2596 		case IXGBE_LINK_SPEED_1GB_FULL:
   2597 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2598 			break;
   2599 		}
   2600 
   2601 	/* If nothing is recognized... */
   2602 #if 0
   2603 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2604 		ifmr->ifm_active |= IFM_UNKNOWN;
   2605 #endif
   2606 
   2607 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2608 
   2609 	/* Display current flow control setting used on link */
   2610 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2611 	    hw->fc.current_mode == ixgbe_fc_full)
   2612 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2613 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2614 	    hw->fc.current_mode == ixgbe_fc_full)
   2615 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2616 
   2617 	IXGBE_CORE_UNLOCK(adapter);
   2618 
   2619 	return;
   2620 } /* ixgbe_media_status */
   2621 
   2622 /************************************************************************
   2623  * ixgbe_media_change - Media Ioctl callback
   2624  *
   2625  *   Called when the user changes speed/duplex using
    2626  *   media/mediaopt option with ifconfig.
   2627  ************************************************************************/
   2628 static int
   2629 ixgbe_media_change(struct ifnet *ifp)
   2630 {
   2631 	struct adapter   *adapter = ifp->if_softc;
   2632 	struct ifmedia   *ifm = &adapter->media;
   2633 	struct ixgbe_hw  *hw = &adapter->hw;
   2634 	ixgbe_link_speed speed = 0;
   2635 	ixgbe_link_speed link_caps = 0;
   2636 	bool negotiate = false;
   2637 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2638 
   2639 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2640 
   2641 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2642 		return (EINVAL);
   2643 
   2644 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2645 		return (ENODEV);
   2646 
   2647 	/*
   2648 	 * We don't actually need to check against the supported
   2649 	 * media types of the adapter; ifmedia will take care of
   2650 	 * that for us.
   2651 	 */
   2652 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2653 	case IFM_AUTO:
   2654 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2655 		    &negotiate);
   2656 		if (err != IXGBE_SUCCESS) {
   2657 			device_printf(adapter->dev, "Unable to determine "
   2658 			    "supported advertise speeds\n");
   2659 			return (ENODEV);
   2660 		}
   2661 		speed |= link_caps;
   2662 		break;
   2663 	case IFM_10G_T:
   2664 	case IFM_10G_LRM:
   2665 	case IFM_10G_LR:
   2666 	case IFM_10G_TWINAX:
   2667 #ifndef IFM_ETH_XTYPE
   2668 	case IFM_10G_SR: /* KR, too */
   2669 	case IFM_10G_CX4: /* KX4 */
   2670 #else
   2671 	case IFM_10G_KR:
   2672 	case IFM_10G_KX4:
   2673 #endif
   2674 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2675 		break;
   2676 	case IFM_5000_T:
   2677 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2678 		break;
   2679 	case IFM_2500_T:
   2680 	case IFM_2500_KX:
   2681 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2682 		break;
   2683 	case IFM_1000_T:
   2684 	case IFM_1000_LX:
   2685 	case IFM_1000_SX:
   2686 	case IFM_1000_KX:
   2687 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2688 		break;
   2689 	case IFM_100_TX:
   2690 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2691 		break;
   2692 	case IFM_10_T:
   2693 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2694 		break;
   2695 	default:
   2696 		goto invalid;
   2697 	}
   2698 
   2699 	hw->mac.autotry_restart = TRUE;
   2700 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2701 	adapter->advertise = 0;
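	/*
	 * Record the explicitly requested speeds in adapter->advertise.
	 * The bit encoding used below is: 0x1 = 100M, 0x2 = 1G,
	 * 0x4 = 10G, 0x8 = 10M, 0x10 = 2.5G, 0x20 = 5G (presumably the
	 * same encoding as the advertise_speed sysctl).
	 */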
   2702 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2703 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2704 			adapter->advertise |= 1 << 2;
   2705 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2706 			adapter->advertise |= 1 << 1;
   2707 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2708 			adapter->advertise |= 1 << 0;
   2709 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2710 			adapter->advertise |= 1 << 3;
   2711 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2712 			adapter->advertise |= 1 << 4;
   2713 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2714 			adapter->advertise |= 1 << 5;
   2715 	}
   2716 
   2717 	return (0);
   2718 
   2719 invalid:
   2720 	device_printf(adapter->dev, "Invalid media type!\n");
   2721 
   2722 	return (EINVAL);
   2723 } /* ixgbe_media_change */
   2724 
   2725 /************************************************************************
   2726  * ixgbe_set_promisc
   2727  ************************************************************************/
   2728 static void
   2729 ixgbe_set_promisc(struct adapter *adapter)
   2730 {
   2731 	struct ifnet *ifp = adapter->ifp;
   2732 	int          mcnt = 0;
   2733 	u32          rctl;
   2734 	struct ether_multi *enm;
   2735 	struct ether_multistep step;
   2736 	struct ethercom *ec = &adapter->osdep.ec;
   2737 
   2738 	KASSERT(mutex_owned(&adapter->core_mtx));
   2739 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2740 	rctl &= (~IXGBE_FCTRL_UPE);
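	/*
	 * Count the joined multicast addresses; only when the list fits
	 * in the hardware filter (and ALLMULTI is not set) is multicast
	 * promiscuous (MPE) cleared below.
	 */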
   2741 	if (ifp->if_flags & IFF_ALLMULTI)
   2742 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2743 	else {
   2744 		ETHER_LOCK(ec);
   2745 		ETHER_FIRST_MULTI(step, ec, enm);
   2746 		while (enm != NULL) {
   2747 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2748 				break;
   2749 			mcnt++;
   2750 			ETHER_NEXT_MULTI(step, enm);
   2751 		}
   2752 		ETHER_UNLOCK(ec);
   2753 	}
   2754 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2755 		rctl &= (~IXGBE_FCTRL_MPE);
   2756 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2757 
   2758 	if (ifp->if_flags & IFF_PROMISC) {
   2759 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2760 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2761 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2762 		rctl |= IXGBE_FCTRL_MPE;
   2763 		rctl &= ~IXGBE_FCTRL_UPE;
   2764 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2765 	}
   2766 } /* ixgbe_set_promisc */
   2767 
   2768 /************************************************************************
   2769  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2770  ************************************************************************/
   2771 static int
   2772 ixgbe_msix_link(void *arg)
   2773 {
   2774 	struct adapter	*adapter = arg;
   2775 	struct ixgbe_hw *hw = &adapter->hw;
   2776 	u32		eicr, eicr_mask;
   2777 	s32             retval;
   2778 
   2779 	++adapter->link_irq.ev_count;
   2780 
   2781 	/* Pause other interrupts */
   2782 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2783 
   2784 	/* First get the cause */
   2785 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2786 	/* Be sure the queue bits are not cleared */
   2787 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2788 	/* Clear interrupt with write */
   2789 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2790 
   2791 	/* Link status change */
   2792 	if (eicr & IXGBE_EICR_LSC) {
   2793 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2794 		softint_schedule(adapter->link_si);
   2795 	}
   2796 
   2797 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2798 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2799 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2800 			/* This is probably overkill :) */
    2801 			if (atomic_cas_uint(&adapter->fdir_reinit, 0, 1) != 0)
    2802 				return 1;
   2803 			/* Disable the interrupt */
   2804 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2805 			softint_schedule(adapter->fdir_si);
   2806 		}
   2807 
   2808 		if (eicr & IXGBE_EICR_ECC) {
   2809 			device_printf(adapter->dev,
   2810 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2811 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2812 		}
   2813 
   2814 		/* Check for over temp condition */
   2815 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2816 			switch (adapter->hw.mac.type) {
   2817 			case ixgbe_mac_X550EM_a:
   2818 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2819 					break;
   2820 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2821 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2822 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2823 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2824 				retval = hw->phy.ops.check_overtemp(hw);
   2825 				if (retval != IXGBE_ERR_OVERTEMP)
   2826 					break;
   2827 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2828 				device_printf(adapter->dev, "System shutdown required!\n");
   2829 				break;
   2830 			default:
   2831 				if (!(eicr & IXGBE_EICR_TS))
   2832 					break;
   2833 				retval = hw->phy.ops.check_overtemp(hw);
   2834 				if (retval != IXGBE_ERR_OVERTEMP)
   2835 					break;
   2836 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2837 				device_printf(adapter->dev, "System shutdown required!\n");
   2838 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2839 				break;
   2840 			}
   2841 		}
   2842 
   2843 		/* Check for VF message */
   2844 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2845 		    (eicr & IXGBE_EICR_MAILBOX))
   2846 			softint_schedule(adapter->mbx_si);
   2847 	}
   2848 
   2849 	if (ixgbe_is_sfp(hw)) {
   2850 		/* Pluggable optics-related interrupt */
   2851 		if (hw->mac.type >= ixgbe_mac_X540)
   2852 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2853 		else
   2854 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2855 
   2856 		if (eicr & eicr_mask) {
   2857 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2858 			softint_schedule(adapter->mod_si);
   2859 		}
   2860 
   2861 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2862 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2863 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2864 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2865 			softint_schedule(adapter->msf_si);
   2866 		}
   2867 	}
   2868 
   2869 	/* Check for fan failure */
   2870 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2871 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2872 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2873 	}
   2874 
   2875 	/* External PHY interrupt */
   2876 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2877 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2878 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2879 		softint_schedule(adapter->phy_si);
    2880 	}
   2881 
   2882 	/* Re-enable other interrupts */
   2883 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2884 	return 1;
   2885 } /* ixgbe_msix_link */
   2886 
   2887 /************************************************************************
   2888  * ixgbe_sysctl_interrupt_rate_handler
   2889  ************************************************************************/
   2890 static int
   2891 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2892 {
   2893 	struct sysctlnode node = *rnode;
   2894 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2895 	uint32_t reg, usec, rate;
   2896 	int error;
   2897 
   2898 	if (que == NULL)
   2899 		return 0;
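         	/*
         	 * The EITR interval field occupies bits [11:3] and counts 2us
         	 * units, so an interval of N throttles the queue to roughly
         	 * 500000/N interrupts per second.  The write-back below uses
         	 * 4000000/rate, which is the same value already shifted into
         	 * place.
         	 */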
   2900 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2901 	usec = ((reg & 0x0FF8) >> 3);
   2902 	if (usec > 0)
   2903 		rate = 500000 / usec;
   2904 	else
   2905 		rate = 0;
   2906 	node.sysctl_data = &rate;
   2907 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2908 	if (error || newp == NULL)
   2909 		return error;
   2910 	reg &= ~0xfff; /* default, no limitation */
   2911 	ixgbe_max_interrupt_rate = 0;
   2912 	if (rate > 0 && rate < 500000) {
   2913 		if (rate < 1000)
   2914 			rate = 1000;
   2915 		ixgbe_max_interrupt_rate = rate;
   2916 		reg |= ((4000000/rate) & 0xff8);
   2917 	}
   2918 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2919 
   2920 	return (0);
   2921 } /* ixgbe_sysctl_interrupt_rate_handler */
   2922 
   2923 const struct sysctlnode *
   2924 ixgbe_sysctl_instance(struct adapter *adapter)
   2925 {
   2926 	const char *dvname;
   2927 	struct sysctllog **log;
   2928 	int rc;
   2929 	const struct sysctlnode *rnode;
   2930 
   2931 	if (adapter->sysctltop != NULL)
   2932 		return adapter->sysctltop;
   2933 
   2934 	log = &adapter->sysctllog;
   2935 	dvname = device_xname(adapter->dev);
   2936 
   2937 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2938 	    0, CTLTYPE_NODE, dvname,
   2939 	    SYSCTL_DESCR("ixgbe information and settings"),
   2940 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2941 		goto err;
   2942 
   2943 	return rnode;
   2944 err:
   2945 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2946 	return NULL;
   2947 }
   2948 
   2949 /************************************************************************
   2950  * ixgbe_add_device_sysctls
   2951  ************************************************************************/
   2952 static void
   2953 ixgbe_add_device_sysctls(struct adapter *adapter)
   2954 {
   2955 	device_t               dev = adapter->dev;
   2956 	struct ixgbe_hw        *hw = &adapter->hw;
   2957 	struct sysctllog **log;
   2958 	const struct sysctlnode *rnode, *cnode;
   2959 
   2960 	log = &adapter->sysctllog;
   2961 
   2962 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   2963 		aprint_error_dev(dev, "could not create sysctl root\n");
   2964 		return;
   2965 	}
   2966 
   2967 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2968 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2969 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   2970 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2971 		aprint_error_dev(dev, "could not create sysctl\n");
   2972 
   2973 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2974 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2975 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   2976 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   2977 		aprint_error_dev(dev, "could not create sysctl\n");
   2978 
   2979 	/* Sysctls for all devices */
   2980 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2981 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   2982 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   2983 	    CTL_EOL) != 0)
   2984 		aprint_error_dev(dev, "could not create sysctl\n");
   2985 
   2986 	adapter->enable_aim = ixgbe_enable_aim;
   2987 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2988 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2989 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2990 		aprint_error_dev(dev, "could not create sysctl\n");
   2991 
   2992 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2993 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2994 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   2995 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   2996 	    CTL_EOL) != 0)
   2997 		aprint_error_dev(dev, "could not create sysctl\n");
   2998 
   2999 #ifdef IXGBE_DEBUG
   3000 	/* testing sysctls (for all devices) */
   3001 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3002 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3003 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3004 	    CTL_EOL) != 0)
   3005 		aprint_error_dev(dev, "could not create sysctl\n");
   3006 
   3007 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3008 	    CTLTYPE_STRING, "print_rss_config",
   3009 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3010 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3011 	    CTL_EOL) != 0)
   3012 		aprint_error_dev(dev, "could not create sysctl\n");
   3013 #endif
   3014 	/* for X550 series devices */
   3015 	if (hw->mac.type >= ixgbe_mac_X550)
   3016 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3017 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3018 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3019 		    CTL_EOL) != 0)
   3020 			aprint_error_dev(dev, "could not create sysctl\n");
   3021 
   3022 	/* for WoL-capable devices */
   3023 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3024 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3025 		    CTLTYPE_BOOL, "wol_enable",
   3026 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3027 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3028 		    CTL_EOL) != 0)
   3029 			aprint_error_dev(dev, "could not create sysctl\n");
   3030 
   3031 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3032 		    CTLTYPE_INT, "wufc",
   3033 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3034 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3035 		    CTL_EOL) != 0)
   3036 			aprint_error_dev(dev, "could not create sysctl\n");
   3037 	}
   3038 
   3039 	/* for X552/X557-AT devices */
   3040 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3041 		const struct sysctlnode *phy_node;
   3042 
   3043 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3044 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3045 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3046 			aprint_error_dev(dev, "could not create sysctl\n");
   3047 			return;
   3048 		}
   3049 
   3050 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3051 		    CTLTYPE_INT, "temp",
   3052 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3053 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3054 		    CTL_EOL) != 0)
   3055 			aprint_error_dev(dev, "could not create sysctl\n");
   3056 
   3057 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3058 		    CTLTYPE_INT, "overtemp_occurred",
   3059 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3060 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3061 		    CTL_CREATE, CTL_EOL) != 0)
   3062 			aprint_error_dev(dev, "could not create sysctl\n");
   3063 	}
   3064 
   3065 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3066 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3067 		    CTLTYPE_INT, "eee_state",
   3068 		    SYSCTL_DESCR("EEE Power Save State"),
   3069 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3070 		    CTL_EOL) != 0)
   3071 			aprint_error_dev(dev, "could not create sysctl\n");
   3072 	}
   3073 } /* ixgbe_add_device_sysctls */
   3074 
   3075 /************************************************************************
   3076  * ixgbe_allocate_pci_resources
   3077  ************************************************************************/
   3078 static int
   3079 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3080     const struct pci_attach_args *pa)
   3081 {
   3082 	pcireg_t	memtype;
   3083 	device_t dev = adapter->dev;
   3084 	bus_addr_t addr;
   3085 	int flags;
   3086 
   3087 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3088 	switch (memtype) {
   3089 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3090 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3091 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3092 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
    3093 		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3094 			goto map_err;
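         		/*
         		 * Even if the BAR advertises itself as prefetchable,
         		 * the register space has read side effects, so always
         		 * map it non-prefetchable.
         		 */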
   3095 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3096 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3097 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3098 		}
   3099 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3100 		     adapter->osdep.mem_size, flags,
   3101 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3102 map_err:
   3103 			adapter->osdep.mem_size = 0;
   3104 			aprint_error_dev(dev, "unable to map BAR0\n");
   3105 			return ENXIO;
   3106 		}
   3107 		break;
   3108 	default:
   3109 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3110 		return ENXIO;
   3111 	}
   3112 
   3113 	return (0);
   3114 } /* ixgbe_allocate_pci_resources */
   3115 
   3116 /************************************************************************
   3117  * ixgbe_detach - Device removal routine
   3118  *
   3119  *   Called when the driver is being removed.
   3120  *   Stops the adapter and deallocates all the resources
   3121  *   that were allocated for driver operation.
   3122  *
   3123  *   return 0 on success, positive on failure
   3124  ************************************************************************/
   3125 static int
   3126 ixgbe_detach(device_t dev, int flags)
   3127 {
   3128 	struct adapter *adapter = device_private(dev);
   3129 	struct ix_queue *que = adapter->queues;
   3130 	struct rx_ring *rxr = adapter->rx_rings;
   3131 	struct tx_ring *txr = adapter->tx_rings;
   3132 	struct ixgbe_hw *hw = &adapter->hw;
   3133 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3134 	u32	ctrl_ext;
   3135 
   3136 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3137 	if (adapter->osdep.attached == false)
   3138 		return 0;
   3139 
   3140 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3141 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3142 		return (EBUSY);
   3143 	}
   3144 
   3145 	/* Stop the interface. Callouts are stopped in it. */
   3146 	ixgbe_ifstop(adapter->ifp, 1);
   3147 #if NVLAN > 0
    3148 	/* Make sure VLANs are not using the driver */
   3149 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3150 		;	/* nothing to do: no VLANs */
   3151 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3152 		vlan_ifdetach(adapter->ifp);
   3153 	else {
   3154 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3155 		return (EBUSY);
   3156 	}
   3157 #endif
   3158 
   3159 	pmf_device_deregister(dev);
   3160 
   3161 	ether_ifdetach(adapter->ifp);
   3162 	/* Stop the adapter */
   3163 	IXGBE_CORE_LOCK(adapter);
   3164 	ixgbe_setup_low_power_mode(adapter);
   3165 	IXGBE_CORE_UNLOCK(adapter);
   3166 
   3167 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3168 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3169 			softint_disestablish(txr->txr_si);
   3170 		softint_disestablish(que->que_si);
   3171 	}
   3172 
   3173 	/* Drain the Link queue */
   3174 	softint_disestablish(adapter->link_si);
   3175 	softint_disestablish(adapter->mod_si);
   3176 	softint_disestablish(adapter->msf_si);
   3177 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   3178 		softint_disestablish(adapter->mbx_si);
   3179 	softint_disestablish(adapter->phy_si);
   3180 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   3181 		softint_disestablish(adapter->fdir_si);
   3182 
   3183 	/* let hardware know driver is unloading */
   3184 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3185 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3186 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3187 
   3188 	callout_halt(&adapter->timer, NULL);
   3189 
   3190 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3191 		netmap_detach(adapter->ifp);
   3192 
   3193 	ixgbe_free_pci_resources(adapter);
   3194 #if 0	/* XXX the NetBSD port is probably missing something here */
   3195 	bus_generic_detach(dev);
   3196 #endif
   3197 	if_detach(adapter->ifp);
   3198 	if_percpuq_destroy(adapter->ipq);
   3199 
   3200 	sysctl_teardown(&adapter->sysctllog);
   3201 	evcnt_detach(&adapter->handleq);
   3202 	evcnt_detach(&adapter->req);
   3203 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3204 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3205 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3206 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3207 	evcnt_detach(&adapter->other_tx_dma_setup);
   3208 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3209 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3210 	evcnt_detach(&adapter->watchdog_events);
   3211 	evcnt_detach(&adapter->tso_err);
   3212 	evcnt_detach(&adapter->link_irq);
   3213 
   3214 	txr = adapter->tx_rings;
   3215 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3216 		evcnt_detach(&adapter->queues[i].irqs);
   3217 		evcnt_detach(&txr->no_desc_avail);
   3218 		evcnt_detach(&txr->total_packets);
   3219 		evcnt_detach(&txr->tso_tx);
   3220 #ifndef IXGBE_LEGACY_TX
   3221 		evcnt_detach(&txr->pcq_drops);
   3222 #endif
   3223 
   3224 		if (i < __arraycount(stats->mpc)) {
   3225 			evcnt_detach(&stats->mpc[i]);
   3226 			if (hw->mac.type == ixgbe_mac_82598EB)
   3227 				evcnt_detach(&stats->rnbc[i]);
   3228 		}
   3229 		if (i < __arraycount(stats->pxontxc)) {
   3230 			evcnt_detach(&stats->pxontxc[i]);
   3231 			evcnt_detach(&stats->pxonrxc[i]);
   3232 			evcnt_detach(&stats->pxofftxc[i]);
   3233 			evcnt_detach(&stats->pxoffrxc[i]);
   3234 			evcnt_detach(&stats->pxon2offc[i]);
   3235 		}
   3236 		if (i < __arraycount(stats->qprc)) {
   3237 			evcnt_detach(&stats->qprc[i]);
   3238 			evcnt_detach(&stats->qptc[i]);
   3239 			evcnt_detach(&stats->qbrc[i]);
   3240 			evcnt_detach(&stats->qbtc[i]);
   3241 			evcnt_detach(&stats->qprdc[i]);
   3242 		}
   3243 
   3244 		evcnt_detach(&rxr->rx_packets);
   3245 		evcnt_detach(&rxr->rx_bytes);
   3246 		evcnt_detach(&rxr->rx_copies);
   3247 		evcnt_detach(&rxr->no_jmbuf);
   3248 		evcnt_detach(&rxr->rx_discarded);
   3249 	}
   3250 	evcnt_detach(&stats->ipcs);
   3251 	evcnt_detach(&stats->l4cs);
   3252 	evcnt_detach(&stats->ipcs_bad);
   3253 	evcnt_detach(&stats->l4cs_bad);
   3254 	evcnt_detach(&stats->intzero);
   3255 	evcnt_detach(&stats->legint);
   3256 	evcnt_detach(&stats->crcerrs);
   3257 	evcnt_detach(&stats->illerrc);
   3258 	evcnt_detach(&stats->errbc);
   3259 	evcnt_detach(&stats->mspdc);
   3260 	if (hw->mac.type >= ixgbe_mac_X550)
   3261 		evcnt_detach(&stats->mbsdc);
   3262 	evcnt_detach(&stats->mpctotal);
   3263 	evcnt_detach(&stats->mlfc);
   3264 	evcnt_detach(&stats->mrfc);
   3265 	evcnt_detach(&stats->rlec);
   3266 	evcnt_detach(&stats->lxontxc);
   3267 	evcnt_detach(&stats->lxonrxc);
   3268 	evcnt_detach(&stats->lxofftxc);
   3269 	evcnt_detach(&stats->lxoffrxc);
   3270 
   3271 	/* Packet Reception Stats */
   3272 	evcnt_detach(&stats->tor);
   3273 	evcnt_detach(&stats->gorc);
   3274 	evcnt_detach(&stats->tpr);
   3275 	evcnt_detach(&stats->gprc);
   3276 	evcnt_detach(&stats->mprc);
   3277 	evcnt_detach(&stats->bprc);
   3278 	evcnt_detach(&stats->prc64);
   3279 	evcnt_detach(&stats->prc127);
   3280 	evcnt_detach(&stats->prc255);
   3281 	evcnt_detach(&stats->prc511);
   3282 	evcnt_detach(&stats->prc1023);
   3283 	evcnt_detach(&stats->prc1522);
   3284 	evcnt_detach(&stats->ruc);
   3285 	evcnt_detach(&stats->rfc);
   3286 	evcnt_detach(&stats->roc);
   3287 	evcnt_detach(&stats->rjc);
   3288 	evcnt_detach(&stats->mngprc);
   3289 	evcnt_detach(&stats->mngpdc);
   3290 	evcnt_detach(&stats->xec);
   3291 
   3292 	/* Packet Transmission Stats */
   3293 	evcnt_detach(&stats->gotc);
   3294 	evcnt_detach(&stats->tpt);
   3295 	evcnt_detach(&stats->gptc);
   3296 	evcnt_detach(&stats->bptc);
   3297 	evcnt_detach(&stats->mptc);
   3298 	evcnt_detach(&stats->mngptc);
   3299 	evcnt_detach(&stats->ptc64);
   3300 	evcnt_detach(&stats->ptc127);
   3301 	evcnt_detach(&stats->ptc255);
   3302 	evcnt_detach(&stats->ptc511);
   3303 	evcnt_detach(&stats->ptc1023);
   3304 	evcnt_detach(&stats->ptc1522);
   3305 
   3306 	ixgbe_free_transmit_structures(adapter);
   3307 	ixgbe_free_receive_structures(adapter);
   3308 	free(adapter->queues, M_DEVBUF);
   3309 	free(adapter->mta, M_DEVBUF);
   3310 
   3311 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3312 
   3313 	return (0);
   3314 } /* ixgbe_detach */
   3315 
   3316 /************************************************************************
   3317  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3318  *
   3319  *   Prepare the adapter/port for LPLU and/or WoL
   3320  ************************************************************************/
   3321 static int
   3322 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3323 {
   3324 	struct ixgbe_hw *hw = &adapter->hw;
   3325 	device_t        dev = adapter->dev;
   3326 	s32             error = 0;
   3327 
   3328 	KASSERT(mutex_owned(&adapter->core_mtx));
   3329 
   3330 	/* Limit power management flow to X550EM baseT */
   3331 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3332 	    hw->phy.ops.enter_lplu) {
   3333 		/* X550EM baseT adapters need a special LPLU flow */
   3334 		hw->phy.reset_disable = true;
   3335 		ixgbe_stop(adapter);
   3336 		error = hw->phy.ops.enter_lplu(hw);
   3337 		if (error)
   3338 			device_printf(dev,
   3339 			    "Error entering LPLU: %d\n", error);
   3340 		hw->phy.reset_disable = false;
   3341 	} else {
   3342 		/* Just stop for other adapters */
   3343 		ixgbe_stop(adapter);
   3344 	}
   3345 
   3346 	if (!hw->wol_enabled) {
   3347 		ixgbe_set_phy_power(hw, FALSE);
   3348 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3349 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3350 	} else {
   3351 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3352 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3353 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3354 
   3355 		/*
   3356 		 * Clear Wake Up Status register to prevent any previous wakeup
   3357 		 * events from waking us up immediately after we suspend.
   3358 		 */
   3359 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3360 
   3361 		/*
   3362 		 * Program the Wakeup Filter Control register with user filter
   3363 		 * settings
   3364 		 */
   3365 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3366 
   3367 		/* Enable wakeups and power management in Wakeup Control */
   3368 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3369 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3370 
   3371 	}
   3372 
   3373 	return error;
   3374 } /* ixgbe_setup_low_power_mode */
   3375 
   3376 /************************************************************************
   3377  * ixgbe_shutdown - Shutdown entry point
   3378  ************************************************************************/
   3379 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3380 static int
   3381 ixgbe_shutdown(device_t dev)
   3382 {
   3383 	struct adapter *adapter = device_private(dev);
   3384 	int error = 0;
   3385 
   3386 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3387 
   3388 	IXGBE_CORE_LOCK(adapter);
   3389 	error = ixgbe_setup_low_power_mode(adapter);
   3390 	IXGBE_CORE_UNLOCK(adapter);
   3391 
   3392 	return (error);
   3393 } /* ixgbe_shutdown */
   3394 #endif
   3395 
   3396 /************************************************************************
   3397  * ixgbe_suspend
   3398  *
   3399  *   From D0 to D3
   3400  ************************************************************************/
   3401 static bool
   3402 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3403 {
   3404 	struct adapter *adapter = device_private(dev);
   3405 	int            error = 0;
   3406 
   3407 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3408 
   3409 	IXGBE_CORE_LOCK(adapter);
   3410 
   3411 	error = ixgbe_setup_low_power_mode(adapter);
   3412 
   3413 	IXGBE_CORE_UNLOCK(adapter);
   3414 
    3415 	return (error == 0);	/* pmf(9) expects true on success */
   3416 } /* ixgbe_suspend */
   3417 
   3418 /************************************************************************
   3419  * ixgbe_resume
   3420  *
   3421  *   From D3 to D0
   3422  ************************************************************************/
   3423 static bool
   3424 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3425 {
   3426 	struct adapter  *adapter = device_private(dev);
   3427 	struct ifnet    *ifp = adapter->ifp;
   3428 	struct ixgbe_hw *hw = &adapter->hw;
   3429 	u32             wus;
   3430 
   3431 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3432 
   3433 	IXGBE_CORE_LOCK(adapter);
   3434 
   3435 	/* Read & clear WUS register */
   3436 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3437 	if (wus)
   3438 		device_printf(dev, "Woken up by (WUS): %#010x\n",
    3439 		    wus);
   3440 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3441 	/* And clear WUFC until next low-power transition */
   3442 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3443 
   3444 	/*
   3445 	 * Required after D3->D0 transition;
   3446 	 * will re-advertise all previous advertised speeds
   3447 	 */
   3448 	if (ifp->if_flags & IFF_UP)
   3449 		ixgbe_init_locked(adapter);
   3450 
   3451 	IXGBE_CORE_UNLOCK(adapter);
   3452 
   3453 	return true;
   3454 } /* ixgbe_resume */
   3455 
   3456 /*
   3457  * Set the various hardware offload abilities.
   3458  *
   3459  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3460  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3461  * mbuf offload flags the driver will understand.
   3462  */
   3463 static void
   3464 ixgbe_set_if_hwassist(struct adapter *adapter)
   3465 {
   3466 	/* XXX */
   3467 }
   3468 
   3469 /************************************************************************
   3470  * ixgbe_init_locked - Init entry point
   3471  *
   3472  *   Used in two ways: It is used by the stack as an init
   3473  *   entry point in network interface structure. It is also
   3474  *   used by the driver as a hw/sw initialization routine to
   3475  *   get to a consistent state.
   3478  ************************************************************************/
   3479 static void
   3480 ixgbe_init_locked(struct adapter *adapter)
   3481 {
   3482 	struct ifnet   *ifp = adapter->ifp;
   3483 	device_t 	dev = adapter->dev;
   3484 	struct ixgbe_hw *hw = &adapter->hw;
   3485 	struct tx_ring  *txr;
   3486 	struct rx_ring  *rxr;
   3487 	u32		txdctl, mhadd;
   3488 	u32		rxdctl, rxctrl;
   3489 	u32             ctrl_ext;
   3490 	int             err = 0;
   3491 
   3492 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3493 
   3494 	KASSERT(mutex_owned(&adapter->core_mtx));
   3495 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3496 
   3497 	hw->adapter_stopped = FALSE;
   3498 	ixgbe_stop_adapter(hw);
    3499 	callout_stop(&adapter->timer);
   3500 
   3501 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3502 	adapter->max_frame_size =
   3503 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3504 
   3505 	/* Queue indices may change with IOV mode */
   3506 	ixgbe_align_all_queue_indices(adapter);
   3507 
   3508 	/* reprogram the RAR[0] in case user changed it. */
   3509 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3510 
    3511 	/* Get the latest MAC address; the user may have set a LAA */
   3512 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3513 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3514 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3515 	hw->addr_ctrl.rar_used_count = 1;
   3516 
   3517 	/* Set hardware offload abilities from ifnet flags */
   3518 	ixgbe_set_if_hwassist(adapter);
   3519 
   3520 	/* Prepare transmit descriptors and buffers */
   3521 	if (ixgbe_setup_transmit_structures(adapter)) {
   3522 		device_printf(dev, "Could not setup transmit structures\n");
   3523 		ixgbe_stop(adapter);
   3524 		return;
   3525 	}
   3526 
   3527 	ixgbe_init_hw(hw);
   3528 	ixgbe_initialize_iov(adapter);
   3529 	ixgbe_initialize_transmit_units(adapter);
   3530 
   3531 	/* Setup Multicast table */
   3532 	ixgbe_set_multi(adapter);
   3533 
   3534 	/* Determine the correct mbuf pool, based on frame size */
   3535 	if (adapter->max_frame_size <= MCLBYTES)
   3536 		adapter->rx_mbuf_sz = MCLBYTES;
   3537 	else
   3538 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3539 
   3540 	/* Prepare receive descriptors and buffers */
   3541 	if (ixgbe_setup_receive_structures(adapter)) {
   3542 		device_printf(dev, "Could not setup receive structures\n");
   3543 		ixgbe_stop(adapter);
   3544 		return;
   3545 	}
   3546 
   3547 	/* Configure RX settings */
   3548 	ixgbe_initialize_receive_units(adapter);
   3549 
   3550 	/* Enable SDP & MSI-X interrupts based on adapter */
   3551 	ixgbe_config_gpie(adapter);
   3552 
   3553 	/* Set MTU size */
   3554 	if (ifp->if_mtu > ETHERMTU) {
   3555 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3556 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3557 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3558 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3559 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3560 	}
   3561 
   3562 	/* Now enable all the queues */
   3563 	for (int i = 0; i < adapter->num_queues; i++) {
   3564 		txr = &adapter->tx_rings[i];
   3565 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3566 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3567 		/* Set WTHRESH to 8, burst writeback */
   3568 		txdctl |= (8 << 16);
   3569 		/*
   3570 		 * When the internal queue falls below PTHRESH (32),
   3571 		 * start prefetching as long as there are at least
   3572 		 * HTHRESH (1) buffers ready. The values are taken
   3573 		 * from the Intel linux driver 3.8.21.
   3574 		 * Prefetching enables tx line rate even with 1 queue.
   3575 		 */
   3576 		txdctl |= (32 << 0) | (1 << 8);
   3577 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3578 	}
   3579 
   3580 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3581 		rxr = &adapter->rx_rings[i];
   3582 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3583 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3584 			/*
   3585 			 * PTHRESH = 21
   3586 			 * HTHRESH = 4
   3587 			 * WTHRESH = 8
   3588 			 */
   3589 			rxdctl &= ~0x3FFFFF;
   3590 			rxdctl |= 0x080420;
   3591 		}
   3592 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3593 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
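         		/*
         		 * Wait (up to 10 * 1ms) for the hardware to latch the
         		 * enable bit before programming the tail pointer.
         		 */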
    3594 		for (j = 0; j < 10; j++) {
   3595 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3596 			    IXGBE_RXDCTL_ENABLE)
   3597 				break;
   3598 			else
   3599 				msec_delay(1);
   3600 		}
   3601 		wmb();
   3602 
   3603 		/*
   3604 		 * In netmap mode, we must preserve the buffers made
   3605 		 * available to userspace before the if_init()
   3606 		 * (this is true by default on the TX side, because
   3607 		 * init makes all buffers available to userspace).
   3608 		 *
   3609 		 * netmap_reset() and the device specific routines
   3610 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3611 		 * buffers at the end of the NIC ring, so here we
   3612 		 * must set the RDT (tail) register to make sure
   3613 		 * they are not overwritten.
   3614 		 *
   3615 		 * In this driver the NIC ring starts at RDH = 0,
   3616 		 * RDT points to the last slot available for reception (?),
   3617 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3618 		 */
   3619 #ifdef DEV_NETMAP
   3620 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3621 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3622 			struct netmap_adapter *na = NA(adapter->ifp);
   3623 			struct netmap_kring *kring = &na->rx_rings[i];
   3624 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3625 
   3626 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3627 		} else
   3628 #endif /* DEV_NETMAP */
   3629 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3630 			    adapter->num_rx_desc - 1);
   3631 	}
   3632 
   3633 	/* Enable Receive engine */
   3634 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3635 	if (hw->mac.type == ixgbe_mac_82598EB)
   3636 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3637 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3638 	ixgbe_enable_rx_dma(hw, rxctrl);
   3639 
   3640 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3641 
   3642 	/* Set up MSI-X routing */
   3643 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3644 		ixgbe_configure_ivars(adapter);
   3645 		/* Set up auto-mask */
   3646 		if (hw->mac.type == ixgbe_mac_82598EB)
   3647 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3648 		else {
   3649 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3650 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3651 		}
   3652 	} else {  /* Simple settings for Legacy/MSI */
   3653 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3654 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3655 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3656 	}
   3657 
   3658 	ixgbe_init_fdir(adapter);
   3659 
   3660 	/*
   3661 	 * Check on any SFP devices that
   3662 	 * need to be kick-started
   3663 	 */
   3664 	if (hw->phy.type == ixgbe_phy_none) {
   3665 		err = hw->phy.ops.identify(hw);
   3666 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3667 			device_printf(dev,
    3668 			    "Unsupported SFP+ module type was detected.\n");
    3669 			return;
    3670 		}
   3671 	}
   3672 
   3673 	/* Set moderation on the Link interrupt */
   3674 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3675 
   3676 	/* Config/Enable Link */
   3677 	ixgbe_config_link(adapter);
   3678 
   3679 	/* Hardware Packet Buffer & Flow Control setup */
   3680 	ixgbe_config_delay_values(adapter);
   3681 
   3682 	/* Initialize the FC settings */
   3683 	ixgbe_start_hw(hw);
   3684 
   3685 	/* Set up VLAN support and filter */
   3686 	ixgbe_setup_vlan_hw_support(adapter);
   3687 
   3688 	/* Setup DMA Coalescing */
   3689 	ixgbe_config_dmac(adapter);
   3690 
   3691 	/* And now turn on interrupts */
   3692 	ixgbe_enable_intr(adapter);
   3693 
    3694 	/* Enable the use of the mailbox (MBX) by the VFs */
   3695 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3696 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3697 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3698 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3699 	}
   3700 
   3701 	/* Now inform the stack we're ready */
   3702 	ifp->if_flags |= IFF_RUNNING;
   3703 
   3704 	return;
   3705 } /* ixgbe_init_locked */
   3706 
   3707 /************************************************************************
   3708  * ixgbe_init
   3709  ************************************************************************/
   3710 static int
   3711 ixgbe_init(struct ifnet *ifp)
   3712 {
   3713 	struct adapter *adapter = ifp->if_softc;
   3714 
   3715 	IXGBE_CORE_LOCK(adapter);
   3716 	ixgbe_init_locked(adapter);
   3717 	IXGBE_CORE_UNLOCK(adapter);
   3718 
   3719 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3720 } /* ixgbe_init */
   3721 
   3722 /************************************************************************
   3723  * ixgbe_set_ivar
   3724  *
   3725  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3726  *     (yes this is all very magic and confusing :)
   3727  *    - entry is the register array entry
   3728  *    - vector is the MSI-X vector for this queue
   3729  *    - type is RX/TX/MISC
   3730  ************************************************************************/
   3731 static void
   3732 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3733 {
   3734 	struct ixgbe_hw *hw = &adapter->hw;
   3735 	u32 ivar, index;
   3736 
   3737 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3738 
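         	/*
         	 * Each 32-bit IVAR register holds four 8-bit entries.  On the
         	 * 82598, RX entries occupy indices 0-63 and TX entries 64-127.
         	 * On 82599 and newer, each register describes two queues with
         	 * separate RX and TX bytes, and IVAR_MISC holds the vector for
         	 * the "other causes" interrupt.
         	 */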
   3739 	switch (hw->mac.type) {
   3740 
   3741 	case ixgbe_mac_82598EB:
   3742 		if (type == -1)
   3743 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3744 		else
   3745 			entry += (type * 64);
   3746 		index = (entry >> 2) & 0x1F;
   3747 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3748 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3749 		ivar |= (vector << (8 * (entry & 0x3)));
   3750 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3751 		break;
   3752 
   3753 	case ixgbe_mac_82599EB:
   3754 	case ixgbe_mac_X540:
   3755 	case ixgbe_mac_X550:
   3756 	case ixgbe_mac_X550EM_x:
   3757 	case ixgbe_mac_X550EM_a:
   3758 		if (type == -1) { /* MISC IVAR */
   3759 			index = (entry & 1) * 8;
   3760 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3761 			ivar &= ~(0xFF << index);
   3762 			ivar |= (vector << index);
   3763 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3764 		} else {	/* RX/TX IVARS */
   3765 			index = (16 * (entry & 1)) + (8 * type);
   3766 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3767 			ivar &= ~(0xFF << index);
   3768 			ivar |= (vector << index);
   3769 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    3770 		}
         		break;
    3771 
   3772 	default:
   3773 		break;
   3774 	}
   3775 } /* ixgbe_set_ivar */
   3776 
   3777 /************************************************************************
   3778  * ixgbe_configure_ivars
   3779  ************************************************************************/
   3780 static void
   3781 ixgbe_configure_ivars(struct adapter *adapter)
   3782 {
   3783 	struct ix_queue *que = adapter->queues;
   3784 	u32             newitr;
   3785 
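         	/*
         	 * Convert the interrupt-rate cap into an EITR value.  The
         	 * interval field (bits [11:3]) counts 2us units, so
         	 * 4000000/rate is the interval already shifted into place;
         	 * this matches the encoding in the interrupt_rate sysctl
         	 * handler.
         	 */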
   3786 	if (ixgbe_max_interrupt_rate > 0)
   3787 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3788 	else {
   3789 		/*
   3790 		 * Disable DMA coalescing if interrupt moderation is
   3791 		 * disabled.
   3792 		 */
   3793 		adapter->dmac = 0;
   3794 		newitr = 0;
   3795 	}
   3796 
    3797 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3798 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3799 		struct tx_ring *txr = &adapter->tx_rings[i];
   3800 		/* First the RX queue entry */
    3801 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3802 		/* ... and the TX */
   3803 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3804 		/* Set an Initial EITR value */
   3805 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3806 	}
   3807 
   3808 	/* For the Link interrupt */
    3809 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3810 } /* ixgbe_configure_ivars */
   3811 
   3812 /************************************************************************
   3813  * ixgbe_config_gpie
   3814  ************************************************************************/
   3815 static void
   3816 ixgbe_config_gpie(struct adapter *adapter)
   3817 {
   3818 	struct ixgbe_hw *hw = &adapter->hw;
   3819 	u32             gpie;
   3820 
   3821 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3822 
   3823 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3824 		/* Enable Enhanced MSI-X mode */
   3825 		gpie |= IXGBE_GPIE_MSIX_MODE
   3826 		     |  IXGBE_GPIE_EIAME
   3827 		     |  IXGBE_GPIE_PBA_SUPPORT
   3828 		     |  IXGBE_GPIE_OCD;
   3829 	}
   3830 
   3831 	/* Fan Failure Interrupt */
   3832 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3833 		gpie |= IXGBE_SDP1_GPIEN;
   3834 
   3835 	/* Thermal Sensor Interrupt */
   3836 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3837 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3838 
   3839 	/* Link detection */
   3840 	switch (hw->mac.type) {
   3841 	case ixgbe_mac_82599EB:
   3842 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3843 		break;
   3844 	case ixgbe_mac_X550EM_x:
   3845 	case ixgbe_mac_X550EM_a:
   3846 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3847 		break;
   3848 	default:
   3849 		break;
   3850 	}
   3851 
   3852 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3853 
   3854 	return;
   3855 } /* ixgbe_config_gpie */
   3856 
   3857 /************************************************************************
   3858  * ixgbe_config_delay_values
   3859  *
   3860  *   Requires adapter->max_frame_size to be set.
   3861  ************************************************************************/
   3862 static void
   3863 ixgbe_config_delay_values(struct adapter *adapter)
   3864 {
   3865 	struct ixgbe_hw *hw = &adapter->hw;
   3866 	u32             rxpb, frame, size, tmp;
   3867 
   3868 	frame = adapter->max_frame_size;
   3869 
   3870 	/* Calculate High Water */
   3871 	switch (hw->mac.type) {
   3872 	case ixgbe_mac_X540:
   3873 	case ixgbe_mac_X550:
   3874 	case ixgbe_mac_X550EM_x:
   3875 	case ixgbe_mac_X550EM_a:
   3876 		tmp = IXGBE_DV_X540(frame, frame);
   3877 		break;
   3878 	default:
   3879 		tmp = IXGBE_DV(frame, frame);
   3880 		break;
   3881 	}
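         	/*
         	 * High water mark: the Rx packet buffer size (in KB) minus the
         	 * worst-case delay value for this frame size, also converted
         	 * to KB.
         	 */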
   3882 	size = IXGBE_BT2KB(tmp);
   3883 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   3884 	hw->fc.high_water[0] = rxpb - size;
   3885 
   3886 	/* Now calculate Low Water */
   3887 	switch (hw->mac.type) {
   3888 	case ixgbe_mac_X540:
   3889 	case ixgbe_mac_X550:
   3890 	case ixgbe_mac_X550EM_x:
   3891 	case ixgbe_mac_X550EM_a:
   3892 		tmp = IXGBE_LOW_DV_X540(frame);
   3893 		break;
   3894 	default:
   3895 		tmp = IXGBE_LOW_DV(frame);
   3896 		break;
   3897 	}
   3898 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3899 
   3900 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3901 	hw->fc.send_xon = TRUE;
   3902 } /* ixgbe_config_delay_values */
   3903 
   3904 /************************************************************************
   3905  * ixgbe_set_multi - Multicast Update
   3906  *
   3907  *   Called whenever multicast address list is updated.
   3908  ************************************************************************/
   3909 static void
   3910 ixgbe_set_multi(struct adapter *adapter)
   3911 {
   3912 	struct ixgbe_mc_addr	*mta;
   3913 	struct ifnet		*ifp = adapter->ifp;
   3914 	u8			*update_ptr;
   3915 	int			mcnt = 0;
   3916 	u32			fctrl;
   3917 	struct ethercom		*ec = &adapter->osdep.ec;
   3918 	struct ether_multi	*enm;
   3919 	struct ether_multistep	step;
   3920 
   3921 	KASSERT(mutex_owned(&adapter->core_mtx));
   3922 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   3923 
   3924 	mta = adapter->mta;
   3925 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   3926 
   3927 	ifp->if_flags &= ~IFF_ALLMULTI;
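         	/*
         	 * Walk the multicast list.  If it overflows the hardware
         	 * filter table, or contains an address range, fall back to
         	 * receiving all multicast frames via IFF_ALLMULTI.
         	 */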
   3928 	ETHER_LOCK(ec);
   3929 	ETHER_FIRST_MULTI(step, ec, enm);
   3930 	while (enm != NULL) {
   3931 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   3932 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   3933 			ETHER_ADDR_LEN) != 0)) {
   3934 			ifp->if_flags |= IFF_ALLMULTI;
   3935 			break;
   3936 		}
   3937 		bcopy(enm->enm_addrlo,
   3938 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   3939 		mta[mcnt].vmdq = adapter->pool;
   3940 		mcnt++;
   3941 		ETHER_NEXT_MULTI(step, enm);
   3942 	}
   3943 	ETHER_UNLOCK(ec);
   3944 
   3945 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3946 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3947 	if (ifp->if_flags & IFF_PROMISC)
   3948 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3949 	else if (ifp->if_flags & IFF_ALLMULTI) {
   3950 		fctrl |= IXGBE_FCTRL_MPE;
   3951 	}
   3952 
   3953 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   3954 
   3955 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   3956 		update_ptr = (u8 *)mta;
   3957 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   3958 		    ixgbe_mc_array_itr, TRUE);
   3959 	}
   3960 
   3961 	return;
   3962 } /* ixgbe_set_multi */
   3963 
   3964 /************************************************************************
   3965  * ixgbe_mc_array_itr
   3966  *
   3967  *   An iterator function needed by the multicast shared code.
   3968  *   It feeds the shared code routine the addresses in the
   3969  *   array of ixgbe_set_multi() one by one.
   3970  ************************************************************************/
   3971 static u8 *
   3972 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   3973 {
   3974 	struct ixgbe_mc_addr *mta;
   3975 
   3976 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   3977 	*vmdq = mta->vmdq;
   3978 
   3979 	*update_ptr = (u8*)(mta + 1);
   3980 
   3981 	return (mta->addr);
   3982 } /* ixgbe_mc_array_itr */
   3983 
   3984 /************************************************************************
   3985  * ixgbe_local_timer - Timer routine
   3986  *
   3987  *   Checks for link status, updates statistics,
   3988  *   and runs the watchdog check.
   3989  ************************************************************************/
   3990 static void
   3991 ixgbe_local_timer(void *arg)
   3992 {
   3993 	struct adapter *adapter = arg;
   3994 
   3995 	IXGBE_CORE_LOCK(adapter);
   3996 	ixgbe_local_timer1(adapter);
   3997 	IXGBE_CORE_UNLOCK(adapter);
   3998 }
   3999 
   4000 static void
   4001 ixgbe_local_timer1(void *arg)
   4002 {
   4003 	struct adapter	*adapter = arg;
   4004 	device_t	dev = adapter->dev;
   4005 	struct ix_queue *que = adapter->queues;
   4006 	u64		queues = 0;
   4007 	int		hung = 0;
   4008 
   4009 	KASSERT(mutex_owned(&adapter->core_mtx));
   4010 
   4011 	/* Check for pluggable optics */
   4012 	if (adapter->sfp_probe)
   4013 		if (!ixgbe_sfp_probe(adapter))
   4014 			goto out; /* Nothing to do */
   4015 
   4016 	ixgbe_update_link_status(adapter);
   4017 	ixgbe_update_stats_counters(adapter);
   4018 
   4019 	/*
   4020 	 * Check the TX queues status
   4021 	 *      - mark hung queues so we don't schedule on them
   4022 	 *      - watchdog only if all queues show hung
   4023 	 */
   4024 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4025 		/* Keep track of queues with work for soft irq */
   4026 		if (que->txr->busy)
   4027 			queues |= ((u64)1 << que->me);
    4028 		/*
    4029 		 * Each time txeof runs without cleaning while there
    4030 		 * are uncleaned descriptors, it increments busy.  If
    4031 		 * we reach the MAX we declare the queue hung.
    4032 		 */
   4033 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4034 			++hung;
   4035 			/* Mark the queue as inactive */
   4036 			adapter->active_queues &= ~((u64)1 << que->me);
   4037 			continue;
   4038 		} else {
   4039 			/* Check if we've come back from hung */
   4040 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4041 				adapter->active_queues |= ((u64)1 << que->me);
   4042 		}
   4043 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4044 			device_printf(dev,
    4045 			    "Warning: queue %d appears to be hung!\n", i);
   4046 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4047 			++hung;
   4048 		}
   4049 	}
   4050 
    4051 	/* Only truly watchdog if all queues show hung */
   4052 	if (hung == adapter->num_queues)
   4053 		goto watchdog;
   4054 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4055 		ixgbe_rearm_queues(adapter, queues);
   4056 	}
   4057 
   4058 out:
   4059 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4060 	return;
   4061 
   4062 watchdog:
   4063 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4064 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4065 	adapter->watchdog_events.ev_count++;
   4066 	ixgbe_init_locked(adapter);
   4067 } /* ixgbe_local_timer */
   4068 
   4069 /************************************************************************
   4070  * ixgbe_sfp_probe
   4071  *
   4072  *   Determine if a port had optics inserted.
   4073  ************************************************************************/
   4074 static bool
   4075 ixgbe_sfp_probe(struct adapter *adapter)
   4076 {
   4077 	struct ixgbe_hw	*hw = &adapter->hw;
   4078 	device_t	dev = adapter->dev;
   4079 	bool		result = FALSE;
   4080 
   4081 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4082 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4083 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4084 		if (ret)
   4085 			goto out;
   4086 		ret = hw->phy.ops.reset(hw);
   4087 		adapter->sfp_probe = FALSE;
   4088 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4089 			device_printf(dev,
         			    "Unsupported SFP+ module detected!\n");
    4090 			device_printf(dev,
    4091 			    "Reload driver with supported module.\n");
    4092 			goto out;
   4093 		} else
   4094 			device_printf(dev, "SFP+ module detected!\n");
   4095 		/* We now have supported optics */
   4096 		result = TRUE;
   4097 	}
   4098 out:
   4099 
   4100 	return (result);
   4101 } /* ixgbe_sfp_probe */
   4102 
   4103 /************************************************************************
   4104  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4105  ************************************************************************/
   4106 static void
   4107 ixgbe_handle_mod(void *context)
   4108 {
   4109 	struct adapter  *adapter = context;
   4110 	struct ixgbe_hw *hw = &adapter->hw;
   4111 	device_t	dev = adapter->dev;
   4112 	u32             err, cage_full = 0;
   4113 
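         	/*
         	 * On adapters that need the crosstalk workaround, check the
         	 * SDP presence pin first and bail out if no module is seated
         	 * in the cage, since the interrupt may have been spurious.
         	 */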
   4114 	if (adapter->hw.need_crosstalk_fix) {
   4115 		switch (hw->mac.type) {
   4116 		case ixgbe_mac_82599EB:
   4117 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4118 			    IXGBE_ESDP_SDP2;
   4119 			break;
   4120 		case ixgbe_mac_X550EM_x:
   4121 		case ixgbe_mac_X550EM_a:
   4122 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4123 			    IXGBE_ESDP_SDP0;
   4124 			break;
   4125 		default:
   4126 			break;
   4127 		}
   4128 
   4129 		if (!cage_full)
   4130 			return;
   4131 	}
   4132 
   4133 	err = hw->phy.ops.identify_sfp(hw);
   4134 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4135 		device_printf(dev,
   4136 		    "Unsupported SFP+ module type was detected.\n");
   4137 		return;
   4138 	}
   4139 
   4140 	err = hw->mac.ops.setup_sfp(hw);
   4141 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4142 		device_printf(dev,
   4143 		    "Setup failure - unsupported SFP+ module type.\n");
   4144 		return;
   4145 	}
   4146 	softint_schedule(adapter->msf_si);
   4147 } /* ixgbe_handle_mod */
   4148 
   4149 
   4150 /************************************************************************
   4151  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4152  ************************************************************************/
   4153 static void
   4154 ixgbe_handle_msf(void *context)
   4155 {
   4156 	struct adapter  *adapter = context;
   4157 	struct ixgbe_hw *hw = &adapter->hw;
   4158 	u32             autoneg;
   4159 	bool            negotiate;
   4160 
   4161 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4162 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4163 
   4164 	autoneg = hw->phy.autoneg_advertised;
   4165 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4166 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4167 	else
   4168 		negotiate = 0;
   4169 	if (hw->mac.ops.setup_link)
   4170 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4171 
   4172 	/* Adjust media types shown in ifconfig */
   4173 	ifmedia_removeall(&adapter->media);
   4174 	ixgbe_add_media_types(adapter);
   4175 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4176 } /* ixgbe_handle_msf */
   4177 
   4178 /************************************************************************
   4179  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4180  ************************************************************************/
   4181 static void
   4182 ixgbe_handle_phy(void *context)
   4183 {
   4184 	struct adapter  *adapter = context;
   4185 	struct ixgbe_hw *hw = &adapter->hw;
   4186 	int error;
   4187 
   4188 	error = hw->phy.ops.handle_lasi(hw);
   4189 	if (error == IXGBE_ERR_OVERTEMP)
   4190 		device_printf(adapter->dev,
   4191 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
    4192 		    "PHY will downshift to lower power state!\n");
   4193 	else if (error)
   4194 		device_printf(adapter->dev,
   4195 		    "Error handling LASI interrupt: %d\n", error);
   4196 } /* ixgbe_handle_phy */
   4197 
   4198 static void
   4199 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4200 {
   4201 	struct adapter *adapter = ifp->if_softc;
   4202 
   4203 	IXGBE_CORE_LOCK(adapter);
   4204 	ixgbe_stop(adapter);
   4205 	IXGBE_CORE_UNLOCK(adapter);
   4206 }
   4207 
   4208 /************************************************************************
   4209  * ixgbe_stop - Stop the hardware
   4210  *
   4211  *   Disables all traffic on the adapter by issuing a
   4212  *   global reset on the MAC and deallocates TX/RX buffers.
   4213  ************************************************************************/
   4214 static void
   4215 ixgbe_stop(void *arg)
   4216 {
   4217 	struct ifnet    *ifp;
   4218 	struct adapter  *adapter = arg;
   4219 	struct ixgbe_hw *hw = &adapter->hw;
   4220 
   4221 	ifp = adapter->ifp;
   4222 
   4223 	KASSERT(mutex_owned(&adapter->core_mtx));
   4224 
   4225 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4226 	ixgbe_disable_intr(adapter);
   4227 	callout_stop(&adapter->timer);
   4228 
   4229 	/* Let the stack know...*/
   4230 	ifp->if_flags &= ~IFF_RUNNING;
   4231 
   4232 	ixgbe_reset_hw(hw);
   4233 	hw->adapter_stopped = FALSE;
   4234 	ixgbe_stop_adapter(hw);
   4235 	if (hw->mac.type == ixgbe_mac_82599EB)
   4236 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4237 	/* Turn off the laser - noop with no optics */
   4238 	ixgbe_disable_tx_laser(hw);
   4239 
   4240 	/* Update the stack */
   4241 	adapter->link_up = FALSE;
   4242 	ixgbe_update_link_status(adapter);
   4243 
   4244 	/* reprogram the RAR[0] in case user changed it. */
   4245 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4246 
   4247 	return;
   4248 } /* ixgbe_stop */
   4249 
   4250 /************************************************************************
   4251  * ixgbe_update_link_status - Update OS on link state
   4252  *
   4253  * Note: Only updates the OS on the cached link state.
   4254  *       The real check of the hardware only happens with
   4255  *       a link interrupt.
   4256  ************************************************************************/
   4257 static void
   4258 ixgbe_update_link_status(struct adapter *adapter)
   4259 {
   4260 	struct ifnet	*ifp = adapter->ifp;
   4261 	device_t        dev = adapter->dev;
   4262 	struct ixgbe_hw *hw = &adapter->hw;
   4263 
   4264 	if (adapter->link_up) {
   4265 		if (adapter->link_active == FALSE) {
   4266 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
    4267 				/*
    4268 				 * Discard the count for both MAC Local Fault
    4269 				 * and Remote Fault because those registers
    4270 				 * are valid only while the link is up at
    4271 				 * 10Gbps.
    4272 				 */
   4273 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4274 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4275 			}
   4276 
   4277 			if (bootverbose) {
   4278 				const char *bpsmsg;
   4279 
   4280 				switch (adapter->link_speed) {
   4281 				case IXGBE_LINK_SPEED_10GB_FULL:
   4282 					bpsmsg = "10 Gbps";
   4283 					break;
   4284 				case IXGBE_LINK_SPEED_5GB_FULL:
   4285 					bpsmsg = "5 Gbps";
   4286 					break;
   4287 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4288 					bpsmsg = "2.5 Gbps";
   4289 					break;
   4290 				case IXGBE_LINK_SPEED_1GB_FULL:
   4291 					bpsmsg = "1 Gbps";
   4292 					break;
   4293 				case IXGBE_LINK_SPEED_100_FULL:
   4294 					bpsmsg = "100 Mbps";
   4295 					break;
   4296 				case IXGBE_LINK_SPEED_10_FULL:
   4297 					bpsmsg = "10 Mbps";
   4298 					break;
   4299 				default:
   4300 					bpsmsg = "unknown speed";
   4301 					break;
   4302 				}
    4303 				device_printf(dev, "Link is up %s %s\n",
   4304 				    bpsmsg, "Full Duplex");
   4305 			}
   4306 			adapter->link_active = TRUE;
   4307 			/* Update any Flow Control changes */
   4308 			ixgbe_fc_enable(&adapter->hw);
   4309 			/* Update DMA coalescing config */
   4310 			ixgbe_config_dmac(adapter);
   4311 			if_link_state_change(ifp, LINK_STATE_UP);
   4312 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4313 				ixgbe_ping_all_vfs(adapter);
   4314 		}
   4315 	} else { /* Link down */
   4316 		if (adapter->link_active == TRUE) {
   4317 			if (bootverbose)
   4318 				device_printf(dev, "Link is Down\n");
   4319 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4320 			adapter->link_active = FALSE;
   4321 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4322 				ixgbe_ping_all_vfs(adapter);
   4323 		}
   4324 	}
   4325 
   4326 	return;
   4327 } /* ixgbe_update_link_status */
   4328 
   4329 /************************************************************************
   4330  * ixgbe_config_dmac - Configure DMA Coalescing
   4331  ************************************************************************/
   4332 static void
   4333 ixgbe_config_dmac(struct adapter *adapter)
   4334 {
   4335 	struct ixgbe_hw *hw = &adapter->hw;
   4336 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4337 
   4338 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4339 		return;
   4340 
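         	/*
         	 * The XOR is just a cheap "value differs" test: reconfigure
         	 * only when the requested watchdog timer or the link speed
         	 * has changed.
         	 */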
   4341 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4342 	    dcfg->link_speed ^ adapter->link_speed) {
   4343 		dcfg->watchdog_timer = adapter->dmac;
   4344 		dcfg->fcoe_en = false;
   4345 		dcfg->link_speed = adapter->link_speed;
   4346 		dcfg->num_tcs = 1;
   4347 
   4348 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4349 		    dcfg->watchdog_timer, dcfg->link_speed);
   4350 
   4351 		hw->mac.ops.dmac_config(hw);
   4352 	}
   4353 } /* ixgbe_config_dmac */
   4354 
   4355 /************************************************************************
   4356  * ixgbe_enable_intr
   4357  ************************************************************************/
   4358 static void
   4359 ixgbe_enable_intr(struct adapter *adapter)
   4360 {
   4361 	struct ixgbe_hw	*hw = &adapter->hw;
   4362 	struct ix_queue	*que = adapter->queues;
   4363 	u32		mask, fwsm;
   4364 
   4365 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4366 
   4367 	switch (adapter->hw.mac.type) {
   4368 	case ixgbe_mac_82599EB:
   4369 		mask |= IXGBE_EIMS_ECC;
   4370 		/* Temperature sensor on some adapters */
   4371 		mask |= IXGBE_EIMS_GPI_SDP0;
   4372 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4373 		mask |= IXGBE_EIMS_GPI_SDP1;
   4374 		mask |= IXGBE_EIMS_GPI_SDP2;
   4375 		break;
   4376 	case ixgbe_mac_X540:
   4377 		/* Detect if Thermal Sensor is enabled */
   4378 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4379 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4380 			mask |= IXGBE_EIMS_TS;
   4381 		mask |= IXGBE_EIMS_ECC;
   4382 		break;
   4383 	case ixgbe_mac_X550:
   4384 		/* MAC thermal sensor is automatically enabled */
   4385 		mask |= IXGBE_EIMS_TS;
   4386 		mask |= IXGBE_EIMS_ECC;
   4387 		break;
   4388 	case ixgbe_mac_X550EM_x:
   4389 	case ixgbe_mac_X550EM_a:
   4390 		/* Some devices use SDP0 for important information */
   4391 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4392 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4393 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4394 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4395 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4396 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4397 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4398 		mask |= IXGBE_EIMS_ECC;
   4399 		break;
   4400 	default:
   4401 		break;
   4402 	}
   4403 
   4404 	/* Enable Fan Failure detection */
   4405 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4406 		mask |= IXGBE_EIMS_GPI_SDP1;
   4407 	/* Enable SR-IOV */
   4408 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4409 		mask |= IXGBE_EIMS_MAILBOX;
   4410 	/* Enable Flow Director */
   4411 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4412 		mask |= IXGBE_EIMS_FLOW_DIR;
   4413 
   4414 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4415 
   4416 	/* With MSI-X we use auto clear */
   4417 	if (adapter->msix_mem) {
   4418 		mask = IXGBE_EIMS_ENABLE_MASK;
   4419 		/* Don't autoclear Link */
   4420 		mask &= ~IXGBE_EIMS_OTHER;
   4421 		mask &= ~IXGBE_EIMS_LSC;
   4422 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4423 			mask &= ~IXGBE_EIMS_MAILBOX;
   4424 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4425 	}
   4426 
   4427 	/*
   4428 	 * Now enable all queues, this is done separately to
   4429 	 * allow for handling the extended (beyond 32) MSI-X
   4430 	 * vectors that can be used by 82599
   4431 	 */
    4432 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4433 		ixgbe_enable_queue(adapter, que->msix);
   4434 
   4435 	IXGBE_WRITE_FLUSH(hw);
   4436 
   4437 	return;
   4438 } /* ixgbe_enable_intr */
   4439 
   4440 /************************************************************************
   4441  * ixgbe_disable_intr
   4442  ************************************************************************/
   4443 static void
   4444 ixgbe_disable_intr(struct adapter *adapter)
   4445 {
   4446 	if (adapter->msix_mem)
   4447 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4448 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4449 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4450 	} else {
   4451 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4452 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4453 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4454 	}
   4455 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4456 
   4457 	return;
   4458 } /* ixgbe_disable_intr */
   4459 
   4460 /************************************************************************
   4461  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4462  ************************************************************************/
   4463 static int
   4464 ixgbe_legacy_irq(void *arg)
   4465 {
   4466 	struct ix_queue *que = arg;
   4467 	struct adapter	*adapter = que->adapter;
   4468 	struct ixgbe_hw	*hw = &adapter->hw;
   4469 	struct ifnet    *ifp = adapter->ifp;
    4470 	struct tx_ring	*txr = adapter->tx_rings;
   4471 	bool		more = false;
   4472 	u32             eicr, eicr_mask;
   4473 
   4474 	/* Silicon errata #26 on 82598 */
   4475 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4476 
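         	/*
         	 * Read the cause register after masking everything above; the
         	 * masked interrupts are re-enabled at the bottom of this
         	 * handler or after the deferred que softint has run.
         	 */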
   4477 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4478 
   4479 	adapter->stats.pf.legint.ev_count++;
   4480 	++que->irqs.ev_count;
   4481 	if (eicr == 0) {
   4482 		adapter->stats.pf.intzero.ev_count++;
   4483 		if ((ifp->if_flags & IFF_UP) != 0)
   4484 			ixgbe_enable_intr(adapter);
   4485 		return 0;
   4486 	}
   4487 
   4488 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4489 #ifdef __NetBSD__
   4490 		/* Don't run ixgbe_rxeof in interrupt context */
   4491 		more = true;
   4492 #else
   4493 		more = ixgbe_rxeof(que);
   4494 #endif
   4495 
   4496 		IXGBE_TX_LOCK(txr);
   4497 		ixgbe_txeof(txr);
   4498 #ifdef notyet
   4499 		if (!ixgbe_ring_empty(ifp, txr->br))
   4500 			ixgbe_start_locked(ifp, txr);
   4501 #endif
   4502 		IXGBE_TX_UNLOCK(txr);
   4503 	}
   4504 
   4505 	/* Check for fan failure */
   4506 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4507 		ixgbe_check_fan_failure(adapter, eicr, true);
   4508 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4509 	}
   4510 
   4511 	/* Link status change */
   4512 	if (eicr & IXGBE_EICR_LSC)
   4513 		softint_schedule(adapter->link_si);
   4514 
   4515 	if (ixgbe_is_sfp(hw)) {
   4516 		/* Pluggable optics-related interrupt */
   4517 		if (hw->mac.type >= ixgbe_mac_X540)
   4518 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4519 		else
   4520 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4521 
   4522 		if (eicr & eicr_mask) {
   4523 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4524 			softint_schedule(adapter->mod_si);
   4525 		}
   4526 
   4527 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4528 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4529 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4530 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4531 			softint_schedule(adapter->msf_si);
   4532 		}
   4533 	}
   4534 
   4535 	/* External PHY interrupt */
   4536 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4537 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4538 		softint_schedule(adapter->phy_si);
   4539 
   4540 	if (more)
   4541 		softint_schedule(que->que_si);
   4542 	else
   4543 		ixgbe_enable_intr(adapter);
   4544 
   4545 	return 1;
   4546 } /* ixgbe_legacy_irq */
   4547 
   4548 /************************************************************************
   4549  * ixgbe_free_pci_resources
   4550  ************************************************************************/
   4551 static void
   4552 ixgbe_free_pci_resources(struct adapter *adapter)
   4553 {
   4554 	struct ix_queue *que = adapter->queues;
   4555 	int		rid;
   4556 
   4557 	/*
   4558 	 * Release all msix queue resources:
   4559 	 */
   4560 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4561 		if (que->res != NULL)
   4562 			pci_intr_disestablish(adapter->osdep.pc,
   4563 			    adapter->osdep.ihs[i]);
   4564 	}
   4565 
   4566 	/* Clean the Legacy or Link interrupt last */
   4567 	if (adapter->vector) /* we are doing MSIX */
   4568 		rid = adapter->vector;
   4569 	else
   4570 		rid = 0;
   4571 
   4572 	if (adapter->osdep.ihs[rid] != NULL) {
   4573 		pci_intr_disestablish(adapter->osdep.pc,
   4574 		    adapter->osdep.ihs[rid]);
   4575 		adapter->osdep.ihs[rid] = NULL;
   4576 	}
   4577 
   4578 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4579 	    adapter->osdep.nintrs);
   4580 
   4581 	if (adapter->osdep.mem_size != 0) {
   4582 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4583 		    adapter->osdep.mem_bus_space_handle,
   4584 		    adapter->osdep.mem_size);
   4585 	}
   4586 
   4587 	return;
   4588 } /* ixgbe_free_pci_resources */
   4589 
   4590 /************************************************************************
   4591  * ixgbe_set_sysctl_value
   4592  ************************************************************************/
   4593 static void
   4594 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4595     const char *description, int *limit, int value)
   4596 {
   4597 	device_t dev =  adapter->dev;
   4598 	struct sysctllog **log;
   4599 	const struct sysctlnode *rnode, *cnode;
   4600 
   4601 	log = &adapter->sysctllog;
   4602 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4603 		aprint_error_dev(dev, "could not create sysctl root\n");
   4604 		return;
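         	/*
         	 * "reg" is expected to be EICR when called from interrupt
         	 * context and the ESDP register otherwise, hence the two
         	 * different SDP1 bit masks.
         	 */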
   4605 	}
   4606 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4607 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4608 	    name, SYSCTL_DESCR(description),
    4609 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4610 		aprint_error_dev(dev, "could not create sysctl\n");
   4611 	*limit = value;
   4612 } /* ixgbe_set_sysctl_value */
   4613 
   4614 /************************************************************************
   4615  * ixgbe_sysctl_flowcntl
   4616  *
   4617  *   SYSCTL wrapper around setting Flow Control
   4618  ************************************************************************/
   4619 static int
   4620 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4621 {
   4622 	struct sysctlnode node = *rnode;
   4623 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4624 	int error, fc;
   4625 
   4626 	fc = adapter->hw.fc.current_mode;
   4627 	node.sysctl_data = &fc;
   4628 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4629 	if (error != 0 || newp == NULL)
   4630 		return error;
   4631 
   4632 	/* Don't bother if it's not changed */
   4633 	if (fc == adapter->hw.fc.current_mode)
   4634 		return (0);
   4635 
   4636 	return ixgbe_set_flowcntl(adapter, fc);
   4637 } /* ixgbe_sysctl_flowcntl */
   4638 
   4639 /************************************************************************
   4640  * ixgbe_set_flowcntl - Set flow control
   4641  *
   4642  *   Flow control values:
   4643  *     0 - off
   4644  *     1 - rx pause
   4645  *     2 - tx pause
   4646  *     3 - full
   4647  ************************************************************************/
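         /*
          * For example, writing 3 requests full flow control (both rx and tx
          * pause); the numeric values map onto the ixgbe_fc_* cases handled
          * in the switch below.
          */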
   4648 static int
   4649 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4650 {
   4651 	switch (fc) {
    4652 	case ixgbe_fc_rx_pause:
    4653 	case ixgbe_fc_tx_pause:
    4654 	case ixgbe_fc_full:
    4655 		adapter->hw.fc.requested_mode = fc;
    4656 		if (adapter->num_queues > 1)
    4657 			ixgbe_disable_rx_drop(adapter);
    4658 		break;
    4659 	case ixgbe_fc_none:
    4660 		adapter->hw.fc.requested_mode = ixgbe_fc_none;
    4661 		if (adapter->num_queues > 1)
    4662 			ixgbe_enable_rx_drop(adapter);
    4663 		break;
    4664 	default:
    4665 		return (EINVAL);
   4666 	}
   4667 
   4668 #if 0 /* XXX NetBSD */
   4669 	/* Don't autoneg if forcing a value */
   4670 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4671 #endif
   4672 	ixgbe_fc_enable(&adapter->hw);
   4673 
   4674 	return (0);
   4675 } /* ixgbe_set_flowcntl */
   4676 
   4677 /************************************************************************
   4678  * ixgbe_enable_rx_drop
   4679  *
   4680  *   Enable the hardware to drop packets when the buffer is
   4681  *   full. This is useful with multiqueue, so that no single
   4682  *   queue being full stalls the entire RX engine. We only
   4683  *   enable this when Multiqueue is enabled AND Flow Control
   4684  *   is disabled.
   4685  ************************************************************************/
   4686 static void
   4687 ixgbe_enable_rx_drop(struct adapter *adapter)
   4688 {
   4689 	struct ixgbe_hw *hw = &adapter->hw;
   4690 	struct rx_ring  *rxr;
   4691 	u32             srrctl;
   4692 
   4693 	for (int i = 0; i < adapter->num_queues; i++) {
   4694 		rxr = &adapter->rx_rings[i];
   4695 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4696 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4697 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4698 	}
   4699 
   4700 	/* enable drop for each vf */
   4701 	for (int i = 0; i < adapter->num_vfs; i++) {
   4702 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4703 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4704 		    IXGBE_QDE_ENABLE));
   4705 	}
   4706 } /* ixgbe_enable_rx_drop */
   4707 
   4708 /************************************************************************
   4709  * ixgbe_disable_rx_drop
   4710  ************************************************************************/
   4711 static void
   4712 ixgbe_disable_rx_drop(struct adapter *adapter)
   4713 {
   4714 	struct ixgbe_hw *hw = &adapter->hw;
   4715 	struct rx_ring  *rxr;
   4716 	u32             srrctl;
   4717 
   4718 	for (int i = 0; i < adapter->num_queues; i++) {
   4719 		rxr = &adapter->rx_rings[i];
    4720 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4721 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4722 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4723 	}
   4724 
   4725 	/* disable drop for each vf */
   4726 	for (int i = 0; i < adapter->num_vfs; i++) {
   4727 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4728 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4729 	}
   4730 } /* ixgbe_disable_rx_drop */
   4731 
   4732 /************************************************************************
   4733  * ixgbe_sysctl_advertise
   4734  *
   4735  *   SYSCTL wrapper around setting advertised speed
   4736  ************************************************************************/
   4737 static int
   4738 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4739 {
   4740 	struct sysctlnode node = *rnode;
   4741 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4742 	int            error = 0, advertise;
   4743 
   4744 	advertise = adapter->advertise;
   4745 	node.sysctl_data = &advertise;
   4746 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4747 	if (error != 0 || newp == NULL)
   4748 		return error;
   4749 
   4750 	return ixgbe_set_advertise(adapter, advertise);
   4751 } /* ixgbe_sysctl_advertise */
   4752 
   4753 /************************************************************************
   4754  * ixgbe_set_advertise - Control advertised link speed
   4755  *
   4756  *   Flags:
   4757  *     0x00 - Default (all capable link speed)
   4758  *     0x01 - advertise 100 Mb
   4759  *     0x02 - advertise 1G
   4760  *     0x04 - advertise 10G
   4761  *     0x08 - advertise 10 Mb
   4762  *     0x10 - advertise 2.5G
   4763  *     0x20 - advertise 5G
   4764  ************************************************************************/
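         /*
          * The flags may be OR'd together: for example, 0x02 | 0x04 = 0x06
          * advertises both 1G and 10G, while 0 restores every speed the
          * device reports as capable.
          */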
   4765 static int
   4766 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4767 {
   4768 	device_t         dev;
   4769 	struct ixgbe_hw  *hw;
   4770 	ixgbe_link_speed speed = 0;
   4771 	ixgbe_link_speed link_caps = 0;
   4772 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4773 	bool             negotiate = FALSE;
   4774 
   4775 	/* Checks to validate new value */
   4776 	if (adapter->advertise == advertise) /* no change */
   4777 		return (0);
   4778 
   4779 	dev = adapter->dev;
   4780 	hw = &adapter->hw;
   4781 
   4782 	/* No speed changes for backplane media */
   4783 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4784 		return (ENODEV);
   4785 
   4786 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4787 	    (hw->phy.multispeed_fiber))) {
   4788 		device_printf(dev,
   4789 		    "Advertised speed can only be set on copper or "
   4790 		    "multispeed fiber media types.\n");
   4791 		return (EINVAL);
   4792 	}
   4793 
    4794 	if (advertise < 0x0 || advertise > 0x3f) {
    4795 		device_printf(dev,
    4796 		    "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
   4797 		return (EINVAL);
   4798 	}
   4799 
   4800 	if (hw->mac.ops.get_link_capabilities) {
   4801 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4802 		    &negotiate);
   4803 		if (err != IXGBE_SUCCESS) {
    4804 			device_printf(dev, "Unable to determine supported advertised speeds\n");
   4805 			return (ENODEV);
   4806 		}
   4807 	}
   4808 
   4809 	/* Set new value and report new advertised mode */
   4810 	if (advertise & 0x1) {
   4811 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4812 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4813 			return (EINVAL);
   4814 		}
   4815 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4816 	}
   4817 	if (advertise & 0x2) {
   4818 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4819 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4820 			return (EINVAL);
   4821 		}
   4822 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4823 	}
   4824 	if (advertise & 0x4) {
   4825 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4826 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4827 			return (EINVAL);
   4828 		}
   4829 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4830 	}
   4831 	if (advertise & 0x8) {
   4832 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4833 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4834 			return (EINVAL);
   4835 		}
   4836 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4837 	}
   4838 	if (advertise & 0x10) {
   4839 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4840 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4841 			return (EINVAL);
   4842 		}
   4843 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4844 	}
   4845 	if (advertise & 0x20) {
   4846 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4847 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4848 			return (EINVAL);
   4849 		}
   4850 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4851 	}
   4852 	if (advertise == 0)
   4853 		speed = link_caps; /* All capable link speed */
   4854 
   4855 	hw->mac.autotry_restart = TRUE;
   4856 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4857 	adapter->advertise = advertise;
   4858 
   4859 	return (0);
   4860 } /* ixgbe_set_advertise */
   4861 
   4862 /************************************************************************
   4863  * ixgbe_get_advertise - Get current advertised speed settings
   4864  *
   4865  *   Formatted for sysctl usage.
   4866  *   Flags:
   4867  *     0x01 - advertise 100 Mb
   4868  *     0x02 - advertise 1G
   4869  *     0x04 - advertise 10G
   4870  *     0x08 - advertise 10 Mb (yes, Mb)
   4871  *     0x10 - advertise 2.5G
   4872  *     0x20 - advertise 5G
   4873  ************************************************************************/
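         /*
          * For example, a copper interface capable of 100Mb, 1G and 10G
          * reports 0x01 | 0x02 | 0x04 = 0x07.
          */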
   4874 static int
   4875 ixgbe_get_advertise(struct adapter *adapter)
   4876 {
   4877 	struct ixgbe_hw  *hw = &adapter->hw;
   4878 	int              speed;
   4879 	ixgbe_link_speed link_caps = 0;
   4880 	s32              err;
   4881 	bool             negotiate = FALSE;
   4882 
   4883 	/*
   4884 	 * Advertised speed means nothing unless it's copper or
   4885 	 * multi-speed fiber
   4886 	 */
   4887 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4888 	    !(hw->phy.multispeed_fiber))
   4889 		return (0);
   4890 
   4891 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4892 	if (err != IXGBE_SUCCESS)
   4893 		return (0);
   4894 
   4895 	speed =
   4896 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   4897 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   4898 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   4899 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   4900 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   4901 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   4902 
   4903 	return speed;
   4904 } /* ixgbe_get_advertise */
   4905 
   4906 /************************************************************************
   4907  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   4908  *
   4909  *   Control values:
   4910  *     0/1 - off / on (use default value of 1000)
   4911  *
   4912  *     Legal timer values are:
   4913  *     50,100,250,500,1000,2000,5000,10000
   4914  *
   4915  *     Turning off interrupt moderation will also turn this off.
   4916  ************************************************************************/
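         /*
          * For example, writing 1 selects the default watchdog value of 1000,
          * writing 500 sets the watchdog directly, and any value not listed
          * above is rejected with EINVAL.  The interface is re-initialized if
          * it is already running so the new setting takes effect.
          */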
   4917 static int
   4918 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   4919 {
   4920 	struct sysctlnode node = *rnode;
   4921 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4922 	struct ifnet   *ifp = adapter->ifp;
   4923 	int            error;
   4924 	int            newval;
   4925 
   4926 	newval = adapter->dmac;
   4927 	node.sysctl_data = &newval;
   4928 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4929 	if ((error) || (newp == NULL))
   4930 		return (error);
   4931 
   4932 	switch (newval) {
   4933 	case 0:
   4934 		/* Disabled */
   4935 		adapter->dmac = 0;
   4936 		break;
   4937 	case 1:
   4938 		/* Enable and use default */
   4939 		adapter->dmac = 1000;
   4940 		break;
   4941 	case 50:
   4942 	case 100:
   4943 	case 250:
   4944 	case 500:
   4945 	case 1000:
   4946 	case 2000:
   4947 	case 5000:
   4948 	case 10000:
   4949 		/* Legal values - allow */
   4950 		adapter->dmac = newval;
   4951 		break;
   4952 	default:
    4953 		/* Reject any other (illegal) value */
   4954 		return (EINVAL);
   4955 	}
   4956 
   4957 	/* Re-initialize hardware if it's already running */
   4958 	if (ifp->if_flags & IFF_RUNNING)
   4959 		ixgbe_init(ifp);
   4960 
   4961 	return (0);
   4962 }
   4963 
   4964 #ifdef IXGBE_DEBUG
   4965 /************************************************************************
   4966  * ixgbe_sysctl_power_state
   4967  *
   4968  *   Sysctl to test power states
   4969  *   Values:
   4970  *     0      - set device to D0
   4971  *     3      - set device to D3
   4972  *     (none) - get current device power state
   4973  ************************************************************************/
   4974 static int
   4975 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   4976 {
   4977 #ifdef notyet
   4978 	struct sysctlnode node = *rnode;
   4979 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4980 	device_t       dev =  adapter->dev;
   4981 	int            curr_ps, new_ps, error = 0;
   4982 
   4983 	curr_ps = new_ps = pci_get_powerstate(dev);
   4984 
   4985 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4986 	if ((error) || (req->newp == NULL))
   4987 		return (error);
   4988 
   4989 	if (new_ps == curr_ps)
   4990 		return (0);
   4991 
   4992 	if (new_ps == 3 && curr_ps == 0)
   4993 		error = DEVICE_SUSPEND(dev);
   4994 	else if (new_ps == 0 && curr_ps == 3)
   4995 		error = DEVICE_RESUME(dev);
   4996 	else
   4997 		return (EINVAL);
   4998 
   4999 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5000 
   5001 	return (error);
   5002 #else
   5003 	return 0;
   5004 #endif
   5005 } /* ixgbe_sysctl_power_state */
   5006 #endif
   5007 
   5008 /************************************************************************
   5009  * ixgbe_sysctl_wol_enable
   5010  *
   5011  *   Sysctl to enable/disable the WoL capability,
   5012  *   if supported by the adapter.
   5013  *
   5014  *   Values:
   5015  *     0 - disabled
   5016  *     1 - enabled
   5017  ************************************************************************/
   5018 static int
   5019 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5020 {
   5021 	struct sysctlnode node = *rnode;
   5022 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5023 	struct ixgbe_hw *hw = &adapter->hw;
   5024 	bool            new_wol_enabled;
   5025 	int             error = 0;
   5026 
   5027 	new_wol_enabled = hw->wol_enabled;
   5028 	node.sysctl_data = &new_wol_enabled;
   5029 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5030 	if ((error) || (newp == NULL))
   5031 		return (error);
   5032 	if (new_wol_enabled == hw->wol_enabled)
   5033 		return (0);
   5034 
   5035 	if (new_wol_enabled && !adapter->wol_support)
   5036 		return (ENODEV);
   5037 	else
   5038 		hw->wol_enabled = new_wol_enabled;
   5039 
   5040 	return (0);
   5041 } /* ixgbe_sysctl_wol_enable */
   5042 
   5043 /************************************************************************
   5044  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5045  *
   5046  *   Sysctl to enable/disable the types of packets that the
   5047  *   adapter will wake up on upon receipt.
   5048  *   Flags:
   5049  *     0x1  - Link Status Change
   5050  *     0x2  - Magic Packet
   5051  *     0x4  - Direct Exact
   5052  *     0x8  - Directed Multicast
   5053  *     0x10 - Broadcast
   5054  *     0x20 - ARP/IPv4 Request Packet
   5055  *     0x40 - Direct IPv4 Packet
   5056  *     0x80 - Direct IPv6 Packet
   5057  *
   5058  *   Settings not listed above will cause the sysctl to return an error.
   5059  ************************************************************************/
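         /*
          * For example, 0x2 | 0x1 = 0x3 wakes the system on either a magic
          * packet or a link status change; any bits above 0xff are rejected.
          */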
   5060 static int
   5061 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5062 {
   5063 	struct sysctlnode node = *rnode;
   5064 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5065 	int error = 0;
   5066 	u32 new_wufc;
   5067 
   5068 	new_wufc = adapter->wufc;
   5069 	node.sysctl_data = &new_wufc;
   5070 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5071 	if ((error) || (newp == NULL))
   5072 		return (error);
   5073 	if (new_wufc == adapter->wufc)
   5074 		return (0);
   5075 
   5076 	if (new_wufc & 0xffffff00)
   5077 		return (EINVAL);
   5078 
   5079 	new_wufc &= 0xff;
   5080 	new_wufc |= (0xffffff & adapter->wufc);
   5081 	adapter->wufc = new_wufc;
   5082 
   5083 	return (0);
   5084 } /* ixgbe_sysctl_wufc */
   5085 
   5086 #ifdef IXGBE_DEBUG
   5087 /************************************************************************
   5088  * ixgbe_sysctl_print_rss_config
   5089  ************************************************************************/
   5090 static int
   5091 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5092 {
   5093 #ifdef notyet
   5094 	struct sysctlnode node = *rnode;
   5095 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5096 	struct ixgbe_hw *hw = &adapter->hw;
   5097 	device_t        dev = adapter->dev;
   5098 	struct sbuf     *buf;
   5099 	int             error = 0, reta_size;
   5100 	u32             reg;
   5101 
   5102 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5103 	if (!buf) {
   5104 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5105 		return (ENOMEM);
   5106 	}
   5107 
   5108 	// TODO: use sbufs to make a string to print out
   5109 	/* Set multiplier for RETA setup and table size based on MAC */
   5110 	switch (adapter->hw.mac.type) {
   5111 	case ixgbe_mac_X550:
   5112 	case ixgbe_mac_X550EM_x:
   5113 	case ixgbe_mac_X550EM_a:
   5114 		reta_size = 128;
   5115 		break;
   5116 	default:
   5117 		reta_size = 32;
   5118 		break;
   5119 	}
   5120 
   5121 	/* Print out the redirection table */
   5122 	sbuf_cat(buf, "\n");
   5123 	for (int i = 0; i < reta_size; i++) {
   5124 		if (i < 32) {
   5125 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5126 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5127 		} else {
   5128 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5129 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5130 		}
   5131 	}
   5132 
   5133 	// TODO: print more config
   5134 
   5135 	error = sbuf_finish(buf);
   5136 	if (error)
   5137 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5138 
   5139 	sbuf_delete(buf);
   5140 #endif
   5141 	return (0);
   5142 } /* ixgbe_sysctl_print_rss_config */
   5143 #endif /* IXGBE_DEBUG */
   5144 
   5145 /************************************************************************
   5146  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5147  *
   5148  *   For X552/X557-AT devices using an external PHY
   5149  ************************************************************************/
   5150 static int
   5151 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5152 {
   5153 	struct sysctlnode node = *rnode;
   5154 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5155 	struct ixgbe_hw *hw = &adapter->hw;
   5156 	int val;
   5157 	u16 reg;
   5158 	int		error;
   5159 
   5160 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5161 		device_printf(adapter->dev,
   5162 		    "Device has no supported external thermal sensor.\n");
   5163 		return (ENODEV);
   5164 	}
   5165 
   5166 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5167 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5168 		device_printf(adapter->dev,
   5169 		    "Error reading from PHY's current temperature register\n");
   5170 		return (EAGAIN);
   5171 	}
   5172 
   5173 	node.sysctl_data = &val;
   5174 
   5175 	/* Shift temp for output */
   5176 	val = reg >> 8;
   5177 
   5178 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5179 	if ((error) || (newp == NULL))
   5180 		return (error);
   5181 
   5182 	return (0);
   5183 } /* ixgbe_sysctl_phy_temp */
   5184 
   5185 /************************************************************************
   5186  * ixgbe_sysctl_phy_overtemp_occurred
   5187  *
   5188  *   Reports (directly from the PHY) whether the current PHY
   5189  *   temperature is over the overtemp threshold.
   5190  ************************************************************************/
   5191 static int
   5192 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5193 {
   5194 	struct sysctlnode node = *rnode;
   5195 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5196 	struct ixgbe_hw *hw = &adapter->hw;
   5197 	int val, error;
   5198 	u16 reg;
   5199 
   5200 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5201 		device_printf(adapter->dev,
   5202 		    "Device has no supported external thermal sensor.\n");
   5203 		return (ENODEV);
   5204 	}
   5205 
   5206 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5207 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5208 		device_printf(adapter->dev,
   5209 		    "Error reading from PHY's temperature status register\n");
   5210 		return (EAGAIN);
   5211 	}
   5212 
   5213 	node.sysctl_data = &val;
   5214 
   5215 	/* Get occurrence bit */
   5216 	val = !!(reg & 0x4000);
   5217 
   5218 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5219 	if ((error) || (newp == NULL))
   5220 		return (error);
   5221 
   5222 	return (0);
   5223 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5224 
   5225 /************************************************************************
   5226  * ixgbe_sysctl_eee_state
   5227  *
   5228  *   Sysctl to set EEE power saving feature
   5229  *   Values:
   5230  *     0      - disable EEE
   5231  *     1      - enable EEE
   5232  *     (none) - get current device EEE state
   5233  ************************************************************************/
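         /*
          * For example, writing 1 on an adapter without EEE capability
          * returns EINVAL; on a capable adapter the interface is
          * re-initialized so auto-negotiation restarts with the new setting.
          */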
   5234 static int
   5235 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5236 {
   5237 	struct sysctlnode node = *rnode;
   5238 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5239 	struct ifnet   *ifp = adapter->ifp;
   5240 	device_t       dev = adapter->dev;
   5241 	int            curr_eee, new_eee, error = 0;
   5242 	s32            retval;
   5243 
   5244 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5245 	node.sysctl_data = &new_eee;
   5246 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5247 	if ((error) || (newp == NULL))
   5248 		return (error);
   5249 
   5250 	/* Nothing to do */
   5251 	if (new_eee == curr_eee)
   5252 		return (0);
   5253 
   5254 	/* Not supported */
   5255 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5256 		return (EINVAL);
   5257 
   5258 	/* Bounds checking */
   5259 	if ((new_eee < 0) || (new_eee > 1))
   5260 		return (EINVAL);
   5261 
   5262 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5263 	if (retval) {
   5264 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5265 		return (EINVAL);
   5266 	}
   5267 
   5268 	/* Restart auto-neg */
   5269 	ixgbe_init(ifp);
   5270 
   5271 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5272 
   5273 	/* Cache new value */
   5274 	if (new_eee)
   5275 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5276 	else
   5277 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5278 
   5279 	return (error);
   5280 } /* ixgbe_sysctl_eee_state */
   5281 
   5282 /************************************************************************
   5283  * ixgbe_init_device_features
   5284  ************************************************************************/
   5285 static void
   5286 ixgbe_init_device_features(struct adapter *adapter)
   5287 {
   5288 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5289 	                  | IXGBE_FEATURE_RSS
   5290 	                  | IXGBE_FEATURE_MSI
   5291 	                  | IXGBE_FEATURE_MSIX
   5292 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5293 	                  | IXGBE_FEATURE_LEGACY_TX;
   5294 
   5295 	/* Set capabilities first... */
   5296 	switch (adapter->hw.mac.type) {
   5297 	case ixgbe_mac_82598EB:
   5298 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5299 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5300 		break;
   5301 	case ixgbe_mac_X540:
   5302 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5303 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5304 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5305 		    (adapter->hw.bus.func == 0))
   5306 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5307 		break;
   5308 	case ixgbe_mac_X550:
   5309 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5310 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5311 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5312 		break;
   5313 	case ixgbe_mac_X550EM_x:
   5314 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5315 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5316 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5317 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5318 		break;
   5319 	case ixgbe_mac_X550EM_a:
   5320 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5321 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5322 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5323 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5324 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5325 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5326 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5327 		}
   5328 		break;
   5329 	case ixgbe_mac_82599EB:
   5330 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5331 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5332 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5333 		    (adapter->hw.bus.func == 0))
   5334 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5335 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5336 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5337 		break;
   5338 	default:
   5339 		break;
   5340 	}
   5341 
   5342 	/* Enabled by default... */
   5343 	/* Fan failure detection */
   5344 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5345 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5346 	/* Netmap */
   5347 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5348 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5349 	/* EEE */
   5350 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5351 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5352 	/* Thermal Sensor */
   5353 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5354 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5355 
   5356 	/* Enabled via global sysctl... */
   5357 	/* Flow Director */
   5358 	if (ixgbe_enable_fdir) {
   5359 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5360 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5361 		else
    5362 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5363 	}
   5364 	/* Legacy (single queue) transmit */
   5365 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5366 	    ixgbe_enable_legacy_tx)
   5367 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5368 	/*
   5369 	 * Message Signal Interrupts - Extended (MSI-X)
   5370 	 * Normal MSI is only enabled if MSI-X calls fail.
   5371 	 */
   5372 	if (!ixgbe_enable_msix)
   5373 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5374 	/* Receive-Side Scaling (RSS) */
   5375 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5376 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5377 
   5378 	/* Disable features with unmet dependencies... */
   5379 	/* No MSI-X */
   5380 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5381 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5382 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5383 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5384 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5385 	}
   5386 } /* ixgbe_init_device_features */
   5387 
   5388 /************************************************************************
   5389  * ixgbe_probe - Device identification routine
   5390  *
   5391  *   Determines if the driver should be loaded on
    5392  *   the adapter based on its PCI vendor/device ID.
   5393  *
    5394  *   return 1 if the device matches, 0 otherwise
   5395  ************************************************************************/
   5396 static int
   5397 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5398 {
   5399 	const struct pci_attach_args *pa = aux;
   5400 
   5401 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5402 }
   5403 
   5404 static ixgbe_vendor_info_t *
   5405 ixgbe_lookup(const struct pci_attach_args *pa)
   5406 {
   5407 	ixgbe_vendor_info_t *ent;
   5408 	pcireg_t subid;
   5409 
   5410 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5411 
   5412 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5413 		return NULL;
   5414 
   5415 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5416 
   5417 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5418 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5419 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5420 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5421 			(ent->subvendor_id == 0)) &&
   5422 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5423 			(ent->subdevice_id == 0))) {
   5424 			++ixgbe_total_ports;
   5425 			return ent;
   5426 		}
   5427 	}
   5428 	return NULL;
   5429 }
   5430 
   5431 static int
   5432 ixgbe_ifflags_cb(struct ethercom *ec)
   5433 {
   5434 	struct ifnet *ifp = &ec->ec_if;
   5435 	struct adapter *adapter = ifp->if_softc;
   5436 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5437 
   5438 	IXGBE_CORE_LOCK(adapter);
   5439 
   5440 	if (change != 0)
   5441 		adapter->if_flags = ifp->if_flags;
   5442 
   5443 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5444 		rc = ENETRESET;
   5445 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5446 		ixgbe_set_promisc(adapter);
   5447 
   5448 	/* Set up VLAN support and filter */
   5449 	ixgbe_setup_vlan_hw_support(adapter);
   5450 
   5451 	IXGBE_CORE_UNLOCK(adapter);
   5452 
   5453 	return rc;
   5454 }
   5455 
   5456 /************************************************************************
   5457  * ixgbe_ioctl - Ioctl entry point
   5458  *
   5459  *   Called when the user wants to configure the interface.
   5460  *
   5461  *   return 0 on success, positive on failure
   5462  ************************************************************************/
   5463 static int
   5464 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5465 {
   5466 	struct adapter	*adapter = ifp->if_softc;
   5467 	struct ixgbe_hw *hw = &adapter->hw;
   5468 	struct ifcapreq *ifcr = data;
   5469 	struct ifreq	*ifr = data;
   5470 	int             error = 0;
   5471 	int l4csum_en;
    5472 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
    5473 	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   5474 
   5475 	switch (command) {
   5476 	case SIOCSIFFLAGS:
   5477 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5478 		break;
   5479 	case SIOCADDMULTI:
   5480 	case SIOCDELMULTI:
   5481 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5482 		break;
   5483 	case SIOCSIFMEDIA:
   5484 	case SIOCGIFMEDIA:
   5485 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5486 		break;
   5487 	case SIOCSIFCAP:
   5488 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5489 		break;
   5490 	case SIOCSIFMTU:
   5491 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5492 		break;
   5493 #ifdef __NetBSD__
   5494 	case SIOCINITIFADDR:
   5495 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5496 		break;
   5497 	case SIOCGIFFLAGS:
   5498 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5499 		break;
   5500 	case SIOCGIFAFLAG_IN:
   5501 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5502 		break;
   5503 	case SIOCGIFADDR:
   5504 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5505 		break;
   5506 	case SIOCGIFMTU:
   5507 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5508 		break;
   5509 	case SIOCGIFCAP:
   5510 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5511 		break;
   5512 	case SIOCGETHERCAP:
   5513 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5514 		break;
   5515 	case SIOCGLIFADDR:
   5516 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5517 		break;
   5518 	case SIOCZIFDATA:
   5519 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5520 		hw->mac.ops.clear_hw_cntrs(hw);
   5521 		ixgbe_clear_evcnt(adapter);
   5522 		break;
   5523 	case SIOCAIFADDR:
   5524 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5525 		break;
   5526 #endif
   5527 	default:
   5528 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5529 		break;
   5530 	}
   5531 
   5532 	switch (command) {
   5533 	case SIOCSIFMEDIA:
   5534 	case SIOCGIFMEDIA:
   5535 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5536 	case SIOCGI2C:
   5537 	{
   5538 		struct ixgbe_i2c_req	i2c;
   5539 
   5540 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5541 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5542 		if (error != 0)
   5543 			break;
   5544 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5545 			error = EINVAL;
   5546 			break;
   5547 		}
   5548 		if (i2c.len > sizeof(i2c.data)) {
   5549 			error = EINVAL;
   5550 			break;
   5551 		}
   5552 
   5553 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5554 		    i2c.dev_addr, i2c.data);
   5555 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5556 		break;
   5557 	}
   5558 	case SIOCSIFCAP:
   5559 		/* Layer-4 Rx checksum offload has to be turned on and
   5560 		 * off as a unit.
   5561 		 */
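         		/*
         		 * For example, enabling IFCAP_CSUM_TCPv4_Rx also
         		 * requires enabling the UDPv4, TCPv6 and UDPv6 receive
         		 * checksums; partial combinations are rejected below.
         		 */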
   5562 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5563 		if (l4csum_en != l4csum && l4csum_en != 0)
   5564 			return EINVAL;
   5565 		/*FALLTHROUGH*/
   5566 	case SIOCADDMULTI:
   5567 	case SIOCDELMULTI:
   5568 	case SIOCSIFFLAGS:
   5569 	case SIOCSIFMTU:
   5570 	default:
   5571 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5572 			return error;
   5573 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5574 			;
   5575 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5576 			IXGBE_CORE_LOCK(adapter);
   5577 			ixgbe_init_locked(adapter);
   5578 			ixgbe_recalculate_max_frame(adapter);
   5579 			IXGBE_CORE_UNLOCK(adapter);
   5580 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5581 			/*
   5582 			 * Multicast list has changed; set the hardware filter
   5583 			 * accordingly.
   5584 			 */
   5585 			IXGBE_CORE_LOCK(adapter);
   5586 			ixgbe_disable_intr(adapter);
   5587 			ixgbe_set_multi(adapter);
   5588 			ixgbe_enable_intr(adapter);
   5589 			IXGBE_CORE_UNLOCK(adapter);
   5590 		}
   5591 		return 0;
   5592 	}
   5593 
   5594 	return error;
   5595 } /* ixgbe_ioctl */
   5596 
   5597 /************************************************************************
   5598  * ixgbe_check_fan_failure
   5599  ************************************************************************/
   5600 static void
   5601 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5602 {
   5603 	u32 mask;
   5604 
   5605 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5606 	    IXGBE_ESDP_SDP1;
   5607 
   5608 	if (reg & mask)
   5609 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5610 } /* ixgbe_check_fan_failure */
   5611 
   5612 /************************************************************************
   5613  * ixgbe_handle_que
   5614  ************************************************************************/
   5615 static void
   5616 ixgbe_handle_que(void *context)
   5617 {
   5618 	struct ix_queue *que = context;
   5619 	struct adapter  *adapter = que->adapter;
   5620 	struct tx_ring  *txr = que->txr;
   5621 	struct ifnet    *ifp = adapter->ifp;
   5622 
   5623 	adapter->handleq.ev_count++;
   5624 
   5625 	if (ifp->if_flags & IFF_RUNNING) {
   5626 		ixgbe_rxeof(que);
   5627 		IXGBE_TX_LOCK(txr);
   5628 		ixgbe_txeof(txr);
   5629 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5630 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5631 				ixgbe_mq_start_locked(ifp, txr);
   5632 		/* Only for queue 0 */
   5633 		/* NetBSD still needs this for CBQ */
   5634 		if ((&adapter->queues[0] == que)
   5635 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5636 			ixgbe_legacy_start_locked(ifp, txr);
   5637 		IXGBE_TX_UNLOCK(txr);
   5638 	}
   5639 
   5640 	/* Re-enable this interrupt */
   5641 	if (que->res != NULL)
   5642 		ixgbe_enable_queue(adapter, que->msix);
   5643 	else
   5644 		ixgbe_enable_intr(adapter);
   5645 
   5646 	return;
   5647 } /* ixgbe_handle_que */
   5648 
   5649 /************************************************************************
   5650  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5651  ************************************************************************/
   5652 static int
   5653 ixgbe_allocate_legacy(struct adapter *adapter,
   5654     const struct pci_attach_args *pa)
   5655 {
   5656 	device_t	dev = adapter->dev;
   5657 	struct ix_queue *que = adapter->queues;
   5658 	struct tx_ring  *txr = adapter->tx_rings;
   5659 	int		counts[PCI_INTR_TYPE_SIZE];
   5660 	pci_intr_type_t intr_type, max_type;
   5661 	char            intrbuf[PCI_INTRSTR_LEN];
   5662 	const char	*intrstr = NULL;
   5663 
   5664 	/* We allocate a single interrupt resource */
   5665 	max_type = PCI_INTR_TYPE_MSI;
   5666 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5667 	counts[PCI_INTR_TYPE_MSI] =
   5668 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5669 	counts[PCI_INTR_TYPE_INTX] =
   5670 	    (adapter->feat_en & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5671 
   5672 alloc_retry:
   5673 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5674 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5675 		return ENXIO;
   5676 	}
   5677 	adapter->osdep.nintrs = 1;
   5678 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5679 	    intrbuf, sizeof(intrbuf));
   5680 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5681 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5682 	    device_xname(dev));
   5683 	if (adapter->osdep.ihs[0] == NULL) {
   5684 		intr_type = pci_intr_type(adapter->osdep.pc,
   5685 		    adapter->osdep.intrs[0]);
   5686 		aprint_error_dev(dev,"unable to establish %s\n",
   5687 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5688 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5689 		switch (intr_type) {
   5690 		case PCI_INTR_TYPE_MSI:
   5691 			/* The next try is for INTx: Disable MSI */
   5692 			max_type = PCI_INTR_TYPE_INTX;
   5693 			counts[PCI_INTR_TYPE_INTX] = 1;
   5694 			goto alloc_retry;
   5695 		case PCI_INTR_TYPE_INTX:
   5696 		default:
   5697 			/* See below */
   5698 			break;
   5699 		}
   5700 	}
   5701 	if (adapter->osdep.ihs[0] == NULL) {
   5702 		aprint_error_dev(dev,
   5703 		    "couldn't establish interrupt%s%s\n",
   5704 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5705 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5706 		return ENXIO;
   5707 	}
   5708 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5709 	/*
   5710 	 * Try allocating a fast interrupt and the associated deferred
   5711 	 * processing contexts.
   5712 	 */
   5713 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5714 		txr->txr_si =
   5715 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5716 			ixgbe_deferred_mq_start, txr);
   5717 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5718 	    ixgbe_handle_que, que);
   5719 
   5720 	/* Tasklets for Link, SFP and Multispeed Fiber */
   5721 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   5722 	    ixgbe_handle_link, adapter);
   5723 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5724 	    ixgbe_handle_mod, adapter);
   5725 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5726 	    ixgbe_handle_msf, adapter);
   5727 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5728 	    ixgbe_handle_phy, adapter);
   5729 
   5730 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5731 		adapter->fdir_si =
   5732 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5733 			ixgbe_reinit_fdir, adapter);
   5734 
    5735 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) &&
   5736 		(txr->txr_si == NULL)) ||
   5737 	    que->que_si == NULL ||
   5738 	    adapter->link_si == NULL ||
   5739 	    adapter->mod_si == NULL ||
    5740 	    ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   5741 		(adapter->fdir_si == NULL)) ||
   5742 	    adapter->msf_si == NULL) {
   5743 		aprint_error_dev(dev,
   5744 		    "could not establish software interrupts\n");
   5745 
   5746 		return ENXIO;
   5747 	}
   5748 	/* For simplicity in the handlers */
   5749 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5750 
   5751 	return (0);
   5752 } /* ixgbe_allocate_legacy */
   5753 
   5754 
   5755 /************************************************************************
   5756  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5757  ************************************************************************/
   5758 static int
   5759 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5760 {
   5761 	device_t        dev = adapter->dev;
    5762 	struct ix_queue	*que = adapter->queues;
    5763 	struct tx_ring	*txr = adapter->tx_rings;
   5764 	pci_chipset_tag_t pc;
   5765 	char		intrbuf[PCI_INTRSTR_LEN];
   5766 	char		intr_xname[32];
   5767 	const char	*intrstr = NULL;
   5768 	int 		error, vector = 0;
   5769 	int		cpu_id = 0;
   5770 	kcpuset_t	*affinity;
   5771 #ifdef RSS
   5772 	unsigned int    rss_buckets = 0;
   5773 	kcpuset_t	cpu_mask;
   5774 #endif
   5775 
   5776 	pc = adapter->osdep.pc;
   5777 #ifdef	RSS
   5778 	/*
   5779 	 * If we're doing RSS, the number of queues needs to
   5780 	 * match the number of RSS buckets that are configured.
   5781 	 *
   5782 	 * + If there's more queues than RSS buckets, we'll end
   5783 	 *   up with queues that get no traffic.
   5784 	 *
   5785 	 * + If there's more RSS buckets than queues, we'll end
   5786 	 *   up having multiple RSS buckets map to the same queue,
   5787 	 *   so there'll be some contention.
   5788 	 */
   5789 	rss_buckets = rss_getnumbuckets();
   5790 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5791 	    (adapter->num_queues != rss_buckets)) {
   5792 		device_printf(dev,
   5793 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5794 		    "; performance will be impacted.\n",
   5795 		    __func__, adapter->num_queues, rss_buckets);
   5796 	}
   5797 #endif
   5798 
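         	/* One MSI-X vector per queue plus one for link/admin events. */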
   5799 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5800 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5801 	    adapter->osdep.nintrs) != 0) {
   5802 		aprint_error_dev(dev,
   5803 		    "failed to allocate MSI-X interrupt\n");
   5804 		return (ENXIO);
   5805 	}
   5806 
   5807 	kcpuset_create(&affinity, false);
   5808 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5809 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5810 		    device_xname(dev), i);
   5811 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5812 		    sizeof(intrbuf));
   5813 #ifdef IXGBE_MPSAFE
   5814 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5815 		    true);
   5816 #endif
   5817 		/* Set the handler function */
   5818 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5819 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5820 		    intr_xname);
   5821 		if (que->res == NULL) {
   5822 			pci_intr_release(pc, adapter->osdep.intrs,
   5823 			    adapter->osdep.nintrs);
   5824 			aprint_error_dev(dev,
   5825 			    "Failed to register QUE handler\n");
   5826 			kcpuset_destroy(affinity);
   5827 			return ENXIO;
   5828 		}
   5829 		que->msix = vector;
    5830 		adapter->active_queues |= (u64)1 << que->msix;
   5831 
   5832 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5833 #ifdef	RSS
   5834 			/*
   5835 			 * The queue ID is used as the RSS layer bucket ID.
   5836 			 * We look up the queue ID -> RSS CPU ID and select
   5837 			 * that.
   5838 			 */
   5839 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5840 			CPU_SETOF(cpu_id, &cpu_mask);
   5841 #endif
   5842 		} else {
   5843 			/*
   5844 			 * Bind the MSI-X vector, and thus the
   5845 			 * rings to the corresponding CPU.
   5846 			 *
   5847 			 * This just happens to match the default RSS
   5848 			 * round-robin bucket -> queue -> CPU allocation.
   5849 			 */
   5850 			if (adapter->num_queues > 1)
   5851 				cpu_id = i;
   5852 		}
   5853 		/* Round-robin affinity */
   5854 		kcpuset_zero(affinity);
   5855 		kcpuset_set(affinity, cpu_id % ncpu);
   5856 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5857 		    NULL);
   5858 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5859 		    intrstr);
   5860 		if (error == 0) {
   5861 #if 1 /* def IXGBE_DEBUG */
   5862 #ifdef	RSS
    5863 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   5864 			    cpu_id % ncpu);
   5865 #else
   5866 			aprint_normal(", bound queue %d to cpu %d", i,
   5867 			    cpu_id % ncpu);
   5868 #endif
   5869 #endif /* IXGBE_DEBUG */
   5870 		}
   5871 		aprint_normal("\n");
   5872 
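		/*
		 * Per-queue softints: one for deferred transmit starts
		 * (unless legacy TX is used) and one to run the RX/TX
		 * handler outside of hard interrupt context.
		 */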
   5873 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5874 			txr->txr_si = softint_establish(
   5875 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5876 				ixgbe_deferred_mq_start, txr);
   5877 		que->que_si
   5878 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5879 			ixgbe_handle_que, que);
   5880 		if (que->que_si == NULL) {
   5881 			aprint_error_dev(dev,
   5882 			    "could not establish software interrupt\n");
   5883 		}
   5884 	}
   5885 
    5886 	/* Now the vector for link state and other admin interrupts */
   5887 	cpu_id++;
   5888 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5889 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5890 	    sizeof(intrbuf));
   5891 #ifdef IXGBE_MPSAFE
   5892 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5893 	    true);
   5894 #endif
   5895 	/* Set the link handler function */
   5896 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5897 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   5898 	    intr_xname);
   5899 	if (adapter->osdep.ihs[vector] == NULL) {
   5900 		adapter->res = NULL;
   5901 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   5902 		kcpuset_destroy(affinity);
   5903 		return (ENXIO);
   5904 	}
   5905 	/* Round-robin affinity */
   5906 	kcpuset_zero(affinity);
   5907 	kcpuset_set(affinity, cpu_id % ncpu);
    5908 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
	    NULL);
   5909 
   5910 	aprint_normal_dev(dev,
   5911 	    "for link, interrupting at %s", intrstr);
   5912 	if (error == 0)
   5913 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   5914 	else
   5915 		aprint_normal("\n");
   5916 
   5917 	adapter->vector = vector;
    5918 	/* Softints for link, SFP module, multispeed fiber, PHY, etc. */
    5919 	adapter->link_si = softint_establish(
    5920 	    SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, ixgbe_handle_link, adapter);
   5921 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5922 	    ixgbe_handle_mod, adapter);
   5923 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5924 	    ixgbe_handle_msf, adapter);
   5925 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   5926 		adapter->mbx_si =
   5927 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5928 			ixgbe_handle_mbx, adapter);
   5929 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5930 		ixgbe_handle_phy, adapter);
   5931 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5932 		adapter->fdir_si =
   5933 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5934 			ixgbe_reinit_fdir, adapter);
   5935 
   5936 	kcpuset_destroy(affinity);
   5937 
   5938 	return (0);
   5939 } /* ixgbe_allocate_msix */
   5940 
   5941 /************************************************************************
   5942  * ixgbe_configure_interrupts
   5943  *
    5944  *   Set up MSI-X, MSI, or legacy interrupts, in that order.
    5945  *   The result also depends on user-tunable settings.
   5946  ************************************************************************/
   5947 static int
   5948 ixgbe_configure_interrupts(struct adapter *adapter)
   5949 {
   5950 	device_t dev = adapter->dev;
   5951 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   5952 	int want, queues, msgs;
   5953 
   5954 	/* Default to 1 queue if MSI-X setup fails */
   5955 	adapter->num_queues = 1;
   5956 
    5957 	/* Skip MSI-X if the capability was cleared (e.g. by a tunable) */
   5958 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   5959 		goto msi;
   5960 
   5961 	/* First try MSI-X */
   5962 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   5963 	msgs = MIN(msgs, IXG_MAX_NINTR);
   5964 	if (msgs < 2)
   5965 		goto msi;
   5966 
   5967 	adapter->msix_mem = (void *)1; /* XXX */
   5968 
   5969 	/* Figure out a reasonable auto config value */
   5970 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   5971 
   5972 #ifdef	RSS
   5973 	/* If we're doing RSS, clamp at the number of RSS buckets */
   5974 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   5975 		queues = min(queues, rss_getnumbuckets());
   5976 #endif
   5977 	if (ixgbe_num_queues > queues) {
    5978 		aprint_error_dev(adapter->dev,
		    "ixgbe_num_queues (%d) is too large, "
		    "using reduced amount (%d).\n", ixgbe_num_queues, queues);
   5979 		ixgbe_num_queues = queues;
   5980 	}
   5981 
   5982 	if (ixgbe_num_queues != 0)
   5983 		queues = ixgbe_num_queues;
   5984 	else
   5985 		queues = min(queues,
   5986 		    min(mac->max_tx_queues, mac->max_rx_queues));
   5987 
   5988 	/* reflect correct sysctl value */
   5989 	ixgbe_num_queues = queues;
   5990 
   5991 	/*
   5992 	 * Want one vector (RX/TX pair) per queue
   5993 	 * plus an additional for Link.
   5994 	 */
   5995 	want = queues + 1;
   5996 	if (msgs >= want)
   5997 		msgs = want;
   5998 	else {
    5999 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6000 		    "%d vectors but %d queues wanted!\n",
   6001 		    msgs, want);
   6002 		goto msi;
   6003 	}
   6004 	device_printf(dev,
   6005 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   6006 	adapter->num_queues = queues;
   6007 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6008 	return (0);
   6009 
   6010 	/*
    6011 	 * MSI-X allocation failed or provided fewer vectors than
    6012 	 * needed.  Free the MSI-X resources and fall back to
    6013 	 * trying MSI.
   6014 	 */
   6015 msi:
   6016 	/* Without MSI-X, some features are no longer supported */
   6017 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6018 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6019 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6020 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6021 
    6022 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6023 	adapter->msix_mem = NULL; /* XXX */
   6024 	if (msgs > 1)
   6025 		msgs = 1;
   6026 	if (msgs != 0) {
   6027 		msgs = 1;
   6028 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6029 		aprint_normal_dev(dev, "Using an MSI interrupt\n");
   6030 		return (0);
   6031 	}
   6032 
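	/* Last resort: fall back to a single legacy (INTx) interrupt */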
   6033 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6034 		aprint_error_dev(dev,
   6035 		    "Device does not support legacy interrupts.\n");
   6036 		return 1;
   6037 	}
   6038 
   6039 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6040 	aprint_normal_dev(dev, "Using a Legacy interrupt\n");
   6041 
   6042 	return (0);
   6043 } /* ixgbe_configure_interrupts */
   6044 
   6045 
   6046 /************************************************************************
   6047  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6048  *
   6049  *   Done outside of interrupt context since the driver might sleep
   6050  ************************************************************************/
   6051 static void
   6052 ixgbe_handle_link(void *context)
   6053 {
   6054 	struct adapter  *adapter = context;
   6055 	struct ixgbe_hw *hw = &adapter->hw;
   6056 
   6057 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6058 	ixgbe_update_link_status(adapter);
   6059 
   6060 	/* Re-enable link interrupts */
   6061 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6062 } /* ixgbe_handle_link */
   6063 
   6064 /************************************************************************
   6065  * ixgbe_rearm_queues
   6066  ************************************************************************/
   6067 static void
   6068 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6069 {
   6070 	u32 mask;
   6071 
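	/*
	 * 82598 exposes a single 32-bit EICS register; later MACs split
	 * the 64 possible queue bits across EICS_EX(0) and EICS_EX(1).
	 */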
   6072 	switch (adapter->hw.mac.type) {
   6073 	case ixgbe_mac_82598EB:
   6074 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6075 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6076 		break;
   6077 	case ixgbe_mac_82599EB:
   6078 	case ixgbe_mac_X540:
   6079 	case ixgbe_mac_X550:
   6080 	case ixgbe_mac_X550EM_x:
   6081 	case ixgbe_mac_X550EM_a:
   6082 		mask = (queues & 0xFFFFFFFF);
   6083 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6084 		mask = (queues >> 32);
   6085 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6086 		break;
   6087 	default:
   6088 		break;
   6089 	}
   6090 } /* ixgbe_rearm_queues */
   6091