ixgbe.c revision 1.115
      1 /* $NetBSD: ixgbe.c,v 1.115 2017/12/06 04:08:50 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
      88  *   Used by probe to select which devices to attach to
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void	ixgbe_get_slot_info(struct adapter *);
    176 static int      ixgbe_allocate_msix(struct adapter *,
    177 		    const struct pci_attach_args *);
    178 static int      ixgbe_allocate_legacy(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_configure_interrupts(struct adapter *);
    181 static void	ixgbe_free_pci_resources(struct adapter *);
    182 static void	ixgbe_local_timer(void *);
    183 static void	ixgbe_local_timer1(void *);
    184 static int	ixgbe_setup_interface(device_t, struct adapter *);
    185 static void	ixgbe_config_gpie(struct adapter *);
    186 static void	ixgbe_config_dmac(struct adapter *);
    187 static void	ixgbe_config_delay_values(struct adapter *);
    188 static void	ixgbe_config_link(struct adapter *);
    189 static void	ixgbe_check_wol_support(struct adapter *);
    190 static int	ixgbe_setup_low_power_mode(struct adapter *);
    191 static void	ixgbe_rearm_queues(struct adapter *, u64);
    192 
    193 static void     ixgbe_initialize_transmit_units(struct adapter *);
    194 static void     ixgbe_initialize_receive_units(struct adapter *);
    195 static void	ixgbe_enable_rx_drop(struct adapter *);
    196 static void	ixgbe_disable_rx_drop(struct adapter *);
    197 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    198 
    199 static void     ixgbe_enable_intr(struct adapter *);
    200 static void     ixgbe_disable_intr(struct adapter *);
    201 static void     ixgbe_update_stats_counters(struct adapter *);
    202 static void     ixgbe_set_promisc(struct adapter *);
    203 static void     ixgbe_set_multi(struct adapter *);
    204 static void     ixgbe_update_link_status(struct adapter *);
    205 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    206 static void	ixgbe_configure_ivars(struct adapter *);
    207 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    208 
    209 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    210 #if 0
    211 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    212 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    213 #endif
    214 
    215 static void	ixgbe_add_device_sysctls(struct adapter *);
    216 static void     ixgbe_add_hw_stats(struct adapter *);
    217 static void	ixgbe_clear_evcnt(struct adapter *);
    218 static int	ixgbe_set_flowcntl(struct adapter *, int);
    219 static int	ixgbe_set_advertise(struct adapter *, int);
    220 static int      ixgbe_get_advertise(struct adapter *);
    221 
    222 /* Sysctl handlers */
    223 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    224 		     const char *, int *, int);
    225 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    226 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    227 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    231 #ifdef IXGBE_DEBUG
    232 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    234 #endif
    235 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    236 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    237 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    242 
    243 /* Support for pluggable optic modules */
    244 static bool	ixgbe_sfp_probe(struct adapter *);
    245 
    246 /* Legacy (single vector) interrupt handler */
    247 static int	ixgbe_legacy_irq(void *);
    248 
    249 /* The MSI/MSI-X Interrupt handlers */
    250 static int	ixgbe_msix_que(void *);
    251 static int	ixgbe_msix_link(void *);
    252 
    253 /* Software interrupts for deferred work */
    254 static void	ixgbe_handle_que(void *);
    255 static void	ixgbe_handle_link(void *);
    256 static void	ixgbe_handle_msf(void *);
    257 static void	ixgbe_handle_mod(void *);
    258 static void	ixgbe_handle_phy(void *);
    259 
    260 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    261 
    262 /************************************************************************
    263  *  NetBSD Device Interface Entry Points
    264  ************************************************************************/
    265 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    266     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    267     DVF_DETACH_SHUTDOWN);
    268 
    269 #if 0
    270 devclass_t ix_devclass;
    271 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    272 
    273 MODULE_DEPEND(ix, pci, 1, 1, 1);
    274 MODULE_DEPEND(ix, ether, 1, 1, 1);
    275 #ifdef DEV_NETMAP
    276 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    277 #endif
    278 #endif
    279 
    280 /*
    281  * TUNEABLE PARAMETERS:
    282  */
    283 
     284 /*
     285  * AIM: Adaptive Interrupt Moderation, which means that the
     286  * interrupt rate is varied over time based on the traffic
     287  * seen on that interrupt vector.
     288  */
    290 static bool ixgbe_enable_aim = true;
    291 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
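         /*
          * Note: on NetBSD the FreeBSD-style SYSCTL_INT()/TUNABLE_INT()
          * declarations below are compiled out by the empty macro definitions
          * in this file; the variables still supply the defaults, and the
          * runtime knobs are created later via ixgbe_add_device_sysctls()
          * and ixgbe_set_sysctl_value().
          */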
    292 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    293     "Enable adaptive interrupt moderation");
    294 
    295 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    296 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    297     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
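         /*
          * With the stock IXGBE_LOW_LATENCY value (128 in this driver's
          * headers, if unchanged) the default above works out to roughly
          * 31250 interrupts per second per vector.
          */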
    298 
    299 /* How many packets rxeof tries to clean at a time */
    300 static int ixgbe_rx_process_limit = 256;
    301 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    302     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    303 
    304 /* How many packets txeof tries to clean at a time */
    305 static int ixgbe_tx_process_limit = 256;
    306 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    307     &ixgbe_tx_process_limit, 0,
    308     "Maximum number of sent packets to process at a time, -1 means unlimited");
    309 
    310 /* Flow control setting, default to full */
    311 static int ixgbe_flow_control = ixgbe_fc_full;
    312 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    313     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    314 
     315 /*
     316  * Smart speed setting, default to on.
     317  * This currently only works as a compile-time option since it is
     318  * applied during attach; set it to 'ixgbe_smart_speed_off' to
     319  * disable.
     320  */
    322 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    323 
    324 /*
    325  * MSI-X should be the default for best performance,
    326  * but this allows it to be forced off for testing.
    327  */
    328 static int ixgbe_enable_msix = 1;
    329 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    330     "Enable MSI-X interrupts");
    331 
     332 /*
     333  * Number of queues. When set to 0 the driver autoconfigures the
     334  * count based on the number of CPUs, with a maximum of 8. It can
     335  * be overridden manually here.
     336  */
    338 static int ixgbe_num_queues = 0;
    339 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    340     "Number of queues to configure, 0 indicates autoconfigure");
    341 
    342 /*
    343  * Number of TX descriptors per ring,
    344  * setting higher than RX as this seems
    345  * the better performing choice.
    346  */
    347 static int ixgbe_txd = PERFORM_TXD;
    348 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    349     "Number of transmit descriptors per queue");
    350 
    351 /* Number of RX descriptors per ring */
    352 static int ixgbe_rxd = PERFORM_RXD;
    353 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    354     "Number of receive descriptors per queue");
    355 
     356 /*
     357  * Setting this allows the use of unsupported SFP+ modules;
     358  * note that in doing so you are on your own :)
     359  */
    361 static int allow_unsupported_sfp = false;
    362 #define TUNABLE_INT(__x, __y)
    363 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    364 
    365 /*
    366  * Not sure if Flow Director is fully baked,
    367  * so we'll default to turning it off.
    368  */
    369 static int ixgbe_enable_fdir = 0;
    370 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    371     "Enable Flow Director");
    372 
    373 /* Legacy Transmit (single queue) */
    374 static int ixgbe_enable_legacy_tx = 0;
    375 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    376     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    377 
    378 /* Receive-Side Scaling */
    379 static int ixgbe_enable_rss = 1;
    380 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    381     "Enable Receive-Side Scaling (RSS)");
    382 
     383 /* Keep a running tab on them as a sanity check */
    384 static int ixgbe_total_ports;
    385 
    386 #if 0
    387 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    388 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    389 #endif
    390 
    391 #ifdef NET_MPSAFE
    392 #define IXGBE_MPSAFE		1
    393 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    394 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    395 #else
    396 #define IXGBE_CALLOUT_FLAGS	0
    397 #define IXGBE_SOFTINFT_FLAGS	0
    398 #endif
    399 
    400 /************************************************************************
    401  * ixgbe_initialize_rss_mapping
    402  ************************************************************************/
    403 static void
    404 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    405 {
    406 	struct ixgbe_hw	*hw = &adapter->hw;
    407 	u32             reta = 0, mrqc, rss_key[10];
    408 	int             queue_id, table_size, index_mult;
    409 	int             i, j;
    410 	u32             rss_hash_config;
    411 
    412 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    413 		/* Fetch the configured RSS key */
    414 		rss_getkey((uint8_t *) &rss_key);
    415 	} else {
    416 		/* set up random bits */
    417 		cprng_fast(&rss_key, sizeof(rss_key));
    418 	}
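         	/*
         	 * The 40-byte (10 x 32-bit word) key gathered above is loaded
         	 * into the RSSRK registers further down.
         	 */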
    419 
    420 	/* Set multiplier for RETA setup and table size based on MAC */
    421 	index_mult = 0x1;
    422 	table_size = 128;
    423 	switch (adapter->hw.mac.type) {
    424 	case ixgbe_mac_82598EB:
    425 		index_mult = 0x11;
    426 		break;
    427 	case ixgbe_mac_X550:
    428 	case ixgbe_mac_X550EM_x:
    429 	case ixgbe_mac_X550EM_a:
    430 		table_size = 512;
    431 		break;
    432 	default:
    433 		break;
    434 	}
    435 
    436 	/* Set up the redirection table */
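         	/*
         	 * Each 32-bit RETA/ERETA register holds four 8-bit queue
         	 * indices, so the loop below accumulates entries in 'reta'
         	 * and flushes one register every fourth iteration; indices
         	 * beyond 128 go to the extended ERETA registers.
         	 */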
    437 	for (i = 0, j = 0; i < table_size; i++, j++) {
    438 		if (j == adapter->num_queues)
    439 			j = 0;
    440 
    441 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    442 			/*
    443 			 * Fetch the RSS bucket id for the given indirection
    444 			 * entry. Cap it at the number of configured buckets
    445 			 * (which is num_queues.)
    446 			 */
    447 			queue_id = rss_get_indirection_to_bucket(i);
    448 			queue_id = queue_id % adapter->num_queues;
    449 		} else
    450 			queue_id = (j * index_mult);
    451 
    452 		/*
    453 		 * The low 8 bits are for hash value (n+0);
    454 		 * The next 8 bits are for hash value (n+1), etc.
    455 		 */
    456 		reta = reta >> 8;
    457 		reta = reta | (((uint32_t) queue_id) << 24);
    458 		if ((i & 3) == 3) {
    459 			if (i < 128)
    460 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    461 			else
    462 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    463 				    reta);
    464 			reta = 0;
    465 		}
    466 	}
    467 
    468 	/* Now fill our hash function seeds */
    469 	for (i = 0; i < 10; i++)
    470 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    471 
    472 	/* Perform hash on these packet types */
    473 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    474 		rss_hash_config = rss_gethashconfig();
    475 	else {
    476 		/*
    477 		 * Disable UDP - IP fragments aren't currently being handled
    478 		 * and so we end up with a mix of 2-tuple and 4-tuple
    479 		 * traffic.
    480 		 */
    481 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    482 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    483 		                | RSS_HASHTYPE_RSS_IPV6
    484 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    485 		                | RSS_HASHTYPE_RSS_IPV6_EX
    486 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    487 	}
    488 
    489 	mrqc = IXGBE_MRQC_RSSEN;
    490 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    491 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    492 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    493 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    494 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    495 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    496 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    497 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    503 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    504 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    505 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    506 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    507 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    508 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    509 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    510 } /* ixgbe_initialize_rss_mapping */
    511 
    512 /************************************************************************
    513  * ixgbe_initialize_receive_units - Setup receive registers and features.
    514  ************************************************************************/
    515 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
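         /*
          * BSIZEPKT_ROUNDUP rounds the receive buffer size up so that the
          * SRRCTL BSIZEPKT field (expressed in units of
          * 1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT bytes, i.e. 1 KB with the usual
          * shift of 10) always covers the whole mbuf buffer.
          */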
    516 
    517 static void
    518 ixgbe_initialize_receive_units(struct adapter *adapter)
    519 {
    520 	struct	rx_ring	*rxr = adapter->rx_rings;
    521 	struct ixgbe_hw	*hw = &adapter->hw;
    522 	struct ifnet    *ifp = adapter->ifp;
    523 	int             i, j;
    524 	u32		bufsz, fctrl, srrctl, rxcsum;
    525 	u32		hlreg;
    526 
    527 	/*
    528 	 * Make sure receives are disabled while
    529 	 * setting up the descriptor ring
    530 	 */
    531 	ixgbe_disable_rx(hw);
    532 
    533 	/* Enable broadcasts */
    534 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    535 	fctrl |= IXGBE_FCTRL_BAM;
    536 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    537 		fctrl |= IXGBE_FCTRL_DPF;
    538 		fctrl |= IXGBE_FCTRL_PMCF;
    539 	}
    540 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    541 
    542 	/* Set for Jumbo Frames? */
    543 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    544 	if (ifp->if_mtu > ETHERMTU)
    545 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    546 	else
    547 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    548 
    549 #ifdef DEV_NETMAP
    550 	/* CRC stripping is conditional in Netmap */
    551 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    552 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    553 	    !ix_crcstrip)
    554 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    555 	else
    556 #endif /* DEV_NETMAP */
    557 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    558 
    559 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    560 
    561 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    562 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    563 
    564 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    565 		u64 rdba = rxr->rxdma.dma_paddr;
    566 		u32 tqsmreg, reg;
    567 		int regnum = i / 4;	/* 1 register per 4 queues */
     568 		int regshift = i % 4;	/* 8 bits per queue */
    569 		j = rxr->me;
    570 
    571 		/* Setup the Base and Length of the Rx Descriptor Ring */
    572 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    573 		    (rdba & 0x00000000ffffffffULL));
    574 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    575 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    576 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    577 
    578 		/* Set up the SRRCTL register */
    579 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    580 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    581 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    582 		srrctl |= bufsz;
    583 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    584 
    585 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    586 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    587 		reg &= ~(0x000000ff << (regshift * 8));
    588 		reg |= i << (regshift * 8);
    589 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    590 
     591 		/*
     592 		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
     593 		 * The register location for queues 0...7 differs between
     594 		 * the 82598 and newer MACs.
     595 		 */
    596 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    597 			tqsmreg = IXGBE_TQSMR(regnum);
    598 		else
    599 			tqsmreg = IXGBE_TQSM(regnum);
    600 		reg = IXGBE_READ_REG(hw, tqsmreg);
    601 		reg &= ~(0x000000ff << (regshift * 8));
    602 		reg |= i << (regshift * 8);
    603 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    604 
    605 		/*
    606 		 * Set DROP_EN iff we have no flow control and >1 queue.
    607 		 * Note that srrctl was cleared shortly before during reset,
    608 		 * so we do not need to clear the bit, but do it just in case
    609 		 * this code is moved elsewhere.
    610 		 */
    611 		if (adapter->num_queues > 1 &&
    612 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    613 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    614 		} else {
    615 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    616 		}
    617 
    618 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    619 
    620 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    621 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    622 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    623 
    624 		/* Set the driver rx tail address */
    625 		rxr->tail =  IXGBE_RDT(rxr->me);
    626 	}
    627 
    628 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    629 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    630 		            | IXGBE_PSRTYPE_UDPHDR
    631 		            | IXGBE_PSRTYPE_IPV4HDR
    632 		            | IXGBE_PSRTYPE_IPV6HDR;
    633 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    634 	}
    635 
    636 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    637 
    638 	ixgbe_initialize_rss_mapping(adapter);
    639 
    640 	if (adapter->num_queues > 1) {
    641 		/* RSS and RX IPP Checksum are mutually exclusive */
    642 		rxcsum |= IXGBE_RXCSUM_PCSD;
    643 	}
    644 
    645 	if (ifp->if_capenable & IFCAP_RXCSUM)
    646 		rxcsum |= IXGBE_RXCSUM_PCSD;
    647 
    648 	/* This is useful for calculating UDP/IP fragment checksums */
    649 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    650 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    651 
    652 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    653 
    654 	return;
    655 } /* ixgbe_initialize_receive_units */
    656 
    657 /************************************************************************
    658  * ixgbe_initialize_transmit_units - Enable transmit units.
    659  ************************************************************************/
    660 static void
    661 ixgbe_initialize_transmit_units(struct adapter *adapter)
    662 {
    663 	struct tx_ring  *txr = adapter->tx_rings;
    664 	struct ixgbe_hw	*hw = &adapter->hw;
    665 
    666 	/* Setup the Base and Length of the Tx Descriptor Ring */
    667 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    668 		u64 tdba = txr->txdma.dma_paddr;
    669 		u32 txctrl = 0;
    670 		int j = txr->me;
    671 
    672 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    673 		    (tdba & 0x00000000ffffffffULL));
    674 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    675 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    676 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    677 
    678 		/* Setup the HW Tx Head and Tail descriptor pointers */
    679 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    681 
    682 		/* Cache the tail address */
    683 		txr->tail = IXGBE_TDT(j);
    684 
    685 		/* Disable Head Writeback */
    686 		/*
    687 		 * Note: for X550 series devices, these registers are actually
     688 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    689 		 * fields remain the same.
    690 		 */
    691 		switch (hw->mac.type) {
    692 		case ixgbe_mac_82598EB:
    693 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    694 			break;
    695 		default:
    696 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    697 			break;
    698 		}
    699 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    700 		switch (hw->mac.type) {
    701 		case ixgbe_mac_82598EB:
    702 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    703 			break;
    704 		default:
    705 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    706 			break;
    707 		}
    708 
    709 	}
    710 
    711 	if (hw->mac.type != ixgbe_mac_82598EB) {
    712 		u32 dmatxctl, rttdcs;
    713 
    714 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    715 		dmatxctl |= IXGBE_DMATXCTL_TE;
    716 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    717 		/* Disable arbiter to set MTQC */
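         		/*
         		 * Per the Intel datasheets, MTQC should only be written
         		 * while the TX descriptor arbiter is disabled
         		 * (RTTDCS.ARBDIS set); the arbiter is re-enabled right
         		 * after the MTQC write below.
         		 */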
    718 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    719 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    720 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    721 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    722 		    ixgbe_get_mtqc(adapter->iov_mode));
    723 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    724 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    725 	}
    726 
    727 	return;
    728 } /* ixgbe_initialize_transmit_units */
    729 
    730 /************************************************************************
    731  * ixgbe_attach - Device initialization routine
    732  *
    733  *   Called when the driver is being loaded.
    734  *   Identifies the type of hardware, allocates all resources
    735  *   and initializes the hardware.
    736  *
     737  *   (void on NetBSD; failures are reported via aprint and cleanup)
    738  ************************************************************************/
    739 static void
    740 ixgbe_attach(device_t parent, device_t dev, void *aux)
    741 {
    742 	struct adapter  *adapter;
    743 	struct ixgbe_hw *hw;
    744 	int             error = -1;
    745 	u32		ctrl_ext;
    746 	u16		high, low, nvmreg;
    747 	pcireg_t	id, subid;
    748 	ixgbe_vendor_info_t *ent;
    749 	struct pci_attach_args *pa = aux;
    750 	const char *str;
    751 	char buf[256];
    752 
    753 	INIT_DEBUGOUT("ixgbe_attach: begin");
    754 
    755 	/* Allocate, clear, and link in our adapter structure */
    756 	adapter = device_private(dev);
    757 	adapter->hw.back = adapter;
    758 	adapter->dev = dev;
    759 	hw = &adapter->hw;
    760 	adapter->osdep.pc = pa->pa_pc;
    761 	adapter->osdep.tag = pa->pa_tag;
    762 	if (pci_dma64_available(pa))
    763 		adapter->osdep.dmat = pa->pa_dmat64;
    764 	else
    765 		adapter->osdep.dmat = pa->pa_dmat;
    766 	adapter->osdep.attached = false;
    767 
    768 	ent = ixgbe_lookup(pa);
    769 
    770 	KASSERT(ent != NULL);
    771 
    772 	aprint_normal(": %s, Version - %s\n",
    773 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    774 
    775 	/* Core Lock Init*/
    776 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    777 
    778 	/* Set up the timer callout */
    779 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    780 
    781 	/* Determine hardware revision */
    782 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    783 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    784 
    785 	hw->vendor_id = PCI_VENDOR(id);
    786 	hw->device_id = PCI_PRODUCT(id);
    787 	hw->revision_id =
    788 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    789 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    790 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    791 
    792 	/*
    793 	 * Make sure BUSMASTER is set
    794 	 */
    795 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    796 
    797 	/* Do base PCI setup - map BAR0 */
    798 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    799 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    800 		error = ENXIO;
    801 		goto err_out;
    802 	}
    803 
    804 	/* let hardware know driver is loaded */
    805 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    806 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    807 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    808 
    809 	/*
    810 	 * Initialize the shared code
    811 	 */
    812 	if (ixgbe_init_shared_code(hw)) {
    813 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    814 		error = ENXIO;
    815 		goto err_out;
    816 	}
    817 
    818 	switch (hw->mac.type) {
    819 	case ixgbe_mac_82598EB:
    820 		str = "82598EB";
    821 		break;
    822 	case ixgbe_mac_82599EB:
    823 		str = "82599EB";
    824 		break;
    825 	case ixgbe_mac_X540:
    826 		str = "X540";
    827 		break;
    828 	case ixgbe_mac_X550:
    829 		str = "X550";
    830 		break;
    831 	case ixgbe_mac_X550EM_x:
    832 		str = "X550EM";
    833 		break;
    834 	case ixgbe_mac_X550EM_a:
    835 		str = "X550EM A";
    836 		break;
    837 	default:
    838 		str = "Unknown";
    839 		break;
    840 	}
    841 	aprint_normal_dev(dev, "device %s\n", str);
    842 
    843 	if (hw->mbx.ops.init_params)
    844 		hw->mbx.ops.init_params(hw);
    845 
    846 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    847 
    848 	/* Pick up the 82599 settings */
    849 	if (hw->mac.type != ixgbe_mac_82598EB) {
    850 		hw->phy.smart_speed = ixgbe_smart_speed;
    851 		adapter->num_segs = IXGBE_82599_SCATTER;
    852 	} else
    853 		adapter->num_segs = IXGBE_82598_SCATTER;
    854 
    855 	hw->mac.ops.set_lan_id(hw);
    856 	ixgbe_init_device_features(adapter);
    857 
    858 	if (ixgbe_configure_interrupts(adapter)) {
    859 		error = ENXIO;
    860 		goto err_out;
    861 	}
    862 
    863 	/* Allocate multicast array memory. */
    864 	adapter->mta = malloc(sizeof(*adapter->mta) *
    865 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    866 	if (adapter->mta == NULL) {
    867 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    868 		error = ENOMEM;
    869 		goto err_out;
    870 	}
    871 
    872 	/* Enable WoL (if supported) */
    873 	ixgbe_check_wol_support(adapter);
    874 
    875 	/* Verify adapter fan is still functional (if applicable) */
    876 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    877 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    878 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    879 	}
    880 
    881 	/* Ensure SW/FW semaphore is free */
    882 	ixgbe_init_swfw_semaphore(hw);
    883 
    884 	/* Enable EEE power saving */
    885 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    886 		hw->mac.ops.setup_eee(hw, TRUE);
    887 
    888 	/* Set an initial default flow control value */
    889 	hw->fc.requested_mode = ixgbe_flow_control;
    890 
    891 	/* Sysctls for limiting the amount of work done in the taskqueues */
    892 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    893 	    "max number of rx packets to process",
    894 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    895 
    896 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    897 	    "max number of tx packets to process",
    898 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    899 
    900 	/* Do descriptor calc and sanity checks */
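         	/*
         	 * The ring byte size must be a multiple of DBA_ALIGN and the
         	 * descriptor count must fall within [MIN_TXD, MAX_TXD]
         	 * (respectively [MIN_RXD, MAX_RXD] below); otherwise fall back
         	 * to the defaults.
         	 */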
    901 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    902 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    903 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    904 		adapter->num_tx_desc = DEFAULT_TXD;
    905 	} else
    906 		adapter->num_tx_desc = ixgbe_txd;
    907 
    908 	/*
    909 	 * With many RX rings it is easy to exceed the
    910 	 * system mbuf allocation. Tuning nmbclusters
    911 	 * can alleviate this.
    912 	 */
    913 	if (nmbclusters > 0) {
    914 		int s;
    915 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    916 		if (s > nmbclusters) {
    917 			aprint_error_dev(dev, "RX Descriptors exceed "
    918 			    "system mbuf max, using default instead!\n");
    919 			ixgbe_rxd = DEFAULT_RXD;
    920 		}
    921 	}
    922 
    923 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    924 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    925 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    926 		adapter->num_rx_desc = DEFAULT_RXD;
    927 	} else
    928 		adapter->num_rx_desc = ixgbe_rxd;
    929 
    930 	/* Allocate our TX/RX Queues */
    931 	if (ixgbe_allocate_queues(adapter)) {
    932 		error = ENOMEM;
    933 		goto err_out;
    934 	}
    935 
    936 	hw->phy.reset_if_overtemp = TRUE;
    937 	error = ixgbe_reset_hw(hw);
    938 	hw->phy.reset_if_overtemp = FALSE;
    939 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    940 		/*
    941 		 * No optics in this port, set up
    942 		 * so the timer routine will probe
    943 		 * for later insertion.
    944 		 */
    945 		adapter->sfp_probe = TRUE;
    946 		error = IXGBE_SUCCESS;
    947 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    948 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    949 		error = EIO;
    950 		goto err_late;
    951 	} else if (error) {
    952 		aprint_error_dev(dev, "Hardware initialization failed\n");
    953 		error = EIO;
    954 		goto err_late;
    955 	}
    956 
    957 	/* Make sure we have a good EEPROM before we read from it */
    958 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    959 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    960 		error = EIO;
    961 		goto err_late;
    962 	}
    963 
    964 	aprint_normal("%s:", device_xname(dev));
    965 	/* NVM Image Version */
    966 	switch (hw->mac.type) {
    967 	case ixgbe_mac_X540:
    968 	case ixgbe_mac_X550EM_a:
    969 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    970 		if (nvmreg == 0xffff)
    971 			break;
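         		/*
         		 * As decoded below: bits 15..12 hold the major version,
         		 * bits 11..4 the minor version and bits 3..0 an ID field.
         		 */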
    972 		high = (nvmreg >> 12) & 0x0f;
    973 		low = (nvmreg >> 4) & 0xff;
    974 		id = nvmreg & 0x0f;
    975 		aprint_normal(" NVM Image Version %u.", high);
    976 		if (hw->mac.type == ixgbe_mac_X540)
    977 			str = "%x";
    978 		else
    979 			str = "%02x";
    980 		aprint_normal(str, low);
    981 		aprint_normal(" ID 0x%x,", id);
    982 		break;
    983 	case ixgbe_mac_X550EM_x:
    984 	case ixgbe_mac_X550:
    985 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    986 		if (nvmreg == 0xffff)
    987 			break;
    988 		high = (nvmreg >> 12) & 0x0f;
    989 		low = nvmreg & 0xff;
    990 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    991 		break;
    992 	default:
    993 		break;
    994 	}
    995 
    996 	/* PHY firmware revision */
    997 	switch (hw->mac.type) {
    998 	case ixgbe_mac_X540:
    999 	case ixgbe_mac_X550:
   1000 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1001 		if (nvmreg == 0xffff)
   1002 			break;
   1003 		high = (nvmreg >> 12) & 0x0f;
   1004 		low = (nvmreg >> 4) & 0xff;
   1005 		id = nvmreg & 0x000f;
   1006 		aprint_normal(" PHY FW Revision %u.", high);
   1007 		if (hw->mac.type == ixgbe_mac_X540)
   1008 			str = "%x";
   1009 		else
   1010 			str = "%02x";
   1011 		aprint_normal(str, low);
   1012 		aprint_normal(" ID 0x%x,", id);
   1013 		break;
   1014 	default:
   1015 		break;
   1016 	}
   1017 
   1018 	/* NVM Map version & OEM NVM Image version */
   1019 	switch (hw->mac.type) {
   1020 	case ixgbe_mac_X550:
   1021 	case ixgbe_mac_X550EM_x:
   1022 	case ixgbe_mac_X550EM_a:
   1023 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1024 		if (nvmreg != 0xffff) {
   1025 			high = (nvmreg >> 12) & 0x0f;
   1026 			low = nvmreg & 0x00ff;
   1027 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1028 		}
   1029 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1030 		if (nvmreg != 0xffff) {
   1031 			high = (nvmreg >> 12) & 0x0f;
   1032 			low = nvmreg & 0x00ff;
   1033 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1034 			    low);
   1035 		}
   1036 		break;
   1037 	default:
   1038 		break;
   1039 	}
   1040 
   1041 	/* Print the ETrackID */
   1042 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1043 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1044 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1045 
   1046 	/* Setup OS specific network interface */
   1047 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1048 		goto err_late;
   1049 
   1050 	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
   1051 		error = ixgbe_allocate_msix(adapter, pa);
   1052 	else
   1053 		error = ixgbe_allocate_legacy(adapter, pa);
   1054 	if (error)
   1055 		goto err_late;
   1056 
   1057 	error = ixgbe_start_hw(hw);
   1058 	switch (error) {
   1059 	case IXGBE_ERR_EEPROM_VERSION:
   1060 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1061 		    "LOM.  Please be aware there may be issues associated "
   1062 		    "with your hardware.\nIf you are experiencing problems "
   1063 		    "please contact your Intel or hardware representative "
   1064 		    "who provided you with this hardware.\n");
   1065 		break;
   1066 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1067 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1068 		error = EIO;
   1069 		goto err_late;
   1070 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1071 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1072 		/* falls thru */
   1073 	default:
   1074 		break;
   1075 	}
   1076 
   1077 	/*
    1078 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+)
    1079 	 * cage and an inserted module, phy.id is an SFF 8024 ID, not an MII PHY id.
   1080 	 */
   1081 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1082 		uint16_t id1, id2;
   1083 		int oui, model, rev;
   1084 		const char *descr;
   1085 
   1086 		id1 = hw->phy.id >> 16;
   1087 		id2 = hw->phy.id & 0xffff;
   1088 		oui = MII_OUI(id1, id2);
   1089 		model = MII_MODEL(id2);
   1090 		rev = MII_REV(id2);
   1091 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1092 			aprint_normal_dev(dev,
   1093 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1094 			    descr, oui, model, rev);
   1095 		else
   1096 			aprint_normal_dev(dev,
   1097 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1098 			    oui, model, rev);
   1099 	}
   1100 
   1101 	/* Enable the optics for 82599 SFP+ fiber */
   1102 	ixgbe_enable_tx_laser(hw);
   1103 
   1104 	/* Enable power to the phy. */
   1105 	ixgbe_set_phy_power(hw, TRUE);
   1106 
   1107 	/* Initialize statistics */
   1108 	ixgbe_update_stats_counters(adapter);
   1109 
   1110 	/* Check PCIE slot type/speed/width */
   1111 	ixgbe_get_slot_info(adapter);
   1112 
   1113 	/*
   1114 	 * Do time init and sysctl init here, but
   1115 	 * only on the first port of a bypass adapter.
   1116 	 */
   1117 	ixgbe_bypass_init(adapter);
   1118 
   1119 	/* Set an initial dmac value */
   1120 	adapter->dmac = 0;
   1121 	/* Set initial advertised speeds (if applicable) */
   1122 	adapter->advertise = ixgbe_get_advertise(adapter);
   1123 
   1124 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1125 		ixgbe_define_iov_schemas(dev, &error);
   1126 
   1127 	/* Add sysctls */
   1128 	ixgbe_add_device_sysctls(adapter);
   1129 	ixgbe_add_hw_stats(adapter);
   1130 
   1131 	/* For Netmap */
   1132 	adapter->init_locked = ixgbe_init_locked;
   1133 	adapter->stop_locked = ixgbe_stop;
   1134 
   1135 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1136 		ixgbe_netmap_attach(adapter);
   1137 
   1138 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1139 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1140 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1141 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1142 
   1143 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1144 		pmf_class_network_register(dev, adapter->ifp);
   1145 	else
   1146 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1147 
   1148 	INIT_DEBUGOUT("ixgbe_attach: end");
   1149 	adapter->osdep.attached = true;
   1150 
   1151 	return;
   1152 
   1153 err_late:
   1154 	ixgbe_free_transmit_structures(adapter);
   1155 	ixgbe_free_receive_structures(adapter);
   1156 	free(adapter->queues, M_DEVBUF);
   1157 err_out:
   1158 	if (adapter->ifp != NULL)
   1159 		if_free(adapter->ifp);
   1160 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1161 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1162 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1163 	ixgbe_free_pci_resources(adapter);
   1164 	if (adapter->mta != NULL)
   1165 		free(adapter->mta, M_DEVBUF);
   1166 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1167 
   1168 	return;
   1169 } /* ixgbe_attach */
   1170 
   1171 /************************************************************************
   1172  * ixgbe_check_wol_support
   1173  *
   1174  *   Checks whether the adapter's ports are capable of
   1175  *   Wake On LAN by reading the adapter's NVM.
   1176  *
   1177  *   Sets each port's hw->wol_enabled value depending
   1178  *   on the value read here.
   1179  ************************************************************************/
   1180 static void
   1181 ixgbe_check_wol_support(struct adapter *adapter)
   1182 {
   1183 	struct ixgbe_hw *hw = &adapter->hw;
   1184 	u16             dev_caps = 0;
   1185 
   1186 	/* Find out WoL support for port */
   1187 	adapter->wol_support = hw->wol_enabled = 0;
   1188 	ixgbe_get_device_caps(hw, &dev_caps);
   1189 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1190 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1191 	     hw->bus.func == 0))
   1192 		adapter->wol_support = hw->wol_enabled = 1;
   1193 
   1194 	/* Save initial wake up filter configuration */
   1195 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1196 
   1197 	return;
   1198 } /* ixgbe_check_wol_support */
   1199 
   1200 /************************************************************************
   1201  * ixgbe_setup_interface
   1202  *
   1203  *   Setup networking device structure and register an interface.
   1204  ************************************************************************/
   1205 static int
   1206 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1207 {
   1208 	struct ethercom *ec = &adapter->osdep.ec;
   1209 	struct ifnet   *ifp;
   1210 	int rv;
   1211 
   1212 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1213 
   1214 	ifp = adapter->ifp = &ec->ec_if;
   1215 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1216 	ifp->if_baudrate = IF_Gbps(10);
   1217 	ifp->if_init = ixgbe_init;
   1218 	ifp->if_stop = ixgbe_ifstop;
   1219 	ifp->if_softc = adapter;
   1220 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1221 #ifdef IXGBE_MPSAFE
   1222 	ifp->if_extflags = IFEF_MPSAFE;
   1223 #endif
   1224 	ifp->if_ioctl = ixgbe_ioctl;
   1225 #if __FreeBSD_version >= 1100045
   1226 	/* TSO parameters */
   1227 	ifp->if_hw_tsomax = 65518;
   1228 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1229 	ifp->if_hw_tsomaxsegsize = 2048;
   1230 #endif
   1231 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1232 #if 0
   1233 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1234 #endif
   1235 	} else {
   1236 		ifp->if_transmit = ixgbe_mq_start;
   1237 #if 0
   1238 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1239 #endif
   1240 	}
   1241 	ifp->if_start = ixgbe_legacy_start;
   1242 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1243 	IFQ_SET_READY(&ifp->if_snd);
   1244 
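         	/*
         	 * NetBSD two-stage interface attach: if_initialize() sets up the
         	 * ifnet, ether_ifattach() adds the Ethernet-specific state, and
         	 * if_register() finally publishes the interface.
         	 */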
   1245 	rv = if_initialize(ifp);
   1246 	if (rv != 0) {
   1247 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1248 		return rv;
   1249 	}
   1250 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1251 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1252 	/*
    1253 	 * We use per-TX-queue softints, so if_deferred_start_init() isn't
   1254 	 * used.
   1255 	 */
   1256 	if_register(ifp);
   1257 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1258 
   1259 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1260 
   1261 	/*
   1262 	 * Tell the upper layer(s) we support long frames.
   1263 	 */
   1264 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1265 
   1266 	/* Set capability flags */
   1267 	ifp->if_capabilities |= IFCAP_RXCSUM
   1268 			     |  IFCAP_TXCSUM
   1269 			     |  IFCAP_TSOv4
   1270 			     |  IFCAP_TSOv6
   1271 			     |  IFCAP_LRO;
   1272 	ifp->if_capenable = 0;
   1273 
   1274 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1275 	    		    |  ETHERCAP_VLAN_HWCSUM
   1276 	    		    |  ETHERCAP_JUMBO_MTU
   1277 	    		    |  ETHERCAP_VLAN_MTU;
   1278 
   1279 	/* Enable the above capabilities by default */
   1280 	ec->ec_capenable = ec->ec_capabilities;
   1281 
    1282 	/*
    1283 	 * Don't turn this on by default: if vlans are created on another
    1284 	 * pseudo device (e.g. lagg), vlan events are not passed through,
    1285 	 * breaking operation, whereas with HW FILTER off it works. If you
    1286 	 * use vlans directly on the ixgbe driver you can enable this and
    1287 	 * get full hardware tag filtering.
    1288 	 */
   1290 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1291 
   1292 	/*
   1293 	 * Specify the media types supported by this adapter and register
   1294 	 * callbacks to update media and link information
   1295 	 */
   1296 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1297 	    ixgbe_media_status);
   1298 
   1299 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1300 	ixgbe_add_media_types(adapter);
   1301 
   1302 	/* Set autoselect media by default */
   1303 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1304 
   1305 	return (0);
   1306 } /* ixgbe_setup_interface */
   1307 
   1308 /************************************************************************
   1309  * ixgbe_add_media_types
   1310  ************************************************************************/
   1311 static void
   1312 ixgbe_add_media_types(struct adapter *adapter)
   1313 {
   1314 	struct ixgbe_hw *hw = &adapter->hw;
   1315 	device_t        dev = adapter->dev;
   1316 	u64             layer;
   1317 
   1318 	layer = adapter->phy_layer;
   1319 
   1320 #define	ADD(mm, dd)							\
   1321 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1322 
   1323 	/* Media types with matching NetBSD media defines */
   1324 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1325 		ADD(IFM_10G_T | IFM_FDX, 0);
   1326 	}
   1327 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1328 		ADD(IFM_1000_T | IFM_FDX, 0);
   1329 	}
   1330 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1331 		ADD(IFM_100_TX | IFM_FDX, 0);
   1332 	}
   1333 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1334 		ADD(IFM_10_T | IFM_FDX, 0);
   1335 	}
   1336 
   1337 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1338 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1339 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1340 	}
   1341 
   1342 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1343 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1344 		if (hw->phy.multispeed_fiber) {
   1345 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1346 		}
   1347 	}
   1348 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1349 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1350 		if (hw->phy.multispeed_fiber) {
   1351 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1352 		}
   1353 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1354 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1355 	}
   1356 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1357 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1358 	}
   1359 
   1360 #ifdef IFM_ETH_XTYPE
   1361 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1362 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1363 	}
   1364 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1365 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1366 	}
   1367 #else
   1368 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1369 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1370 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1371 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1372 	}
   1373 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1374 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1375 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1376 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1377 	}
   1378 #endif
   1379 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1380 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1381 	}
   1382 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1383 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1384 	}
   1385 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1386 		ADD(IFM_2500_T | IFM_FDX, 0);
   1387 	}
   1388 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1389 		ADD(IFM_5000_T | IFM_FDX, 0);
   1390 	}
   1391 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1392 		device_printf(dev, "Media supported: 1000baseBX\n");
   1393 	/* XXX no ifmedia_set? */
   1394 
   1395 	ADD(IFM_AUTO, 0);
   1396 
   1397 #undef ADD
   1398 } /* ixgbe_add_media_types */
   1399 
   1400 /************************************************************************
   1401  * ixgbe_is_sfp
   1402  ************************************************************************/
   1403 static inline bool
   1404 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1405 {
   1406 	switch (hw->mac.type) {
   1407 	case ixgbe_mac_82598EB:
   1408 		if (hw->phy.type == ixgbe_phy_nl)
   1409 			return TRUE;
   1410 		return FALSE;
   1411 	case ixgbe_mac_82599EB:
   1412 		switch (hw->mac.ops.get_media_type(hw)) {
   1413 		case ixgbe_media_type_fiber:
   1414 		case ixgbe_media_type_fiber_qsfp:
   1415 			return TRUE;
   1416 		default:
   1417 			return FALSE;
   1418 		}
   1419 	case ixgbe_mac_X550EM_x:
   1420 	case ixgbe_mac_X550EM_a:
   1421 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1422 			return TRUE;
   1423 		return FALSE;
   1424 	default:
   1425 		return FALSE;
   1426 	}
   1427 } /* ixgbe_is_sfp */
   1428 
   1429 /************************************************************************
   1430  * ixgbe_config_link
   1431  ************************************************************************/
   1432 static void
   1433 ixgbe_config_link(struct adapter *adapter)
   1434 {
   1435 	struct ixgbe_hw *hw = &adapter->hw;
   1436 	u32             autoneg, err = 0;
   1437 	bool            sfp, negotiate = false;
   1438 
   1439 	sfp = ixgbe_is_sfp(hw);
   1440 
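         	/*
         	 * For SFP/SFP+ ports, defer the work to softint context:
         	 * mod_si identifies a newly plugged module and msf_si walks
         	 * the multispeed-fiber link setup.  kpreempt_disable() keeps
         	 * us on the current CPU across softint_schedule().
         	 */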
   1441 	if (sfp) {
   1442 		if (hw->phy.multispeed_fiber) {
   1443 			hw->mac.ops.setup_sfp(hw);
   1444 			ixgbe_enable_tx_laser(hw);
   1445 			kpreempt_disable();
   1446 			softint_schedule(adapter->msf_si);
   1447 			kpreempt_enable();
   1448 		} else {
   1449 			kpreempt_disable();
   1450 			softint_schedule(adapter->mod_si);
   1451 			kpreempt_enable();
   1452 		}
   1453 	} else {
   1454 		if (hw->mac.ops.check_link)
   1455 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1456 			    &adapter->link_up, FALSE);
   1457 		if (err)
   1458 			goto out;
   1459 		autoneg = hw->phy.autoneg_advertised;
   1460 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1461 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1462 			    &negotiate);
   1463 		if (err)
   1464 			goto out;
   1465 		if (hw->mac.ops.setup_link)
    1466 			err = hw->mac.ops.setup_link(hw, autoneg,
   1467 			    adapter->link_up);
   1468 	}
   1469 out:
   1470 
   1471 	return;
   1472 } /* ixgbe_config_link */
   1473 
   1474 /************************************************************************
   1475  * ixgbe_update_stats_counters - Update board statistics counters.
   1476  ************************************************************************/
   1477 static void
   1478 ixgbe_update_stats_counters(struct adapter *adapter)
   1479 {
   1480 	struct ifnet          *ifp = adapter->ifp;
   1481 	struct ixgbe_hw       *hw = &adapter->hw;
   1482 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1483 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1484 	u64                   total_missed_rx = 0;
   1485 	uint64_t              crcerrs, rlec;
   1486 
   1487 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1488 	stats->crcerrs.ev_count += crcerrs;
   1489 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1490 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1491 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
    1492 	if (hw->mac.type >= ixgbe_mac_X550)
   1493 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1494 
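         	/*
         	 * Per-queue counters: the hardware exposes more register
         	 * slots than configured queues, so fold slot i into the
         	 * event counter for queue (i % num_queues).
         	 */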
   1495 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1496 		int j = i % adapter->num_queues;
   1497 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1498 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1499 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1500 	}
   1501 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1502 		uint32_t mp;
   1503 		int j = i % adapter->num_queues;
   1504 
   1505 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1506 		/* global total per queue */
   1507 		stats->mpc[j].ev_count += mp;
   1508 		/* running comprehensive total for stats display */
   1509 		total_missed_rx += mp;
   1510 
   1511 		if (hw->mac.type == ixgbe_mac_82598EB)
   1512 			stats->rnbc[j].ev_count
   1513 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1514 
   1515 	}
   1516 	stats->mpctotal.ev_count += total_missed_rx;
   1517 
   1518 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1519 	if ((adapter->link_active == TRUE)
   1520 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1521 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1522 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1523 	}
   1524 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1525 	stats->rlec.ev_count += rlec;
   1526 
   1527 	/* Hardware workaround, gprc counts missed packets */
   1528 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1529 
   1530 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1531 	stats->lxontxc.ev_count += lxon;
   1532 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1533 	stats->lxofftxc.ev_count += lxoff;
   1534 	total = lxon + lxoff;
   1535 
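         	/*
         	 * XON/XOFF pause frames are counted by the MAC but are not
         	 * real traffic, so subtract them (total frames, and
         	 * total * ETHER_MIN_LEN octets) from the transmit counters.
         	 */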
   1536 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1537 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1538 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1539 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1540 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1541 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1542 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1543 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1544 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1545 	} else {
   1546 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1547 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1548 		/* 82598 only has a counter in the high register */
   1549 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1550 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1551 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1552 	}
   1553 
   1554 	/*
   1555 	 * Workaround: mprc hardware is incorrectly counting
   1556 	 * broadcasts, so for now we subtract those.
   1557 	 */
   1558 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1559 	stats->bprc.ev_count += bprc;
   1560 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1561 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1562 
   1563 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1564 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1565 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1566 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1567 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1568 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1569 
   1570 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1571 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1572 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1573 
   1574 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1575 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1576 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1577 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1578 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1579 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1580 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1581 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1582 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1583 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1584 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1585 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1586 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1587 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1588 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1589 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1590 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1591 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    1592 	/* FCoE counters are only present on 82599 and newer */
   1593 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1594 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1595 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1596 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1597 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1598 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1599 	}
   1600 
   1601 	/* Fill out the OS statistics structure */
   1602 	/*
   1603 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1604 	 * adapter->stats counters. It's required to make ifconfig -z
    1605 	 * (SIOCZIFDATA) work.
   1606 	 */
   1607 	ifp->if_collisions = 0;
   1608 
   1609 	/* Rx Errors */
   1610 	ifp->if_iqdrops += total_missed_rx;
   1611 	ifp->if_ierrors += crcerrs + rlec;
   1612 } /* ixgbe_update_stats_counters */
   1613 
   1614 /************************************************************************
   1615  * ixgbe_add_hw_stats
   1616  *
   1617  *   Add sysctl variables, one per statistic, to the system.
   1618  ************************************************************************/
   1619 static void
   1620 ixgbe_add_hw_stats(struct adapter *adapter)
   1621 {
   1622 	device_t dev = adapter->dev;
   1623 	const struct sysctlnode *rnode, *cnode;
   1624 	struct sysctllog **log = &adapter->sysctllog;
   1625 	struct tx_ring *txr = adapter->tx_rings;
   1626 	struct rx_ring *rxr = adapter->rx_rings;
   1627 	struct ixgbe_hw *hw = &adapter->hw;
   1628 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1629 	const char *xname = device_xname(dev);
   1630 
   1631 	/* Driver Statistics */
   1632 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1633 	    NULL, xname, "Handled queue in softint");
   1634 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1635 	    NULL, xname, "Requeued in softint");
   1636 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1637 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1638 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1639 	    NULL, xname, "m_defrag() failed");
   1640 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1641 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1642 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1643 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1644 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1645 	    NULL, xname, "Driver tx dma hard fail other");
   1646 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1647 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1648 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1649 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1650 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1651 	    NULL, xname, "Watchdog timeouts");
   1652 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1653 	    NULL, xname, "TSO errors");
   1654 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1655 	    NULL, xname, "Link MSI-X IRQ Handled");
   1656 
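         	/*
         	 * Per-queue statistics: a sysctl subtree plus TX/RX ring
         	 * event counters for each queue pair.
         	 */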
   1657 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1658 		snprintf(adapter->queues[i].evnamebuf,
   1659 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1660 		    xname, i);
   1661 		snprintf(adapter->queues[i].namebuf,
   1662 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1663 
   1664 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1665 			aprint_error_dev(dev, "could not create sysctl root\n");
   1666 			break;
   1667 		}
   1668 
   1669 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1670 		    0, CTLTYPE_NODE,
   1671 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1672 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1673 			break;
   1674 
   1675 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1676 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1677 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1678 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1679 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1680 			break;
   1681 
   1682 #if 0 /* XXX msaitoh */
   1683 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1684 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1685 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1686 			NULL, 0, &(adapter->queues[i].irqs),
   1687 		    0, CTL_CREATE, CTL_EOL) != 0)
   1688 			break;
   1689 #endif
   1690 
   1691 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1692 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1693 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1694 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1695 		    0, CTL_CREATE, CTL_EOL) != 0)
   1696 			break;
   1697 
   1698 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1699 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1700 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1701 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1702 		    0, CTL_CREATE, CTL_EOL) != 0)
   1703 			break;
   1704 
   1705 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1706 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1707 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1708 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1709 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1710 		    NULL, adapter->queues[i].evnamebuf,
   1711 		    "Queue No Descriptor Available");
   1712 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1713 		    NULL, adapter->queues[i].evnamebuf,
   1714 		    "Queue Packets Transmitted");
   1715 #ifndef IXGBE_LEGACY_TX
   1716 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1717 		    NULL, adapter->queues[i].evnamebuf,
   1718 		    "Packets dropped in pcq");
   1719 #endif
   1720 
   1721 #ifdef LRO
   1722 		struct lro_ctrl *lro = &rxr->lro;
   1723 #endif /* LRO */
   1724 
   1725 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1726 		    CTLFLAG_READONLY,
   1727 		    CTLTYPE_INT,
   1728 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1729 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1730 		    CTL_CREATE, CTL_EOL) != 0)
   1731 			break;
   1732 
   1733 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1734 		    CTLFLAG_READONLY,
   1735 		    CTLTYPE_INT,
   1736 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1737 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1738 		    CTL_CREATE, CTL_EOL) != 0)
   1739 			break;
   1740 
   1741 		if (i < __arraycount(stats->mpc)) {
   1742 			evcnt_attach_dynamic(&stats->mpc[i],
   1743 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1744 			    "RX Missed Packet Count");
   1745 			if (hw->mac.type == ixgbe_mac_82598EB)
   1746 				evcnt_attach_dynamic(&stats->rnbc[i],
   1747 				    EVCNT_TYPE_MISC, NULL,
   1748 				    adapter->queues[i].evnamebuf,
   1749 				    "Receive No Buffers");
   1750 		}
   1751 		if (i < __arraycount(stats->pxontxc)) {
   1752 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1753 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1754 			    "pxontxc");
   1755 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1756 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1757 			    "pxonrxc");
   1758 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1759 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1760 			    "pxofftxc");
   1761 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1762 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1763 			    "pxoffrxc");
   1764 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1765 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1766 			    "pxon2offc");
   1767 		}
   1768 		if (i < __arraycount(stats->qprc)) {
   1769 			evcnt_attach_dynamic(&stats->qprc[i],
   1770 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1771 			    "qprc");
   1772 			evcnt_attach_dynamic(&stats->qptc[i],
   1773 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1774 			    "qptc");
   1775 			evcnt_attach_dynamic(&stats->qbrc[i],
   1776 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1777 			    "qbrc");
   1778 			evcnt_attach_dynamic(&stats->qbtc[i],
   1779 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1780 			    "qbtc");
   1781 			evcnt_attach_dynamic(&stats->qprdc[i],
   1782 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1783 			    "qprdc");
   1784 		}
   1785 
   1786 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1787 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1788 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1789 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1790 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1791 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1792 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1793 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1794 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1795 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1796 #ifdef LRO
   1797 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1798 				CTLFLAG_RD, &lro->lro_queued, 0,
   1799 				"LRO Queued");
   1800 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1801 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1802 				"LRO Flushed");
   1803 #endif /* LRO */
   1804 	}
   1805 
   1806 	/* MAC stats get their own sub node */
   1807 
   1808 	snprintf(stats->namebuf,
   1809 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1810 
   1811 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1812 	    stats->namebuf, "rx csum offload - IP");
   1813 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1814 	    stats->namebuf, "rx csum offload - L4");
   1815 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1816 	    stats->namebuf, "rx csum offload - IP bad");
   1817 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1818 	    stats->namebuf, "rx csum offload - L4 bad");
   1819 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1820 	    stats->namebuf, "Interrupt conditions zero");
   1821 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1822 	    stats->namebuf, "Legacy interrupts");
   1823 
   1824 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1825 	    stats->namebuf, "CRC Errors");
   1826 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1827 	    stats->namebuf, "Illegal Byte Errors");
   1828 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1829 	    stats->namebuf, "Byte Errors");
   1830 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1831 	    stats->namebuf, "MAC Short Packets Discarded");
   1832 	if (hw->mac.type >= ixgbe_mac_X550)
   1833 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1834 		    stats->namebuf, "Bad SFD");
   1835 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1836 	    stats->namebuf, "Total Packets Missed");
   1837 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1838 	    stats->namebuf, "MAC Local Faults");
   1839 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1840 	    stats->namebuf, "MAC Remote Faults");
   1841 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1842 	    stats->namebuf, "Receive Length Errors");
   1843 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1844 	    stats->namebuf, "Link XON Transmitted");
   1845 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1846 	    stats->namebuf, "Link XON Received");
   1847 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1848 	    stats->namebuf, "Link XOFF Transmitted");
   1849 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1850 	    stats->namebuf, "Link XOFF Received");
   1851 
   1852 	/* Packet Reception Stats */
   1853 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1854 	    stats->namebuf, "Total Octets Received");
   1855 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1856 	    stats->namebuf, "Good Octets Received");
   1857 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1858 	    stats->namebuf, "Total Packets Received");
   1859 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1860 	    stats->namebuf, "Good Packets Received");
   1861 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1862 	    stats->namebuf, "Multicast Packets Received");
   1863 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1864 	    stats->namebuf, "Broadcast Packets Received");
   1865 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
    1866 	    stats->namebuf, "64 byte frames received");
   1867 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1868 	    stats->namebuf, "65-127 byte frames received");
   1869 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1870 	    stats->namebuf, "128-255 byte frames received");
   1871 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1872 	    stats->namebuf, "256-511 byte frames received");
   1873 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1874 	    stats->namebuf, "512-1023 byte frames received");
   1875 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    1876 	    stats->namebuf, "1024-1522 byte frames received");
   1877 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1878 	    stats->namebuf, "Receive Undersized");
   1879 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
    1880 	    stats->namebuf, "Fragmented Packets Received");
   1881 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1882 	    stats->namebuf, "Oversized Packets Received");
   1883 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1884 	    stats->namebuf, "Received Jabber");
   1885 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1886 	    stats->namebuf, "Management Packets Received");
   1887 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1888 	    stats->namebuf, "Management Packets Dropped");
   1889 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1890 	    stats->namebuf, "Checksum Errors");
   1891 
   1892 	/* Packet Transmission Stats */
   1893 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1894 	    stats->namebuf, "Good Octets Transmitted");
   1895 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "Total Packets Transmitted");
   1897 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "Good Packets Transmitted");
   1899 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1900 	    stats->namebuf, "Broadcast Packets Transmitted");
   1901 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Multicast Packets Transmitted");
   1903 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "Management Packets Transmitted");
   1905 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
    1906 	    stats->namebuf, "64 byte frames transmitted");
   1907 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "65-127 byte frames transmitted");
   1909 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1910 	    stats->namebuf, "128-255 byte frames transmitted");
   1911 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1912 	    stats->namebuf, "256-511 byte frames transmitted");
   1913 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "512-1023 byte frames transmitted");
   1915 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1916 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1917 } /* ixgbe_add_hw_stats */
   1918 
   1919 static void
   1920 ixgbe_clear_evcnt(struct adapter *adapter)
   1921 {
   1922 	struct tx_ring *txr = adapter->tx_rings;
   1923 	struct rx_ring *rxr = adapter->rx_rings;
   1924 	struct ixgbe_hw *hw = &adapter->hw;
   1925 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1926 
   1927 	adapter->handleq.ev_count = 0;
   1928 	adapter->req.ev_count = 0;
   1929 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1930 	adapter->mbuf_defrag_failed.ev_count = 0;
   1931 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1932 	adapter->einval_tx_dma_setup.ev_count = 0;
   1933 	adapter->other_tx_dma_setup.ev_count = 0;
   1934 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1935 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1936 	adapter->watchdog_events.ev_count = 0;
   1937 	adapter->tso_err.ev_count = 0;
   1938 	adapter->link_irq.ev_count = 0;
   1939 
   1940 	txr = adapter->tx_rings;
   1941 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1942 		adapter->queues[i].irqs.ev_count = 0;
   1943 		txr->no_desc_avail.ev_count = 0;
   1944 		txr->total_packets.ev_count = 0;
   1945 		txr->tso_tx.ev_count = 0;
   1946 #ifndef IXGBE_LEGACY_TX
   1947 		txr->pcq_drops.ev_count = 0;
   1948 #endif
   1949 
   1950 		if (i < __arraycount(stats->mpc)) {
   1951 			stats->mpc[i].ev_count = 0;
   1952 			if (hw->mac.type == ixgbe_mac_82598EB)
   1953 				stats->rnbc[i].ev_count = 0;
   1954 		}
   1955 		if (i < __arraycount(stats->pxontxc)) {
   1956 			stats->pxontxc[i].ev_count = 0;
   1957 			stats->pxonrxc[i].ev_count = 0;
   1958 			stats->pxofftxc[i].ev_count = 0;
   1959 			stats->pxoffrxc[i].ev_count = 0;
   1960 			stats->pxon2offc[i].ev_count = 0;
   1961 		}
   1962 		if (i < __arraycount(stats->qprc)) {
   1963 			stats->qprc[i].ev_count = 0;
   1964 			stats->qptc[i].ev_count = 0;
   1965 			stats->qbrc[i].ev_count = 0;
   1966 			stats->qbtc[i].ev_count = 0;
   1967 			stats->qprdc[i].ev_count = 0;
   1968 		}
   1969 
   1970 		rxr->rx_packets.ev_count = 0;
   1971 		rxr->rx_bytes.ev_count = 0;
   1972 		rxr->rx_copies.ev_count = 0;
   1973 		rxr->no_jmbuf.ev_count = 0;
   1974 		rxr->rx_discarded.ev_count = 0;
   1975 	}
   1976 	stats->ipcs.ev_count = 0;
   1977 	stats->l4cs.ev_count = 0;
   1978 	stats->ipcs_bad.ev_count = 0;
   1979 	stats->l4cs_bad.ev_count = 0;
   1980 	stats->intzero.ev_count = 0;
   1981 	stats->legint.ev_count = 0;
   1982 	stats->crcerrs.ev_count = 0;
   1983 	stats->illerrc.ev_count = 0;
   1984 	stats->errbc.ev_count = 0;
   1985 	stats->mspdc.ev_count = 0;
   1986 	stats->mbsdc.ev_count = 0;
   1987 	stats->mpctotal.ev_count = 0;
   1988 	stats->mlfc.ev_count = 0;
   1989 	stats->mrfc.ev_count = 0;
   1990 	stats->rlec.ev_count = 0;
   1991 	stats->lxontxc.ev_count = 0;
   1992 	stats->lxonrxc.ev_count = 0;
   1993 	stats->lxofftxc.ev_count = 0;
   1994 	stats->lxoffrxc.ev_count = 0;
   1995 
   1996 	/* Packet Reception Stats */
   1997 	stats->tor.ev_count = 0;
   1998 	stats->gorc.ev_count = 0;
   1999 	stats->tpr.ev_count = 0;
   2000 	stats->gprc.ev_count = 0;
   2001 	stats->mprc.ev_count = 0;
   2002 	stats->bprc.ev_count = 0;
   2003 	stats->prc64.ev_count = 0;
   2004 	stats->prc127.ev_count = 0;
   2005 	stats->prc255.ev_count = 0;
   2006 	stats->prc511.ev_count = 0;
   2007 	stats->prc1023.ev_count = 0;
   2008 	stats->prc1522.ev_count = 0;
   2009 	stats->ruc.ev_count = 0;
   2010 	stats->rfc.ev_count = 0;
   2011 	stats->roc.ev_count = 0;
   2012 	stats->rjc.ev_count = 0;
   2013 	stats->mngprc.ev_count = 0;
   2014 	stats->mngpdc.ev_count = 0;
   2015 	stats->xec.ev_count = 0;
   2016 
   2017 	/* Packet Transmission Stats */
   2018 	stats->gotc.ev_count = 0;
   2019 	stats->tpt.ev_count = 0;
   2020 	stats->gptc.ev_count = 0;
   2021 	stats->bptc.ev_count = 0;
   2022 	stats->mptc.ev_count = 0;
   2023 	stats->mngptc.ev_count = 0;
   2024 	stats->ptc64.ev_count = 0;
   2025 	stats->ptc127.ev_count = 0;
   2026 	stats->ptc255.ev_count = 0;
   2027 	stats->ptc511.ev_count = 0;
   2028 	stats->ptc1023.ev_count = 0;
   2029 	stats->ptc1522.ev_count = 0;
    2030 } /* ixgbe_clear_evcnt */
   2031 
   2032 /************************************************************************
   2033  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2034  *
   2035  *   Retrieves the TDH value from the hardware
   2036  ************************************************************************/
   2037 static int
   2038 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2039 {
   2040 	struct sysctlnode node = *rnode;
   2041 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2042 	uint32_t val;
   2043 
   2044 	if (!txr)
   2045 		return (0);
   2046 
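         	/*
         	 * Read the register into a stack variable and let
         	 * sysctl_lookup() copy it out; the node is duplicated so
         	 * sysctl_data can be repointed safely.
         	 */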
   2047 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2048 	node.sysctl_data = &val;
   2049 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2050 } /* ixgbe_sysctl_tdh_handler */
   2051 
   2052 /************************************************************************
   2053  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2054  *
   2055  *   Retrieves the TDT value from the hardware
   2056  ************************************************************************/
   2057 static int
   2058 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2059 {
   2060 	struct sysctlnode node = *rnode;
   2061 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2062 	uint32_t val;
   2063 
   2064 	if (!txr)
   2065 		return (0);
   2066 
   2067 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2068 	node.sysctl_data = &val;
   2069 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2070 } /* ixgbe_sysctl_tdt_handler */
   2071 
   2072 /************************************************************************
   2073  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2074  *
   2075  *   Retrieves the RDH value from the hardware
   2076  ************************************************************************/
   2077 static int
   2078 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2079 {
   2080 	struct sysctlnode node = *rnode;
   2081 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2082 	uint32_t val;
   2083 
   2084 	if (!rxr)
   2085 		return (0);
   2086 
   2087 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2088 	node.sysctl_data = &val;
   2089 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2090 } /* ixgbe_sysctl_rdh_handler */
   2091 
   2092 /************************************************************************
   2093  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2094  *
   2095  *   Retrieves the RDT value from the hardware
   2096  ************************************************************************/
   2097 static int
   2098 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2099 {
   2100 	struct sysctlnode node = *rnode;
   2101 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2102 	uint32_t val;
   2103 
   2104 	if (!rxr)
   2105 		return (0);
   2106 
   2107 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2108 	node.sysctl_data = &val;
   2109 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2110 } /* ixgbe_sysctl_rdt_handler */
   2111 
   2112 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2113 /************************************************************************
   2114  * ixgbe_register_vlan
   2115  *
   2116  *   Run via vlan config EVENT, it enables us to use the
   2117  *   HW Filter table since we can get the vlan id. This
   2118  *   just creates the entry in the soft version of the
   2119  *   VFTA, init will repopulate the real table.
   2120  ************************************************************************/
   2121 static void
   2122 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2123 {
   2124 	struct adapter	*adapter = ifp->if_softc;
   2125 	u16		index, bit;
   2126 
   2127 	if (ifp->if_softc != arg)   /* Not our event */
   2128 		return;
   2129 
   2130 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2131 		return;
   2132 
   2133 	IXGBE_CORE_LOCK(adapter);
   2134 	index = (vtag >> 5) & 0x7F;
   2135 	bit = vtag & 0x1F;
   2136 	adapter->shadow_vfta[index] |= (1 << bit);
   2137 	ixgbe_setup_vlan_hw_support(adapter);
   2138 	IXGBE_CORE_UNLOCK(adapter);
   2139 } /* ixgbe_register_vlan */
   2140 
   2141 /************************************************************************
   2142  * ixgbe_unregister_vlan
   2143  *
   2144  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2145  ************************************************************************/
   2146 static void
   2147 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2148 {
   2149 	struct adapter	*adapter = ifp->if_softc;
   2150 	u16		index, bit;
   2151 
   2152 	if (ifp->if_softc != arg)
   2153 		return;
   2154 
   2155 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2156 		return;
   2157 
   2158 	IXGBE_CORE_LOCK(adapter);
   2159 	index = (vtag >> 5) & 0x7F;
   2160 	bit = vtag & 0x1F;
   2161 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2162 	/* Re-init to load the changes */
   2163 	ixgbe_setup_vlan_hw_support(adapter);
   2164 	IXGBE_CORE_UNLOCK(adapter);
   2165 } /* ixgbe_unregister_vlan */
   2166 #endif
   2167 
   2168 static void
   2169 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2170 {
   2171 	struct ethercom *ec = &adapter->osdep.ec;
   2172 	struct ixgbe_hw *hw = &adapter->hw;
   2173 	struct rx_ring	*rxr;
   2174 	int             i;
   2175 	u32		ctrl;
   2176 
   2177 
   2178 	/*
    2179 	 * We get here through init_locked, meaning a soft
    2180 	 * reset has already cleared the VFTA and other state,
    2181 	 * so if no VLANs have been registered there is
    2182 	 * nothing to do.
   2183 	 */
   2184 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2185 		return;
   2186 
   2187 	/* Setup the queues for vlans */
   2188 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2189 		for (i = 0; i < adapter->num_queues; i++) {
   2190 			rxr = &adapter->rx_rings[i];
   2191 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2192 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2193 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2194 				ctrl |= IXGBE_RXDCTL_VME;
   2195 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2196 			}
   2197 			rxr->vtag_strip = TRUE;
   2198 		}
   2199 	}
   2200 
   2201 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2202 		return;
   2203 	/*
    2204 	 * A soft reset zeroes out the VFTA, so
   2205 	 * we need to repopulate it now.
   2206 	 */
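         	/*
         	 * The VFTA is an array of IXGBE_VFTA_SIZE 32-bit words
         	 * covering all 4096 VLAN IDs: VLAN id v lives at word
         	 * (v >> 5), bit (v & 0x1f), mirroring shadow_vfta.
         	 */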
   2207 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2208 		if (adapter->shadow_vfta[i] != 0)
   2209 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2210 			    adapter->shadow_vfta[i]);
   2211 
   2212 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2213 	/* Enable the Filter Table if enabled */
   2214 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2215 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2216 		ctrl |= IXGBE_VLNCTRL_VFE;
   2217 	}
   2218 	if (hw->mac.type == ixgbe_mac_82598EB)
   2219 		ctrl |= IXGBE_VLNCTRL_VME;
   2220 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2221 } /* ixgbe_setup_vlan_hw_support */
   2222 
   2223 /************************************************************************
   2224  * ixgbe_get_slot_info
   2225  *
   2226  *   Get the width and transaction speed of
   2227  *   the slot this adapter is plugged into.
   2228  ************************************************************************/
   2229 static void
   2230 ixgbe_get_slot_info(struct adapter *adapter)
   2231 {
   2232 	device_t		dev = adapter->dev;
   2233 	struct ixgbe_hw		*hw = &adapter->hw;
   2234 	u32                   offset;
   2235 //	struct ixgbe_mac_info	*mac = &hw->mac;
    2236 	u32			link;
   2237 	int                   bus_info_valid = TRUE;
   2238 
   2239 	/* Some devices are behind an internal bridge */
   2240 	switch (hw->device_id) {
   2241 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2242 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2243 		goto get_parent_info;
   2244 	default:
   2245 		break;
   2246 	}
   2247 
   2248 	ixgbe_get_bus_info(hw);
   2249 
   2250 	/*
   2251 	 * Some devices don't use PCI-E, but there is no need
   2252 	 * to display "Unknown" for bus speed and width.
   2253 	 */
   2254 	switch (hw->mac.type) {
   2255 	case ixgbe_mac_X550EM_x:
   2256 	case ixgbe_mac_X550EM_a:
   2257 		return;
   2258 	default:
   2259 		goto display;
   2260 	}
   2261 
   2262 get_parent_info:
   2263 	/*
   2264 	 * For the Quad port adapter we need to parse back
   2265 	 * up the PCI tree to find the speed of the expansion
   2266 	 * slot into which this adapter is plugged. A bit more work.
   2267 	 */
   2268 	dev = device_parent(device_parent(dev));
   2269 #if 0
   2270 #ifdef IXGBE_DEBUG
   2271 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2272 	    pci_get_slot(dev), pci_get_function(dev));
   2273 #endif
   2274 	dev = device_parent(device_parent(dev));
   2275 #ifdef IXGBE_DEBUG
   2276 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2277 	    pci_get_slot(dev), pci_get_function(dev));
   2278 #endif
   2279 #endif
   2280 	/* Now get the PCI Express Capabilities offset */
   2281 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2282 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2283 		/*
   2284 		 * Hmm...can't get PCI-Express capabilities.
   2285 		 * Falling back to default method.
   2286 		 */
   2287 		bus_info_valid = FALSE;
   2288 		ixgbe_get_bus_info(hw);
   2289 		goto display;
   2290 	}
   2291 	/* ...and read the Link Status Register */
   2292 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2293 	    offset + PCIE_LCSR);
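         	/*
         	 * PCIE_LCSR holds Link Control in the low 16 bits and Link
         	 * Status in the high 16 bits; hand the status half to the
         	 * shared code.
         	 */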
   2294 	ixgbe_set_pci_config_data_generic(hw, link >> 16);
   2295 
   2296 display:
   2297 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2298 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2299 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2300 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2301 	     "Unknown"),
   2302 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2303 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2304 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2305 	     "Unknown"));
   2306 
   2307 	if (bus_info_valid) {
   2308 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2309 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2310 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2311 			device_printf(dev, "PCI-Express bandwidth available"
   2312 			    " for this card\n     is not sufficient for"
   2313 			    " optimal performance.\n");
   2314 			device_printf(dev, "For optimal performance a x8 "
   2315 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2316 		}
   2317 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2318 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2319 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2320 			device_printf(dev, "PCI-Express bandwidth available"
   2321 			    " for this card\n     is not sufficient for"
   2322 			    " optimal performance.\n");
   2323 			device_printf(dev, "For optimal performance a x8 "
   2324 			    "PCIE Gen3 slot is required.\n");
   2325 		}
   2326 	} else
   2327 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2328 
   2329 	return;
   2330 } /* ixgbe_get_slot_info */
   2331 
   2332 /************************************************************************
   2333  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2334  ************************************************************************/
   2335 static inline void
   2336 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2337 {
   2338 	struct ixgbe_hw *hw = &adapter->hw;
   2339 	u64             queue = (u64)(1ULL << vector);
   2340 	u32             mask;
   2341 
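         	/*
         	 * 82598 has a single 32-bit EIMS register; newer MACs split
         	 * the 64-bit queue vector mask across EIMS_EX(0) (low half)
         	 * and EIMS_EX(1) (high half), so only write the halves that
         	 * have bits set.
         	 */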
   2342 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2343 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2344 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2345 	} else {
   2346 		mask = (queue & 0xFFFFFFFF);
   2347 		if (mask)
   2348 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2349 		mask = (queue >> 32);
   2350 		if (mask)
   2351 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2352 	}
   2353 } /* ixgbe_enable_queue */
   2354 
   2355 /************************************************************************
   2356  * ixgbe_disable_queue
   2357  ************************************************************************/
   2358 static inline void
   2359 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2360 {
   2361 	struct ixgbe_hw *hw = &adapter->hw;
   2362 	u64             queue = (u64)(1ULL << vector);
   2363 	u32             mask;
   2364 
   2365 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2366 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2367 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2368 	} else {
   2369 		mask = (queue & 0xFFFFFFFF);
   2370 		if (mask)
   2371 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2372 		mask = (queue >> 32);
   2373 		if (mask)
   2374 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2375 	}
   2376 } /* ixgbe_disable_queue */
   2377 
   2378 /************************************************************************
   2379  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2380  ************************************************************************/
   2381 static int
   2382 ixgbe_msix_que(void *arg)
   2383 {
   2384 	struct ix_queue	*que = arg;
   2385 	struct adapter  *adapter = que->adapter;
   2386 	struct ifnet    *ifp = adapter->ifp;
   2387 	struct tx_ring	*txr = que->txr;
   2388 	struct rx_ring	*rxr = que->rxr;
   2389 	bool		more;
   2390 	u32		newitr = 0;
   2391 
   2392 	/* Protect against spurious interrupts */
   2393 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2394 		return 0;
   2395 
   2396 	ixgbe_disable_queue(adapter, que->msix);
   2397 	++que->irqs.ev_count;
   2398 
   2399 #ifdef __NetBSD__
   2400 	/* Don't run ixgbe_rxeof in interrupt context */
   2401 	more = true;
   2402 #else
   2403 	more = ixgbe_rxeof(que);
   2404 #endif
   2405 
   2406 	IXGBE_TX_LOCK(txr);
   2407 	ixgbe_txeof(txr);
   2408 	IXGBE_TX_UNLOCK(txr);
   2409 
   2410 	/* Do AIM now? */
   2411 
   2412 	if (adapter->enable_aim == false)
   2413 		goto no_calc;
   2414 	/*
   2415 	 * Do Adaptive Interrupt Moderation:
   2416 	 *  - Write out last calculated setting
   2417 	 *  - Calculate based on average size over
   2418 	 *    the last interval.
   2419 	 */
   2420 	if (que->eitr_setting)
   2421 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2422 		    que->eitr_setting);
   2423 
   2424 	que->eitr_setting = 0;
   2425 
   2426 	/* Idle, do nothing */
    2427 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2428 		goto no_calc;
   2429 
   2430 	if ((txr->bytes) && (txr->packets))
   2431 		newitr = txr->bytes/txr->packets;
   2432 	if ((rxr->bytes) && (rxr->packets))
   2433 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2434 	newitr += 24; /* account for hardware frame, crc */
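         	/*
         	 * Example: an average frame of ~1500 bytes gives newitr of
         	 * about 1524, which survives the 3000 clamp and is halved
         	 * below to roughly 762.
         	 */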
   2435 
   2436 	/* set an upper boundary */
   2437 	newitr = min(newitr, 3000);
   2438 
   2439 	/* Be nice to the mid range */
   2440 	if ((newitr > 300) && (newitr < 1200))
   2441 		newitr = (newitr / 3);
   2442 	else
   2443 		newitr = (newitr / 2);
   2444 
    2445 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2446 		newitr |= newitr << 16;
    2447 	else
    2448 		newitr |= IXGBE_EITR_CNT_WDIS;
    2449 
    2450 	/* save for next interrupt */
    2451 	que->eitr_setting = newitr;
   2452 
   2453 	/* Reset state */
   2454 	txr->bytes = 0;
   2455 	txr->packets = 0;
   2456 	rxr->bytes = 0;
   2457 	rxr->packets = 0;
   2458 
   2459 no_calc:
   2460 	if (more)
   2461 		softint_schedule(que->que_si);
   2462 	else
   2463 		ixgbe_enable_queue(adapter, que->msix);
   2464 
   2465 	return 1;
   2466 } /* ixgbe_msix_que */
   2467 
   2468 /************************************************************************
   2469  * ixgbe_media_status - Media Ioctl callback
   2470  *
   2471  *   Called whenever the user queries the status of
   2472  *   the interface using ifconfig.
   2473  ************************************************************************/
   2474 static void
   2475 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2476 {
   2477 	struct adapter *adapter = ifp->if_softc;
   2478 	struct ixgbe_hw *hw = &adapter->hw;
   2479 	int layer;
   2480 
   2481 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2482 	IXGBE_CORE_LOCK(adapter);
   2483 	ixgbe_update_link_status(adapter);
   2484 
   2485 	ifmr->ifm_status = IFM_AVALID;
   2486 	ifmr->ifm_active = IFM_ETHER;
   2487 
   2488 	if (!adapter->link_active) {
   2489 		ifmr->ifm_active |= IFM_NONE;
   2490 		IXGBE_CORE_UNLOCK(adapter);
   2491 		return;
   2492 	}
   2493 
   2494 	ifmr->ifm_status |= IFM_ACTIVE;
   2495 	layer = adapter->phy_layer;
   2496 
   2497 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2498 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2499 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2500 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2501 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2502 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2503 		switch (adapter->link_speed) {
   2504 		case IXGBE_LINK_SPEED_10GB_FULL:
   2505 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2506 			break;
   2507 		case IXGBE_LINK_SPEED_5GB_FULL:
   2508 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2509 			break;
   2510 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2511 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2512 			break;
   2513 		case IXGBE_LINK_SPEED_1GB_FULL:
   2514 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2515 			break;
   2516 		case IXGBE_LINK_SPEED_100_FULL:
   2517 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2518 			break;
   2519 		case IXGBE_LINK_SPEED_10_FULL:
   2520 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2521 			break;
   2522 		}
   2523 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2524 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2525 		switch (adapter->link_speed) {
   2526 		case IXGBE_LINK_SPEED_10GB_FULL:
   2527 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2528 			break;
   2529 		}
   2530 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2531 		switch (adapter->link_speed) {
   2532 		case IXGBE_LINK_SPEED_10GB_FULL:
   2533 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2534 			break;
   2535 		case IXGBE_LINK_SPEED_1GB_FULL:
   2536 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2537 			break;
   2538 		}
   2539 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2540 		switch (adapter->link_speed) {
   2541 		case IXGBE_LINK_SPEED_10GB_FULL:
   2542 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2543 			break;
   2544 		case IXGBE_LINK_SPEED_1GB_FULL:
   2545 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2546 			break;
   2547 		}
   2548 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2549 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2550 		switch (adapter->link_speed) {
   2551 		case IXGBE_LINK_SPEED_10GB_FULL:
   2552 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2553 			break;
   2554 		case IXGBE_LINK_SPEED_1GB_FULL:
   2555 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2556 			break;
   2557 		}
   2558 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2559 		switch (adapter->link_speed) {
   2560 		case IXGBE_LINK_SPEED_10GB_FULL:
   2561 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2562 			break;
   2563 		}
   2564 	/*
   2565 	 * XXX: These need to use the proper media types once
   2566 	 * they're added.
   2567 	 */
   2568 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2569 		switch (adapter->link_speed) {
   2570 		case IXGBE_LINK_SPEED_10GB_FULL:
   2571 #ifndef IFM_ETH_XTYPE
   2572 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2573 #else
   2574 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2575 #endif
   2576 			break;
   2577 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2578 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2579 			break;
   2580 		case IXGBE_LINK_SPEED_1GB_FULL:
   2581 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2582 			break;
   2583 		}
   2584 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2585 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2586 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2587 		switch (adapter->link_speed) {
   2588 		case IXGBE_LINK_SPEED_10GB_FULL:
   2589 #ifndef IFM_ETH_XTYPE
   2590 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2591 #else
   2592 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2593 #endif
   2594 			break;
   2595 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2596 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2597 			break;
   2598 		case IXGBE_LINK_SPEED_1GB_FULL:
   2599 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2600 			break;
   2601 		}
   2602 
   2603 	/* If nothing is recognized... */
   2604 #if 0
   2605 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2606 		ifmr->ifm_active |= IFM_UNKNOWN;
   2607 #endif
   2608 
   2609 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2610 
   2611 	/* Display current flow control setting used on link */
   2612 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2613 	    hw->fc.current_mode == ixgbe_fc_full)
   2614 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2615 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2616 	    hw->fc.current_mode == ixgbe_fc_full)
   2617 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2618 
   2619 	IXGBE_CORE_UNLOCK(adapter);
   2620 
   2621 	return;
   2622 } /* ixgbe_media_status */
   2623 
   2624 /************************************************************************
   2625  * ixgbe_media_change - Media Ioctl callback
   2626  *
   2627  *   Called when the user changes speed/duplex using
    2628  *   media/mediaopt option with ifconfig.
   2629  ************************************************************************/
   2630 static int
   2631 ixgbe_media_change(struct ifnet *ifp)
   2632 {
   2633 	struct adapter   *adapter = ifp->if_softc;
   2634 	struct ifmedia   *ifm = &adapter->media;
   2635 	struct ixgbe_hw  *hw = &adapter->hw;
   2636 	ixgbe_link_speed speed = 0;
   2637 	ixgbe_link_speed link_caps = 0;
   2638 	bool negotiate = false;
   2639 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2640 
   2641 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2642 
   2643 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2644 		return (EINVAL);
   2645 
   2646 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2647 		return (ENODEV);
   2648 
   2649 	/*
   2650 	 * We don't actually need to check against the supported
   2651 	 * media types of the adapter; ifmedia will take care of
   2652 	 * that for us.
   2653 	 */
   2654 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2655 	case IFM_AUTO:
   2656 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2657 		    &negotiate);
   2658 		if (err != IXGBE_SUCCESS) {
   2659 			device_printf(adapter->dev, "Unable to determine "
   2660 			    "supported advertise speeds\n");
   2661 			return (ENODEV);
   2662 		}
   2663 		speed |= link_caps;
   2664 		break;
   2665 	case IFM_10G_T:
   2666 	case IFM_10G_LRM:
   2667 	case IFM_10G_LR:
   2668 	case IFM_10G_TWINAX:
   2669 #ifndef IFM_ETH_XTYPE
   2670 	case IFM_10G_SR: /* KR, too */
   2671 	case IFM_10G_CX4: /* KX4 */
   2672 #else
   2673 	case IFM_10G_KR:
   2674 	case IFM_10G_KX4:
   2675 #endif
   2676 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2677 		break;
   2678 	case IFM_5000_T:
   2679 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2680 		break;
   2681 	case IFM_2500_T:
   2682 	case IFM_2500_KX:
   2683 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2684 		break;
   2685 	case IFM_1000_T:
   2686 	case IFM_1000_LX:
   2687 	case IFM_1000_SX:
   2688 	case IFM_1000_KX:
   2689 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2690 		break;
   2691 	case IFM_100_TX:
   2692 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2693 		break;
   2694 	case IFM_10_T:
   2695 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2696 		break;
   2697 	default:
   2698 		goto invalid;
   2699 	}
   2700 
   2701 	hw->mac.autotry_restart = TRUE;
   2702 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2703 	adapter->advertise = 0;
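         	/*
         	 * Record the advertised speeds as a bitmap: bit 0 = 100M,
         	 * bit 1 = 1G, bit 2 = 10G, bit 3 = 10M, bit 4 = 2.5G,
         	 * bit 5 = 5G.
         	 */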
   2704 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2705 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2706 			adapter->advertise |= 1 << 2;
   2707 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2708 			adapter->advertise |= 1 << 1;
   2709 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2710 			adapter->advertise |= 1 << 0;
   2711 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2712 			adapter->advertise |= 1 << 3;
   2713 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2714 			adapter->advertise |= 1 << 4;
   2715 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2716 			adapter->advertise |= 1 << 5;
   2717 	}
   2718 
   2719 	return (0);
   2720 
   2721 invalid:
   2722 	device_printf(adapter->dev, "Invalid media type!\n");
   2723 
   2724 	return (EINVAL);
   2725 } /* ixgbe_media_change */
   2726 
   2727 /************************************************************************
   2728  * ixgbe_set_promisc
   2729  ************************************************************************/
   2730 static void
   2731 ixgbe_set_promisc(struct adapter *adapter)
   2732 {
   2733 	struct ifnet *ifp = adapter->ifp;
   2734 	int          mcnt = 0;
   2735 	u32          rctl;
   2736 	struct ether_multi *enm;
   2737 	struct ether_multistep step;
   2738 	struct ethercom *ec = &adapter->osdep.ec;
   2739 
   2740 	KASSERT(mutex_owned(&adapter->core_mtx));
   2741 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2742 	rctl &= (~IXGBE_FCTRL_UPE);
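         	/*
         	 * Count multicast entries; if the list overflows the hardware
         	 * filter (MAX_NUM_MULTICAST_ADDRESSES) or IFF_ALLMULTI is set,
         	 * multicast promiscuous (MPE) is left enabled below.
         	 */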
   2743 	if (ifp->if_flags & IFF_ALLMULTI)
   2744 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2745 	else {
   2746 		ETHER_LOCK(ec);
   2747 		ETHER_FIRST_MULTI(step, ec, enm);
   2748 		while (enm != NULL) {
   2749 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2750 				break;
   2751 			mcnt++;
   2752 			ETHER_NEXT_MULTI(step, enm);
   2753 		}
   2754 		ETHER_UNLOCK(ec);
   2755 	}
   2756 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2757 		rctl &= (~IXGBE_FCTRL_MPE);
   2758 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2759 
   2760 	if (ifp->if_flags & IFF_PROMISC) {
   2761 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2762 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2763 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2764 		rctl |= IXGBE_FCTRL_MPE;
   2765 		rctl &= ~IXGBE_FCTRL_UPE;
   2766 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2767 	}
   2768 } /* ixgbe_set_promisc */
   2769 
   2770 /************************************************************************
   2771  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2772  ************************************************************************/
   2773 static int
   2774 ixgbe_msix_link(void *arg)
   2775 {
   2776 	struct adapter	*adapter = arg;
   2777 	struct ixgbe_hw *hw = &adapter->hw;
   2778 	u32		eicr, eicr_mask;
   2779 	s32             retval;
   2780 
   2781 	++adapter->link_irq.ev_count;
   2782 
   2783 	/* Pause other interrupts */
   2784 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2785 
   2786 	/* First get the cause */
   2787 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2788 	/* Be sure the queue bits are not cleared */
   2789 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2790 	/* Clear interrupt with write */
   2791 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2792 
   2793 	/* Link status change */
   2794 	if (eicr & IXGBE_EICR_LSC) {
   2795 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2796 		softint_schedule(adapter->link_si);
   2797 	}
   2798 
   2799 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2800 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2801 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2802 			/* This is probably overkill :) */
    2803 			if (atomic_cas_uint(&adapter->fdir_reinit, 0, 1) != 0)
    2804 				return 1;
   2805 			/* Disable the interrupt */
   2806 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2807 			softint_schedule(adapter->fdir_si);
   2808 		}
   2809 
   2810 		if (eicr & IXGBE_EICR_ECC) {
   2811 			device_printf(adapter->dev,
   2812 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2813 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2814 		}
   2815 
   2816 		/* Check for over temp condition */
   2817 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2818 			switch (adapter->hw.mac.type) {
   2819 			case ixgbe_mac_X550EM_a:
   2820 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2821 					break;
   2822 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2823 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2824 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2825 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2826 				retval = hw->phy.ops.check_overtemp(hw);
   2827 				if (retval != IXGBE_ERR_OVERTEMP)
   2828 					break;
   2829 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2830 				device_printf(adapter->dev, "System shutdown required!\n");
   2831 				break;
   2832 			default:
   2833 				if (!(eicr & IXGBE_EICR_TS))
   2834 					break;
   2835 				retval = hw->phy.ops.check_overtemp(hw);
   2836 				if (retval != IXGBE_ERR_OVERTEMP)
   2837 					break;
   2838 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2839 				device_printf(adapter->dev, "System shutdown required!\n");
   2840 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2841 				break;
   2842 			}
   2843 		}
   2844 
   2845 		/* Check for VF message */
   2846 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2847 		    (eicr & IXGBE_EICR_MAILBOX))
   2848 			softint_schedule(adapter->mbx_si);
   2849 	}
   2850 
   2851 	if (ixgbe_is_sfp(hw)) {
   2852 		/* Pluggable optics-related interrupt */
   2853 		if (hw->mac.type >= ixgbe_mac_X540)
   2854 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2855 		else
   2856 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2857 
   2858 		if (eicr & eicr_mask) {
   2859 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2860 			softint_schedule(adapter->mod_si);
   2861 		}
   2862 
   2863 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2864 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2865 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2866 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2867 			softint_schedule(adapter->msf_si);
   2868 		}
   2869 	}
   2870 
   2871 	/* Check for fan failure */
   2872 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2873 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2874 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2875 	}
   2876 
   2877 	/* External PHY interrupt */
   2878 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2879 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2880 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2881 		softint_schedule(adapter->phy_si);
    2882 	}
   2883 
   2884 	/* Re-enable other interrupts */
   2885 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2886 	return 1;
   2887 } /* ixgbe_msix_link */
   2888 
   2889 /************************************************************************
   2890  * ixgbe_sysctl_interrupt_rate_handler
   2891  ************************************************************************/
   2892 static int
   2893 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2894 {
   2895 	struct sysctlnode node = *rnode;
   2896 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2897 	uint32_t reg, usec, rate;
   2898 	int error;
   2899 
   2900 	if (que == NULL)
   2901 		return 0;
   2902 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
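         	/*
         	 * EITR[11:3] holds the throttle interval.  The decode below is
         	 * the inverse of the encode further down ("(4000000 / rate) &
         	 * 0xff8"), so a stored interval of N reads back as a rate of
         	 * 500000 / N.
         	 */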
   2903 	usec = ((reg & 0x0FF8) >> 3);
   2904 	if (usec > 0)
   2905 		rate = 500000 / usec;
   2906 	else
   2907 		rate = 0;
   2908 	node.sysctl_data = &rate;
   2909 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2910 	if (error || newp == NULL)
   2911 		return error;
   2912 	reg &= ~0xfff; /* default, no limitation */
   2913 	ixgbe_max_interrupt_rate = 0;
   2914 	if (rate > 0 && rate < 500000) {
   2915 		if (rate < 1000)
   2916 			rate = 1000;
   2917 		ixgbe_max_interrupt_rate = rate;
   2918 		reg |= ((4000000/rate) & 0xff8);
   2919 	}
   2920 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2921 
   2922 	return (0);
   2923 } /* ixgbe_sysctl_interrupt_rate_handler */
   2924 
   2925 const struct sysctlnode *
   2926 ixgbe_sysctl_instance(struct adapter *adapter)
   2927 {
   2928 	const char *dvname;
   2929 	struct sysctllog **log;
   2930 	int rc;
   2931 	const struct sysctlnode *rnode;
   2932 
   2933 	if (adapter->sysctltop != NULL)
   2934 		return adapter->sysctltop;
   2935 
   2936 	log = &adapter->sysctllog;
   2937 	dvname = device_xname(adapter->dev);
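         	/* The per-device node is created under hw.<devname> (e.g. hw.ixg0). */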
   2938 
   2939 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2940 	    0, CTLTYPE_NODE, dvname,
   2941 	    SYSCTL_DESCR("ixgbe information and settings"),
   2942 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2943 		goto err;
   2944 
   2945 	return rnode;
   2946 err:
   2947 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2948 	return NULL;
   2949 }
   2950 
   2951 /************************************************************************
   2952  * ixgbe_add_device_sysctls
   2953  ************************************************************************/
   2954 static void
   2955 ixgbe_add_device_sysctls(struct adapter *adapter)
   2956 {
   2957 	device_t               dev = adapter->dev;
   2958 	struct ixgbe_hw        *hw = &adapter->hw;
   2959 	struct sysctllog **log;
   2960 	const struct sysctlnode *rnode, *cnode;
   2961 
   2962 	log = &adapter->sysctllog;
   2963 
   2964 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   2965 		aprint_error_dev(dev, "could not create sysctl root\n");
   2966 		return;
   2967 	}
   2968 
   2969 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2970 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2971 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   2972 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2973 		aprint_error_dev(dev, "could not create sysctl\n");
   2974 
   2975 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2976 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2977 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   2978 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   2979 		aprint_error_dev(dev, "could not create sysctl\n");
   2980 
   2981 	/* Sysctls for all devices */
   2982 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2983 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   2984 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   2985 	    CTL_EOL) != 0)
   2986 		aprint_error_dev(dev, "could not create sysctl\n");
   2987 
   2988 	adapter->enable_aim = ixgbe_enable_aim;
   2989 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2990 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2991 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2992 		aprint_error_dev(dev, "could not create sysctl\n");
   2993 
   2994 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2995 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2996 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   2997 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   2998 	    CTL_EOL) != 0)
   2999 		aprint_error_dev(dev, "could not create sysctl\n");
   3000 
   3001 #ifdef IXGBE_DEBUG
   3002 	/* testing sysctls (for all devices) */
   3003 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3004 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3005 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3006 	    CTL_EOL) != 0)
   3007 		aprint_error_dev(dev, "could not create sysctl\n");
   3008 
   3009 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3010 	    CTLTYPE_STRING, "print_rss_config",
   3011 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3012 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3013 	    CTL_EOL) != 0)
   3014 		aprint_error_dev(dev, "could not create sysctl\n");
   3015 #endif
   3016 	/* for X550 series devices */
   3017 	if (hw->mac.type >= ixgbe_mac_X550)
   3018 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3019 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3020 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3021 		    CTL_EOL) != 0)
   3022 			aprint_error_dev(dev, "could not create sysctl\n");
   3023 
   3024 	/* for WoL-capable devices */
   3025 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3026 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3027 		    CTLTYPE_BOOL, "wol_enable",
   3028 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3029 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3030 		    CTL_EOL) != 0)
   3031 			aprint_error_dev(dev, "could not create sysctl\n");
   3032 
   3033 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3034 		    CTLTYPE_INT, "wufc",
   3035 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3036 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3037 		    CTL_EOL) != 0)
   3038 			aprint_error_dev(dev, "could not create sysctl\n");
   3039 	}
   3040 
   3041 	/* for X552/X557-AT devices */
   3042 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3043 		const struct sysctlnode *phy_node;
   3044 
   3045 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3046 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3047 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3048 			aprint_error_dev(dev, "could not create sysctl\n");
   3049 			return;
   3050 		}
   3051 
   3052 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3053 		    CTLTYPE_INT, "temp",
   3054 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3055 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3056 		    CTL_EOL) != 0)
   3057 			aprint_error_dev(dev, "could not create sysctl\n");
   3058 
   3059 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3060 		    CTLTYPE_INT, "overtemp_occurred",
   3061 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3062 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3063 		    CTL_CREATE, CTL_EOL) != 0)
   3064 			aprint_error_dev(dev, "could not create sysctl\n");
   3065 	}
   3066 
   3067 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3068 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3069 		    CTLTYPE_INT, "eee_state",
   3070 		    SYSCTL_DESCR("EEE Power Save State"),
   3071 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3072 		    CTL_EOL) != 0)
   3073 			aprint_error_dev(dev, "could not create sysctl\n");
   3074 	}
   3075 } /* ixgbe_add_device_sysctls */
   3076 
   3077 /************************************************************************
   3078  * ixgbe_allocate_pci_resources
   3079  ************************************************************************/
   3080 static int
   3081 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3082     const struct pci_attach_args *pa)
   3083 {
   3084 	pcireg_t	memtype;
   3085 	device_t dev = adapter->dev;
   3086 	bus_addr_t addr;
   3087 	int flags;
   3088 
   3089 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3090 	switch (memtype) {
   3091 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3092 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3093 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3094 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3095 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3096 			goto map_err;
   3097 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3098 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3099 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3100 		}
   3101 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3102 		     adapter->osdep.mem_size, flags,
   3103 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3104 map_err:
   3105 			adapter->osdep.mem_size = 0;
   3106 			aprint_error_dev(dev, "unable to map BAR0\n");
   3107 			return ENXIO;
   3108 		}
   3109 		break;
   3110 	default:
   3111 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3112 		return ENXIO;
   3113 	}
   3114 
   3115 	return (0);
   3116 } /* ixgbe_allocate_pci_resources */
   3117 
   3118 /************************************************************************
   3119  * ixgbe_detach - Device removal routine
   3120  *
   3121  *   Called when the driver is being removed.
   3122  *   Stops the adapter and deallocates all the resources
   3123  *   that were allocated for driver operation.
   3124  *
   3125  *   return 0 on success, positive on failure
   3126  ************************************************************************/
   3127 static int
   3128 ixgbe_detach(device_t dev, int flags)
   3129 {
   3130 	struct adapter *adapter = device_private(dev);
   3131 	struct ix_queue *que = adapter->queues;
   3132 	struct rx_ring *rxr = adapter->rx_rings;
   3133 	struct tx_ring *txr = adapter->tx_rings;
   3134 	struct ixgbe_hw *hw = &adapter->hw;
   3135 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3136 	u32	ctrl_ext;
   3137 
   3138 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3139 	if (adapter->osdep.attached == false)
   3140 		return 0;
   3141 
   3142 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3143 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3144 		return (EBUSY);
   3145 	}
   3146 
   3147 	/* Stop the interface. Callouts are stopped in it. */
   3148 	ixgbe_ifstop(adapter->ifp, 1);
   3149 #if NVLAN > 0
   3150 	/* Make sure VLANs are not using driver */
   3151 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3152 		;	/* nothing to do: no VLANs */
   3153 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3154 		vlan_ifdetach(adapter->ifp);
   3155 	else {
   3156 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3157 		return (EBUSY);
   3158 	}
   3159 #endif
   3160 
   3161 	pmf_device_deregister(dev);
   3162 
   3163 	ether_ifdetach(adapter->ifp);
   3164 	/* Stop the adapter */
   3165 	IXGBE_CORE_LOCK(adapter);
   3166 	ixgbe_setup_low_power_mode(adapter);
   3167 	IXGBE_CORE_UNLOCK(adapter);
   3168 
   3169 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3170 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3171 			softint_disestablish(txr->txr_si);
   3172 		softint_disestablish(que->que_si);
   3173 	}
   3174 
   3175 	/* Drain the Link queue */
   3176 	softint_disestablish(adapter->link_si);
   3177 	softint_disestablish(adapter->mod_si);
   3178 	softint_disestablish(adapter->msf_si);
   3179 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   3180 		softint_disestablish(adapter->mbx_si);
   3181 	softint_disestablish(adapter->phy_si);
   3182 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   3183 		softint_disestablish(adapter->fdir_si);
   3184 
   3185 	/* let hardware know driver is unloading */
   3186 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3187 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3188 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3189 
   3190 	callout_halt(&adapter->timer, NULL);
   3191 
   3192 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3193 		netmap_detach(adapter->ifp);
   3194 
   3195 	ixgbe_free_pci_resources(adapter);
   3196 #if 0	/* XXX the NetBSD port is probably missing something here */
   3197 	bus_generic_detach(dev);
   3198 #endif
   3199 	if_detach(adapter->ifp);
   3200 	if_percpuq_destroy(adapter->ipq);
   3201 
   3202 	sysctl_teardown(&adapter->sysctllog);
   3203 	evcnt_detach(&adapter->handleq);
   3204 	evcnt_detach(&adapter->req);
   3205 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3206 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3207 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3208 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3209 	evcnt_detach(&adapter->other_tx_dma_setup);
   3210 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3211 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3212 	evcnt_detach(&adapter->watchdog_events);
   3213 	evcnt_detach(&adapter->tso_err);
   3214 	evcnt_detach(&adapter->link_irq);
   3215 
   3216 	txr = adapter->tx_rings;
   3217 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3218 		evcnt_detach(&adapter->queues[i].irqs);
   3219 		evcnt_detach(&txr->no_desc_avail);
   3220 		evcnt_detach(&txr->total_packets);
   3221 		evcnt_detach(&txr->tso_tx);
   3222 #ifndef IXGBE_LEGACY_TX
   3223 		evcnt_detach(&txr->pcq_drops);
   3224 #endif
   3225 
   3226 		if (i < __arraycount(stats->mpc)) {
   3227 			evcnt_detach(&stats->mpc[i]);
   3228 			if (hw->mac.type == ixgbe_mac_82598EB)
   3229 				evcnt_detach(&stats->rnbc[i]);
   3230 		}
   3231 		if (i < __arraycount(stats->pxontxc)) {
   3232 			evcnt_detach(&stats->pxontxc[i]);
   3233 			evcnt_detach(&stats->pxonrxc[i]);
   3234 			evcnt_detach(&stats->pxofftxc[i]);
   3235 			evcnt_detach(&stats->pxoffrxc[i]);
   3236 			evcnt_detach(&stats->pxon2offc[i]);
   3237 		}
   3238 		if (i < __arraycount(stats->qprc)) {
   3239 			evcnt_detach(&stats->qprc[i]);
   3240 			evcnt_detach(&stats->qptc[i]);
   3241 			evcnt_detach(&stats->qbrc[i]);
   3242 			evcnt_detach(&stats->qbtc[i]);
   3243 			evcnt_detach(&stats->qprdc[i]);
   3244 		}
   3245 
   3246 		evcnt_detach(&rxr->rx_packets);
   3247 		evcnt_detach(&rxr->rx_bytes);
   3248 		evcnt_detach(&rxr->rx_copies);
   3249 		evcnt_detach(&rxr->no_jmbuf);
   3250 		evcnt_detach(&rxr->rx_discarded);
   3251 	}
   3252 	evcnt_detach(&stats->ipcs);
   3253 	evcnt_detach(&stats->l4cs);
   3254 	evcnt_detach(&stats->ipcs_bad);
   3255 	evcnt_detach(&stats->l4cs_bad);
   3256 	evcnt_detach(&stats->intzero);
   3257 	evcnt_detach(&stats->legint);
   3258 	evcnt_detach(&stats->crcerrs);
   3259 	evcnt_detach(&stats->illerrc);
   3260 	evcnt_detach(&stats->errbc);
   3261 	evcnt_detach(&stats->mspdc);
   3262 	if (hw->mac.type >= ixgbe_mac_X550)
   3263 		evcnt_detach(&stats->mbsdc);
   3264 	evcnt_detach(&stats->mpctotal);
   3265 	evcnt_detach(&stats->mlfc);
   3266 	evcnt_detach(&stats->mrfc);
   3267 	evcnt_detach(&stats->rlec);
   3268 	evcnt_detach(&stats->lxontxc);
   3269 	evcnt_detach(&stats->lxonrxc);
   3270 	evcnt_detach(&stats->lxofftxc);
   3271 	evcnt_detach(&stats->lxoffrxc);
   3272 
   3273 	/* Packet Reception Stats */
   3274 	evcnt_detach(&stats->tor);
   3275 	evcnt_detach(&stats->gorc);
   3276 	evcnt_detach(&stats->tpr);
   3277 	evcnt_detach(&stats->gprc);
   3278 	evcnt_detach(&stats->mprc);
   3279 	evcnt_detach(&stats->bprc);
   3280 	evcnt_detach(&stats->prc64);
   3281 	evcnt_detach(&stats->prc127);
   3282 	evcnt_detach(&stats->prc255);
   3283 	evcnt_detach(&stats->prc511);
   3284 	evcnt_detach(&stats->prc1023);
   3285 	evcnt_detach(&stats->prc1522);
   3286 	evcnt_detach(&stats->ruc);
   3287 	evcnt_detach(&stats->rfc);
   3288 	evcnt_detach(&stats->roc);
   3289 	evcnt_detach(&stats->rjc);
   3290 	evcnt_detach(&stats->mngprc);
   3291 	evcnt_detach(&stats->mngpdc);
   3292 	evcnt_detach(&stats->xec);
   3293 
   3294 	/* Packet Transmission Stats */
   3295 	evcnt_detach(&stats->gotc);
   3296 	evcnt_detach(&stats->tpt);
   3297 	evcnt_detach(&stats->gptc);
   3298 	evcnt_detach(&stats->bptc);
   3299 	evcnt_detach(&stats->mptc);
   3300 	evcnt_detach(&stats->mngptc);
   3301 	evcnt_detach(&stats->ptc64);
   3302 	evcnt_detach(&stats->ptc127);
   3303 	evcnt_detach(&stats->ptc255);
   3304 	evcnt_detach(&stats->ptc511);
   3305 	evcnt_detach(&stats->ptc1023);
   3306 	evcnt_detach(&stats->ptc1522);
   3307 
   3308 	ixgbe_free_transmit_structures(adapter);
   3309 	ixgbe_free_receive_structures(adapter);
   3310 	free(adapter->queues, M_DEVBUF);
   3311 	free(adapter->mta, M_DEVBUF);
   3312 
   3313 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3314 
   3315 	return (0);
   3316 } /* ixgbe_detach */
   3317 
   3318 /************************************************************************
   3319  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3320  *
   3321  *   Prepare the adapter/port for LPLU and/or WoL
   3322  ************************************************************************/
   3323 static int
   3324 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3325 {
   3326 	struct ixgbe_hw *hw = &adapter->hw;
   3327 	device_t        dev = adapter->dev;
   3328 	s32             error = 0;
   3329 
   3330 	KASSERT(mutex_owned(&adapter->core_mtx));
   3331 
   3332 	/* Limit power management flow to X550EM baseT */
   3333 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3334 	    hw->phy.ops.enter_lplu) {
   3335 		/* X550EM baseT adapters need a special LPLU flow */
   3336 		hw->phy.reset_disable = true;
   3337 		ixgbe_stop(adapter);
   3338 		error = hw->phy.ops.enter_lplu(hw);
   3339 		if (error)
   3340 			device_printf(dev,
   3341 			    "Error entering LPLU: %d\n", error);
   3342 		hw->phy.reset_disable = false;
   3343 	} else {
   3344 		/* Just stop for other adapters */
   3345 		ixgbe_stop(adapter);
   3346 	}
   3347 
   3348 	if (!hw->wol_enabled) {
   3349 		ixgbe_set_phy_power(hw, FALSE);
   3350 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3351 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3352 	} else {
   3353 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3354 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3355 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3356 
   3357 		/*
   3358 		 * Clear Wake Up Status register to prevent any previous wakeup
   3359 		 * events from waking us up immediately after we suspend.
   3360 		 */
   3361 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3362 
   3363 		/*
   3364 		 * Program the Wakeup Filter Control register with user filter
   3365 		 * settings
   3366 		 */
   3367 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3368 
   3369 		/* Enable wakeups and power management in Wakeup Control */
   3370 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3371 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3372 
   3373 	}
   3374 
   3375 	return error;
   3376 } /* ixgbe_setup_low_power_mode */
   3377 
   3378 /************************************************************************
   3379  * ixgbe_shutdown - Shutdown entry point
   3380  ************************************************************************/
   3381 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3382 static int
   3383 ixgbe_shutdown(device_t dev)
   3384 {
   3385 	struct adapter *adapter = device_private(dev);
   3386 	int error = 0;
   3387 
   3388 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3389 
   3390 	IXGBE_CORE_LOCK(adapter);
   3391 	error = ixgbe_setup_low_power_mode(adapter);
   3392 	IXGBE_CORE_UNLOCK(adapter);
   3393 
   3394 	return (error);
   3395 } /* ixgbe_shutdown */
   3396 #endif
   3397 
   3398 /************************************************************************
   3399  * ixgbe_suspend
   3400  *
   3401  *   From D0 to D3
   3402  ************************************************************************/
   3403 static bool
   3404 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3405 {
   3406 	struct adapter *adapter = device_private(dev);
   3407 	int            error = 0;
   3408 
   3409 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3410 
   3411 	IXGBE_CORE_LOCK(adapter);
   3412 
   3413 	error = ixgbe_setup_low_power_mode(adapter);
   3414 
   3415 	IXGBE_CORE_UNLOCK(adapter);
   3416 
    3417 	return (error == 0);
   3418 } /* ixgbe_suspend */
   3419 
   3420 /************************************************************************
   3421  * ixgbe_resume
   3422  *
   3423  *   From D3 to D0
   3424  ************************************************************************/
   3425 static bool
   3426 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3427 {
   3428 	struct adapter  *adapter = device_private(dev);
   3429 	struct ifnet    *ifp = adapter->ifp;
   3430 	struct ixgbe_hw *hw = &adapter->hw;
   3431 	u32             wus;
   3432 
   3433 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3434 
   3435 	IXGBE_CORE_LOCK(adapter);
   3436 
   3437 	/* Read & clear WUS register */
   3438 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3439 	if (wus)
    3440 		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
   3442 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3443 	/* And clear WUFC until next low-power transition */
   3444 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3445 
   3446 	/*
   3447 	 * Required after D3->D0 transition;
   3448 	 * will re-advertise all previous advertised speeds
   3449 	 */
   3450 	if (ifp->if_flags & IFF_UP)
   3451 		ixgbe_init_locked(adapter);
   3452 
   3453 	IXGBE_CORE_UNLOCK(adapter);
   3454 
   3455 	return true;
   3456 } /* ixgbe_resume */
   3457 
   3458 /*
   3459  * Set the various hardware offload abilities.
   3460  *
   3461  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3462  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3463  * mbuf offload flags the driver will understand.
   3464  */
   3465 static void
   3466 ixgbe_set_if_hwassist(struct adapter *adapter)
   3467 {
   3468 	/* XXX */
   3469 }
   3470 
   3471 /************************************************************************
   3472  * ixgbe_init_locked - Init entry point
   3473  *
   3474  *   Used in two ways: It is used by the stack as an init
   3475  *   entry point in network interface structure. It is also
   3476  *   used by the driver as a hw/sw initialization routine to
   3477  *   get to a consistent state.
   3480  ************************************************************************/
   3481 static void
   3482 ixgbe_init_locked(struct adapter *adapter)
   3483 {
   3484 	struct ifnet   *ifp = adapter->ifp;
   3485 	device_t 	dev = adapter->dev;
   3486 	struct ixgbe_hw *hw = &adapter->hw;
   3487 	struct tx_ring  *txr;
   3488 	struct rx_ring  *rxr;
   3489 	u32		txdctl, mhadd;
   3490 	u32		rxdctl, rxctrl;
   3491 	u32             ctrl_ext;
   3492 	int             err = 0;
   3493 
   3494 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3495 
   3496 	KASSERT(mutex_owned(&adapter->core_mtx));
   3497 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3498 
   3499 	hw->adapter_stopped = FALSE;
   3500 	ixgbe_stop_adapter(hw);
    3501 	callout_stop(&adapter->timer);
   3502 
   3503 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3504 	adapter->max_frame_size =
   3505 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3506 
   3507 	/* Queue indices may change with IOV mode */
   3508 	ixgbe_align_all_queue_indices(adapter);
   3509 
   3510 	/* reprogram the RAR[0] in case user changed it. */
   3511 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3512 
   3513 	/* Get the latest mac address, User can use a LAA */
   3514 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3515 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3516 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3517 	hw->addr_ctrl.rar_used_count = 1;
   3518 
   3519 	/* Set hardware offload abilities from ifnet flags */
   3520 	ixgbe_set_if_hwassist(adapter);
   3521 
   3522 	/* Prepare transmit descriptors and buffers */
   3523 	if (ixgbe_setup_transmit_structures(adapter)) {
   3524 		device_printf(dev, "Could not setup transmit structures\n");
   3525 		ixgbe_stop(adapter);
   3526 		return;
   3527 	}
   3528 
   3529 	ixgbe_init_hw(hw);
   3530 	ixgbe_initialize_iov(adapter);
   3531 	ixgbe_initialize_transmit_units(adapter);
   3532 
   3533 	/* Setup Multicast table */
   3534 	ixgbe_set_multi(adapter);
   3535 
   3536 	/* Determine the correct mbuf pool, based on frame size */
   3537 	if (adapter->max_frame_size <= MCLBYTES)
   3538 		adapter->rx_mbuf_sz = MCLBYTES;
   3539 	else
   3540 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3541 
   3542 	/* Prepare receive descriptors and buffers */
   3543 	if (ixgbe_setup_receive_structures(adapter)) {
   3544 		device_printf(dev, "Could not setup receive structures\n");
   3545 		ixgbe_stop(adapter);
   3546 		return;
   3547 	}
   3548 
   3549 	/* Configure RX settings */
   3550 	ixgbe_initialize_receive_units(adapter);
   3551 
   3552 	/* Enable SDP & MSI-X interrupts based on adapter */
   3553 	ixgbe_config_gpie(adapter);
   3554 
   3555 	/* Set MTU size */
   3556 	if (ifp->if_mtu > ETHERMTU) {
   3557 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3558 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3559 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3560 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3561 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3562 	}
   3563 
   3564 	/* Now enable all the queues */
   3565 	for (int i = 0; i < adapter->num_queues; i++) {
   3566 		txr = &adapter->tx_rings[i];
   3567 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3568 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3569 		/* Set WTHRESH to 8, burst writeback */
   3570 		txdctl |= (8 << 16);
   3571 		/*
   3572 		 * When the internal queue falls below PTHRESH (32),
   3573 		 * start prefetching as long as there are at least
   3574 		 * HTHRESH (1) buffers ready. The values are taken
   3575 		 * from the Intel linux driver 3.8.21.
   3576 		 * Prefetching enables tx line rate even with 1 queue.
   3577 		 */
   3578 		txdctl |= (32 << 0) | (1 << 8);
   3579 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3580 	}
   3581 
   3582 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3583 		rxr = &adapter->rx_rings[i];
   3584 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3585 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3586 			/*
   3587 			 * PTHRESH = 21
   3588 			 * HTHRESH = 4
   3589 			 * WTHRESH = 8
   3590 			 */
   3591 			rxdctl &= ~0x3FFFFF;
   3592 			rxdctl |= 0x080420;
   3593 		}
   3594 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3595 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
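         		/*
         		 * Wait for the enable bit to latch before programming
         		 * the tail pointer.  Note that j is not reset per ring,
         		 * so the ~10ms poll budget is shared by all rings.
         		 */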
   3596 		for (; j < 10; j++) {
   3597 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3598 			    IXGBE_RXDCTL_ENABLE)
   3599 				break;
   3600 			else
   3601 				msec_delay(1);
   3602 		}
   3603 		wmb();
   3604 
   3605 		/*
   3606 		 * In netmap mode, we must preserve the buffers made
   3607 		 * available to userspace before the if_init()
   3608 		 * (this is true by default on the TX side, because
   3609 		 * init makes all buffers available to userspace).
   3610 		 *
   3611 		 * netmap_reset() and the device specific routines
   3612 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3613 		 * buffers at the end of the NIC ring, so here we
   3614 		 * must set the RDT (tail) register to make sure
   3615 		 * they are not overwritten.
   3616 		 *
   3617 		 * In this driver the NIC ring starts at RDH = 0,
   3618 		 * RDT points to the last slot available for reception (?),
   3619 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3620 		 */
   3621 #ifdef DEV_NETMAP
   3622 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3623 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3624 			struct netmap_adapter *na = NA(adapter->ifp);
   3625 			struct netmap_kring *kring = &na->rx_rings[i];
   3626 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3627 
   3628 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3629 		} else
   3630 #endif /* DEV_NETMAP */
   3631 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3632 			    adapter->num_rx_desc - 1);
   3633 	}
   3634 
   3635 	/* Enable Receive engine */
   3636 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3637 	if (hw->mac.type == ixgbe_mac_82598EB)
   3638 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3639 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3640 	ixgbe_enable_rx_dma(hw, rxctrl);
   3641 
   3642 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3643 
   3644 	/* Set up MSI-X routing */
   3645 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3646 		ixgbe_configure_ivars(adapter);
   3647 		/* Set up auto-mask */
   3648 		if (hw->mac.type == ixgbe_mac_82598EB)
   3649 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3650 		else {
   3651 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3652 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3653 		}
   3654 	} else {  /* Simple settings for Legacy/MSI */
   3655 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3656 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3657 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3658 	}
   3659 
   3660 	ixgbe_init_fdir(adapter);
   3661 
   3662 	/*
   3663 	 * Check on any SFP devices that
   3664 	 * need to be kick-started
   3665 	 */
   3666 	if (hw->phy.type == ixgbe_phy_none) {
   3667 		err = hw->phy.ops.identify(hw);
   3668 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3669 			device_printf(dev,
   3670 			    "Unsupported SFP+ module type was detected.\n");
   3671 			return;
    3672 		}
   3673 	}
   3674 
   3675 	/* Set moderation on the Link interrupt */
   3676 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3677 
   3678 	/* Config/Enable Link */
   3679 	ixgbe_config_link(adapter);
   3680 
   3681 	/* Hardware Packet Buffer & Flow Control setup */
   3682 	ixgbe_config_delay_values(adapter);
   3683 
   3684 	/* Initialize the FC settings */
   3685 	ixgbe_start_hw(hw);
   3686 
   3687 	/* Set up VLAN support and filter */
   3688 	ixgbe_setup_vlan_hw_support(adapter);
   3689 
   3690 	/* Setup DMA Coalescing */
   3691 	ixgbe_config_dmac(adapter);
   3692 
   3693 	/* And now turn on interrupts */
   3694 	ixgbe_enable_intr(adapter);
   3695 
   3696 	/* Enable the use of the MBX by the VF's */
   3697 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3698 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3699 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3700 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3701 	}
   3702 
   3703 	/* Now inform the stack we're ready */
   3704 	ifp->if_flags |= IFF_RUNNING;
   3705 
   3706 	return;
   3707 } /* ixgbe_init_locked */
   3708 
   3709 /************************************************************************
   3710  * ixgbe_init
   3711  ************************************************************************/
   3712 static int
   3713 ixgbe_init(struct ifnet *ifp)
   3714 {
   3715 	struct adapter *adapter = ifp->if_softc;
   3716 
   3717 	IXGBE_CORE_LOCK(adapter);
   3718 	ixgbe_init_locked(adapter);
   3719 	IXGBE_CORE_UNLOCK(adapter);
   3720 
   3721 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3722 } /* ixgbe_init */
   3723 
   3724 /************************************************************************
   3725  * ixgbe_set_ivar
   3726  *
   3727  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3728  *     (yes this is all very magic and confusing :)
   3729  *    - entry is the register array entry
   3730  *    - vector is the MSI-X vector for this queue
   3731  *    - type is RX/TX/MISC
   3732  ************************************************************************/
   3733 static void
   3734 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3735 {
   3736 	struct ixgbe_hw *hw = &adapter->hw;
   3737 	u32 ivar, index;
   3738 
   3739 	vector |= IXGBE_IVAR_ALLOC_VAL;
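         	/*
         	 * Each 32-bit IVAR register packs four 8-bit entries; the
         	 * index/shift arithmetic below picks the byte for this entry
         	 * and ORs in the vector with its ALLOC bit set.
         	 */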
   3740 
   3741 	switch (hw->mac.type) {
   3742 
   3743 	case ixgbe_mac_82598EB:
   3744 		if (type == -1)
   3745 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3746 		else
   3747 			entry += (type * 64);
   3748 		index = (entry >> 2) & 0x1F;
   3749 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3750 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3751 		ivar |= (vector << (8 * (entry & 0x3)));
   3752 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3753 		break;
   3754 
   3755 	case ixgbe_mac_82599EB:
   3756 	case ixgbe_mac_X540:
   3757 	case ixgbe_mac_X550:
   3758 	case ixgbe_mac_X550EM_x:
   3759 	case ixgbe_mac_X550EM_a:
   3760 		if (type == -1) { /* MISC IVAR */
   3761 			index = (entry & 1) * 8;
   3762 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3763 			ivar &= ~(0xFF << index);
   3764 			ivar |= (vector << index);
   3765 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3766 		} else {	/* RX/TX IVARS */
   3767 			index = (16 * (entry & 1)) + (8 * type);
   3768 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3769 			ivar &= ~(0xFF << index);
   3770 			ivar |= (vector << index);
   3771 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    3772 		}
         		break;
    3773 
    3774 	default:
   3775 		break;
   3776 	}
   3777 } /* ixgbe_set_ivar */
   3778 
   3779 /************************************************************************
   3780  * ixgbe_configure_ivars
   3781  ************************************************************************/
   3782 static void
   3783 ixgbe_configure_ivars(struct adapter *adapter)
   3784 {
   3785 	struct ix_queue *que = adapter->queues;
   3786 	u32             newitr;
   3787 
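         	/*
         	 * newitr uses the same EITR[11:3] encoding that the per-queue
         	 * interrupt_rate sysctl handler programs.
         	 */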
   3788 	if (ixgbe_max_interrupt_rate > 0)
   3789 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3790 	else {
   3791 		/*
   3792 		 * Disable DMA coalescing if interrupt moderation is
   3793 		 * disabled.
   3794 		 */
   3795 		adapter->dmac = 0;
   3796 		newitr = 0;
   3797 	}
   3798 
    3799 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3800 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3801 		struct tx_ring *txr = &adapter->tx_rings[i];
   3802 		/* First the RX queue entry */
    3803 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3804 		/* ... and the TX */
   3805 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3806 		/* Set an Initial EITR value */
   3807 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3808 	}
   3809 
   3810 	/* For the Link interrupt */
    3811 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3812 } /* ixgbe_configure_ivars */
   3813 
   3814 /************************************************************************
   3815  * ixgbe_config_gpie
   3816  ************************************************************************/
   3817 static void
   3818 ixgbe_config_gpie(struct adapter *adapter)
   3819 {
   3820 	struct ixgbe_hw *hw = &adapter->hw;
   3821 	u32             gpie;
   3822 
   3823 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3824 
   3825 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3826 		/* Enable Enhanced MSI-X mode */
   3827 		gpie |= IXGBE_GPIE_MSIX_MODE
   3828 		     |  IXGBE_GPIE_EIAME
   3829 		     |  IXGBE_GPIE_PBA_SUPPORT
   3830 		     |  IXGBE_GPIE_OCD;
   3831 	}
   3832 
   3833 	/* Fan Failure Interrupt */
   3834 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3835 		gpie |= IXGBE_SDP1_GPIEN;
   3836 
   3837 	/* Thermal Sensor Interrupt */
   3838 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3839 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3840 
   3841 	/* Link detection */
   3842 	switch (hw->mac.type) {
   3843 	case ixgbe_mac_82599EB:
   3844 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3845 		break;
   3846 	case ixgbe_mac_X550EM_x:
   3847 	case ixgbe_mac_X550EM_a:
   3848 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3849 		break;
   3850 	default:
   3851 		break;
   3852 	}
   3853 
   3854 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3855 
   3856 	return;
   3857 } /* ixgbe_config_gpie */
   3858 
   3859 /************************************************************************
   3860  * ixgbe_config_delay_values
   3861  *
   3862  *   Requires adapter->max_frame_size to be set.
   3863  ************************************************************************/
   3864 static void
   3865 ixgbe_config_delay_values(struct adapter *adapter)
   3866 {
   3867 	struct ixgbe_hw *hw = &adapter->hw;
   3868 	u32             rxpb, frame, size, tmp;
   3869 
   3870 	frame = adapter->max_frame_size;
   3871 
   3872 	/* Calculate High Water */
   3873 	switch (hw->mac.type) {
   3874 	case ixgbe_mac_X540:
   3875 	case ixgbe_mac_X550:
   3876 	case ixgbe_mac_X550EM_x:
   3877 	case ixgbe_mac_X550EM_a:
   3878 		tmp = IXGBE_DV_X540(frame, frame);
   3879 		break;
   3880 	default:
   3881 		tmp = IXGBE_DV(frame, frame);
   3882 		break;
   3883 	}
   3884 	size = IXGBE_BT2KB(tmp);
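         	/*
         	 * RXPBSIZE(0) is shifted down by 10 so it is in the same KB
         	 * units as the IXGBE_BT2KB() result subtracted from it.
         	 */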
   3885 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   3886 	hw->fc.high_water[0] = rxpb - size;
   3887 
   3888 	/* Now calculate Low Water */
   3889 	switch (hw->mac.type) {
   3890 	case ixgbe_mac_X540:
   3891 	case ixgbe_mac_X550:
   3892 	case ixgbe_mac_X550EM_x:
   3893 	case ixgbe_mac_X550EM_a:
   3894 		tmp = IXGBE_LOW_DV_X540(frame);
   3895 		break;
   3896 	default:
   3897 		tmp = IXGBE_LOW_DV(frame);
   3898 		break;
   3899 	}
   3900 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3901 
   3902 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3903 	hw->fc.send_xon = TRUE;
   3904 } /* ixgbe_config_delay_values */
   3905 
   3906 /************************************************************************
   3907  * ixgbe_set_multi - Multicast Update
   3908  *
   3909  *   Called whenever multicast address list is updated.
   3910  ************************************************************************/
   3911 static void
   3912 ixgbe_set_multi(struct adapter *adapter)
   3913 {
   3914 	struct ixgbe_mc_addr	*mta;
   3915 	struct ifnet		*ifp = adapter->ifp;
   3916 	u8			*update_ptr;
   3917 	int			mcnt = 0;
   3918 	u32			fctrl;
   3919 	struct ethercom		*ec = &adapter->osdep.ec;
   3920 	struct ether_multi	*enm;
   3921 	struct ether_multistep	step;
   3922 
   3923 	KASSERT(mutex_owned(&adapter->core_mtx));
   3924 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   3925 
   3926 	mta = adapter->mta;
   3927 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   3928 
   3929 	ifp->if_flags &= ~IFF_ALLMULTI;
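         	/*
         	 * Fall back to ALLMULTI when the exact-match table would
         	 * overflow or when a range of addresses (addrlo != addrhi)
         	 * is requested, since ranges cannot be programmed as
         	 * individual filter entries.
         	 */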
   3930 	ETHER_LOCK(ec);
   3931 	ETHER_FIRST_MULTI(step, ec, enm);
   3932 	while (enm != NULL) {
   3933 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   3934 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   3935 			ETHER_ADDR_LEN) != 0)) {
   3936 			ifp->if_flags |= IFF_ALLMULTI;
   3937 			break;
   3938 		}
   3939 		bcopy(enm->enm_addrlo,
   3940 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   3941 		mta[mcnt].vmdq = adapter->pool;
   3942 		mcnt++;
   3943 		ETHER_NEXT_MULTI(step, enm);
   3944 	}
   3945 	ETHER_UNLOCK(ec);
   3946 
   3947 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3948 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3949 	if (ifp->if_flags & IFF_PROMISC)
   3950 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3951 	else if (ifp->if_flags & IFF_ALLMULTI) {
   3952 		fctrl |= IXGBE_FCTRL_MPE;
   3953 	}
   3954 
   3955 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   3956 
   3957 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   3958 		update_ptr = (u8 *)mta;
   3959 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   3960 		    ixgbe_mc_array_itr, TRUE);
   3961 	}
   3962 
   3963 	return;
   3964 } /* ixgbe_set_multi */
   3965 
   3966 /************************************************************************
   3967  * ixgbe_mc_array_itr
   3968  *
   3969  *   An iterator function needed by the multicast shared code.
   3970  *   It feeds the shared code routine the addresses in the
   3971  *   array of ixgbe_set_multi() one by one.
   3972  ************************************************************************/
   3973 static u8 *
   3974 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   3975 {
   3976 	struct ixgbe_mc_addr *mta;
   3977 
   3978 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   3979 	*vmdq = mta->vmdq;
   3980 
   3981 	*update_ptr = (u8*)(mta + 1);
   3982 
   3983 	return (mta->addr);
   3984 } /* ixgbe_mc_array_itr */
   3985 
   3986 /************************************************************************
   3987  * ixgbe_local_timer - Timer routine
   3988  *
   3989  *   Checks for link status, updates statistics,
   3990  *   and runs the watchdog check.
   3991  ************************************************************************/
   3992 static void
   3993 ixgbe_local_timer(void *arg)
   3994 {
   3995 	struct adapter *adapter = arg;
   3996 
   3997 	IXGBE_CORE_LOCK(adapter);
   3998 	ixgbe_local_timer1(adapter);
   3999 	IXGBE_CORE_UNLOCK(adapter);
   4000 }
   4001 
   4002 static void
   4003 ixgbe_local_timer1(void *arg)
   4004 {
   4005 	struct adapter	*adapter = arg;
   4006 	device_t	dev = adapter->dev;
   4007 	struct ix_queue *que = adapter->queues;
   4008 	u64		queues = 0;
   4009 	int		hung = 0;
   4010 
   4011 	KASSERT(mutex_owned(&adapter->core_mtx));
   4012 
   4013 	/* Check for pluggable optics */
   4014 	if (adapter->sfp_probe)
   4015 		if (!ixgbe_sfp_probe(adapter))
   4016 			goto out; /* Nothing to do */
   4017 
   4018 	ixgbe_update_link_status(adapter);
   4019 	ixgbe_update_stats_counters(adapter);
   4020 
   4021 	/*
   4022 	 * Check the TX queues status
   4023 	 *      - mark hung queues so we don't schedule on them
   4024 	 *      - watchdog only if all queues show hung
   4025 	 */
   4026 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4027 		/* Keep track of queues with work for soft irq */
   4028 		if (que->txr->busy)
   4029 			queues |= ((u64)1 << que->me);
   4030 		/*
   4031 		 * Each time txeof runs without cleaning, but there
   4032 		 * are uncleaned descriptors it increments busy. If
   4033 		 * we get to the MAX we declare it hung.
   4034 		 */
   4035 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4036 			++hung;
   4037 			/* Mark the queue as inactive */
   4038 			adapter->active_queues &= ~((u64)1 << que->me);
   4039 			continue;
   4040 		} else {
   4041 			/* Check if we've come back from hung */
   4042 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4043 				adapter->active_queues |= ((u64)1 << que->me);
   4044 		}
   4045 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4046 			device_printf(dev,
    4047 			    "Warning: queue %d appears to be hung!\n", i);
   4048 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4049 			++hung;
   4050 		}
   4051 	}
   4052 
    4053 	/* Only truly watchdog if all queues show hung */
   4054 	if (hung == adapter->num_queues)
   4055 		goto watchdog;
   4056 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4057 		ixgbe_rearm_queues(adapter, queues);
   4058 	}
   4059 
   4060 out:
   4061 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4062 	return;
   4063 
   4064 watchdog:
   4065 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4066 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4067 	adapter->watchdog_events.ev_count++;
   4068 	ixgbe_init_locked(adapter);
   4069 } /* ixgbe_local_timer */
   4070 
   4071 /************************************************************************
   4072  * ixgbe_sfp_probe
   4073  *
   4074  *   Determine if a port had optics inserted.
   4075  ************************************************************************/
   4076 static bool
   4077 ixgbe_sfp_probe(struct adapter *adapter)
   4078 {
   4079 	struct ixgbe_hw	*hw = &adapter->hw;
   4080 	device_t	dev = adapter->dev;
   4081 	bool		result = FALSE;
   4082 
   4083 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4084 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4085 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4086 		if (ret)
   4087 			goto out;
   4088 		ret = hw->phy.ops.reset(hw);
   4089 		adapter->sfp_probe = FALSE;
   4090 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4091 			device_printf(dev, "Unsupported SFP+ module detected!\n");
   4092 			device_printf(dev,
   4093 			    "Reload driver with supported module.\n");
    4094 			goto out;
   4095 		} else
   4096 			device_printf(dev, "SFP+ module detected!\n");
   4097 		/* We now have supported optics */
   4098 		result = TRUE;
   4099 	}
   4100 out:
   4101 
   4102 	return (result);
   4103 } /* ixgbe_sfp_probe */
   4104 
   4105 /************************************************************************
   4106  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4107  ************************************************************************/
   4108 static void
   4109 ixgbe_handle_mod(void *context)
   4110 {
   4111 	struct adapter  *adapter = context;
   4112 	struct ixgbe_hw *hw = &adapter->hw;
   4113 	device_t	dev = adapter->dev;
   4114 	u32             err, cage_full = 0;
   4115 
   4116 	if (adapter->hw.need_crosstalk_fix) {
   4117 		switch (hw->mac.type) {
   4118 		case ixgbe_mac_82599EB:
   4119 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4120 			    IXGBE_ESDP_SDP2;
   4121 			break;
   4122 		case ixgbe_mac_X550EM_x:
   4123 		case ixgbe_mac_X550EM_a:
   4124 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4125 			    IXGBE_ESDP_SDP0;
   4126 			break;
   4127 		default:
   4128 			break;
   4129 		}
   4130 
   4131 		if (!cage_full)
   4132 			return;
   4133 	}
   4134 
   4135 	err = hw->phy.ops.identify_sfp(hw);
   4136 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4137 		device_printf(dev,
   4138 		    "Unsupported SFP+ module type was detected.\n");
   4139 		return;
   4140 	}
   4141 
   4142 	err = hw->mac.ops.setup_sfp(hw);
   4143 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4144 		device_printf(dev,
   4145 		    "Setup failure - unsupported SFP+ module type.\n");
   4146 		return;
   4147 	}
   4148 	softint_schedule(adapter->msf_si);
   4149 } /* ixgbe_handle_mod */
   4150 
   4151 
   4152 /************************************************************************
   4153  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4154  ************************************************************************/
   4155 static void
   4156 ixgbe_handle_msf(void *context)
   4157 {
   4158 	struct adapter  *adapter = context;
   4159 	struct ixgbe_hw *hw = &adapter->hw;
   4160 	u32             autoneg;
   4161 	bool            negotiate;
   4162 
    4163 	/* ixgbe_get_supported_physical_layer() calls hw->phy.ops.identify_sfp() */
   4164 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4165 
   4166 	autoneg = hw->phy.autoneg_advertised;
   4167 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4168 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4169 	else
   4170 		negotiate = 0;
   4171 	if (hw->mac.ops.setup_link)
   4172 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4173 
   4174 	/* Adjust media types shown in ifconfig */
   4175 	ifmedia_removeall(&adapter->media);
   4176 	ixgbe_add_media_types(adapter);
   4177 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4178 } /* ixgbe_handle_msf */
   4179 
   4180 /************************************************************************
   4181  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4182  ************************************************************************/
   4183 static void
   4184 ixgbe_handle_phy(void *context)
   4185 {
   4186 	struct adapter  *adapter = context;
   4187 	struct ixgbe_hw *hw = &adapter->hw;
   4188 	int error;
   4189 
   4190 	error = hw->phy.ops.handle_lasi(hw);
   4191 	if (error == IXGBE_ERR_OVERTEMP)
   4192 		device_printf(adapter->dev,
   4193 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
    4194 		    "PHY will downshift to lower power state!\n");
   4195 	else if (error)
   4196 		device_printf(adapter->dev,
   4197 		    "Error handling LASI interrupt: %d\n", error);
   4198 } /* ixgbe_handle_phy */
   4199 
   4200 static void
   4201 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4202 {
   4203 	struct adapter *adapter = ifp->if_softc;
   4204 
   4205 	IXGBE_CORE_LOCK(adapter);
   4206 	ixgbe_stop(adapter);
   4207 	IXGBE_CORE_UNLOCK(adapter);
   4208 }
   4209 
   4210 /************************************************************************
   4211  * ixgbe_stop - Stop the hardware
   4212  *
   4213  *   Disables all traffic on the adapter by issuing a
   4214  *   global reset on the MAC and deallocates TX/RX buffers.
   4215  ************************************************************************/
   4216 static void
   4217 ixgbe_stop(void *arg)
   4218 {
   4219 	struct ifnet    *ifp;
   4220 	struct adapter  *adapter = arg;
   4221 	struct ixgbe_hw *hw = &adapter->hw;
   4222 
   4223 	ifp = adapter->ifp;
   4224 
   4225 	KASSERT(mutex_owned(&adapter->core_mtx));
   4226 
   4227 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4228 	ixgbe_disable_intr(adapter);
   4229 	callout_stop(&adapter->timer);
   4230 
   4231 	/* Let the stack know...*/
   4232 	ifp->if_flags &= ~IFF_RUNNING;
   4233 
   4234 	ixgbe_reset_hw(hw);
   4235 	hw->adapter_stopped = FALSE;
   4236 	ixgbe_stop_adapter(hw);
   4237 	if (hw->mac.type == ixgbe_mac_82599EB)
   4238 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4239 	/* Turn off the laser - noop with no optics */
   4240 	ixgbe_disable_tx_laser(hw);
   4241 
   4242 	/* Update the stack */
   4243 	adapter->link_up = FALSE;
   4244 	ixgbe_update_link_status(adapter);
   4245 
   4246 	/* reprogram the RAR[0] in case user changed it. */
   4247 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4248 
   4249 	return;
   4250 } /* ixgbe_stop */
   4251 
   4252 /************************************************************************
   4253  * ixgbe_update_link_status - Update OS on link state
   4254  *
   4255  * Note: Only updates the OS on the cached link state.
   4256  *       The real check of the hardware only happens with
   4257  *       a link interrupt.
   4258  ************************************************************************/
   4259 static void
   4260 ixgbe_update_link_status(struct adapter *adapter)
   4261 {
   4262 	struct ifnet	*ifp = adapter->ifp;
   4263 	device_t        dev = adapter->dev;
   4264 	struct ixgbe_hw *hw = &adapter->hw;
   4265 
   4266 	if (adapter->link_up) {
   4267 		if (adapter->link_active == FALSE) {
   4268 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4269 				/*
    4270 				 * Discard the MAC Local Fault and Remote
    4271 				 * Fault counts; those registers are valid
    4272 				 * only while the link is up and running at
    4273 				 * 10Gbps.
   4274 				 */
   4275 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4276 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4277 			}
   4278 
   4279 			if (bootverbose) {
   4280 				const char *bpsmsg;
   4281 
   4282 				switch (adapter->link_speed) {
   4283 				case IXGBE_LINK_SPEED_10GB_FULL:
   4284 					bpsmsg = "10 Gbps";
   4285 					break;
   4286 				case IXGBE_LINK_SPEED_5GB_FULL:
   4287 					bpsmsg = "5 Gbps";
   4288 					break;
   4289 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4290 					bpsmsg = "2.5 Gbps";
   4291 					break;
   4292 				case IXGBE_LINK_SPEED_1GB_FULL:
   4293 					bpsmsg = "1 Gbps";
   4294 					break;
   4295 				case IXGBE_LINK_SPEED_100_FULL:
   4296 					bpsmsg = "100 Mbps";
   4297 					break;
   4298 				case IXGBE_LINK_SPEED_10_FULL:
   4299 					bpsmsg = "10 Mbps";
   4300 					break;
   4301 				default:
   4302 					bpsmsg = "unknown speed";
   4303 					break;
   4304 				}
    4305 				device_printf(dev, "Link is up %s %s\n",
   4306 				    bpsmsg, "Full Duplex");
   4307 			}
   4308 			adapter->link_active = TRUE;
   4309 			/* Update any Flow Control changes */
   4310 			ixgbe_fc_enable(&adapter->hw);
   4311 			/* Update DMA coalescing config */
   4312 			ixgbe_config_dmac(adapter);
   4313 			if_link_state_change(ifp, LINK_STATE_UP);
   4314 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4315 				ixgbe_ping_all_vfs(adapter);
   4316 		}
   4317 	} else { /* Link down */
   4318 		if (adapter->link_active == TRUE) {
   4319 			if (bootverbose)
   4320 				device_printf(dev, "Link is Down\n");
   4321 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4322 			adapter->link_active = FALSE;
   4323 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4324 				ixgbe_ping_all_vfs(adapter);
   4325 		}
   4326 	}
   4327 
   4328 	return;
   4329 } /* ixgbe_update_link_status */
   4330 
   4331 /************************************************************************
   4332  * ixgbe_config_dmac - Configure DMA Coalescing
   4333  ************************************************************************/
   4334 static void
   4335 ixgbe_config_dmac(struct adapter *adapter)
   4336 {
   4337 	struct ixgbe_hw *hw = &adapter->hw;
   4338 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4339 
   4340 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4341 		return;
   4342 
    4343 	if (dcfg->watchdog_timer != adapter->dmac ||
    4344 	    dcfg->link_speed != adapter->link_speed) {
   4345 		dcfg->watchdog_timer = adapter->dmac;
   4346 		dcfg->fcoe_en = false;
   4347 		dcfg->link_speed = adapter->link_speed;
   4348 		dcfg->num_tcs = 1;
   4349 
    4350 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d",
   4351 		    dcfg->watchdog_timer, dcfg->link_speed);
   4352 
   4353 		hw->mac.ops.dmac_config(hw);
   4354 	}
   4355 } /* ixgbe_config_dmac */
   4356 
   4357 /************************************************************************
   4358  * ixgbe_enable_intr
   4359  ************************************************************************/
   4360 static void
   4361 ixgbe_enable_intr(struct adapter *adapter)
   4362 {
   4363 	struct ixgbe_hw	*hw = &adapter->hw;
   4364 	struct ix_queue	*que = adapter->queues;
   4365 	u32		mask, fwsm;
   4366 
   4367 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4368 
   4369 	switch (adapter->hw.mac.type) {
   4370 	case ixgbe_mac_82599EB:
   4371 		mask |= IXGBE_EIMS_ECC;
   4372 		/* Temperature sensor on some adapters */
   4373 		mask |= IXGBE_EIMS_GPI_SDP0;
   4374 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4375 		mask |= IXGBE_EIMS_GPI_SDP1;
   4376 		mask |= IXGBE_EIMS_GPI_SDP2;
   4377 		break;
   4378 	case ixgbe_mac_X540:
   4379 		/* Detect if Thermal Sensor is enabled */
   4380 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4381 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4382 			mask |= IXGBE_EIMS_TS;
   4383 		mask |= IXGBE_EIMS_ECC;
   4384 		break;
   4385 	case ixgbe_mac_X550:
   4386 		/* MAC thermal sensor is automatically enabled */
   4387 		mask |= IXGBE_EIMS_TS;
   4388 		mask |= IXGBE_EIMS_ECC;
   4389 		break;
   4390 	case ixgbe_mac_X550EM_x:
   4391 	case ixgbe_mac_X550EM_a:
   4392 		/* Some devices use SDP0 for important information */
   4393 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4394 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4395 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4396 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4397 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4398 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4399 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4400 		mask |= IXGBE_EIMS_ECC;
   4401 		break;
   4402 	default:
   4403 		break;
   4404 	}
   4405 
   4406 	/* Enable Fan Failure detection */
   4407 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4408 		mask |= IXGBE_EIMS_GPI_SDP1;
   4409 	/* Enable SR-IOV */
   4410 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4411 		mask |= IXGBE_EIMS_MAILBOX;
   4412 	/* Enable Flow Director */
   4413 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4414 		mask |= IXGBE_EIMS_FLOW_DIR;
   4415 
   4416 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4417 
   4418 	/* With MSI-X we use auto clear */
   4419 	if (adapter->msix_mem) {
   4420 		mask = IXGBE_EIMS_ENABLE_MASK;
   4421 		/* Don't autoclear Link */
   4422 		mask &= ~IXGBE_EIMS_OTHER;
   4423 		mask &= ~IXGBE_EIMS_LSC;
   4424 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4425 			mask &= ~IXGBE_EIMS_MAILBOX;
   4426 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4427 	}
   4428 
   4429 	/*
   4430 	 * Now enable all queues, this is done separately to
   4431 	 * allow for handling the extended (beyond 32) MSI-X
   4432 	 * vectors that can be used by 82599
   4433 	 */
    4434 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4435 		ixgbe_enable_queue(adapter, que->msix);
   4436 
   4437 	IXGBE_WRITE_FLUSH(hw);
   4438 
   4439 	return;
   4440 } /* ixgbe_enable_intr */
   4441 
   4442 /************************************************************************
   4443  * ixgbe_disable_intr
   4444  ************************************************************************/
   4445 static void
   4446 ixgbe_disable_intr(struct adapter *adapter)
   4447 {
   4448 	if (adapter->msix_mem)
   4449 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
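	/*
	 * The 82598 exposes a single EIMC register covering every interrupt
	 * cause.  On later MACs the upper 16 bits of EIMC hold the
	 * miscellaneous causes while the per-queue causes live in
	 * EIMC_EX(0)/EIMC_EX(1), so both sets are masked below.
	 */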
   4450 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4451 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4452 	} else {
   4453 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4454 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4455 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4456 	}
   4457 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4458 
   4459 	return;
   4460 } /* ixgbe_disable_intr */
   4461 
   4462 /************************************************************************
   4463  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4464  ************************************************************************/
   4465 static int
   4466 ixgbe_legacy_irq(void *arg)
   4467 {
   4468 	struct ix_queue *que = arg;
   4469 	struct adapter	*adapter = que->adapter;
   4470 	struct ixgbe_hw	*hw = &adapter->hw;
   4471 	struct ifnet    *ifp = adapter->ifp;
    4472 	struct tx_ring	*txr = adapter->tx_rings;
   4473 	bool		more = false;
   4474 	u32             eicr, eicr_mask;
   4475 
   4476 	/* Silicon errata #26 on 82598 */
   4477 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4478 
   4479 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4480 
   4481 	adapter->stats.pf.legint.ev_count++;
   4482 	++que->irqs.ev_count;
   4483 	if (eicr == 0) {
   4484 		adapter->stats.pf.intzero.ev_count++;
   4485 		if ((ifp->if_flags & IFF_UP) != 0)
   4486 			ixgbe_enable_intr(adapter);
   4487 		return 0;
   4488 	}
   4489 
   4490 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4491 #ifdef __NetBSD__
   4492 		/* Don't run ixgbe_rxeof in interrupt context */
   4493 		more = true;
   4494 #else
   4495 		more = ixgbe_rxeof(que);
   4496 #endif
   4497 
   4498 		IXGBE_TX_LOCK(txr);
   4499 		ixgbe_txeof(txr);
   4500 #ifdef notyet
   4501 		if (!ixgbe_ring_empty(ifp, txr->br))
   4502 			ixgbe_start_locked(ifp, txr);
   4503 #endif
   4504 		IXGBE_TX_UNLOCK(txr);
   4505 	}
   4506 
   4507 	/* Check for fan failure */
   4508 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4509 		ixgbe_check_fan_failure(adapter, eicr, true);
   4510 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4511 	}
   4512 
   4513 	/* Link status change */
   4514 	if (eicr & IXGBE_EICR_LSC)
   4515 		softint_schedule(adapter->link_si);
   4516 
   4517 	if (ixgbe_is_sfp(hw)) {
   4518 		/* Pluggable optics-related interrupt */
   4519 		if (hw->mac.type >= ixgbe_mac_X540)
   4520 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4521 		else
   4522 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4523 
   4524 		if (eicr & eicr_mask) {
   4525 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4526 			softint_schedule(adapter->mod_si);
   4527 		}
   4528 
   4529 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4530 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4531 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4532 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4533 			softint_schedule(adapter->msf_si);
   4534 		}
   4535 	}
   4536 
   4537 	/* External PHY interrupt */
   4538 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4539 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4540 		softint_schedule(adapter->phy_si);
   4541 
   4542 	if (more)
   4543 		softint_schedule(que->que_si);
   4544 	else
   4545 		ixgbe_enable_intr(adapter);
   4546 
   4547 	return 1;
   4548 } /* ixgbe_legacy_irq */
   4549 
   4550 /************************************************************************
   4551  * ixgbe_free_pci_resources
   4552  ************************************************************************/
   4553 static void
   4554 ixgbe_free_pci_resources(struct adapter *adapter)
   4555 {
   4556 	struct ix_queue *que = adapter->queues;
   4557 	int		rid;
   4558 
   4559 	/*
   4560 	 * Release all msix queue resources:
   4561 	 */
   4562 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4563 		if (que->res != NULL)
   4564 			pci_intr_disestablish(adapter->osdep.pc,
   4565 			    adapter->osdep.ihs[i]);
   4566 	}
   4567 
   4568 	/* Clean the Legacy or Link interrupt last */
   4569 	if (adapter->vector) /* we are doing MSIX */
   4570 		rid = adapter->vector;
   4571 	else
   4572 		rid = 0;
   4573 
   4574 	if (adapter->osdep.ihs[rid] != NULL) {
   4575 		pci_intr_disestablish(adapter->osdep.pc,
   4576 		    adapter->osdep.ihs[rid]);
   4577 		adapter->osdep.ihs[rid] = NULL;
   4578 	}
   4579 
   4580 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4581 	    adapter->osdep.nintrs);
   4582 
   4583 	if (adapter->osdep.mem_size != 0) {
   4584 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4585 		    adapter->osdep.mem_bus_space_handle,
   4586 		    adapter->osdep.mem_size);
   4587 	}
   4588 
   4589 	return;
   4590 } /* ixgbe_free_pci_resources */
   4591 
   4592 /************************************************************************
   4593  * ixgbe_set_sysctl_value
   4594  ************************************************************************/
   4595 static void
   4596 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4597     const char *description, int *limit, int value)
   4598 {
   4599 	device_t dev =  adapter->dev;
   4600 	struct sysctllog **log;
   4601 	const struct sysctlnode *rnode, *cnode;
   4602 
   4603 	log = &adapter->sysctllog;
   4604 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4605 		aprint_error_dev(dev, "could not create sysctl root\n");
   4606 		return;
   4607 	}
   4608 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4609 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4610 	    name, SYSCTL_DESCR(description),
    4611 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4612 		aprint_error_dev(dev, "could not create sysctl\n");
   4613 	*limit = value;
   4614 } /* ixgbe_set_sysctl_value */
   4615 
   4616 /************************************************************************
   4617  * ixgbe_sysctl_flowcntl
   4618  *
   4619  *   SYSCTL wrapper around setting Flow Control
   4620  ************************************************************************/
   4621 static int
   4622 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4623 {
   4624 	struct sysctlnode node = *rnode;
   4625 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4626 	int error, fc;
   4627 
   4628 	fc = adapter->hw.fc.current_mode;
   4629 	node.sysctl_data = &fc;
   4630 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4631 	if (error != 0 || newp == NULL)
   4632 		return error;
   4633 
   4634 	/* Don't bother if it's not changed */
   4635 	if (fc == adapter->hw.fc.current_mode)
   4636 		return (0);
   4637 
   4638 	return ixgbe_set_flowcntl(adapter, fc);
   4639 } /* ixgbe_sysctl_flowcntl */
   4640 
   4641 /************************************************************************
   4642  * ixgbe_set_flowcntl - Set flow control
   4643  *
   4644  *   Flow control values:
   4645  *     0 - off
   4646  *     1 - rx pause
   4647  *     2 - tx pause
   4648  *     3 - full
   4649  ************************************************************************/
   4650 static int
   4651 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4652 {
   4653 	switch (fc) {
   4654 		case ixgbe_fc_rx_pause:
   4655 		case ixgbe_fc_tx_pause:
   4656 		case ixgbe_fc_full:
   4657 			adapter->hw.fc.requested_mode = fc;
   4658 			if (adapter->num_queues > 1)
   4659 				ixgbe_disable_rx_drop(adapter);
   4660 			break;
   4661 		case ixgbe_fc_none:
   4662 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4663 			if (adapter->num_queues > 1)
   4664 				ixgbe_enable_rx_drop(adapter);
   4665 			break;
   4666 		default:
   4667 			return (EINVAL);
   4668 	}
   4669 
   4670 #if 0 /* XXX NetBSD */
   4671 	/* Don't autoneg if forcing a value */
   4672 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4673 #endif
   4674 	ixgbe_fc_enable(&adapter->hw);
   4675 
   4676 	return (0);
   4677 } /* ixgbe_set_flowcntl */
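/*
 * For example, requesting mode 3 (ixgbe_fc_full) above makes
 * ixgbe_fc_enable() negotiate PAUSE in both directions, while mode 0
 * (ixgbe_fc_none) disables flow control and, with more than one queue,
 * turns on per-queue RX drop via ixgbe_enable_rx_drop() so one full ring
 * cannot stall the whole RX engine.
 */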
   4678 
   4679 /************************************************************************
   4680  * ixgbe_enable_rx_drop
   4681  *
   4682  *   Enable the hardware to drop packets when the buffer is
   4683  *   full. This is useful with multiqueue, so that no single
   4684  *   queue being full stalls the entire RX engine. We only
   4685  *   enable this when Multiqueue is enabled AND Flow Control
   4686  *   is disabled.
   4687  ************************************************************************/
   4688 static void
   4689 ixgbe_enable_rx_drop(struct adapter *adapter)
   4690 {
   4691 	struct ixgbe_hw *hw = &adapter->hw;
   4692 	struct rx_ring  *rxr;
   4693 	u32             srrctl;
   4694 
   4695 	for (int i = 0; i < adapter->num_queues; i++) {
   4696 		rxr = &adapter->rx_rings[i];
   4697 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4698 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4699 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4700 	}
   4701 
   4702 	/* enable drop for each vf */
   4703 	for (int i = 0; i < adapter->num_vfs; i++) {
   4704 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4705 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4706 		    IXGBE_QDE_ENABLE));
   4707 	}
   4708 } /* ixgbe_enable_rx_drop */
   4709 
   4710 /************************************************************************
   4711  * ixgbe_disable_rx_drop
   4712  ************************************************************************/
   4713 static void
   4714 ixgbe_disable_rx_drop(struct adapter *adapter)
   4715 {
   4716 	struct ixgbe_hw *hw = &adapter->hw;
   4717 	struct rx_ring  *rxr;
   4718 	u32             srrctl;
   4719 
   4720 	for (int i = 0; i < adapter->num_queues; i++) {
   4721 		rxr = &adapter->rx_rings[i];
    4722 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4723 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4724 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4725 	}
   4726 
   4727 	/* disable drop for each vf */
   4728 	for (int i = 0; i < adapter->num_vfs; i++) {
   4729 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4730 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4731 	}
   4732 } /* ixgbe_disable_rx_drop */
   4733 
   4734 /************************************************************************
   4735  * ixgbe_sysctl_advertise
   4736  *
   4737  *   SYSCTL wrapper around setting advertised speed
   4738  ************************************************************************/
   4739 static int
   4740 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4741 {
   4742 	struct sysctlnode node = *rnode;
   4743 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4744 	int            error = 0, advertise;
   4745 
   4746 	advertise = adapter->advertise;
   4747 	node.sysctl_data = &advertise;
   4748 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4749 	if (error != 0 || newp == NULL)
   4750 		return error;
   4751 
   4752 	return ixgbe_set_advertise(adapter, advertise);
   4753 } /* ixgbe_sysctl_advertise */
   4754 
   4755 /************************************************************************
   4756  * ixgbe_set_advertise - Control advertised link speed
   4757  *
   4758  *   Flags:
   4759  *     0x00 - Default (all capable link speed)
   4760  *     0x01 - advertise 100 Mb
   4761  *     0x02 - advertise 1G
   4762  *     0x04 - advertise 10G
   4763  *     0x08 - advertise 10 Mb
   4764  *     0x10 - advertise 2.5G
   4765  *     0x20 - advertise 5G
   4766  ************************************************************************/
   4767 static int
   4768 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4769 {
   4770 	device_t         dev;
   4771 	struct ixgbe_hw  *hw;
   4772 	ixgbe_link_speed speed = 0;
   4773 	ixgbe_link_speed link_caps = 0;
   4774 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4775 	bool             negotiate = FALSE;
   4776 
   4777 	/* Checks to validate new value */
   4778 	if (adapter->advertise == advertise) /* no change */
   4779 		return (0);
   4780 
   4781 	dev = adapter->dev;
   4782 	hw = &adapter->hw;
   4783 
   4784 	/* No speed changes for backplane media */
   4785 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4786 		return (ENODEV);
   4787 
   4788 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4789 	    (hw->phy.multispeed_fiber))) {
   4790 		device_printf(dev,
   4791 		    "Advertised speed can only be set on copper or "
   4792 		    "multispeed fiber media types.\n");
   4793 		return (EINVAL);
   4794 	}
   4795 
    4796 	if (advertise < 0x0 || advertise > 0x3f) {
    4797 		device_printf(dev,
    4798 		    "Invalid advertised speed; valid flags are 0x0 through 0x3f\n");
   4799 		return (EINVAL);
   4800 	}
   4801 
   4802 	if (hw->mac.ops.get_link_capabilities) {
   4803 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4804 		    &negotiate);
   4805 		if (err != IXGBE_SUCCESS) {
   4806 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   4807 			return (ENODEV);
   4808 		}
   4809 	}
   4810 
   4811 	/* Set new value and report new advertised mode */
   4812 	if (advertise & 0x1) {
   4813 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4814 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4815 			return (EINVAL);
   4816 		}
   4817 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4818 	}
   4819 	if (advertise & 0x2) {
   4820 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4821 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4822 			return (EINVAL);
   4823 		}
   4824 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4825 	}
   4826 	if (advertise & 0x4) {
   4827 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4828 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4829 			return (EINVAL);
   4830 		}
   4831 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4832 	}
   4833 	if (advertise & 0x8) {
   4834 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4835 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4836 			return (EINVAL);
   4837 		}
   4838 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4839 	}
   4840 	if (advertise & 0x10) {
   4841 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4842 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4843 			return (EINVAL);
   4844 		}
   4845 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4846 	}
   4847 	if (advertise & 0x20) {
   4848 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4849 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4850 			return (EINVAL);
   4851 		}
   4852 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4853 	}
   4854 	if (advertise == 0)
   4855 		speed = link_caps; /* All capable link speed */
   4856 
   4857 	hw->mac.autotry_restart = TRUE;
   4858 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4859 	adapter->advertise = advertise;
   4860 
   4861 	return (0);
   4862 } /* ixgbe_set_advertise */
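/*
 * For example, advertise = 0x06 (0x02 | 0x04) restricts the link to 1G and
 * 10G, while advertise = 0 falls back to every speed reported by
 * get_link_capabilities().
 */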
   4863 
   4864 /************************************************************************
   4865  * ixgbe_get_advertise - Get current advertised speed settings
   4866  *
   4867  *   Formatted for sysctl usage.
   4868  *   Flags:
   4869  *     0x01 - advertise 100 Mb
   4870  *     0x02 - advertise 1G
   4871  *     0x04 - advertise 10G
   4872  *     0x08 - advertise 10 Mb (yes, Mb)
   4873  *     0x10 - advertise 2.5G
   4874  *     0x20 - advertise 5G
   4875  ************************************************************************/
   4876 static int
   4877 ixgbe_get_advertise(struct adapter *adapter)
   4878 {
   4879 	struct ixgbe_hw  *hw = &adapter->hw;
   4880 	int              speed;
   4881 	ixgbe_link_speed link_caps = 0;
   4882 	s32              err;
   4883 	bool             negotiate = FALSE;
   4884 
   4885 	/*
   4886 	 * Advertised speed means nothing unless it's copper or
   4887 	 * multi-speed fiber
   4888 	 */
   4889 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4890 	    !(hw->phy.multispeed_fiber))
   4891 		return (0);
   4892 
   4893 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4894 	if (err != IXGBE_SUCCESS)
   4895 		return (0);
   4896 
   4897 	speed =
   4898 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   4899 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   4900 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   4901 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   4902 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   4903 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   4904 
   4905 	return speed;
   4906 } /* ixgbe_get_advertise */
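/*
 * For example, a copper interface whose PHY reports 100M, 1G and 10G
 * capability returns 0x07 (0x01 | 0x02 | 0x04) here.
 */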
   4907 
   4908 /************************************************************************
   4909  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   4910  *
   4911  *   Control values:
   4912  *     0/1 - off / on (use default value of 1000)
   4913  *
   4914  *     Legal timer values are:
   4915  *     50,100,250,500,1000,2000,5000,10000
   4916  *
   4917  *     Turning off interrupt moderation will also turn this off.
   4918  ************************************************************************/
   4919 static int
   4920 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   4921 {
   4922 	struct sysctlnode node = *rnode;
   4923 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4924 	struct ifnet   *ifp = adapter->ifp;
   4925 	int            error;
   4926 	int            newval;
   4927 
   4928 	newval = adapter->dmac;
   4929 	node.sysctl_data = &newval;
   4930 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4931 	if ((error) || (newp == NULL))
   4932 		return (error);
   4933 
   4934 	switch (newval) {
   4935 	case 0:
   4936 		/* Disabled */
   4937 		adapter->dmac = 0;
   4938 		break;
   4939 	case 1:
   4940 		/* Enable and use default */
   4941 		adapter->dmac = 1000;
   4942 		break;
   4943 	case 50:
   4944 	case 100:
   4945 	case 250:
   4946 	case 500:
   4947 	case 1000:
   4948 	case 2000:
   4949 	case 5000:
   4950 	case 10000:
   4951 		/* Legal values - allow */
   4952 		adapter->dmac = newval;
   4953 		break;
   4954 	default:
   4955 		/* Do nothing, illegal value */
   4956 		return (EINVAL);
   4957 	}
   4958 
   4959 	/* Re-initialize hardware if it's already running */
   4960 	if (ifp->if_flags & IFF_RUNNING)
   4961 		ixgbe_init(ifp);
   4962 
   4963 	return (0);
    4964 } /* ixgbe_sysctl_dmac */
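/*
 * For example, writing 1 to the dmac sysctl is equivalent to writing 1000
 * (the default watchdog value); any value outside the list above is
 * rejected with EINVAL and the current setting is left untouched.
 */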
   4965 
   4966 #ifdef IXGBE_DEBUG
   4967 /************************************************************************
   4968  * ixgbe_sysctl_power_state
   4969  *
   4970  *   Sysctl to test power states
   4971  *   Values:
   4972  *     0      - set device to D0
   4973  *     3      - set device to D3
   4974  *     (none) - get current device power state
   4975  ************************************************************************/
   4976 static int
   4977 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   4978 {
   4979 #ifdef notyet
   4980 	struct sysctlnode node = *rnode;
   4981 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4982 	device_t       dev =  adapter->dev;
   4983 	int            curr_ps, new_ps, error = 0;
   4984 
   4985 	curr_ps = new_ps = pci_get_powerstate(dev);
   4986 
   4987 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    4988 	if ((error) || (newp == NULL))
   4989 		return (error);
   4990 
   4991 	if (new_ps == curr_ps)
   4992 		return (0);
   4993 
   4994 	if (new_ps == 3 && curr_ps == 0)
   4995 		error = DEVICE_SUSPEND(dev);
   4996 	else if (new_ps == 0 && curr_ps == 3)
   4997 		error = DEVICE_RESUME(dev);
   4998 	else
   4999 		return (EINVAL);
   5000 
   5001 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5002 
   5003 	return (error);
   5004 #else
   5005 	return 0;
   5006 #endif
   5007 } /* ixgbe_sysctl_power_state */
   5008 #endif
   5009 
   5010 /************************************************************************
   5011  * ixgbe_sysctl_wol_enable
   5012  *
   5013  *   Sysctl to enable/disable the WoL capability,
   5014  *   if supported by the adapter.
   5015  *
   5016  *   Values:
   5017  *     0 - disabled
   5018  *     1 - enabled
   5019  ************************************************************************/
   5020 static int
   5021 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5022 {
   5023 	struct sysctlnode node = *rnode;
   5024 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5025 	struct ixgbe_hw *hw = &adapter->hw;
   5026 	bool            new_wol_enabled;
   5027 	int             error = 0;
   5028 
   5029 	new_wol_enabled = hw->wol_enabled;
   5030 	node.sysctl_data = &new_wol_enabled;
   5031 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5032 	if ((error) || (newp == NULL))
   5033 		return (error);
   5034 	if (new_wol_enabled == hw->wol_enabled)
   5035 		return (0);
   5036 
   5037 	if (new_wol_enabled && !adapter->wol_support)
   5038 		return (ENODEV);
   5039 	else
   5040 		hw->wol_enabled = new_wol_enabled;
   5041 
   5042 	return (0);
   5043 } /* ixgbe_sysctl_wol_enable */
   5044 
   5045 /************************************************************************
   5046  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5047  *
   5048  *   Sysctl to enable/disable the types of packets that the
   5049  *   adapter will wake up on upon receipt.
   5050  *   Flags:
   5051  *     0x1  - Link Status Change
   5052  *     0x2  - Magic Packet
   5053  *     0x4  - Direct Exact
   5054  *     0x8  - Directed Multicast
   5055  *     0x10 - Broadcast
   5056  *     0x20 - ARP/IPv4 Request Packet
   5057  *     0x40 - Direct IPv4 Packet
   5058  *     0x80 - Direct IPv6 Packet
   5059  *
   5060  *   Settings not listed above will cause the sysctl to return an error.
   5061  ************************************************************************/
   5062 static int
   5063 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5064 {
   5065 	struct sysctlnode node = *rnode;
   5066 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5067 	int error = 0;
   5068 	u32 new_wufc;
   5069 
   5070 	new_wufc = adapter->wufc;
   5071 	node.sysctl_data = &new_wufc;
   5072 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5073 	if ((error) || (newp == NULL))
   5074 		return (error);
   5075 	if (new_wufc == adapter->wufc)
   5076 		return (0);
   5077 
   5078 	if (new_wufc & 0xffffff00)
   5079 		return (EINVAL);
   5080 
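	/*
	 * Merge: take the wake-up filter bits from the user-settable low
	 * byte and preserve the adapter's bits above it.
	 */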
   5081 	new_wufc &= 0xff;
    5082 	new_wufc |= (0xffffff00 & adapter->wufc);
   5083 	adapter->wufc = new_wufc;
   5084 
   5085 	return (0);
   5086 } /* ixgbe_sysctl_wufc */
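/*
 * For example, writing 0x3 (0x1 | 0x2) arms wake-up on link status change
 * and magic packets only; any value with bits set above 0xff is rejected
 * with EINVAL.
 */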
   5087 
   5088 #ifdef IXGBE_DEBUG
   5089 /************************************************************************
   5090  * ixgbe_sysctl_print_rss_config
   5091  ************************************************************************/
   5092 static int
   5093 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5094 {
   5095 #ifdef notyet
   5096 	struct sysctlnode node = *rnode;
   5097 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5098 	struct ixgbe_hw *hw = &adapter->hw;
   5099 	device_t        dev = adapter->dev;
   5100 	struct sbuf     *buf;
   5101 	int             error = 0, reta_size;
   5102 	u32             reg;
   5103 
   5104 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5105 	if (!buf) {
   5106 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5107 		return (ENOMEM);
   5108 	}
   5109 
   5110 	// TODO: use sbufs to make a string to print out
   5111 	/* Set multiplier for RETA setup and table size based on MAC */
   5112 	switch (adapter->hw.mac.type) {
   5113 	case ixgbe_mac_X550:
   5114 	case ixgbe_mac_X550EM_x:
   5115 	case ixgbe_mac_X550EM_a:
   5116 		reta_size = 128;
   5117 		break;
   5118 	default:
   5119 		reta_size = 32;
   5120 		break;
   5121 	}
   5122 
   5123 	/* Print out the redirection table */
   5124 	sbuf_cat(buf, "\n");
   5125 	for (int i = 0; i < reta_size; i++) {
   5126 		if (i < 32) {
   5127 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5128 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5129 		} else {
   5130 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5131 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5132 		}
   5133 	}
   5134 
   5135 	// TODO: print more config
   5136 
   5137 	error = sbuf_finish(buf);
   5138 	if (error)
   5139 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5140 
   5141 	sbuf_delete(buf);
   5142 #endif
   5143 	return (0);
   5144 } /* ixgbe_sysctl_print_rss_config */
   5145 #endif /* IXGBE_DEBUG */
   5146 
   5147 /************************************************************************
   5148  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5149  *
   5150  *   For X552/X557-AT devices using an external PHY
   5151  ************************************************************************/
   5152 static int
   5153 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5154 {
   5155 	struct sysctlnode node = *rnode;
   5156 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5157 	struct ixgbe_hw *hw = &adapter->hw;
   5158 	int val;
   5159 	u16 reg;
   5160 	int		error;
   5161 
   5162 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5163 		device_printf(adapter->dev,
   5164 		    "Device has no supported external thermal sensor.\n");
   5165 		return (ENODEV);
   5166 	}
   5167 
   5168 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5169 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5170 		device_printf(adapter->dev,
   5171 		    "Error reading from PHY's current temperature register\n");
   5172 		return (EAGAIN);
   5173 	}
   5174 
   5175 	node.sysctl_data = &val;
   5176 
   5177 	/* Shift temp for output */
   5178 	val = reg >> 8;
   5179 
   5180 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5181 	if ((error) || (newp == NULL))
   5182 		return (error);
   5183 
   5184 	return (0);
   5185 } /* ixgbe_sysctl_phy_temp */
   5186 
   5187 /************************************************************************
   5188  * ixgbe_sysctl_phy_overtemp_occurred
   5189  *
   5190  *   Reports (directly from the PHY) whether the current PHY
   5191  *   temperature is over the overtemp threshold.
   5192  ************************************************************************/
   5193 static int
   5194 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5195 {
   5196 	struct sysctlnode node = *rnode;
   5197 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5198 	struct ixgbe_hw *hw = &adapter->hw;
   5199 	int val, error;
   5200 	u16 reg;
   5201 
   5202 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5203 		device_printf(adapter->dev,
   5204 		    "Device has no supported external thermal sensor.\n");
   5205 		return (ENODEV);
   5206 	}
   5207 
   5208 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5209 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5210 		device_printf(adapter->dev,
   5211 		    "Error reading from PHY's temperature status register\n");
   5212 		return (EAGAIN);
   5213 	}
   5214 
   5215 	node.sysctl_data = &val;
   5216 
   5217 	/* Get occurrence bit */
   5218 	val = !!(reg & 0x4000);
   5219 
   5220 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5221 	if ((error) || (newp == NULL))
   5222 		return (error);
   5223 
   5224 	return (0);
   5225 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5226 
   5227 /************************************************************************
   5228  * ixgbe_sysctl_eee_state
   5229  *
   5230  *   Sysctl to set EEE power saving feature
   5231  *   Values:
   5232  *     0      - disable EEE
   5233  *     1      - enable EEE
   5234  *     (none) - get current device EEE state
   5235  ************************************************************************/
   5236 static int
   5237 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5238 {
   5239 	struct sysctlnode node = *rnode;
   5240 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5241 	struct ifnet   *ifp = adapter->ifp;
   5242 	device_t       dev = adapter->dev;
   5243 	int            curr_eee, new_eee, error = 0;
   5244 	s32            retval;
   5245 
   5246 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5247 	node.sysctl_data = &new_eee;
   5248 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5249 	if ((error) || (newp == NULL))
   5250 		return (error);
   5251 
   5252 	/* Nothing to do */
   5253 	if (new_eee == curr_eee)
   5254 		return (0);
   5255 
   5256 	/* Not supported */
   5257 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5258 		return (EINVAL);
   5259 
   5260 	/* Bounds checking */
   5261 	if ((new_eee < 0) || (new_eee > 1))
   5262 		return (EINVAL);
   5263 
   5264 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5265 	if (retval) {
   5266 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5267 		return (EINVAL);
   5268 	}
   5269 
   5270 	/* Restart auto-neg */
   5271 	ixgbe_init(ifp);
   5272 
   5273 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5274 
   5275 	/* Cache new value */
   5276 	if (new_eee)
   5277 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5278 	else
   5279 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5280 
   5281 	return (error);
   5282 } /* ixgbe_sysctl_eee_state */
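/*
 * For example, writing 1 on hardware without IXGBE_FEATURE_EEE in feat_cap
 * fails with EINVAL; on supported hardware setup_eee() is called and the
 * interface is re-initialized so the link renegotiates with EEE enabled.
 */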
   5283 
   5284 /************************************************************************
   5285  * ixgbe_init_device_features
   5286  ************************************************************************/
   5287 static void
   5288 ixgbe_init_device_features(struct adapter *adapter)
   5289 {
   5290 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5291 	                  | IXGBE_FEATURE_RSS
   5292 	                  | IXGBE_FEATURE_MSI
   5293 	                  | IXGBE_FEATURE_MSIX
   5294 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5295 	                  | IXGBE_FEATURE_LEGACY_TX;
   5296 
   5297 	/* Set capabilities first... */
   5298 	switch (adapter->hw.mac.type) {
   5299 	case ixgbe_mac_82598EB:
   5300 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5301 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5302 		break;
   5303 	case ixgbe_mac_X540:
   5304 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5305 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5306 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5307 		    (adapter->hw.bus.func == 0))
   5308 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5309 		break;
   5310 	case ixgbe_mac_X550:
   5311 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5312 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5313 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5314 		break;
   5315 	case ixgbe_mac_X550EM_x:
   5316 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5317 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5318 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5319 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5320 		break;
   5321 	case ixgbe_mac_X550EM_a:
   5322 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5323 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5324 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5325 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5326 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5327 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5328 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5329 		}
   5330 		break;
   5331 	case ixgbe_mac_82599EB:
   5332 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5333 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5334 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5335 		    (adapter->hw.bus.func == 0))
   5336 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5337 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5338 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5339 		break;
   5340 	default:
   5341 		break;
   5342 	}
   5343 
   5344 	/* Enabled by default... */
   5345 	/* Fan failure detection */
   5346 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5347 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5348 	/* Netmap */
   5349 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5350 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5351 	/* EEE */
   5352 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5353 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5354 	/* Thermal Sensor */
   5355 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5356 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5357 
   5358 	/* Enabled via global sysctl... */
   5359 	/* Flow Director */
   5360 	if (ixgbe_enable_fdir) {
   5361 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5362 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5363 		else
    5364 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5365 	}
   5366 	/* Legacy (single queue) transmit */
   5367 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5368 	    ixgbe_enable_legacy_tx)
   5369 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5370 	/*
   5371 	 * Message Signal Interrupts - Extended (MSI-X)
   5372 	 * Normal MSI is only enabled if MSI-X calls fail.
   5373 	 */
   5374 	if (!ixgbe_enable_msix)
   5375 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5376 	/* Receive-Side Scaling (RSS) */
   5377 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5378 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5379 
   5380 	/* Disable features with unmet dependencies... */
   5381 	/* No MSI-X */
   5382 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5383 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5384 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5385 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5386 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5387 	}
   5388 } /* ixgbe_init_device_features */
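/*
 * For example, on an 82599 QSFP adapter LEGACY_IRQ is removed from feat_cap
 * above, so interrupt setup must use MSI or MSI-X; and if MSI-X itself is
 * unavailable, RSS and SR-IOV are dropped as well.
 */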
   5389 
   5390 /************************************************************************
   5391  * ixgbe_probe - Device identification routine
   5392  *
    5393  *   Determines if the driver should be loaded on
    5394  *   the adapter based on its PCI vendor/device ID.
    5395  *
    5396  *   return 1 on a match, 0 otherwise
   5397  ************************************************************************/
   5398 static int
   5399 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5400 {
   5401 	const struct pci_attach_args *pa = aux;
   5402 
   5403 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5404 }
   5405 
   5406 static ixgbe_vendor_info_t *
   5407 ixgbe_lookup(const struct pci_attach_args *pa)
   5408 {
   5409 	ixgbe_vendor_info_t *ent;
   5410 	pcireg_t subid;
   5411 
   5412 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5413 
   5414 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5415 		return NULL;
   5416 
   5417 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5418 
   5419 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5420 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5421 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5422 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5423 			(ent->subvendor_id == 0)) &&
   5424 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5425 			(ent->subdevice_id == 0))) {
   5426 			++ixgbe_total_ports;
   5427 			return ent;
   5428 		}
   5429 	}
   5430 	return NULL;
   5431 }
   5432 
   5433 static int
   5434 ixgbe_ifflags_cb(struct ethercom *ec)
   5435 {
   5436 	struct ifnet *ifp = &ec->ec_if;
   5437 	struct adapter *adapter = ifp->if_softc;
   5438 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5439 
   5440 	IXGBE_CORE_LOCK(adapter);
   5441 
   5442 	if (change != 0)
   5443 		adapter->if_flags = ifp->if_flags;
   5444 
   5445 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5446 		rc = ENETRESET;
   5447 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5448 		ixgbe_set_promisc(adapter);
   5449 
   5450 	/* Set up VLAN support and filter */
   5451 	ixgbe_setup_vlan_hw_support(adapter);
   5452 
   5453 	IXGBE_CORE_UNLOCK(adapter);
   5454 
   5455 	return rc;
   5456 }
   5457 
   5458 /************************************************************************
   5459  * ixgbe_ioctl - Ioctl entry point
   5460  *
   5461  *   Called when the user wants to configure the interface.
   5462  *
   5463  *   return 0 on success, positive on failure
   5464  ************************************************************************/
   5465 static int
   5466 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5467 {
   5468 	struct adapter	*adapter = ifp->if_softc;
   5469 	struct ixgbe_hw *hw = &adapter->hw;
   5470 	struct ifcapreq *ifcr = data;
   5471 	struct ifreq	*ifr = data;
   5472 	int             error = 0;
   5473 	int l4csum_en;
   5474 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5475 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5476 
   5477 	switch (command) {
   5478 	case SIOCSIFFLAGS:
   5479 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5480 		break;
   5481 	case SIOCADDMULTI:
   5482 	case SIOCDELMULTI:
   5483 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5484 		break;
   5485 	case SIOCSIFMEDIA:
   5486 	case SIOCGIFMEDIA:
   5487 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5488 		break;
   5489 	case SIOCSIFCAP:
   5490 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5491 		break;
   5492 	case SIOCSIFMTU:
   5493 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5494 		break;
   5495 #ifdef __NetBSD__
   5496 	case SIOCINITIFADDR:
   5497 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5498 		break;
   5499 	case SIOCGIFFLAGS:
   5500 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5501 		break;
   5502 	case SIOCGIFAFLAG_IN:
   5503 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5504 		break;
   5505 	case SIOCGIFADDR:
   5506 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5507 		break;
   5508 	case SIOCGIFMTU:
   5509 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5510 		break;
   5511 	case SIOCGIFCAP:
   5512 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5513 		break;
   5514 	case SIOCGETHERCAP:
   5515 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5516 		break;
   5517 	case SIOCGLIFADDR:
   5518 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5519 		break;
   5520 	case SIOCZIFDATA:
   5521 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5522 		hw->mac.ops.clear_hw_cntrs(hw);
   5523 		ixgbe_clear_evcnt(adapter);
   5524 		break;
   5525 	case SIOCAIFADDR:
   5526 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5527 		break;
   5528 #endif
   5529 	default:
   5530 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5531 		break;
   5532 	}
   5533 
   5534 	switch (command) {
   5535 	case SIOCSIFMEDIA:
   5536 	case SIOCGIFMEDIA:
   5537 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5538 	case SIOCGI2C:
   5539 	{
   5540 		struct ixgbe_i2c_req	i2c;
   5541 
   5542 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5543 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5544 		if (error != 0)
   5545 			break;
   5546 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5547 			error = EINVAL;
   5548 			break;
   5549 		}
   5550 		if (i2c.len > sizeof(i2c.data)) {
   5551 			error = EINVAL;
   5552 			break;
   5553 		}
   5554 
    5555 		for (int i = 0; i < i2c.len; i++)
    5556 			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i, i2c.dev_addr, &i2c.data[i]);
   5557 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5558 		break;
   5559 	}
   5560 	case SIOCSIFCAP:
   5561 		/* Layer-4 Rx checksum offload has to be turned on and
   5562 		 * off as a unit.
   5563 		 */
   5564 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5565 		if (l4csum_en != l4csum && l4csum_en != 0)
   5566 			return EINVAL;
   5567 		/*FALLTHROUGH*/
   5568 	case SIOCADDMULTI:
   5569 	case SIOCDELMULTI:
   5570 	case SIOCSIFFLAGS:
   5571 	case SIOCSIFMTU:
   5572 	default:
   5573 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5574 			return error;
   5575 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5576 			;
   5577 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5578 			IXGBE_CORE_LOCK(adapter);
   5579 			ixgbe_init_locked(adapter);
   5580 			ixgbe_recalculate_max_frame(adapter);
   5581 			IXGBE_CORE_UNLOCK(adapter);
   5582 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5583 			/*
   5584 			 * Multicast list has changed; set the hardware filter
   5585 			 * accordingly.
   5586 			 */
   5587 			IXGBE_CORE_LOCK(adapter);
   5588 			ixgbe_disable_intr(adapter);
   5589 			ixgbe_set_multi(adapter);
   5590 			ixgbe_enable_intr(adapter);
   5591 			IXGBE_CORE_UNLOCK(adapter);
   5592 		}
   5593 		return 0;
   5594 	}
   5595 
   5596 	return error;
   5597 } /* ixgbe_ioctl */
   5598 
   5599 /************************************************************************
   5600  * ixgbe_check_fan_failure
   5601  ************************************************************************/
   5602 static void
   5603 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5604 {
   5605 	u32 mask;
   5606 
   5607 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5608 	    IXGBE_ESDP_SDP1;
   5609 
   5610 	if (reg & mask)
   5611 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5612 } /* ixgbe_check_fan_failure */
   5613 
   5614 /************************************************************************
   5615  * ixgbe_handle_que
   5616  ************************************************************************/
   5617 static void
   5618 ixgbe_handle_que(void *context)
   5619 {
   5620 	struct ix_queue *que = context;
   5621 	struct adapter  *adapter = que->adapter;
   5622 	struct tx_ring  *txr = que->txr;
   5623 	struct ifnet    *ifp = adapter->ifp;
   5624 
   5625 	adapter->handleq.ev_count++;
   5626 
   5627 	if (ifp->if_flags & IFF_RUNNING) {
   5628 		ixgbe_rxeof(que);
   5629 		IXGBE_TX_LOCK(txr);
   5630 		ixgbe_txeof(txr);
   5631 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5632 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5633 				ixgbe_mq_start_locked(ifp, txr);
   5634 		/* Only for queue 0 */
   5635 		/* NetBSD still needs this for CBQ */
   5636 		if ((&adapter->queues[0] == que)
   5637 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5638 			ixgbe_legacy_start_locked(ifp, txr);
   5639 		IXGBE_TX_UNLOCK(txr);
   5640 	}
   5641 
   5642 	/* Re-enable this interrupt */
   5643 	if (que->res != NULL)
   5644 		ixgbe_enable_queue(adapter, que->msix);
   5645 	else
   5646 		ixgbe_enable_intr(adapter);
   5647 
   5648 	return;
   5649 } /* ixgbe_handle_que */
   5650 
   5651 /************************************************************************
   5652  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5653  ************************************************************************/
   5654 static int
   5655 ixgbe_allocate_legacy(struct adapter *adapter,
   5656     const struct pci_attach_args *pa)
   5657 {
   5658 	device_t	dev = adapter->dev;
   5659 	struct ix_queue *que = adapter->queues;
   5660 	struct tx_ring  *txr = adapter->tx_rings;
   5661 	int		counts[PCI_INTR_TYPE_SIZE];
   5662 	pci_intr_type_t intr_type, max_type;
   5663 	char            intrbuf[PCI_INTRSTR_LEN];
   5664 	const char	*intrstr = NULL;
   5665 
   5666 	/* We allocate a single interrupt resource */
   5667 	max_type = PCI_INTR_TYPE_MSI;
   5668 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5669 	counts[PCI_INTR_TYPE_MSI] =
   5670 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5671 	counts[PCI_INTR_TYPE_INTX] =
   5672 	    (adapter->feat_en & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5673 
   5674 alloc_retry:
   5675 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5676 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5677 		return ENXIO;
   5678 	}
   5679 	adapter->osdep.nintrs = 1;
   5680 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5681 	    intrbuf, sizeof(intrbuf));
   5682 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5683 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5684 	    device_xname(dev));
   5685 	if (adapter->osdep.ihs[0] == NULL) {
   5686 		intr_type = pci_intr_type(adapter->osdep.pc,
   5687 		    adapter->osdep.intrs[0]);
   5688 		aprint_error_dev(dev,"unable to establish %s\n",
   5689 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5690 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5691 		switch (intr_type) {
   5692 		case PCI_INTR_TYPE_MSI:
   5693 			/* The next try is for INTx: Disable MSI */
   5694 			max_type = PCI_INTR_TYPE_INTX;
   5695 			counts[PCI_INTR_TYPE_INTX] = 1;
   5696 			goto alloc_retry;
   5697 		case PCI_INTR_TYPE_INTX:
   5698 		default:
   5699 			/* See below */
   5700 			break;
   5701 		}
   5702 	}
   5703 	if (adapter->osdep.ihs[0] == NULL) {
   5704 		aprint_error_dev(dev,
   5705 		    "couldn't establish interrupt%s%s\n",
   5706 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5707 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5708 		return ENXIO;
   5709 	}
   5710 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5711 	/*
   5712 	 * Try allocating a fast interrupt and the associated deferred
   5713 	 * processing contexts.
   5714 	 */
   5715 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5716 		txr->txr_si =
   5717 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5718 			ixgbe_deferred_mq_start, txr);
   5719 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5720 	    ixgbe_handle_que, que);
   5721 
   5722 	/* Tasklets for Link, SFP and Multispeed Fiber */
    5723 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5724 	    ixgbe_handle_link, adapter);
   5725 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5726 	    ixgbe_handle_mod, adapter);
   5727 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5728 	    ixgbe_handle_msf, adapter);
   5729 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5730 	    ixgbe_handle_phy, adapter);
   5731 
   5732 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5733 		adapter->fdir_si =
   5734 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5735 			ixgbe_reinit_fdir, adapter);
   5736 
    5737 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) &&
    5738 		(txr->txr_si == NULL)) ||
    5739 	    que->que_si == NULL ||
    5740 	    adapter->link_si == NULL ||
    5741 	    adapter->mod_si == NULL ||
    5742 	    ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
    5743 		(adapter->fdir_si == NULL)) ||
   5744 	    adapter->msf_si == NULL) {
   5745 		aprint_error_dev(dev,
   5746 		    "could not establish software interrupts\n");
   5747 
   5748 		return ENXIO;
   5749 	}
   5750 	/* For simplicity in the handlers */
   5751 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5752 
   5753 	return (0);
   5754 } /* ixgbe_allocate_legacy */
   5755 
   5756 
   5757 /************************************************************************
   5758  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5759  ************************************************************************/
   5760 static int
   5761 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5762 {
   5763 	device_t        dev = adapter->dev;
   5764 	struct 		ix_queue *que = adapter->queues;
   5765 	struct  	tx_ring *txr = adapter->tx_rings;
   5766 	pci_chipset_tag_t pc;
   5767 	char		intrbuf[PCI_INTRSTR_LEN];
   5768 	char		intr_xname[32];
   5769 	const char	*intrstr = NULL;
   5770 	int 		error, vector = 0;
   5771 	int		cpu_id = 0;
   5772 	kcpuset_t	*affinity;
   5773 #ifdef RSS
   5774 	unsigned int    rss_buckets = 0;
   5775 	kcpuset_t	cpu_mask;
   5776 #endif
   5777 
   5778 	pc = adapter->osdep.pc;
   5779 #ifdef	RSS
   5780 	/*
   5781 	 * If we're doing RSS, the number of queues needs to
   5782 	 * match the number of RSS buckets that are configured.
   5783 	 *
   5784 	 * + If there's more queues than RSS buckets, we'll end
   5785 	 *   up with queues that get no traffic.
   5786 	 *
   5787 	 * + If there's more RSS buckets than queues, we'll end
   5788 	 *   up having multiple RSS buckets map to the same queue,
   5789 	 *   so there'll be some contention.
   5790 	 */
   5791 	rss_buckets = rss_getnumbuckets();
   5792 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5793 	    (adapter->num_queues != rss_buckets)) {
   5794 		device_printf(dev,
   5795 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5796 		    "; performance will be impacted.\n",
   5797 		    __func__, adapter->num_queues, rss_buckets);
   5798 	}
   5799 #endif
   5800 
   5801 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5802 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5803 	    adapter->osdep.nintrs) != 0) {
   5804 		aprint_error_dev(dev,
   5805 		    "failed to allocate MSI-X interrupt\n");
   5806 		return (ENXIO);
   5807 	}
   5808 
   5809 	kcpuset_create(&affinity, false);
   5810 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5811 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5812 		    device_xname(dev), i);
   5813 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5814 		    sizeof(intrbuf));
   5815 #ifdef IXGBE_MPSAFE
   5816 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5817 		    true);
   5818 #endif
   5819 		/* Set the handler function */
   5820 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5821 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5822 		    intr_xname);
   5823 		if (que->res == NULL) {
   5824 			pci_intr_release(pc, adapter->osdep.intrs,
   5825 			    adapter->osdep.nintrs);
   5826 			aprint_error_dev(dev,
   5827 			    "Failed to register QUE handler\n");
   5828 			kcpuset_destroy(affinity);
   5829 			return ENXIO;
   5830 		}
   5831 		que->msix = vector;
    5832 		adapter->active_queues |= (u64)(1ULL << que->msix);
   5833 
   5834 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5835 #ifdef	RSS
   5836 			/*
   5837 			 * The queue ID is used as the RSS layer bucket ID.
   5838 			 * We look up the queue ID -> RSS CPU ID and select
   5839 			 * that.
   5840 			 */
   5841 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5842 			CPU_SETOF(cpu_id, &cpu_mask);
   5843 #endif
   5844 		} else {
   5845 			/*
   5846 			 * Bind the MSI-X vector, and thus the
   5847 			 * rings to the corresponding CPU.
   5848 			 *
   5849 			 * This just happens to match the default RSS
   5850 			 * round-robin bucket -> queue -> CPU allocation.
   5851 			 */
   5852 			if (adapter->num_queues > 1)
   5853 				cpu_id = i;
   5854 		}
   5855 		/* Round-robin affinity */
   5856 		kcpuset_zero(affinity);
   5857 		kcpuset_set(affinity, cpu_id % ncpu);
   5858 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5859 		    NULL);
   5860 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5861 		    intrstr);
   5862 		if (error == 0) {
   5863 #if 1 /* def IXGBE_DEBUG */
   5864 #ifdef	RSS
    5865 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   5866 			    cpu_id % ncpu);
   5867 #else
   5868 			aprint_normal(", bound queue %d to cpu %d", i,
   5869 			    cpu_id % ncpu);
   5870 #endif
   5871 #endif /* IXGBE_DEBUG */
   5872 		}
   5873 		aprint_normal("\n");
   5874 
   5875 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5876 			txr->txr_si = softint_establish(
   5877 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5878 				ixgbe_deferred_mq_start, txr);
   5879 		que->que_si
   5880 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5881 			ixgbe_handle_que, que);
   5882 		if (que->que_si == NULL) {
   5883 			aprint_error_dev(dev,
   5884 			    "could not establish software interrupt\n");
   5885 		}
   5886 	}
   5887 
   5888 	/* and Link */
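         	/* Put the link vector on the CPU following the last queue's CPU. */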
   5889 	cpu_id++;
   5890 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5891 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5892 	    sizeof(intrbuf));
   5893 #ifdef IXGBE_MPSAFE
   5894 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5895 	    true);
   5896 #endif
   5897 	/* Set the link handler function */
   5898 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5899 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   5900 	    intr_xname);
   5901 	if (adapter->osdep.ihs[vector] == NULL) {
   5902 		adapter->res = NULL;
   5903 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   5904 		kcpuset_destroy(affinity);
   5905 		return (ENXIO);
   5906 	}
   5907 	/* Round-robin affinity */
   5908 	kcpuset_zero(affinity);
   5909 	kcpuset_set(affinity, cpu_id % ncpu);
    5910 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
         	    NULL);
   5911 
   5912 	aprint_normal_dev(dev,
   5913 	    "for link, interrupting at %s", intrstr);
   5914 	if (error == 0)
   5915 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   5916 	else
   5917 		aprint_normal("\n");
   5918 
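         	/* Remember which MSI-X vector handles the link interrupt. */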
   5919 	adapter->vector = vector;
   5920 	/* Tasklets for Link, SFP and Multispeed Fiber */
    5921 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5922 	    ixgbe_handle_link, adapter);
   5923 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5924 	    ixgbe_handle_mod, adapter);
   5925 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5926 	    ixgbe_handle_msf, adapter);
   5927 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   5928 		adapter->mbx_si =
   5929 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5930 			ixgbe_handle_mbx, adapter);
   5931 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5932 		ixgbe_handle_phy, adapter);
   5933 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5934 		adapter->fdir_si =
   5935 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5936 			ixgbe_reinit_fdir, adapter);
   5937 
   5938 	kcpuset_destroy(affinity);
   5939 
   5940 	return (0);
   5941 } /* ixgbe_allocate_msix */
   5942 
   5943 /************************************************************************
   5944  * ixgbe_configure_interrupts
   5945  *
    5946  *   Set up MSI-X, MSI, or legacy interrupts (in that order).
   5947  *   This will also depend on user settings.
   5948  ************************************************************************/
   5949 static int
   5950 ixgbe_configure_interrupts(struct adapter *adapter)
   5951 {
   5952 	device_t dev = adapter->dev;
   5953 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   5954 	int want, queues, msgs;
   5955 
   5956 	/* Default to 1 queue if MSI-X setup fails */
   5957 	adapter->num_queues = 1;
   5958 
   5959 	/* Override by tuneable */
   5960 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   5961 		goto msi;
   5962 
   5963 	/* First try MSI-X */
   5964 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   5965 	msgs = MIN(msgs, IXG_MAX_NINTR);
   5966 	if (msgs < 2)
   5967 		goto msi;
   5968 
   5969 	adapter->msix_mem = (void *)1; /* XXX */
   5970 
   5971 	/* Figure out a reasonable auto config value */
   5972 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   5973 
   5974 #ifdef	RSS
   5975 	/* If we're doing RSS, clamp at the number of RSS buckets */
   5976 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   5977 		queues = min(queues, rss_getnumbuckets());
   5978 #endif
   5979 	if (ixgbe_num_queues > queues) {
    5980 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too "
         		    "large, using reduced amount (%d).\n",
         		    ixgbe_num_queues, queues);
   5981 		ixgbe_num_queues = queues;
   5982 	}
   5983 
   5984 	if (ixgbe_num_queues != 0)
   5985 		queues = ixgbe_num_queues;
   5986 	else
   5987 		queues = min(queues,
   5988 		    min(mac->max_tx_queues, mac->max_rx_queues));
   5989 
   5990 	/* reflect correct sysctl value */
   5991 	ixgbe_num_queues = queues;
   5992 
   5993 	/*
   5994 	 * Want one vector (RX/TX pair) per queue
   5995 	 * plus an additional for Link.
   5996 	 */
   5997 	want = queues + 1;
   5998 	if (msgs >= want)
   5999 		msgs = want;
   6000 	else {
    6001 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
    6002 		    "%d vectors available but %d wanted!\n",
    6003 		    msgs, want);
   6004 		goto msi;
   6005 	}
   6006 	device_printf(dev,
   6007 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   6008 	adapter->num_queues = queues;
   6009 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6010 	return (0);
   6011 
   6012 	/*
    6013 	 * MSI-X allocation failed or provided us with fewer
    6014 	 * vectors than needed. Free MSI-X resources and
    6015 	 * fall back to MSI.
   6016 	 */
   6017 msi:
   6018 	/* Without MSI-X, some features are no longer supported */
   6019 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6020 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6021 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6022 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6023 
    6024 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6025 	adapter->msix_mem = NULL; /* XXX */
   6026 	if (msgs > 1)
   6027 		msgs = 1;
   6028 	if (msgs != 0) {
   6029 		msgs = 1;
   6030 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6031 		aprint_normal_dev(dev, "Using an MSI interrupt\n");
   6032 		return (0);
   6033 	}
   6034 
   6035 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6036 		aprint_error_dev(dev,
   6037 		    "Device does not support legacy interrupts.\n");
   6038 		return 1;
   6039 	}
   6040 
   6041 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6042 	aprint_normal_dev(dev, "Using a Legacy interrupt\n");
   6043 
   6044 	return (0);
   6045 } /* ixgbe_configure_interrupts */
   6046 
   6047 
   6048 /************************************************************************
   6049  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6050  *
   6051  *   Done outside of interrupt context since the driver might sleep
   6052  ************************************************************************/
   6053 static void
   6054 ixgbe_handle_link(void *context)
   6055 {
   6056 	struct adapter  *adapter = context;
   6057 	struct ixgbe_hw *hw = &adapter->hw;
   6058 
   6059 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6060 	ixgbe_update_link_status(adapter);
   6061 
   6062 	/* Re-enable link interrupts */
   6063 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6064 } /* ixgbe_handle_link */
   6065 
   6066 /************************************************************************
   6067  * ixgbe_rearm_queues
   6068  ************************************************************************/
   6069 static void
   6070 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6071 {
   6072 	u32 mask;
   6073 
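         	/*
         	 * 82598 has a single 32-bit EICS register; later MACs spread
         	 * the 64 queue bits across two extended EICS registers.
         	 */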
   6074 	switch (adapter->hw.mac.type) {
   6075 	case ixgbe_mac_82598EB:
   6076 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6077 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6078 		break;
   6079 	case ixgbe_mac_82599EB:
   6080 	case ixgbe_mac_X540:
   6081 	case ixgbe_mac_X550:
   6082 	case ixgbe_mac_X550EM_x:
   6083 	case ixgbe_mac_X550EM_a:
   6084 		mask = (queues & 0xFFFFFFFF);
   6085 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6086 		mask = (queues >> 32);
   6087 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6088 		break;
   6089 	default:
   6090 		break;
   6091 	}
   6092 } /* ixgbe_rearm_queues */
   6093