      1 /* $NetBSD: ixgbe.c,v 1.105 2017/10/18 10:43:32 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
      88  *   Used by probe to select which devices to attach to
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void	ixgbe_get_slot_info(struct adapter *);
    176 static int      ixgbe_allocate_msix(struct adapter *,
    177 		    const struct pci_attach_args *);
    178 static int      ixgbe_allocate_legacy(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_configure_interrupts(struct adapter *);
    181 static void	ixgbe_free_pci_resources(struct adapter *);
    182 static void	ixgbe_local_timer(void *);
    183 static void	ixgbe_local_timer1(void *);
    184 static int	ixgbe_setup_interface(device_t, struct adapter *);
    185 static void	ixgbe_config_gpie(struct adapter *);
    186 static void	ixgbe_config_dmac(struct adapter *);
    187 static void	ixgbe_config_delay_values(struct adapter *);
    188 static void	ixgbe_config_link(struct adapter *);
    189 static void	ixgbe_check_wol_support(struct adapter *);
    190 static int	ixgbe_setup_low_power_mode(struct adapter *);
    191 static void	ixgbe_rearm_queues(struct adapter *, u64);
    192 
    193 static void     ixgbe_initialize_transmit_units(struct adapter *);
    194 static void     ixgbe_initialize_receive_units(struct adapter *);
    195 static void	ixgbe_enable_rx_drop(struct adapter *);
    196 static void	ixgbe_disable_rx_drop(struct adapter *);
    197 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    198 
    199 static void     ixgbe_enable_intr(struct adapter *);
    200 static void     ixgbe_disable_intr(struct adapter *);
    201 static void     ixgbe_update_stats_counters(struct adapter *);
    202 static void     ixgbe_set_promisc(struct adapter *);
    203 static void     ixgbe_set_multi(struct adapter *);
    204 static void     ixgbe_update_link_status(struct adapter *);
    205 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    206 static void	ixgbe_configure_ivars(struct adapter *);
    207 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    208 
    209 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    210 #if 0
    211 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    212 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    213 #endif
    214 
    215 static void	ixgbe_add_device_sysctls(struct adapter *);
    216 static void     ixgbe_add_hw_stats(struct adapter *);
    217 static void	ixgbe_clear_evcnt(struct adapter *);
    218 static int	ixgbe_set_flowcntl(struct adapter *, int);
    219 static int	ixgbe_set_advertise(struct adapter *, int);
    220 static int      ixgbe_get_advertise(struct adapter *);
    221 
    222 /* Sysctl handlers */
    223 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    224 		     const char *, int *, int);
    225 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    226 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    227 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    231 #ifdef IXGBE_DEBUG
    232 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    234 #endif
    235 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    236 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    237 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    240 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    241 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    242 
    243 /* Support for pluggable optic modules */
    244 static bool	ixgbe_sfp_probe(struct adapter *);
    245 
    246 /* Legacy (single vector) interrupt handler */
    247 static int	ixgbe_legacy_irq(void *);
    248 
    249 /* The MSI/MSI-X Interrupt handlers */
    250 static int	ixgbe_msix_que(void *);
    251 static int	ixgbe_msix_link(void *);
    252 
    253 /* Software interrupts for deferred work */
    254 static void	ixgbe_handle_que(void *);
    255 static void	ixgbe_handle_link(void *);
    256 static void	ixgbe_handle_msf(void *);
    257 static void	ixgbe_handle_mod(void *);
    258 static void	ixgbe_handle_phy(void *);
    259 
    260 const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
    261 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    262 
    263 /************************************************************************
    264  *  NetBSD Device Interface Entry Points
    265  ************************************************************************/
    266 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    267     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    268     DVF_DETACH_SHUTDOWN);
    269 
    270 #if 0
    271 devclass_t ix_devclass;
    272 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    273 
    274 MODULE_DEPEND(ix, pci, 1, 1, 1);
    275 MODULE_DEPEND(ix, ether, 1, 1, 1);
    276 #endif
    277 
    278 /*
    279  * TUNEABLE PARAMETERS:
    280  */
    281 
     282 /*
     283  * AIM: Adaptive Interrupt Moderation.
     284  * When enabled, the interrupt rate for each
     285  * vector is varied over time based on the
     286  * traffic seen on that interrupt vector.
     287  */
    288 static bool ixgbe_enable_aim = true;
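         /*
          * NetBSD: SYSCTL_INT() is stubbed out below, so these FreeBSD-style
          * declarations compile to nothing and simply document the tunables
          * and their default values.
          */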
    289 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    290 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    291     "Enable adaptive interrupt moderation");
    292 
    293 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    294 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    295     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    296 
    297 /* How many packets rxeof tries to clean at a time */
    298 static int ixgbe_rx_process_limit = 256;
    299 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    300     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    301 
    302 /* How many packets txeof tries to clean at a time */
    303 static int ixgbe_tx_process_limit = 256;
    304 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    305     &ixgbe_tx_process_limit, 0,
    306     "Maximum number of sent packets to process at a time, -1 means unlimited");
    307 
    308 /* Flow control setting, default to full */
    309 static int ixgbe_flow_control = ixgbe_fc_full;
    310 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    311     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    312 
     313 /*
     314  * Smart speed setting, default to on.
     315  * This only works as a compile-time option
     316  * right now since it's applied during attach;
     317  * set this to 'ixgbe_smart_speed_off' to
     318  * disable.
     319  */
    320 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    321 
    322 /*
    323  * MSI-X should be the default for best performance,
    324  * but this allows it to be forced off for testing.
    325  */
    326 static int ixgbe_enable_msix = 1;
    327 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    328     "Enable MSI-X interrupts");
    329 
     330 /*
     331  * Number of queues. If set to 0, the
     332  * driver autoconfigures based on the
     333  * number of CPUs, with a maximum of 8.
     334  * This can be overridden manually here.
     335  */
    336 static int ixgbe_num_queues = 0;
    337 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    338     "Number of queues to configure, 0 indicates autoconfigure");
    339 
     340 /*
     341  * Number of TX descriptors per ring;
     342  * set higher than RX as this seems to be
     343  * the better-performing choice.
     344  */
    345 static int ixgbe_txd = PERFORM_TXD;
    346 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    347     "Number of transmit descriptors per queue");
    348 
    349 /* Number of RX descriptors per ring */
    350 static int ixgbe_rxd = PERFORM_RXD;
    351 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    352     "Number of receive descriptors per queue");
    353 
     354 /*
     355  * Setting this allows the use of
     356  * unsupported SFP+ modules; note that
     357  * in doing so you are on your own :)
     358  */
    359 static int allow_unsupported_sfp = false;
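         /* As with SYSCTL_INT() above, TUNABLE_INT() is stubbed out on NetBSD. */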
    360 #define TUNABLE_INT(__x, __y)
    361 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    362 
    363 /*
    364  * Not sure if Flow Director is fully baked,
    365  * so we'll default to turning it off.
    366  */
    367 static int ixgbe_enable_fdir = 0;
    368 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    369     "Enable Flow Director");
    370 
    371 /* Legacy Transmit (single queue) */
    372 static int ixgbe_enable_legacy_tx = 0;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    374     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    375 
    376 /* Receive-Side Scaling */
    377 static int ixgbe_enable_rss = 1;
    378 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    379     "Enable Receive-Side Scaling (RSS)");
    380 
    381 /* Keep running tab on them for sanity check */
    382 static int ixgbe_total_ports;
    383 
    384 #if 0
    385 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    386 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    387 #endif
    388 
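         /*
          * With NET_MPSAFE kernels, run our callouts and softints without
          * holding the kernel lock.
          */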
    389 #ifdef NET_MPSAFE
    390 #define IXGBE_MPSAFE		1
    391 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    392 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    393 #else
    394 #define IXGBE_CALLOUT_FLAGS	0
    395 #define IXGBE_SOFTINFT_FLAGS	0
    396 #endif
    397 
    398 /************************************************************************
    399  * ixgbe_initialize_rss_mapping
    400  ************************************************************************/
    401 static void
    402 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    403 {
    404 	struct ixgbe_hw	*hw = &adapter->hw;
    405 	u32             reta = 0, mrqc, rss_key[10];
    406 	int             queue_id, table_size, index_mult;
    407 	int             i, j;
    408 	u32             rss_hash_config;
    409 
    410 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    411 		/* Fetch the configured RSS key */
    412 		rss_getkey((uint8_t *) &rss_key);
    413 	} else {
    414 		/* set up random bits */
    415 		cprng_fast(&rss_key, sizeof(rss_key));
    416 	}
    417 
    418 	/* Set multiplier for RETA setup and table size based on MAC */
    419 	index_mult = 0x1;
    420 	table_size = 128;
    421 	switch (adapter->hw.mac.type) {
    422 	case ixgbe_mac_82598EB:
    423 		index_mult = 0x11;
    424 		break;
    425 	case ixgbe_mac_X550:
    426 	case ixgbe_mac_X550EM_x:
    427 	case ixgbe_mac_X550EM_a:
    428 		table_size = 512;
    429 		break;
    430 	default:
    431 		break;
    432 	}
    433 
    434 	/* Set up the redirection table */
    435 	for (i = 0, j = 0; i < table_size; i++, j++) {
    436 		if (j == adapter->num_queues)
    437 			j = 0;
    438 
    439 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    440 			/*
    441 			 * Fetch the RSS bucket id for the given indirection
    442 			 * entry. Cap it at the number of configured buckets
    443 			 * (which is num_queues.)
    444 			 */
    445 			queue_id = rss_get_indirection_to_bucket(i);
    446 			queue_id = queue_id % adapter->num_queues;
    447 		} else
    448 			queue_id = (j * index_mult);
    449 
    450 		/*
    451 		 * The low 8 bits are for hash value (n+0);
    452 		 * The next 8 bits are for hash value (n+1), etc.
    453 		 */
    454 		reta = reta >> 8;
    455 		reta = reta | (((uint32_t) queue_id) << 24);
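         		/*
         		 * Every group of four entries fills one 32-bit register:
         		 * entries 0-127 go to RETA, the remainder (the 512-entry
         		 * tables on X550) to ERETA.
         		 */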
    456 		if ((i & 3) == 3) {
    457 			if (i < 128)
    458 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    459 			else
    460 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    461 				    reta);
    462 			reta = 0;
    463 		}
    464 	}
    465 
    466 	/* Now fill our hash function seeds */
    467 	for (i = 0; i < 10; i++)
    468 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    469 
    470 	/* Perform hash on these packet types */
    471 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    472 		rss_hash_config = rss_gethashconfig();
    473 	else {
    474 		/*
    475 		 * Disable UDP - IP fragments aren't currently being handled
    476 		 * and so we end up with a mix of 2-tuple and 4-tuple
    477 		 * traffic.
    478 		 */
    479 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    480 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    481 		                | RSS_HASHTYPE_RSS_IPV6
    482 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    483 		                | RSS_HASHTYPE_RSS_IPV6_EX
    484 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    485 	}
    486 
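         	/* Translate the requested hash types into MRQC enable bits. */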
    487 	mrqc = IXGBE_MRQC_RSSEN;
    488 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    489 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    490 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    491 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    492 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    493 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    494 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    495 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    496 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    497 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
    503 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
    504 		    __func__);
    505 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    506 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    507 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    508 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    509 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    510 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    511 } /* ixgbe_initialize_rss_mapping */
    512 
    513 /************************************************************************
    514  * ixgbe_initialize_receive_units - Setup receive registers and features.
    515  ************************************************************************/
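         /*
          * Added to the mbuf size so that the shift below rounds up to the
          * next SRRCTL.BSIZEPKT unit (1 KB).
          */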
    516 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    517 
    518 static void
    519 ixgbe_initialize_receive_units(struct adapter *adapter)
    520 {
    521 	struct	rx_ring	*rxr = adapter->rx_rings;
    522 	struct ixgbe_hw	*hw = &adapter->hw;
    523 	struct ifnet    *ifp = adapter->ifp;
    524 	int             i, j;
    525 	u32		bufsz, fctrl, srrctl, rxcsum;
    526 	u32		hlreg;
    527 
    528 	/*
    529 	 * Make sure receives are disabled while
    530 	 * setting up the descriptor ring
    531 	 */
    532 	ixgbe_disable_rx(hw);
    533 
    534 	/* Enable broadcasts */
    535 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    536 	fctrl |= IXGBE_FCTRL_BAM;
    537 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    538 		fctrl |= IXGBE_FCTRL_DPF;
    539 		fctrl |= IXGBE_FCTRL_PMCF;
    540 	}
    541 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    542 
    543 	/* Set for Jumbo Frames? */
    544 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    545 	if (ifp->if_mtu > ETHERMTU)
    546 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    547 	else
    548 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    549 
    550 #ifdef DEV_NETMAP
    551 	/* CRC stripping is conditional in Netmap */
    552 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    553 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    554 	    !ix_crcstrip)
    555 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    556 	else
    557 #endif /* DEV_NETMAP */
    558 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    559 
    560 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    561 
    562 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    563 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    564 
    565 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    566 		u64 rdba = rxr->rxdma.dma_paddr;
    567 		u32 tqsmreg, reg;
    568 		int regnum = i / 4;	/* 1 register per 4 queues */
    569 		int regshift = i % 4;	/* 4 bits per 1 queue */
    570 		j = rxr->me;
    571 
    572 		/* Setup the Base and Length of the Rx Descriptor Ring */
    573 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    574 		    (rdba & 0x00000000ffffffffULL));
    575 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    576 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    577 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    578 
    579 		/* Set up the SRRCTL register */
    580 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    581 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    582 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    583 		srrctl |= bufsz;
    584 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    585 
    586 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    587 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    588 		reg &= ~(0x000000ff << (regshift * 8));
    589 		reg |= i << (regshift * 8);
    590 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    591 
     592 		/*
     593 		 * Set TQSMR/TQSM (Transmit Queue Statistic Mapping) register.
     594 		 * The register locations for queues 0...7 differ between
     595 		 * 82598 and newer MACs.
     596 		 */
    597 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    598 			tqsmreg = IXGBE_TQSMR(regnum);
    599 		else
    600 			tqsmreg = IXGBE_TQSM(regnum);
    601 		reg = IXGBE_READ_REG(hw, tqsmreg);
    602 		reg &= ~(0x000000ff << (regshift * 8));
    603 		reg |= i << (regshift * 8);
    604 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    605 
    606 		/*
    607 		 * Set DROP_EN iff we have no flow control and >1 queue.
    608 		 * Note that srrctl was cleared shortly before during reset,
    609 		 * so we do not need to clear the bit, but do it just in case
    610 		 * this code is moved elsewhere.
    611 		 */
    612 		if (adapter->num_queues > 1 &&
    613 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    614 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    615 		} else {
    616 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    617 		}
    618 
    619 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    620 
    621 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    622 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    623 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    624 
    625 		/* Set the driver rx tail address */
    626 		rxr->tail =  IXGBE_RDT(rxr->me);
    627 	}
    628 
    629 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    630 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    631 		            | IXGBE_PSRTYPE_UDPHDR
    632 		            | IXGBE_PSRTYPE_IPV4HDR
    633 		            | IXGBE_PSRTYPE_IPV6HDR;
    634 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    635 	}
    636 
    637 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    638 
    639 	ixgbe_initialize_rss_mapping(adapter);
    640 
    641 	if (adapter->num_queues > 1) {
    642 		/* RSS and RX IPP Checksum are mutually exclusive */
    643 		rxcsum |= IXGBE_RXCSUM_PCSD;
    644 	}
    645 
    646 	if (ifp->if_capenable & IFCAP_RXCSUM)
    647 		rxcsum |= IXGBE_RXCSUM_PCSD;
    648 
    649 	/* This is useful for calculating UDP/IP fragment checksums */
    650 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    651 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    652 
    653 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    654 
    655 	return;
    656 } /* ixgbe_initialize_receive_units */
    657 
    658 /************************************************************************
    659  * ixgbe_initialize_transmit_units - Enable transmit units.
    660  ************************************************************************/
    661 static void
    662 ixgbe_initialize_transmit_units(struct adapter *adapter)
    663 {
    664 	struct tx_ring  *txr = adapter->tx_rings;
    665 	struct ixgbe_hw	*hw = &adapter->hw;
    666 
    667 	/* Setup the Base and Length of the Tx Descriptor Ring */
    668 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    669 		u64 tdba = txr->txdma.dma_paddr;
    670 		u32 txctrl = 0;
    671 		int j = txr->me;
    672 
    673 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    674 		    (tdba & 0x00000000ffffffffULL));
    675 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    676 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    677 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    678 
    679 		/* Setup the HW Tx Head and Tail descriptor pointers */
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    681 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    682 
    683 		/* Cache the tail address */
    684 		txr->tail = IXGBE_TDT(j);
    685 
    686 		/* Disable Head Writeback */
    687 		/*
    688 		 * Note: for X550 series devices, these registers are actually
     689 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    690 		 * fields remain the same.
    691 		 */
    692 		switch (hw->mac.type) {
    693 		case ixgbe_mac_82598EB:
    694 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    695 			break;
    696 		default:
    697 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    698 			break;
    699 		}
    700 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    701 		switch (hw->mac.type) {
    702 		case ixgbe_mac_82598EB:
    703 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    704 			break;
    705 		default:
    706 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    707 			break;
    708 		}
    709 
    710 	}
    711 
    712 	if (hw->mac.type != ixgbe_mac_82598EB) {
    713 		u32 dmatxctl, rttdcs;
    714 
    715 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    716 		dmatxctl |= IXGBE_DMATXCTL_TE;
    717 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    718 		/* Disable arbiter to set MTQC */
    719 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    720 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    721 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    722 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    723 		    ixgbe_get_mtqc(adapter->iov_mode));
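         		/* Re-enable the arbiter */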
    724 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    725 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    726 	}
    727 
    728 	return;
    729 } /* ixgbe_initialize_transmit_units */
    730 
    731 /************************************************************************
    732  * ixgbe_attach - Device initialization routine
    733  *
    734  *   Called when the driver is being loaded.
    735  *   Identifies the type of hardware, allocates all resources
    736  *   and initializes the hardware.
     737  *
    739  ************************************************************************/
    740 static void
    741 ixgbe_attach(device_t parent, device_t dev, void *aux)
    742 {
    743 	struct adapter  *adapter;
    744 	struct ixgbe_hw *hw;
    745 	int             error = -1;
    746 	u32		ctrl_ext;
    747 	u16		high, low, nvmreg;
    748 	pcireg_t	id, subid;
    749 	ixgbe_vendor_info_t *ent;
    750 	struct pci_attach_args *pa = aux;
    751 	const char *str;
    752 	char buf[256];
    753 
    754 	INIT_DEBUGOUT("ixgbe_attach: begin");
    755 
    756 	/* Allocate, clear, and link in our adapter structure */
    757 	adapter = device_private(dev);
    758 	adapter->hw.back = adapter;
    759 	adapter->dev = dev;
    760 	hw = &adapter->hw;
    761 	adapter->osdep.pc = pa->pa_pc;
    762 	adapter->osdep.tag = pa->pa_tag;
    763 	if (pci_dma64_available(pa))
    764 		adapter->osdep.dmat = pa->pa_dmat64;
    765 	else
    766 		adapter->osdep.dmat = pa->pa_dmat;
    767 	adapter->osdep.attached = false;
    768 
    769 	ent = ixgbe_lookup(pa);
    770 
    771 	KASSERT(ent != NULL);
    772 
    773 	aprint_normal(": %s, Version - %s\n",
    774 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    775 
    776 	/* Core Lock Init*/
    777 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    778 
    779 	/* Set up the timer callout */
    780 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    781 
    782 	/* Determine hardware revision */
    783 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    784 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    785 
    786 	hw->vendor_id = PCI_VENDOR(id);
    787 	hw->device_id = PCI_PRODUCT(id);
    788 	hw->revision_id =
    789 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    790 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    791 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    792 
    793 	/*
    794 	 * Make sure BUSMASTER is set
    795 	 */
    796 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    797 
    798 	/* Do base PCI setup - map BAR0 */
    799 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    800 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    801 		error = ENXIO;
    802 		goto err_out;
    803 	}
    804 
    805 	/* let hardware know driver is loaded */
    806 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    807 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    808 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    809 
    810 	/*
    811 	 * Initialize the shared code
    812 	 */
    813 	if (ixgbe_init_shared_code(hw)) {
    814 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    815 		error = ENXIO;
    816 		goto err_out;
    817 	}
    818 
    819 	switch (hw->mac.type) {
    820 	case ixgbe_mac_82598EB:
    821 		str = "82598EB";
    822 		break;
    823 	case ixgbe_mac_82599EB:
    824 		str = "82599EB";
    825 		break;
    826 	case ixgbe_mac_X540:
    827 		str = "X540";
    828 		break;
    829 	case ixgbe_mac_X550:
    830 		str = "X550";
    831 		break;
    832 	case ixgbe_mac_X550EM_x:
    833 		str = "X550EM";
    834 		break;
    835 	case ixgbe_mac_X550EM_a:
    836 		str = "X550EM A";
    837 		break;
    838 	default:
    839 		str = "Unknown";
    840 		break;
    841 	}
    842 	aprint_normal_dev(dev, "device %s\n", str);
    843 
    844 	if (hw->mbx.ops.init_params)
    845 		hw->mbx.ops.init_params(hw);
    846 
    847 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    848 
    849 	/* Pick up the 82599 settings */
    850 	if (hw->mac.type != ixgbe_mac_82598EB) {
    851 		hw->phy.smart_speed = ixgbe_smart_speed;
    852 		adapter->num_segs = IXGBE_82599_SCATTER;
    853 	} else
    854 		adapter->num_segs = IXGBE_82598_SCATTER;
    855 
    856 	ixgbe_init_device_features(adapter);
    857 
    858 	if (ixgbe_configure_interrupts(adapter)) {
    859 		error = ENXIO;
    860 		goto err_out;
    861 	}
    862 
    863 	/* Allocate multicast array memory. */
    864 	adapter->mta = malloc(sizeof(*adapter->mta) *
    865 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    866 	if (adapter->mta == NULL) {
    867 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    868 		error = ENOMEM;
    869 		goto err_out;
    870 	}
    871 
    872 	/* Enable WoL (if supported) */
    873 	ixgbe_check_wol_support(adapter);
    874 
    875 	/* Verify adapter fan is still functional (if applicable) */
    876 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    877 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    878 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    879 	}
    880 
    881 	/* Ensure SW/FW semaphore is free */
    882 	ixgbe_init_swfw_semaphore(hw);
    883 
    884 	/* Enable EEE power saving */
    885 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    886 		hw->mac.ops.setup_eee(hw, TRUE);
    887 
    888 	/* Set an initial default flow control value */
    889 	hw->fc.requested_mode = ixgbe_flow_control;
    890 
    891 	/* Sysctls for limiting the amount of work done in the taskqueues */
    892 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    893 	    "max number of rx packets to process",
    894 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    895 
    896 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    897 	    "max number of tx packets to process",
    898 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    899 
    900 	/* Do descriptor calc and sanity checks */
    901 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    902 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    903 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    904 		adapter->num_tx_desc = DEFAULT_TXD;
    905 	} else
    906 		adapter->num_tx_desc = ixgbe_txd;
    907 
    908 	/*
    909 	 * With many RX rings it is easy to exceed the
    910 	 * system mbuf allocation. Tuning nmbclusters
    911 	 * can alleviate this.
    912 	 */
    913 	if (nmbclusters > 0) {
    914 		int s;
    915 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    916 		if (s > nmbclusters) {
    917 			aprint_error_dev(dev, "RX Descriptors exceed "
    918 			    "system mbuf max, using default instead!\n");
    919 			ixgbe_rxd = DEFAULT_RXD;
    920 		}
    921 	}
    922 
    923 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    924 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    925 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    926 		adapter->num_rx_desc = DEFAULT_RXD;
    927 	} else
    928 		adapter->num_rx_desc = ixgbe_rxd;
    929 
    930 	/* Allocate our TX/RX Queues */
    931 	if (ixgbe_allocate_queues(adapter)) {
    932 		error = ENOMEM;
    933 		goto err_out;
    934 	}
    935 
    936 	hw->phy.reset_if_overtemp = TRUE;
    937 	error = ixgbe_reset_hw(hw);
    938 	hw->phy.reset_if_overtemp = FALSE;
    939 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    940 		/*
    941 		 * No optics in this port, set up
    942 		 * so the timer routine will probe
    943 		 * for later insertion.
    944 		 */
    945 		adapter->sfp_probe = TRUE;
    946 		error = IXGBE_SUCCESS;
    947 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    948 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    949 		error = EIO;
    950 		goto err_late;
    951 	} else if (error) {
    952 		aprint_error_dev(dev, "Hardware initialization failed\n");
    953 		error = EIO;
    954 		goto err_late;
    955 	}
    956 
    957 	/* Make sure we have a good EEPROM before we read from it */
    958 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    959 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    960 		error = EIO;
    961 		goto err_late;
    962 	}
    963 
    964 	aprint_normal("%s:", device_xname(dev));
    965 	/* NVM Image Version */
    966 	switch (hw->mac.type) {
    967 	case ixgbe_mac_X540:
    968 	case ixgbe_mac_X550EM_a:
    969 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    970 		if (nvmreg == 0xffff)
    971 			break;
    972 		high = (nvmreg >> 12) & 0x0f;
    973 		low = (nvmreg >> 4) & 0xff;
    974 		id = nvmreg & 0x0f;
    975 		aprint_normal(" NVM Image Version %u.%u ID 0x%x,", high, low,
    976 		    id);
    977 		break;
    978 	case ixgbe_mac_X550EM_x:
    979 	case ixgbe_mac_X550:
    980 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    981 		if (nvmreg == 0xffff)
    982 			break;
    983 		high = (nvmreg >> 12) & 0x0f;
    984 		low = nvmreg & 0xff;
    985 		aprint_normal(" NVM Image Version %u.%u,", high, low);
    986 		break;
    987 	default:
    988 		break;
    989 	}
    990 
    991 	/* PHY firmware revision */
    992 	switch (hw->mac.type) {
    993 	case ixgbe_mac_X540:
    994 	case ixgbe_mac_X550:
    995 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
    996 		if (nvmreg == 0xffff)
    997 			break;
    998 		high = (nvmreg >> 12) & 0x0f;
    999 		low = (nvmreg >> 4) & 0xff;
   1000 		id = nvmreg & 0x000f;
   1001 		aprint_normal(" PHY FW Revision %u.%u ID 0x%x,", high, low,
   1002 		    id);
   1003 		break;
   1004 	default:
   1005 		break;
   1006 	}
   1007 
   1008 	/* NVM Map version & OEM NVM Image version */
   1009 	switch (hw->mac.type) {
   1010 	case ixgbe_mac_X550:
   1011 	case ixgbe_mac_X550EM_x:
   1012 	case ixgbe_mac_X550EM_a:
   1013 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1014 		if (nvmreg != 0xffff) {
   1015 			high = (nvmreg >> 12) & 0x0f;
   1016 			low = nvmreg & 0x00ff;
   1017 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1018 		}
   1019 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
    1020 		if (nvmreg != 0xffff) {
   1021 			high = (nvmreg >> 12) & 0x0f;
   1022 			low = nvmreg & 0x00ff;
   1023 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1024 			    low);
   1025 		}
   1026 		break;
   1027 	default:
   1028 		break;
   1029 	}
   1030 
   1031 	/* Print the ETrackID */
   1032 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1033 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1034 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1035 
   1036 	/* Setup OS specific network interface */
   1037 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1038 		goto err_late;
   1039 
   1040 	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
   1041 		error = ixgbe_allocate_msix(adapter, pa);
   1042 	else
   1043 		error = ixgbe_allocate_legacy(adapter, pa);
   1044 	if (error)
   1045 		goto err_late;
   1046 
   1047 	error = ixgbe_start_hw(hw);
   1048 	switch (error) {
   1049 	case IXGBE_ERR_EEPROM_VERSION:
   1050 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1051 		    "LOM.  Please be aware there may be issues associated "
   1052 		    "with your hardware.\nIf you are experiencing problems "
   1053 		    "please contact your Intel or hardware representative "
   1054 		    "who provided you with this hardware.\n");
   1055 		break;
   1056 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1057 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1058 		error = EIO;
   1059 		goto err_late;
   1060 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1061 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1062 		/* falls thru */
   1063 	default:
   1064 		break;
   1065 	}
   1066 
   1067 	if (hw->phy.id != 0) {
   1068 		uint16_t id1, id2;
   1069 		int oui, model, rev;
   1070 		const char *descr;
   1071 
   1072 		id1 = hw->phy.id >> 16;
   1073 		id2 = hw->phy.id & 0xffff;
   1074 		oui = MII_OUI(id1, id2);
   1075 		model = MII_MODEL(id2);
   1076 		rev = MII_REV(id2);
   1077 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1078 			aprint_normal_dev(dev,
   1079 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1080 			    descr, oui, model, rev);
   1081 		else
   1082 			aprint_normal_dev(dev,
   1083 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1084 			    oui, model, rev);
   1085 	}
   1086 
   1087 	/* Enable the optics for 82599 SFP+ fiber */
   1088 	ixgbe_enable_tx_laser(hw);
   1089 
   1090 	/* Enable power to the phy. */
   1091 	ixgbe_set_phy_power(hw, TRUE);
   1092 
   1093 	/* Initialize statistics */
   1094 	ixgbe_update_stats_counters(adapter);
   1095 
   1096 	/* Check PCIE slot type/speed/width */
   1097 	ixgbe_get_slot_info(adapter);
   1098 
   1099 	/*
   1100 	 * Do time init and sysctl init here, but
   1101 	 * only on the first port of a bypass adapter.
   1102 	 */
   1103 	ixgbe_bypass_init(adapter);
   1104 
   1105 	/* Set an initial dmac value */
   1106 	adapter->dmac = 0;
   1107 	/* Set initial advertised speeds (if applicable) */
   1108 	adapter->advertise = ixgbe_get_advertise(adapter);
   1109 
   1110 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1111 		ixgbe_define_iov_schemas(dev, &error);
   1112 
   1113 	/* Add sysctls */
   1114 	ixgbe_add_device_sysctls(adapter);
   1115 	ixgbe_add_hw_stats(adapter);
   1116 
   1117 	/* For Netmap */
   1118 	adapter->init_locked = ixgbe_init_locked;
   1119 	adapter->stop_locked = ixgbe_stop;
   1120 
   1121 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1122 		ixgbe_netmap_attach(adapter);
   1123 
   1124 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1125 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1126 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1127 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1128 
   1129 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1130 		pmf_class_network_register(dev, adapter->ifp);
   1131 	else
   1132 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1133 
   1134 	INIT_DEBUGOUT("ixgbe_attach: end");
   1135 	adapter->osdep.attached = true;
   1136 
   1137 	return;
   1138 
   1139 err_late:
   1140 	ixgbe_free_transmit_structures(adapter);
   1141 	ixgbe_free_receive_structures(adapter);
   1142 	free(adapter->queues, M_DEVBUF);
   1143 err_out:
   1144 	if (adapter->ifp != NULL)
   1145 		if_free(adapter->ifp);
   1146 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1147 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1148 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1149 	ixgbe_free_pci_resources(adapter);
   1150 	if (adapter->mta != NULL)
   1151 		free(adapter->mta, M_DEVBUF);
   1152 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1153 
   1154 	return;
   1155 } /* ixgbe_attach */
   1156 
   1157 /************************************************************************
   1158  * ixgbe_check_wol_support
   1159  *
   1160  *   Checks whether the adapter's ports are capable of
   1161  *   Wake On LAN by reading the adapter's NVM.
   1162  *
   1163  *   Sets each port's hw->wol_enabled value depending
   1164  *   on the value read here.
   1165  ************************************************************************/
   1166 static void
   1167 ixgbe_check_wol_support(struct adapter *adapter)
   1168 {
   1169 	struct ixgbe_hw *hw = &adapter->hw;
   1170 	u16             dev_caps = 0;
   1171 
   1172 	/* Find out WoL support for port */
   1173 	adapter->wol_support = hw->wol_enabled = 0;
   1174 	ixgbe_get_device_caps(hw, &dev_caps);
   1175 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1176 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1177 	     hw->bus.func == 0))
   1178 		adapter->wol_support = hw->wol_enabled = 1;
   1179 
   1180 	/* Save initial wake up filter configuration */
   1181 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1182 
   1183 	return;
   1184 } /* ixgbe_check_wol_support */
   1185 
   1186 /************************************************************************
   1187  * ixgbe_setup_interface
   1188  *
   1189  *   Setup networking device structure and register an interface.
   1190  ************************************************************************/
   1191 static int
   1192 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1193 {
   1194 	struct ethercom *ec = &adapter->osdep.ec;
   1195 	struct ifnet   *ifp;
   1196 
   1197 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1198 
   1199 	ifp = adapter->ifp = &ec->ec_if;
   1200 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1201 	ifp->if_baudrate = IF_Gbps(10);
   1202 	ifp->if_init = ixgbe_init;
   1203 	ifp->if_stop = ixgbe_ifstop;
   1204 	ifp->if_softc = adapter;
   1205 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1206 #ifdef IXGBE_MPSAFE
   1207 	ifp->if_extflags = IFEF_START_MPSAFE;
   1208 #endif
   1209 	ifp->if_ioctl = ixgbe_ioctl;
   1210 #if __FreeBSD_version >= 1100045
   1211 	/* TSO parameters */
   1212 	ifp->if_hw_tsomax = 65518;
   1213 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1214 	ifp->if_hw_tsomaxsegsize = 2048;
   1215 #endif
   1216 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1217 #if 0
   1218 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1219 #endif
   1220 	} else {
   1221 		ifp->if_transmit = ixgbe_mq_start;
   1222 #if 0
   1223 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1224 #endif
   1225 	}
   1226 	ifp->if_start = ixgbe_legacy_start;
   1227 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1228 	IFQ_SET_READY(&ifp->if_snd);
   1229 
   1230 	if_initialize(ifp);
   1231 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1232 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1233 	/*
    1234 	 * We use per-TX-queue softints, so if_deferred_start_init() isn't
   1235 	 * used.
   1236 	 */
   1237 	if_register(ifp);
   1238 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1239 
   1240 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1241 
   1242 	/*
   1243 	 * Tell the upper layer(s) we support long frames.
   1244 	 */
   1245 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1246 
   1247 	/* Set capability flags */
   1248 	ifp->if_capabilities |= IFCAP_RXCSUM
   1249 			     |  IFCAP_TXCSUM
   1250 			     |  IFCAP_TSOv4
   1251 			     |  IFCAP_TSOv6
   1252 			     |  IFCAP_LRO;
   1253 	ifp->if_capenable = 0;
   1254 
   1255 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1256 	    		    |  ETHERCAP_VLAN_HWCSUM
   1257 	    		    |  ETHERCAP_JUMBO_MTU
   1258 	    		    |  ETHERCAP_VLAN_MTU;
   1259 
   1260 	/* Enable the above capabilities by default */
   1261 	ec->ec_capenable = ec->ec_capabilities;
   1262 
   1263 	/*
    1264 	 * Don't turn this on by default: if vlans are
    1265 	 * created on another pseudo device (e.g. lagg),
    1266 	 * then vlan events are not passed through and
    1267 	 * operation breaks, but with HW FILTER off it works.
    1268 	 * If using vlans directly on the ixgbe driver you can
    1269 	 * enable this and get full hardware tag filtering.
   1270 	 */
   1271 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1272 
   1273 	/*
   1274 	 * Specify the media types supported by this adapter and register
   1275 	 * callbacks to update media and link information
   1276 	 */
   1277 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1278 	    ixgbe_media_status);
   1279 
   1280 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1281 	ixgbe_add_media_types(adapter);
   1282 
   1283 	/* Set autoselect media by default */
   1284 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1285 
   1286 	return (0);
   1287 } /* ixgbe_setup_interface */
   1288 
   1289 /************************************************************************
   1290  * ixgbe_add_media_types
   1291  ************************************************************************/
   1292 static void
   1293 ixgbe_add_media_types(struct adapter *adapter)
   1294 {
   1295 	struct ixgbe_hw *hw = &adapter->hw;
   1296 	device_t        dev = adapter->dev;
   1297 	u64             layer;
   1298 
   1299 	layer = adapter->phy_layer;
   1300 
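         /* Shorthand: register one Ethernet media word with ifmedia_add(). */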
   1301 #define	ADD(mm, dd)							\
   1302 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1303 
   1304 	/* Media types with matching NetBSD media defines */
   1305 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1306 		ADD(IFM_10G_T | IFM_FDX, 0);
   1307 	}
   1308 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1309 		ADD(IFM_1000_T, 0);
   1310 		ADD(IFM_1000_T | IFM_FDX, 0);
   1311 	}
   1312 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1313 		ADD(IFM_100_TX, 0);
   1314 		ADD(IFM_100_TX | IFM_FDX, 0);
   1315 	}
   1316 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1317 		ADD(IFM_10_T, 0);
   1318 		ADD(IFM_10_T | IFM_FDX, 0);
   1319 	}
   1320 
   1321 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1322 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1323 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1324 	}
   1325 
   1326 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1327 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1328 		if (hw->phy.multispeed_fiber) {
   1329 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1330 		}
   1331 	}
   1332 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1333 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1334 		if (hw->phy.multispeed_fiber) {
   1335 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1336 		}
   1337 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1338 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1339 	}
   1340 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1341 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1342 	}
   1343 
   1344 #ifdef IFM_ETH_XTYPE
   1345 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1346 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1347 	}
   1348 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1349 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1350 	}
   1351 #else
   1352 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1353 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1354 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1355 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1356 	}
   1357 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1358 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1359 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1360 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1361 	}
   1362 #endif
   1363 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1364 		ADD(IFM_1000_KX, 0);
   1365 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1366 	}
   1367 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1368 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1369 	}
   1370 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1371 		ADD(IFM_2500_T | IFM_FDX, 0);
   1372 	}
   1373 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1374 		ADD(IFM_5000_T | IFM_FDX, 0);
   1375 	}
   1376 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1377 		device_printf(dev, "Media supported: 1000baseBX\n");
   1378 	/* XXX no ifmedia_set? */
   1379 
   1380 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
   1381 		ADD(IFM_1000_T | IFM_FDX, 0);
   1382 		ADD(IFM_1000_T, 0);
   1383 	}
   1384 
   1385 	ADD(IFM_AUTO, 0);
   1386 
   1387 #undef ADD
   1388 } /* ixgbe_add_media_types */
   1389 
   1390 /************************************************************************
   1391  * ixgbe_is_sfp
   1392  ************************************************************************/
   1393 static inline bool
   1394 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1395 {
   1396 	switch (hw->mac.type) {
   1397 	case ixgbe_mac_82598EB:
   1398 		if (hw->phy.type == ixgbe_phy_nl)
   1399 			return TRUE;
   1400 		return FALSE;
   1401 	case ixgbe_mac_82599EB:
   1402 		switch (hw->mac.ops.get_media_type(hw)) {
   1403 		case ixgbe_media_type_fiber:
   1404 		case ixgbe_media_type_fiber_qsfp:
   1405 			return TRUE;
   1406 		default:
   1407 			return FALSE;
   1408 		}
   1409 	case ixgbe_mac_X550EM_x:
   1410 	case ixgbe_mac_X550EM_a:
   1411 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1412 			return TRUE;
   1413 		return FALSE;
   1414 	default:
   1415 		return FALSE;
   1416 	}
   1417 } /* ixgbe_is_sfp */
   1418 
   1419 /************************************************************************
   1420  * ixgbe_config_link
   1421  ************************************************************************/
   1422 static void
   1423 ixgbe_config_link(struct adapter *adapter)
   1424 {
   1425 	struct ixgbe_hw *hw = &adapter->hw;
   1426 	u32             autoneg, err = 0;
   1427 	bool            sfp, negotiate = false;
   1428 
   1429 	sfp = ixgbe_is_sfp(hw);
   1430 
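	/*
	 * SFP module identification and multispeed-fiber link setup are
	 * deferred to softint context (mod_si / msf_si) rather than run
	 * inline here.
	 */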
   1431 	if (sfp) {
   1432 		if (hw->phy.multispeed_fiber) {
   1433 			hw->mac.ops.setup_sfp(hw);
   1434 			ixgbe_enable_tx_laser(hw);
   1435 			kpreempt_disable();
   1436 			softint_schedule(adapter->msf_si);
   1437 			kpreempt_enable();
   1438 		} else {
   1439 			kpreempt_disable();
   1440 			softint_schedule(adapter->mod_si);
   1441 			kpreempt_enable();
   1442 		}
   1443 	} else {
   1444 		if (hw->mac.ops.check_link)
   1445 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1446 			    &adapter->link_up, FALSE);
   1447 		if (err)
   1448 			goto out;
   1449 		autoneg = hw->phy.autoneg_advertised;
   1450 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1451 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1452 			    &negotiate);
   1453 		if (err)
   1454 			goto out;
   1455 		if (hw->mac.ops.setup_link)
    1456 			err = hw->mac.ops.setup_link(hw, autoneg,
   1457 			    adapter->link_up);
   1458 	}
   1459 out:
   1460 
   1461 	return;
   1462 } /* ixgbe_config_link */
   1463 
   1464 /************************************************************************
   1465  * ixgbe_update_stats_counters - Update board statistics counters.
   1466  ************************************************************************/
   1467 static void
   1468 ixgbe_update_stats_counters(struct adapter *adapter)
   1469 {
   1470 	struct ifnet          *ifp = adapter->ifp;
   1471 	struct ixgbe_hw       *hw = &adapter->hw;
   1472 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1473 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1474 	u64                   total_missed_rx = 0;
   1475 	uint64_t              crcerrs, rlec;
   1476 
   1477 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1478 	stats->crcerrs.ev_count += crcerrs;
   1479 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1480 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1481 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1482 	if (hw->mac.type == ixgbe_mac_X550)
   1483 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1484 
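	/*
	 * There can be more per-queue counter registers than queues in use;
	 * fold register i into software queue i % num_queues so every
	 * hardware count is attributed to an active queue.
	 */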
   1485 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1486 		int j = i % adapter->num_queues;
   1487 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1488 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1489 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1490 	}
   1491 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1492 		uint32_t mp;
   1493 		int j = i % adapter->num_queues;
   1494 
   1495 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1496 		/* global total per queue */
   1497 		stats->mpc[j].ev_count += mp;
   1498 		/* running comprehensive total for stats display */
   1499 		total_missed_rx += mp;
   1500 
   1501 		if (hw->mac.type == ixgbe_mac_82598EB)
   1502 			stats->rnbc[j].ev_count
   1503 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1504 
   1505 	}
   1506 	stats->mpctotal.ev_count += total_missed_rx;
   1507 
    1508 	/* The datasheet says M[LR]FC are valid only when link is up at 10Gbps */
   1509 	if ((adapter->link_active == TRUE)
   1510 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1511 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1512 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1513 	}
   1514 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1515 	stats->rlec.ev_count += rlec;
   1516 
   1517 	/* Hardware workaround, gprc counts missed packets */
   1518 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1519 
   1520 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1521 	stats->lxontxc.ev_count += lxon;
   1522 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1523 	stats->lxofftxc.ev_count += lxoff;
   1524 	total = lxon + lxoff;
   1525 
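	/*
	 * The transmit octet/packet counters below include the XON/XOFF
	 * pause frames sent by the MAC; subtract them (counted here as
	 * minimum-length frames) so only data traffic is reported.
	 */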
   1526 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1527 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1528 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1529 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1530 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1531 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1532 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1533 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1534 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1535 	} else {
   1536 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1537 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1538 		/* 82598 only has a counter in the high register */
   1539 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1540 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1541 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1542 	}
   1543 
   1544 	/*
   1545 	 * Workaround: mprc hardware is incorrectly counting
   1546 	 * broadcasts, so for now we subtract those.
   1547 	 */
   1548 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1549 	stats->bprc.ev_count += bprc;
   1550 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1551 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1552 
   1553 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1554 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1555 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1556 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1557 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1558 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1559 
   1560 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1561 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1562 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1563 
   1564 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1565 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1566 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1567 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1568 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1569 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1570 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1571 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1572 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1573 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1574 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1575 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1576 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1577 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1578 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1579 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1580 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1581 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    1582 	/* 82598 has no FCoE counters; only read them on newer MACs */
   1583 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1584 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1585 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1586 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1587 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1588 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1589 	}
   1590 
   1591 	/* Fill out the OS statistics structure */
   1592 	/*
   1593 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
    1594 	 * adapter->stats counters. This is required to make ifconfig -z
    1595 	 * (SIOCZIFDATA) work.
   1596 	 */
   1597 	ifp->if_collisions = 0;
   1598 
   1599 	/* Rx Errors */
   1600 	ifp->if_iqdrops += total_missed_rx;
   1601 	ifp->if_ierrors += crcerrs + rlec;
   1602 } /* ixgbe_update_stats_counters */
   1603 
   1604 /************************************************************************
   1605  * ixgbe_add_hw_stats
   1606  *
   1607  *   Add sysctl variables, one per statistic, to the system.
   1608  ************************************************************************/
   1609 static void
   1610 ixgbe_add_hw_stats(struct adapter *adapter)
   1611 {
   1612 	device_t dev = adapter->dev;
   1613 	const struct sysctlnode *rnode, *cnode;
   1614 	struct sysctllog **log = &adapter->sysctllog;
   1615 	struct tx_ring *txr = adapter->tx_rings;
   1616 	struct rx_ring *rxr = adapter->rx_rings;
   1617 	struct ixgbe_hw *hw = &adapter->hw;
   1618 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1619 	const char *xname = device_xname(dev);
   1620 
   1621 	/* Driver Statistics */
   1622 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1623 	    NULL, xname, "Handled queue in softint");
   1624 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1625 	    NULL, xname, "Requeued in softint");
   1626 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1627 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1628 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1629 	    NULL, xname, "m_defrag() failed");
   1630 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1631 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1632 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1633 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1634 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1635 	    NULL, xname, "Driver tx dma hard fail other");
   1636 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1637 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1638 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1639 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1640 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1641 	    NULL, xname, "Watchdog timeouts");
   1642 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1643 	    NULL, xname, "TSO errors");
   1644 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1645 	    NULL, xname, "Link MSI-X IRQ Handled");
   1646 
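	/*
	 * Per-queue statistics: each queue gets a sysctl subtree (interrupt
	 * rate, descriptor head/tail) plus evcnt counters for its TX and RX
	 * rings.
	 */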
   1647 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1648 		snprintf(adapter->queues[i].evnamebuf,
   1649 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1650 		    xname, i);
   1651 		snprintf(adapter->queues[i].namebuf,
   1652 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1653 
   1654 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1655 			aprint_error_dev(dev, "could not create sysctl root\n");
   1656 			break;
   1657 		}
   1658 
   1659 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1660 		    0, CTLTYPE_NODE,
   1661 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1662 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1663 			break;
   1664 
   1665 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1666 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1667 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1668 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1669 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1670 			break;
   1671 
   1672 #if 0 /* XXX msaitoh */
   1673 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1674 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1675 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1676 			NULL, 0, &(adapter->queues[i].irqs),
   1677 		    0, CTL_CREATE, CTL_EOL) != 0)
   1678 			break;
   1679 #endif
   1680 
   1681 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1682 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1683 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1684 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1685 		    0, CTL_CREATE, CTL_EOL) != 0)
   1686 			break;
   1687 
   1688 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1689 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1690 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1691 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1692 		    0, CTL_CREATE, CTL_EOL) != 0)
   1693 			break;
   1694 
   1695 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1696 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1697 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1698 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1699 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1700 		    NULL, adapter->queues[i].evnamebuf,
   1701 		    "Queue No Descriptor Available");
   1702 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1703 		    NULL, adapter->queues[i].evnamebuf,
   1704 		    "Queue Packets Transmitted");
   1705 #ifndef IXGBE_LEGACY_TX
   1706 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1707 		    NULL, adapter->queues[i].evnamebuf,
   1708 		    "Packets dropped in pcq");
   1709 #endif
   1710 
   1711 #ifdef LRO
   1712 		struct lro_ctrl *lro = &rxr->lro;
   1713 #endif /* LRO */
   1714 
   1715 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1716 		    CTLFLAG_READONLY,
   1717 		    CTLTYPE_INT,
   1718 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1719 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1720 		    CTL_CREATE, CTL_EOL) != 0)
   1721 			break;
   1722 
   1723 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1724 		    CTLFLAG_READONLY,
   1725 		    CTLTYPE_INT,
   1726 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1727 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1728 		    CTL_CREATE, CTL_EOL) != 0)
   1729 			break;
   1730 
   1731 		if (i < __arraycount(stats->mpc)) {
   1732 			evcnt_attach_dynamic(&stats->mpc[i],
   1733 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1734 			    "RX Missed Packet Count");
   1735 			if (hw->mac.type == ixgbe_mac_82598EB)
   1736 				evcnt_attach_dynamic(&stats->rnbc[i],
   1737 				    EVCNT_TYPE_MISC, NULL,
   1738 				    adapter->queues[i].evnamebuf,
   1739 				    "Receive No Buffers");
   1740 		}
   1741 		if (i < __arraycount(stats->pxontxc)) {
   1742 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1743 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1744 			    "pxontxc");
   1745 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1746 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1747 			    "pxonrxc");
   1748 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1749 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1750 			    "pxofftxc");
   1751 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1752 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1753 			    "pxoffrxc");
   1754 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1755 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1756 			    "pxon2offc");
   1757 		}
   1758 		if (i < __arraycount(stats->qprc)) {
   1759 			evcnt_attach_dynamic(&stats->qprc[i],
   1760 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1761 			    "qprc");
   1762 			evcnt_attach_dynamic(&stats->qptc[i],
   1763 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1764 			    "qptc");
   1765 			evcnt_attach_dynamic(&stats->qbrc[i],
   1766 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1767 			    "qbrc");
   1768 			evcnt_attach_dynamic(&stats->qbtc[i],
   1769 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1770 			    "qbtc");
   1771 			evcnt_attach_dynamic(&stats->qprdc[i],
   1772 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1773 			    "qprdc");
   1774 		}
   1775 
   1776 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1777 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1778 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1779 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1780 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1781 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1782 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1783 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1784 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1785 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1786 #ifdef LRO
   1787 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1788 				CTLFLAG_RD, &lro->lro_queued, 0,
   1789 				"LRO Queued");
   1790 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1791 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1792 				"LRO Flushed");
   1793 #endif /* LRO */
   1794 	}
   1795 
   1796 	/* MAC stats get their own sub node */
   1797 
   1798 	snprintf(stats->namebuf,
   1799 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1800 
   1801 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1802 	    stats->namebuf, "rx csum offload - IP");
   1803 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1804 	    stats->namebuf, "rx csum offload - L4");
   1805 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1806 	    stats->namebuf, "rx csum offload - IP bad");
   1807 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1808 	    stats->namebuf, "rx csum offload - L4 bad");
   1809 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1810 	    stats->namebuf, "Interrupt conditions zero");
   1811 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1812 	    stats->namebuf, "Legacy interrupts");
   1813 
   1814 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1815 	    stats->namebuf, "CRC Errors");
   1816 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1817 	    stats->namebuf, "Illegal Byte Errors");
   1818 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1819 	    stats->namebuf, "Byte Errors");
   1820 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1821 	    stats->namebuf, "MAC Short Packets Discarded");
   1822 	if (hw->mac.type >= ixgbe_mac_X550)
   1823 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1824 		    stats->namebuf, "Bad SFD");
   1825 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1826 	    stats->namebuf, "Total Packets Missed");
   1827 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1828 	    stats->namebuf, "MAC Local Faults");
   1829 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1830 	    stats->namebuf, "MAC Remote Faults");
   1831 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1832 	    stats->namebuf, "Receive Length Errors");
   1833 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1834 	    stats->namebuf, "Link XON Transmitted");
   1835 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1836 	    stats->namebuf, "Link XON Received");
   1837 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1838 	    stats->namebuf, "Link XOFF Transmitted");
   1839 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1840 	    stats->namebuf, "Link XOFF Received");
   1841 
   1842 	/* Packet Reception Stats */
   1843 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1844 	    stats->namebuf, "Total Octets Received");
   1845 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1846 	    stats->namebuf, "Good Octets Received");
   1847 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1848 	    stats->namebuf, "Total Packets Received");
   1849 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1850 	    stats->namebuf, "Good Packets Received");
   1851 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1852 	    stats->namebuf, "Multicast Packets Received");
   1853 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1854 	    stats->namebuf, "Broadcast Packets Received");
   1855 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
    1856 	    stats->namebuf, "64 byte frames received");
   1857 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1858 	    stats->namebuf, "65-127 byte frames received");
   1859 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1860 	    stats->namebuf, "128-255 byte frames received");
   1861 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1862 	    stats->namebuf, "256-511 byte frames received");
   1863 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1864 	    stats->namebuf, "512-1023 byte frames received");
   1865 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    1866 	    stats->namebuf, "1024-1522 byte frames received");
   1867 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1868 	    stats->namebuf, "Receive Undersized");
   1869 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
    1870 	    stats->namebuf, "Fragmented Packets Received");
   1871 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1872 	    stats->namebuf, "Oversized Packets Received");
   1873 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1874 	    stats->namebuf, "Received Jabber");
   1875 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1876 	    stats->namebuf, "Management Packets Received");
   1877 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1878 	    stats->namebuf, "Management Packets Dropped");
   1879 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1880 	    stats->namebuf, "Checksum Errors");
   1881 
   1882 	/* Packet Transmission Stats */
   1883 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1884 	    stats->namebuf, "Good Octets Transmitted");
   1885 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1886 	    stats->namebuf, "Total Packets Transmitted");
   1887 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1888 	    stats->namebuf, "Good Packets Transmitted");
   1889 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1890 	    stats->namebuf, "Broadcast Packets Transmitted");
   1891 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1892 	    stats->namebuf, "Multicast Packets Transmitted");
   1893 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1894 	    stats->namebuf, "Management Packets Transmitted");
   1895 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
    1896 	    stats->namebuf, "64 byte frames transmitted");
   1897 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "65-127 byte frames transmitted");
   1899 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1900 	    stats->namebuf, "128-255 byte frames transmitted");
   1901 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "256-511 byte frames transmitted");
   1903 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "512-1023 byte frames transmitted");
   1905 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1907 } /* ixgbe_add_hw_stats */
   1908 
   1909 static void
   1910 ixgbe_clear_evcnt(struct adapter *adapter)
   1911 {
   1912 	struct tx_ring *txr = adapter->tx_rings;
   1913 	struct rx_ring *rxr = adapter->rx_rings;
   1914 	struct ixgbe_hw *hw = &adapter->hw;
   1915 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1916 
   1917 	adapter->handleq.ev_count = 0;
   1918 	adapter->req.ev_count = 0;
   1919 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1920 	adapter->mbuf_defrag_failed.ev_count = 0;
   1921 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1922 	adapter->einval_tx_dma_setup.ev_count = 0;
   1923 	adapter->other_tx_dma_setup.ev_count = 0;
   1924 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1925 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1926 	adapter->watchdog_events.ev_count = 0;
   1927 	adapter->tso_err.ev_count = 0;
   1928 	adapter->link_irq.ev_count = 0;
   1929 
   1930 	txr = adapter->tx_rings;
   1931 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1932 		adapter->queues[i].irqs.ev_count = 0;
   1933 		txr->no_desc_avail.ev_count = 0;
   1934 		txr->total_packets.ev_count = 0;
   1935 		txr->tso_tx.ev_count = 0;
   1936 #ifndef IXGBE_LEGACY_TX
   1937 		txr->pcq_drops.ev_count = 0;
   1938 #endif
   1939 
   1940 		if (i < __arraycount(stats->mpc)) {
   1941 			stats->mpc[i].ev_count = 0;
   1942 			if (hw->mac.type == ixgbe_mac_82598EB)
   1943 				stats->rnbc[i].ev_count = 0;
   1944 		}
   1945 		if (i < __arraycount(stats->pxontxc)) {
   1946 			stats->pxontxc[i].ev_count = 0;
   1947 			stats->pxonrxc[i].ev_count = 0;
   1948 			stats->pxofftxc[i].ev_count = 0;
   1949 			stats->pxoffrxc[i].ev_count = 0;
   1950 			stats->pxon2offc[i].ev_count = 0;
   1951 		}
   1952 		if (i < __arraycount(stats->qprc)) {
   1953 			stats->qprc[i].ev_count = 0;
   1954 			stats->qptc[i].ev_count = 0;
   1955 			stats->qbrc[i].ev_count = 0;
   1956 			stats->qbtc[i].ev_count = 0;
   1957 			stats->qprdc[i].ev_count = 0;
   1958 		}
   1959 
   1960 		rxr->rx_packets.ev_count = 0;
   1961 		rxr->rx_bytes.ev_count = 0;
   1962 		rxr->rx_copies.ev_count = 0;
   1963 		rxr->no_jmbuf.ev_count = 0;
   1964 		rxr->rx_discarded.ev_count = 0;
   1965 	}
   1966 	stats->ipcs.ev_count = 0;
   1967 	stats->l4cs.ev_count = 0;
   1968 	stats->ipcs_bad.ev_count = 0;
   1969 	stats->l4cs_bad.ev_count = 0;
   1970 	stats->intzero.ev_count = 0;
   1971 	stats->legint.ev_count = 0;
   1972 	stats->crcerrs.ev_count = 0;
   1973 	stats->illerrc.ev_count = 0;
   1974 	stats->errbc.ev_count = 0;
   1975 	stats->mspdc.ev_count = 0;
   1976 	stats->mbsdc.ev_count = 0;
   1977 	stats->mpctotal.ev_count = 0;
   1978 	stats->mlfc.ev_count = 0;
   1979 	stats->mrfc.ev_count = 0;
   1980 	stats->rlec.ev_count = 0;
   1981 	stats->lxontxc.ev_count = 0;
   1982 	stats->lxonrxc.ev_count = 0;
   1983 	stats->lxofftxc.ev_count = 0;
   1984 	stats->lxoffrxc.ev_count = 0;
   1985 
   1986 	/* Packet Reception Stats */
   1987 	stats->tor.ev_count = 0;
   1988 	stats->gorc.ev_count = 0;
   1989 	stats->tpr.ev_count = 0;
   1990 	stats->gprc.ev_count = 0;
   1991 	stats->mprc.ev_count = 0;
   1992 	stats->bprc.ev_count = 0;
   1993 	stats->prc64.ev_count = 0;
   1994 	stats->prc127.ev_count = 0;
   1995 	stats->prc255.ev_count = 0;
   1996 	stats->prc511.ev_count = 0;
   1997 	stats->prc1023.ev_count = 0;
   1998 	stats->prc1522.ev_count = 0;
   1999 	stats->ruc.ev_count = 0;
   2000 	stats->rfc.ev_count = 0;
   2001 	stats->roc.ev_count = 0;
   2002 	stats->rjc.ev_count = 0;
   2003 	stats->mngprc.ev_count = 0;
   2004 	stats->mngpdc.ev_count = 0;
   2005 	stats->xec.ev_count = 0;
   2006 
   2007 	/* Packet Transmission Stats */
   2008 	stats->gotc.ev_count = 0;
   2009 	stats->tpt.ev_count = 0;
   2010 	stats->gptc.ev_count = 0;
   2011 	stats->bptc.ev_count = 0;
   2012 	stats->mptc.ev_count = 0;
   2013 	stats->mngptc.ev_count = 0;
   2014 	stats->ptc64.ev_count = 0;
   2015 	stats->ptc127.ev_count = 0;
   2016 	stats->ptc255.ev_count = 0;
   2017 	stats->ptc511.ev_count = 0;
   2018 	stats->ptc1023.ev_count = 0;
   2019 	stats->ptc1522.ev_count = 0;
   2020 }
   2021 
   2022 /************************************************************************
   2023  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2024  *
   2025  *   Retrieves the TDH value from the hardware
   2026  ************************************************************************/
   2027 static int
   2028 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2029 {
   2030 	struct sysctlnode node = *rnode;
   2031 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2032 	uint32_t val;
   2033 
   2034 	if (!txr)
   2035 		return (0);
   2036 
   2037 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2038 	node.sysctl_data = &val;
   2039 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2040 } /* ixgbe_sysctl_tdh_handler */
   2041 
   2042 /************************************************************************
   2043  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2044  *
   2045  *   Retrieves the TDT value from the hardware
   2046  ************************************************************************/
   2047 static int
   2048 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2049 {
   2050 	struct sysctlnode node = *rnode;
   2051 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2052 	uint32_t val;
   2053 
   2054 	if (!txr)
   2055 		return (0);
   2056 
   2057 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2058 	node.sysctl_data = &val;
   2059 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2060 } /* ixgbe_sysctl_tdt_handler */
   2061 
   2062 /************************************************************************
   2063  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2064  *
   2065  *   Retrieves the RDH value from the hardware
   2066  ************************************************************************/
   2067 static int
   2068 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2069 {
   2070 	struct sysctlnode node = *rnode;
   2071 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2072 	uint32_t val;
   2073 
   2074 	if (!rxr)
   2075 		return (0);
   2076 
   2077 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2078 	node.sysctl_data = &val;
   2079 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2080 } /* ixgbe_sysctl_rdh_handler */
   2081 
   2082 /************************************************************************
   2083  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2084  *
   2085  *   Retrieves the RDT value from the hardware
   2086  ************************************************************************/
   2087 static int
   2088 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2089 {
   2090 	struct sysctlnode node = *rnode;
   2091 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2092 	uint32_t val;
   2093 
   2094 	if (!rxr)
   2095 		return (0);
   2096 
   2097 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2098 	node.sysctl_data = &val;
   2099 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2100 } /* ixgbe_sysctl_rdt_handler */
   2101 
   2102 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2103 /************************************************************************
   2104  * ixgbe_register_vlan
   2105  *
   2106  *   Run via vlan config EVENT, it enables us to use the
   2107  *   HW Filter table since we can get the vlan id. This
   2108  *   just creates the entry in the soft version of the
   2109  *   VFTA, init will repopulate the real table.
   2110  ************************************************************************/
   2111 static void
   2112 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2113 {
   2114 	struct adapter	*adapter = ifp->if_softc;
   2115 	u16		index, bit;
   2116 
   2117 	if (ifp->if_softc != arg)   /* Not our event */
   2118 		return;
   2119 
   2120 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2121 		return;
   2122 
   2123 	IXGBE_CORE_LOCK(adapter);
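	/*
	 * Each 32-bit VFTA register covers 32 VLAN IDs:
	 * index = vtag / 32, bit = vtag % 32.
	 */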
   2124 	index = (vtag >> 5) & 0x7F;
   2125 	bit = vtag & 0x1F;
   2126 	adapter->shadow_vfta[index] |= (1 << bit);
   2127 	ixgbe_setup_vlan_hw_support(adapter);
   2128 	IXGBE_CORE_UNLOCK(adapter);
   2129 } /* ixgbe_register_vlan */
   2130 
   2131 /************************************************************************
   2132  * ixgbe_unregister_vlan
   2133  *
   2134  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2135  ************************************************************************/
   2136 static void
   2137 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2138 {
   2139 	struct adapter	*adapter = ifp->if_softc;
   2140 	u16		index, bit;
   2141 
   2142 	if (ifp->if_softc != arg)
   2143 		return;
   2144 
   2145 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2146 		return;
   2147 
   2148 	IXGBE_CORE_LOCK(adapter);
   2149 	index = (vtag >> 5) & 0x7F;
   2150 	bit = vtag & 0x1F;
   2151 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2152 	/* Re-init to load the changes */
   2153 	ixgbe_setup_vlan_hw_support(adapter);
   2154 	IXGBE_CORE_UNLOCK(adapter);
   2155 } /* ixgbe_unregister_vlan */
   2156 #endif
   2157 
   2158 static void
   2159 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2160 {
   2161 	struct ethercom *ec = &adapter->osdep.ec;
   2162 	struct ixgbe_hw *hw = &adapter->hw;
   2163 	struct rx_ring	*rxr;
   2164 	int             i;
   2165 	u32		ctrl;
   2166 
   2167 
    2168 	/*
    2169 	 * We get here through init_locked, meaning a soft reset
    2170 	 * has already cleared the VFTA and other state, so if
    2171 	 * no VLANs have been registered there is nothing
    2172 	 * to do.
    2173 	 */
   2174 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2175 		return;
   2176 
   2177 	/* Setup the queues for vlans */
   2178 	for (i = 0; i < adapter->num_queues; i++) {
   2179 		rxr = &adapter->rx_rings[i];
   2180 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2181 		if (hw->mac.type != ixgbe_mac_82598EB) {
   2182 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2183 			ctrl |= IXGBE_RXDCTL_VME;
   2184 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2185 		}
   2186 		rxr->vtag_strip = TRUE;
   2187 	}
   2188 
   2189 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2190 		return;
   2191 	/*
   2192 	 * A soft reset zero's out the VFTA, so
   2193 	 * we need to repopulate it now.
   2194 	 */
   2195 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2196 		if (adapter->shadow_vfta[i] != 0)
   2197 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2198 			    adapter->shadow_vfta[i]);
   2199 
   2200 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2201 	/* Enable the Filter Table if enabled */
   2202 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2203 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2204 		ctrl |= IXGBE_VLNCTRL_VFE;
   2205 	}
   2206 	if (hw->mac.type == ixgbe_mac_82598EB)
   2207 		ctrl |= IXGBE_VLNCTRL_VME;
   2208 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2209 } /* ixgbe_setup_vlan_hw_support */
   2210 
   2211 /************************************************************************
   2212  * ixgbe_get_slot_info
   2213  *
   2214  *   Get the width and transaction speed of
   2215  *   the slot this adapter is plugged into.
   2216  ************************************************************************/
   2217 static void
   2218 ixgbe_get_slot_info(struct adapter *adapter)
   2219 {
   2220 	device_t		dev = adapter->dev;
   2221 	struct ixgbe_hw		*hw = &adapter->hw;
   2222 	u32                   offset;
   2223 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2224 	u16			link;
   2225 	int                   bus_info_valid = TRUE;
   2226 
   2227 	/* Some devices are behind an internal bridge */
   2228 	switch (hw->device_id) {
   2229 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2230 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2231 		goto get_parent_info;
   2232 	default:
   2233 		break;
   2234 	}
   2235 
   2236 	ixgbe_get_bus_info(hw);
   2237 
   2238 	/*
   2239 	 * Some devices don't use PCI-E, but there is no need
   2240 	 * to display "Unknown" for bus speed and width.
   2241 	 */
   2242 	switch (hw->mac.type) {
   2243 	case ixgbe_mac_X550EM_x:
   2244 	case ixgbe_mac_X550EM_a:
   2245 		return;
   2246 	default:
   2247 		goto display;
   2248 	}
   2249 
   2250 get_parent_info:
   2251 	/*
   2252 	 * For the Quad port adapter we need to parse back
   2253 	 * up the PCI tree to find the speed of the expansion
   2254 	 * slot into which this adapter is plugged. A bit more work.
   2255 	 */
   2256 	dev = device_parent(device_parent(dev));
   2257 #if 0
   2258 #ifdef IXGBE_DEBUG
   2259 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2260 	    pci_get_slot(dev), pci_get_function(dev));
   2261 #endif
   2262 	dev = device_parent(device_parent(dev));
   2263 #ifdef IXGBE_DEBUG
   2264 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2265 	    pci_get_slot(dev), pci_get_function(dev));
   2266 #endif
   2267 #endif
   2268 	/* Now get the PCI Express Capabilities offset */
   2269 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2270 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2271 		/*
   2272 		 * Hmm...can't get PCI-Express capabilities.
   2273 		 * Falling back to default method.
   2274 		 */
   2275 		bus_info_valid = FALSE;
   2276 		ixgbe_get_bus_info(hw);
   2277 		goto display;
   2278 	}
   2279 	/* ...and read the Link Status Register */
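	/*
	 * PCIE_LCSR is read as a 32-bit dword; the Link Status field
	 * occupies the upper 16 bits, hence the shift below.
	 */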
   2280 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2281 	    offset + PCIE_LCSR);
   2282 	ixgbe_set_pci_config_data_generic(hw, link >> 16);
   2283 
   2284 display:
   2285 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2286 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2287 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2288 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2289 	     "Unknown"),
   2290 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2291 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2292 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2293 	     "Unknown"));
   2294 
   2295 	if (bus_info_valid) {
   2296 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2297 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2298 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2299 			device_printf(dev, "PCI-Express bandwidth available"
   2300 			    " for this card\n     is not sufficient for"
   2301 			    " optimal performance.\n");
   2302 			device_printf(dev, "For optimal performance a x8 "
   2303 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2304 		}
   2305 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2306 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2307 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2308 			device_printf(dev, "PCI-Express bandwidth available"
   2309 			    " for this card\n     is not sufficient for"
   2310 			    " optimal performance.\n");
   2311 			device_printf(dev, "For optimal performance a x8 "
   2312 			    "PCIE Gen3 slot is required.\n");
   2313 		}
   2314 	} else
   2315 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2316 
   2317 	return;
   2318 } /* ixgbe_get_slot_info */
   2319 
   2320 /************************************************************************
   2321  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2322  ************************************************************************/
   2323 static inline void
   2324 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2325 {
   2326 	struct ixgbe_hw *hw = &adapter->hw;
   2327 	u64             queue = (u64)(1ULL << vector);
   2328 	u32             mask;
   2329 
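	/*
	 * 82598 has a single 32-bit EIMS register; later MACs spread the
	 * 64-bit per-vector mask across EIMS_EX(0) and EIMS_EX(1).
	 */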
   2330 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2331 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2332 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2333 	} else {
   2334 		mask = (queue & 0xFFFFFFFF);
   2335 		if (mask)
   2336 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2337 		mask = (queue >> 32);
   2338 		if (mask)
   2339 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2340 	}
   2341 } /* ixgbe_enable_queue */
   2342 
   2343 /************************************************************************
   2344  * ixgbe_disable_queue
   2345  ************************************************************************/
   2346 static inline void
   2347 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2348 {
   2349 	struct ixgbe_hw *hw = &adapter->hw;
   2350 	u64             queue = (u64)(1ULL << vector);
   2351 	u32             mask;
   2352 
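	/* Mirror of ixgbe_enable_queue(): mask the vector via EIMC/EIMC_EX. */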
   2353 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2354 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2355 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2356 	} else {
   2357 		mask = (queue & 0xFFFFFFFF);
   2358 		if (mask)
   2359 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2360 		mask = (queue >> 32);
   2361 		if (mask)
   2362 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2363 	}
   2364 } /* ixgbe_disable_queue */
   2365 
   2366 /************************************************************************
   2367  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2368  ************************************************************************/
   2369 static int
   2370 ixgbe_msix_que(void *arg)
   2371 {
   2372 	struct ix_queue	*que = arg;
   2373 	struct adapter  *adapter = que->adapter;
   2374 	struct ifnet    *ifp = adapter->ifp;
   2375 	struct tx_ring	*txr = que->txr;
   2376 	struct rx_ring	*rxr = que->rxr;
   2377 	bool		more;
   2378 	u32		newitr = 0;
   2379 
   2380 	/* Protect against spurious interrupts */
   2381 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2382 		return 0;
   2383 
   2384 	ixgbe_disable_queue(adapter, que->msix);
   2385 	++que->irqs.ev_count;
   2386 
   2387 #ifdef __NetBSD__
   2388 	/* Don't run ixgbe_rxeof in interrupt context */
   2389 	more = true;
   2390 #else
   2391 	more = ixgbe_rxeof(que);
   2392 #endif
   2393 
   2394 	IXGBE_TX_LOCK(txr);
   2395 	ixgbe_txeof(txr);
   2396 	IXGBE_TX_UNLOCK(txr);
   2397 
   2398 	/* Do AIM now? */
   2399 
   2400 	if (adapter->enable_aim == false)
   2401 		goto no_calc;
   2402 	/*
   2403 	 * Do Adaptive Interrupt Moderation:
   2404 	 *  - Write out last calculated setting
   2405 	 *  - Calculate based on average size over
   2406 	 *    the last interval.
   2407 	 */
   2408 	if (que->eitr_setting)
   2409 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2410 		    que->eitr_setting);
   2411 
   2412 	que->eitr_setting = 0;
   2413 
   2414 	/* Idle, do nothing */
    2415 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2416 		goto no_calc;
   2417 
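	/*
	 * Use the average frame size (bytes/packets) observed since the last
	 * interrupt to derive the next EITR setting; the value is capped and
	 * scaled below and written out on the next interrupt.
	 */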
   2418 	if ((txr->bytes) && (txr->packets))
   2419 		newitr = txr->bytes/txr->packets;
   2420 	if ((rxr->bytes) && (rxr->packets))
   2421 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2422 	newitr += 24; /* account for hardware frame, crc */
   2423 
   2424 	/* set an upper boundary */
   2425 	newitr = min(newitr, 3000);
   2426 
   2427 	/* Be nice to the mid range */
   2428 	if ((newitr > 300) && (newitr < 1200))
   2429 		newitr = (newitr / 3);
   2430 	else
   2431 		newitr = (newitr / 2);
   2432 
    2433 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2434 		newitr |= newitr << 16;
    2435 	else
    2436 		newitr |= IXGBE_EITR_CNT_WDIS;
    2437 
    2438 	/* save for next interrupt */
    2439 	que->eitr_setting = newitr;
   2440 
   2441 	/* Reset state */
   2442 	txr->bytes = 0;
   2443 	txr->packets = 0;
   2444 	rxr->bytes = 0;
   2445 	rxr->packets = 0;
   2446 
   2447 no_calc:
   2448 	if (more)
   2449 		softint_schedule(que->que_si);
   2450 	else
   2451 		ixgbe_enable_queue(adapter, que->msix);
   2452 
   2453 	return 1;
   2454 } /* ixgbe_msix_que */
   2455 
   2456 /************************************************************************
   2457  * ixgbe_media_status - Media Ioctl callback
   2458  *
   2459  *   Called whenever the user queries the status of
   2460  *   the interface using ifconfig.
   2461  ************************************************************************/
   2462 static void
   2463 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2464 {
   2465 	struct adapter *adapter = ifp->if_softc;
   2466 	struct ixgbe_hw *hw = &adapter->hw;
   2467 	int layer;
   2468 
   2469 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2470 	IXGBE_CORE_LOCK(adapter);
   2471 	ixgbe_update_link_status(adapter);
   2472 
   2473 	ifmr->ifm_status = IFM_AVALID;
   2474 	ifmr->ifm_active = IFM_ETHER;
   2475 
   2476 	if (!adapter->link_active) {
   2477 		ifmr->ifm_active |= IFM_NONE;
   2478 		IXGBE_CORE_UNLOCK(adapter);
   2479 		return;
   2480 	}
   2481 
   2482 	ifmr->ifm_status |= IFM_ACTIVE;
   2483 	layer = adapter->phy_layer;
   2484 
   2485 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2486 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2487 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2488 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2489 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2490 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2491 		switch (adapter->link_speed) {
   2492 		case IXGBE_LINK_SPEED_10GB_FULL:
   2493 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2494 			break;
   2495 		case IXGBE_LINK_SPEED_5GB_FULL:
   2496 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2497 			break;
   2498 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2499 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2500 			break;
   2501 		case IXGBE_LINK_SPEED_1GB_FULL:
   2502 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2503 			break;
   2504 		case IXGBE_LINK_SPEED_100_FULL:
   2505 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2506 			break;
   2507 		case IXGBE_LINK_SPEED_10_FULL:
   2508 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2509 			break;
   2510 		}
   2511 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2512 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2513 		switch (adapter->link_speed) {
   2514 		case IXGBE_LINK_SPEED_10GB_FULL:
   2515 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2516 			break;
   2517 		}
   2518 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2519 		switch (adapter->link_speed) {
   2520 		case IXGBE_LINK_SPEED_10GB_FULL:
   2521 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2522 			break;
   2523 		case IXGBE_LINK_SPEED_1GB_FULL:
   2524 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2525 			break;
   2526 		}
   2527 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2528 		switch (adapter->link_speed) {
   2529 		case IXGBE_LINK_SPEED_10GB_FULL:
   2530 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2531 			break;
   2532 		case IXGBE_LINK_SPEED_1GB_FULL:
   2533 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2534 			break;
   2535 		}
   2536 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2537 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2538 		switch (adapter->link_speed) {
   2539 		case IXGBE_LINK_SPEED_10GB_FULL:
   2540 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2541 			break;
   2542 		case IXGBE_LINK_SPEED_1GB_FULL:
   2543 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2544 			break;
   2545 		}
   2546 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2547 		switch (adapter->link_speed) {
   2548 		case IXGBE_LINK_SPEED_10GB_FULL:
   2549 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2550 			break;
   2551 		}
   2552 	/*
   2553 	 * XXX: These need to use the proper media types once
   2554 	 * they're added.
   2555 	 */
   2556 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2557 		switch (adapter->link_speed) {
   2558 		case IXGBE_LINK_SPEED_10GB_FULL:
   2559 #ifndef IFM_ETH_XTYPE
   2560 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2561 #else
   2562 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2563 #endif
   2564 			break;
   2565 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2566 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2567 			break;
   2568 		case IXGBE_LINK_SPEED_1GB_FULL:
   2569 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2570 			break;
   2571 		}
   2572 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2573 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2574 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2575 		switch (adapter->link_speed) {
   2576 		case IXGBE_LINK_SPEED_10GB_FULL:
   2577 #ifndef IFM_ETH_XTYPE
   2578 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2579 #else
   2580 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2581 #endif
   2582 			break;
   2583 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2584 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2585 			break;
   2586 		case IXGBE_LINK_SPEED_1GB_FULL:
   2587 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2588 			break;
   2589 		}
   2590 
   2591 	/* If nothing is recognized... */
   2592 #if 0
   2593 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2594 		ifmr->ifm_active |= IFM_UNKNOWN;
   2595 #endif
   2596 
   2597 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2598 
   2599 	/* Display current flow control setting used on link */
   2600 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2601 	    hw->fc.current_mode == ixgbe_fc_full)
   2602 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2603 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2604 	    hw->fc.current_mode == ixgbe_fc_full)
   2605 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2606 
   2607 	IXGBE_CORE_UNLOCK(adapter);
   2608 
   2609 	return;
   2610 } /* ixgbe_media_status */
   2611 
   2612 /************************************************************************
   2613  * ixgbe_media_change - Media Ioctl callback
   2614  *
   2615  *   Called when the user changes speed/duplex using
   2616  *   media/mediopt option with ifconfig.
   2617  ************************************************************************/
   2618 static int
   2619 ixgbe_media_change(struct ifnet *ifp)
   2620 {
   2621 	struct adapter   *adapter = ifp->if_softc;
   2622 	struct ifmedia   *ifm = &adapter->media;
   2623 	struct ixgbe_hw  *hw = &adapter->hw;
   2624 	ixgbe_link_speed speed = 0;
   2625 	ixgbe_link_speed link_caps = 0;
   2626 	bool negotiate = false;
   2627 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2628 
   2629 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2630 
   2631 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2632 		return (EINVAL);
   2633 
   2634 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2635 		return (ENODEV);
   2636 
   2637 	/*
   2638 	 * We don't actually need to check against the supported
   2639 	 * media types of the adapter; ifmedia will take care of
   2640 	 * that for us.
   2641 	 */
   2642 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2643 	case IFM_AUTO:
   2644 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2645 		    &negotiate);
   2646 		if (err != IXGBE_SUCCESS) {
   2647 			device_printf(adapter->dev, "Unable to determine "
   2648 			    "supported advertise speeds\n");
   2649 			return (ENODEV);
   2650 		}
   2651 		speed |= link_caps;
   2652 		break;
   2653 	case IFM_10G_T:
   2654 	case IFM_10G_LRM:
   2655 	case IFM_10G_LR:
   2656 	case IFM_10G_TWINAX:
   2657 #ifndef IFM_ETH_XTYPE
   2658 	case IFM_10G_SR: /* KR, too */
   2659 	case IFM_10G_CX4: /* KX4 */
   2660 #else
   2661 	case IFM_10G_KR:
   2662 	case IFM_10G_KX4:
   2663 #endif
   2664 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2665 		break;
   2666 	case IFM_5000_T:
   2667 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2668 		break;
   2669 	case IFM_2500_T:
   2670 	case IFM_2500_KX:
   2671 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2672 		break;
   2673 	case IFM_1000_T:
   2674 	case IFM_1000_LX:
   2675 	case IFM_1000_SX:
   2676 	case IFM_1000_KX:
   2677 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2678 		break;
   2679 	case IFM_100_TX:
   2680 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2681 		break;
   2682 	case IFM_10_T:
   2683 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2684 		break;
   2685 	default:
   2686 		goto invalid;
   2687 	}
   2688 
   2689 	hw->mac.autotry_restart = TRUE;
   2690 	hw->mac.ops.setup_link(hw, speed, TRUE);
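	/*
	 * Record the requested speeds in adapter->advertise as a bitmap:
	 * bit 0 = 100M, bit 1 = 1G, bit 2 = 10G, bit 3 = 10M,
	 * bit 4 = 2.5G, bit 5 = 5G; 0 is used for IFM_AUTO.
	 */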
   2691 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
   2692 		adapter->advertise = 0;
   2693 	} else {
   2694 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2695 			adapter->advertise |= 1 << 2;
   2696 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2697 			adapter->advertise |= 1 << 1;
   2698 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2699 			adapter->advertise |= 1 << 0;
   2700 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2701 			adapter->advertise |= 1 << 3;
   2702 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2703 			adapter->advertise |= 1 << 4;
   2704 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2705 			adapter->advertise |= 1 << 5;
   2706 	}
   2707 
   2708 	return (0);
   2709 
   2710 invalid:
   2711 	device_printf(adapter->dev, "Invalid media type!\n");
   2712 
   2713 	return (EINVAL);
   2714 } /* ixgbe_media_change */
   2715 
   2716 /************************************************************************
   2717  * ixgbe_set_promisc
   2718  ************************************************************************/
   2719 static void
   2720 ixgbe_set_promisc(struct adapter *adapter)
   2721 {
   2722 	struct ifnet *ifp = adapter->ifp;
   2723 	int          mcnt = 0;
   2724 	u32          rctl;
   2725 	struct ether_multi *enm;
   2726 	struct ether_multistep step;
   2727 	struct ethercom *ec = &adapter->osdep.ec;
   2728 
   2729 	KASSERT(mutex_owned(&adapter->core_mtx));
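	/*
	 * FCTRL_UPE is unicast promiscuous and FCTRL_MPE multicast
	 * promiscuous; start with UPE cleared and re-enable the bits below
	 * as the interface flags require.
	 */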
   2730 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2731 	rctl &= (~IXGBE_FCTRL_UPE);
   2732 	if (ifp->if_flags & IFF_ALLMULTI)
   2733 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2734 	else {
   2735 		ETHER_LOCK(ec);
   2736 		ETHER_FIRST_MULTI(step, ec, enm);
   2737 		while (enm != NULL) {
   2738 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2739 				break;
   2740 			mcnt++;
   2741 			ETHER_NEXT_MULTI(step, enm);
   2742 		}
   2743 		ETHER_UNLOCK(ec);
   2744 	}
   2745 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2746 		rctl &= (~IXGBE_FCTRL_MPE);
   2747 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2748 
   2749 	if (ifp->if_flags & IFF_PROMISC) {
   2750 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2751 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2752 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2753 		rctl |= IXGBE_FCTRL_MPE;
   2754 		rctl &= ~IXGBE_FCTRL_UPE;
   2755 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2756 	}
   2757 } /* ixgbe_set_promisc */
   2758 
   2759 /************************************************************************
   2760  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2761  ************************************************************************/
   2762 static int
   2763 ixgbe_msix_link(void *arg)
   2764 {
   2765 	struct adapter	*adapter = arg;
   2766 	struct ixgbe_hw *hw = &adapter->hw;
   2767 	u32		eicr, eicr_mask;
   2768 	s32             retval;
   2769 
   2770 	++adapter->link_irq.ev_count;
   2771 
   2772 	/* Pause other interrupts */
   2773 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2774 
   2775 	/* First get the cause */
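	/*
	 * Reading EICS returns the pending causes without EICR's
	 * clear-on-read side effect; the bits of interest are cleared
	 * explicitly with the EICR write below.
	 */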
   2776 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2777 	/* Be sure the queue bits are not cleared */
   2778 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2779 	/* Clear interrupt with write */
   2780 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2781 
   2782 	/* Link status change */
   2783 	if (eicr & IXGBE_EICR_LSC) {
   2784 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2785 		softint_schedule(adapter->link_si);
   2786 	}
   2787 
   2788 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2789 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2790 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2791 			/* This is probably overkill :) */
   2792 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   2793 				return 1;
   2794 			/* Disable the interrupt */
   2795 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2796 			softint_schedule(adapter->fdir_si);
   2797 		}
   2798 
   2799 		if (eicr & IXGBE_EICR_ECC) {
   2800 			device_printf(adapter->dev,
   2801 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2802 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2803 		}
   2804 
   2805 		/* Check for over temp condition */
   2806 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2807 			switch (adapter->hw.mac.type) {
   2808 			case ixgbe_mac_X550EM_a:
   2809 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2810 					break;
   2811 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2812 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2813 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2814 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2815 				retval = hw->phy.ops.check_overtemp(hw);
   2816 				if (retval != IXGBE_ERR_OVERTEMP)
   2817 					break;
   2818 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2819 				device_printf(adapter->dev, "System shutdown required!\n");
   2820 				break;
   2821 			default:
   2822 				if (!(eicr & IXGBE_EICR_TS))
   2823 					break;
   2824 				retval = hw->phy.ops.check_overtemp(hw);
   2825 				if (retval != IXGBE_ERR_OVERTEMP)
   2826 					break;
   2827 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2828 				device_printf(adapter->dev, "System shutdown required!\n");
   2829 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2830 				break;
   2831 			}
   2832 		}
   2833 
   2834 		/* Check for VF message */
   2835 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2836 		    (eicr & IXGBE_EICR_MAILBOX))
   2837 			softint_schedule(adapter->mbx_si);
   2838 	}
   2839 
   2840 	if (ixgbe_is_sfp(hw)) {
   2841 		/* Pluggable optics-related interrupt */
   2842 		if (hw->mac.type >= ixgbe_mac_X540)
   2843 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2844 		else
   2845 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2846 
   2847 		if (eicr & eicr_mask) {
   2848 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2849 			softint_schedule(adapter->mod_si);
   2850 		}
   2851 
   2852 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2853 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2854 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2855 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2856 			softint_schedule(adapter->msf_si);
   2857 		}
   2858 	}
   2859 
   2860 	/* Check for fan failure */
   2861 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2862 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2863 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2864 	}
   2865 
   2866 	/* External PHY interrupt */
   2867 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2868 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2869 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2870 		softint_schedule(adapter->phy_si);
   2871  	}
   2872 
   2873 	/* Re-enable other interrupts */
   2874 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2875 	return 1;
   2876 } /* ixgbe_msix_link */
   2877 
   2878 /************************************************************************
   2879  * ixgbe_sysctl_interrupt_rate_handler
   2880  ************************************************************************/
   2881 static int
   2882 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2883 {
   2884 	struct sysctlnode node = *rnode;
   2885 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2886 	uint32_t reg, usec, rate;
   2887 	int error;
   2888 
   2889 	if (que == NULL)
   2890 		return 0;
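         	/*
         	 * The EITR interval lives in bits 3..11 of the register.  The
         	 * current rate is reported as 500000 / interval; on write the
         	 * new interval is stored pre-shifted into place, since
         	 * 4000000 == 500000 << 3.
         	 */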
   2891 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2892 	usec = ((reg & 0x0FF8) >> 3);
   2893 	if (usec > 0)
   2894 		rate = 500000 / usec;
   2895 	else
   2896 		rate = 0;
   2897 	node.sysctl_data = &rate;
   2898 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2899 	if (error || newp == NULL)
   2900 		return error;
   2901 	reg &= ~0xfff; /* default, no limitation */
   2902 	ixgbe_max_interrupt_rate = 0;
   2903 	if (rate > 0 && rate < 500000) {
   2904 		if (rate < 1000)
   2905 			rate = 1000;
   2906 		ixgbe_max_interrupt_rate = rate;
   2907 		reg |= ((4000000/rate) & 0xff8);
   2908 	}
   2909 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2910 
   2911 	return (0);
   2912 } /* ixgbe_sysctl_interrupt_rate_handler */
   2913 
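         /************************************************************************
          * ixgbe_sysctl_instance - Find or create this device's sysctl root node
          ************************************************************************/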
   2914 const struct sysctlnode *
   2915 ixgbe_sysctl_instance(struct adapter *adapter)
   2916 {
   2917 	const char *dvname;
   2918 	struct sysctllog **log;
   2919 	int rc;
   2920 	const struct sysctlnode *rnode;
   2921 
   2922 	if (adapter->sysctltop != NULL)
   2923 		return adapter->sysctltop;
   2924 
   2925 	log = &adapter->sysctllog;
   2926 	dvname = device_xname(adapter->dev);
   2927 
   2928 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2929 	    0, CTLTYPE_NODE, dvname,
   2930 	    SYSCTL_DESCR("ixgbe information and settings"),
   2931 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2932 		goto err;
   2933 
   2934 	return rnode;
   2935 err:
   2936 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2937 	return NULL;
   2938 }
   2939 
   2940 /************************************************************************
   2941  * ixgbe_add_device_sysctls
   2942  ************************************************************************/
   2943 static void
   2944 ixgbe_add_device_sysctls(struct adapter *adapter)
   2945 {
   2946 	device_t               dev = adapter->dev;
   2947 	struct ixgbe_hw        *hw = &adapter->hw;
   2948 	struct sysctllog **log;
   2949 	const struct sysctlnode *rnode, *cnode;
   2950 
   2951 	log = &adapter->sysctllog;
   2952 
   2953 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   2954 		aprint_error_dev(dev, "could not create sysctl root\n");
   2955 		return;
   2956 	}
   2957 
   2958 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2959 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2960 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   2961 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   2962 		aprint_error_dev(dev, "could not create sysctl\n");
   2963 
   2964 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2965 	    CTLFLAG_READONLY, CTLTYPE_INT,
   2966 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   2967 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   2968 		aprint_error_dev(dev, "could not create sysctl\n");
   2969 
   2970 	/* Sysctls for all devices */
   2971 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2972 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   2973 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   2974 	    CTL_EOL) != 0)
   2975 		aprint_error_dev(dev, "could not create sysctl\n");
   2976 
   2977 	adapter->enable_aim = ixgbe_enable_aim;
   2978 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2979 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2980 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2981 		aprint_error_dev(dev, "could not create sysctl\n");
   2982 
   2983 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2984 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2985 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   2986 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   2987 	    CTL_EOL) != 0)
   2988 		aprint_error_dev(dev, "could not create sysctl\n");
   2989 
   2990 #ifdef IXGBE_DEBUG
   2991 	/* testing sysctls (for all devices) */
   2992 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   2993 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   2994 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   2995 	    CTL_EOL) != 0)
   2996 		aprint_error_dev(dev, "could not create sysctl\n");
   2997 
   2998 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   2999 	    CTLTYPE_STRING, "print_rss_config",
   3000 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3001 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3002 	    CTL_EOL) != 0)
   3003 		aprint_error_dev(dev, "could not create sysctl\n");
   3004 #endif
   3005 	/* for X550 series devices */
   3006 	if (hw->mac.type >= ixgbe_mac_X550)
   3007 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3008 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3009 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3010 		    CTL_EOL) != 0)
   3011 			aprint_error_dev(dev, "could not create sysctl\n");
   3012 
   3013 	/* for WoL-capable devices */
   3014 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3015 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3016 		    CTLTYPE_BOOL, "wol_enable",
   3017 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3018 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3019 		    CTL_EOL) != 0)
   3020 			aprint_error_dev(dev, "could not create sysctl\n");
   3021 
   3022 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3023 		    CTLTYPE_INT, "wufc",
   3024 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3025 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3026 		    CTL_EOL) != 0)
   3027 			aprint_error_dev(dev, "could not create sysctl\n");
   3028 	}
   3029 
   3030 	/* for X552/X557-AT devices */
   3031 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3032 		const struct sysctlnode *phy_node;
   3033 
   3034 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3035 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3036 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3037 			aprint_error_dev(dev, "could not create sysctl\n");
   3038 			return;
   3039 		}
   3040 
   3041 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3042 		    CTLTYPE_INT, "temp",
   3043 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3044 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3045 		    CTL_EOL) != 0)
   3046 			aprint_error_dev(dev, "could not create sysctl\n");
   3047 
   3048 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3049 		    CTLTYPE_INT, "overtemp_occurred",
   3050 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3051 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3052 		    CTL_CREATE, CTL_EOL) != 0)
   3053 			aprint_error_dev(dev, "could not create sysctl\n");
   3054 	}
   3055 
   3056 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3057 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3058 		    CTLTYPE_INT, "eee_state",
   3059 		    SYSCTL_DESCR("EEE Power Save State"),
   3060 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3061 		    CTL_EOL) != 0)
   3062 			aprint_error_dev(dev, "could not create sysctl\n");
   3063 	}
   3064 } /* ixgbe_add_device_sysctls */
   3065 
   3066 /************************************************************************
   3067  * ixgbe_allocate_pci_resources
   3068  ************************************************************************/
   3069 static int
   3070 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3071     const struct pci_attach_args *pa)
   3072 {
   3073 	pcireg_t	memtype;
   3074 	device_t dev = adapter->dev;
   3075 	bus_addr_t addr;
   3076 	int flags;
   3077 
   3078 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3079 	switch (memtype) {
   3080 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3081 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3082 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3083 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3084 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3085 			goto map_err;
   3086 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3087 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3088 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3089 		}
   3090 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3091 		     adapter->osdep.mem_size, flags,
   3092 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3093 map_err:
   3094 			adapter->osdep.mem_size = 0;
   3095 			aprint_error_dev(dev, "unable to map BAR0\n");
   3096 			return ENXIO;
   3097 		}
   3098 		break;
   3099 	default:
   3100 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3101 		return ENXIO;
   3102 	}
   3103 
   3104 	return (0);
   3105 } /* ixgbe_allocate_pci_resources */
   3106 
   3107 /************************************************************************
   3108  * ixgbe_detach - Device removal routine
   3109  *
   3110  *   Called when the driver is being removed.
   3111  *   Stops the adapter and deallocates all the resources
   3112  *   that were allocated for driver operation.
   3113  *
   3114  *   return 0 on success, positive on failure
   3115  ************************************************************************/
   3116 static int
   3117 ixgbe_detach(device_t dev, int flags)
   3118 {
   3119 	struct adapter *adapter = device_private(dev);
   3120 	struct ix_queue *que = adapter->queues;
   3121 	struct rx_ring *rxr = adapter->rx_rings;
   3122 	struct tx_ring *txr = adapter->tx_rings;
   3123 	struct ixgbe_hw *hw = &adapter->hw;
   3124 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3125 	u32	ctrl_ext;
   3126 
   3127 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3128 	if (adapter->osdep.attached == false)
   3129 		return 0;
   3130 
   3131 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3132 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3133 		return (EBUSY);
   3134 	}
   3135 
   3136 	/* Stop the interface. Callouts are stopped in it. */
   3137 	ixgbe_ifstop(adapter->ifp, 1);
   3138 #if NVLAN > 0
   3139 	/* Make sure VLANs are not using driver */
   3140 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3141 		;	/* nothing to do: no VLANs */
   3142 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3143 		vlan_ifdetach(adapter->ifp);
   3144 	else {
   3145 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3146 		return (EBUSY);
   3147 	}
   3148 #endif
   3149 
   3150 	pmf_device_deregister(dev);
   3151 
   3152 	ether_ifdetach(adapter->ifp);
   3153 	/* Stop the adapter */
   3154 	IXGBE_CORE_LOCK(adapter);
   3155 	ixgbe_setup_low_power_mode(adapter);
   3156 	IXGBE_CORE_UNLOCK(adapter);
   3157 
   3158 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3159 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3160 			softint_disestablish(txr->txr_si);
   3161 		softint_disestablish(que->que_si);
   3162 	}
   3163 
   3164 	/* Drain the Link queue */
   3165 	softint_disestablish(adapter->link_si);
   3166 	softint_disestablish(adapter->mod_si);
   3167 	softint_disestablish(adapter->msf_si);
   3168 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   3169 		softint_disestablish(adapter->mbx_si);
   3170 	softint_disestablish(adapter->phy_si);
   3171 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   3172 		softint_disestablish(adapter->fdir_si);
   3173 
   3174 	/* let hardware know driver is unloading */
   3175 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3176 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3177 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3178 
   3179 	callout_halt(&adapter->timer, NULL);
   3180 
   3181 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3182 		netmap_detach(adapter->ifp);
   3183 
   3184 	ixgbe_free_pci_resources(adapter);
   3185 #if 0	/* XXX the NetBSD port is probably missing something here */
   3186 	bus_generic_detach(dev);
   3187 #endif
   3188 	if_detach(adapter->ifp);
   3189 	if_percpuq_destroy(adapter->ipq);
   3190 
   3191 	sysctl_teardown(&adapter->sysctllog);
   3192 	evcnt_detach(&adapter->handleq);
   3193 	evcnt_detach(&adapter->req);
   3194 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3195 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3196 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3197 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3198 	evcnt_detach(&adapter->other_tx_dma_setup);
   3199 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3200 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3201 	evcnt_detach(&adapter->watchdog_events);
   3202 	evcnt_detach(&adapter->tso_err);
   3203 	evcnt_detach(&adapter->link_irq);
   3204 
   3205 	txr = adapter->tx_rings;
   3206 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3207 		evcnt_detach(&adapter->queues[i].irqs);
   3208 		evcnt_detach(&txr->no_desc_avail);
   3209 		evcnt_detach(&txr->total_packets);
   3210 		evcnt_detach(&txr->tso_tx);
   3211 #ifndef IXGBE_LEGACY_TX
   3212 		evcnt_detach(&txr->pcq_drops);
   3213 #endif
   3214 
   3215 		if (i < __arraycount(stats->mpc)) {
   3216 			evcnt_detach(&stats->mpc[i]);
   3217 			if (hw->mac.type == ixgbe_mac_82598EB)
   3218 				evcnt_detach(&stats->rnbc[i]);
   3219 		}
   3220 		if (i < __arraycount(stats->pxontxc)) {
   3221 			evcnt_detach(&stats->pxontxc[i]);
   3222 			evcnt_detach(&stats->pxonrxc[i]);
   3223 			evcnt_detach(&stats->pxofftxc[i]);
   3224 			evcnt_detach(&stats->pxoffrxc[i]);
   3225 			evcnt_detach(&stats->pxon2offc[i]);
   3226 		}
   3227 		if (i < __arraycount(stats->qprc)) {
   3228 			evcnt_detach(&stats->qprc[i]);
   3229 			evcnt_detach(&stats->qptc[i]);
   3230 			evcnt_detach(&stats->qbrc[i]);
   3231 			evcnt_detach(&stats->qbtc[i]);
   3232 			evcnt_detach(&stats->qprdc[i]);
   3233 		}
   3234 
   3235 		evcnt_detach(&rxr->rx_packets);
   3236 		evcnt_detach(&rxr->rx_bytes);
   3237 		evcnt_detach(&rxr->rx_copies);
   3238 		evcnt_detach(&rxr->no_jmbuf);
   3239 		evcnt_detach(&rxr->rx_discarded);
   3240 	}
   3241 	evcnt_detach(&stats->ipcs);
   3242 	evcnt_detach(&stats->l4cs);
   3243 	evcnt_detach(&stats->ipcs_bad);
   3244 	evcnt_detach(&stats->l4cs_bad);
   3245 	evcnt_detach(&stats->intzero);
   3246 	evcnt_detach(&stats->legint);
   3247 	evcnt_detach(&stats->crcerrs);
   3248 	evcnt_detach(&stats->illerrc);
   3249 	evcnt_detach(&stats->errbc);
   3250 	evcnt_detach(&stats->mspdc);
   3251 	if (hw->mac.type >= ixgbe_mac_X550)
   3252 		evcnt_detach(&stats->mbsdc);
   3253 	evcnt_detach(&stats->mpctotal);
   3254 	evcnt_detach(&stats->mlfc);
   3255 	evcnt_detach(&stats->mrfc);
   3256 	evcnt_detach(&stats->rlec);
   3257 	evcnt_detach(&stats->lxontxc);
   3258 	evcnt_detach(&stats->lxonrxc);
   3259 	evcnt_detach(&stats->lxofftxc);
   3260 	evcnt_detach(&stats->lxoffrxc);
   3261 
   3262 	/* Packet Reception Stats */
   3263 	evcnt_detach(&stats->tor);
   3264 	evcnt_detach(&stats->gorc);
   3265 	evcnt_detach(&stats->tpr);
   3266 	evcnt_detach(&stats->gprc);
   3267 	evcnt_detach(&stats->mprc);
   3268 	evcnt_detach(&stats->bprc);
   3269 	evcnt_detach(&stats->prc64);
   3270 	evcnt_detach(&stats->prc127);
   3271 	evcnt_detach(&stats->prc255);
   3272 	evcnt_detach(&stats->prc511);
   3273 	evcnt_detach(&stats->prc1023);
   3274 	evcnt_detach(&stats->prc1522);
   3275 	evcnt_detach(&stats->ruc);
   3276 	evcnt_detach(&stats->rfc);
   3277 	evcnt_detach(&stats->roc);
   3278 	evcnt_detach(&stats->rjc);
   3279 	evcnt_detach(&stats->mngprc);
   3280 	evcnt_detach(&stats->mngpdc);
   3281 	evcnt_detach(&stats->xec);
   3282 
   3283 	/* Packet Transmission Stats */
   3284 	evcnt_detach(&stats->gotc);
   3285 	evcnt_detach(&stats->tpt);
   3286 	evcnt_detach(&stats->gptc);
   3287 	evcnt_detach(&stats->bptc);
   3288 	evcnt_detach(&stats->mptc);
   3289 	evcnt_detach(&stats->mngptc);
   3290 	evcnt_detach(&stats->ptc64);
   3291 	evcnt_detach(&stats->ptc127);
   3292 	evcnt_detach(&stats->ptc255);
   3293 	evcnt_detach(&stats->ptc511);
   3294 	evcnt_detach(&stats->ptc1023);
   3295 	evcnt_detach(&stats->ptc1522);
   3296 
   3297 	ixgbe_free_transmit_structures(adapter);
   3298 	ixgbe_free_receive_structures(adapter);
   3299 	free(adapter->queues, M_DEVBUF);
   3300 	free(adapter->mta, M_DEVBUF);
   3301 
   3302 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3303 
   3304 	return (0);
   3305 } /* ixgbe_detach */
   3306 
   3307 /************************************************************************
   3308  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3309  *
   3310  *   Prepare the adapter/port for LPLU and/or WoL
   3311  ************************************************************************/
   3312 static int
   3313 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3314 {
   3315 	struct ixgbe_hw *hw = &adapter->hw;
   3316 	device_t        dev = adapter->dev;
   3317 	s32             error = 0;
   3318 
   3319 	KASSERT(mutex_owned(&adapter->core_mtx));
   3320 
   3321 	/* Limit power management flow to X550EM baseT */
   3322 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3323 	    hw->phy.ops.enter_lplu) {
   3324 		/* X550EM baseT adapters need a special LPLU flow */
   3325 		hw->phy.reset_disable = true;
   3326 		ixgbe_stop(adapter);
   3327 		error = hw->phy.ops.enter_lplu(hw);
   3328 		if (error)
   3329 			device_printf(dev,
   3330 			    "Error entering LPLU: %d\n", error);
   3331 		hw->phy.reset_disable = false;
   3332 	} else {
   3333 		/* Just stop for other adapters */
   3334 		ixgbe_stop(adapter);
   3335 	}
   3336 
   3337 	if (!hw->wol_enabled) {
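         		/*
         		 * WoL is not enabled: power down the PHY and clear every
         		 * wake-up filter and wake-up control bit.
         		 */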
   3338 		ixgbe_set_phy_power(hw, FALSE);
   3339 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3340 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3341 	} else {
   3342 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3343 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3344 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3345 
   3346 		/*
   3347 		 * Clear Wake Up Status register to prevent any previous wakeup
   3348 		 * events from waking us up immediately after we suspend.
   3349 		 */
   3350 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3351 
   3352 		/*
   3353 		 * Program the Wakeup Filter Control register with user filter
   3354 		 * settings
   3355 		 */
   3356 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3357 
   3358 		/* Enable wakeups and power management in Wakeup Control */
   3359 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3360 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3361 
   3362 	}
   3363 
   3364 	return error;
   3365 } /* ixgbe_setup_low_power_mode */
   3366 
   3367 /************************************************************************
   3368  * ixgbe_shutdown - Shutdown entry point
   3369  ************************************************************************/
   3370 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3371 static int
   3372 ixgbe_shutdown(device_t dev)
   3373 {
   3374 	struct adapter *adapter = device_private(dev);
   3375 	int error = 0;
   3376 
   3377 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3378 
   3379 	IXGBE_CORE_LOCK(adapter);
   3380 	error = ixgbe_setup_low_power_mode(adapter);
   3381 	IXGBE_CORE_UNLOCK(adapter);
   3382 
   3383 	return (error);
   3384 } /* ixgbe_shutdown */
   3385 #endif
   3386 
   3387 /************************************************************************
   3388  * ixgbe_suspend
   3389  *
   3390  *   From D0 to D3
   3391  ************************************************************************/
   3392 static bool
   3393 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3394 {
   3395 	struct adapter *adapter = device_private(dev);
   3396 	int            error = 0;
   3397 
   3398 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3399 
   3400 	IXGBE_CORE_LOCK(adapter);
   3401 
   3402 	error = ixgbe_setup_low_power_mode(adapter);
   3403 
   3404 	IXGBE_CORE_UNLOCK(adapter);
   3405 
    3406 	return (error == 0);	/* pmf expects true on success */
   3407 } /* ixgbe_suspend */
   3408 
   3409 /************************************************************************
   3410  * ixgbe_resume
   3411  *
   3412  *   From D3 to D0
   3413  ************************************************************************/
   3414 static bool
   3415 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3416 {
   3417 	struct adapter  *adapter = device_private(dev);
   3418 	struct ifnet    *ifp = adapter->ifp;
   3419 	struct ixgbe_hw *hw = &adapter->hw;
   3420 	u32             wus;
   3421 
   3422 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3423 
   3424 	IXGBE_CORE_LOCK(adapter);
   3425 
   3426 	/* Read & clear WUS register */
   3427 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
    3428 	if (wus)
    3429 		device_printf(dev,
    3430 		    "Woken up by (WUS): %#010x\n", wus);
   3431 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3432 	/* And clear WUFC until next low-power transition */
   3433 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3434 
   3435 	/*
   3436 	 * Required after D3->D0 transition;
   3437 	 * will re-advertise all previous advertised speeds
   3438 	 */
   3439 	if (ifp->if_flags & IFF_UP)
   3440 		ixgbe_init_locked(adapter);
   3441 
   3442 	IXGBE_CORE_UNLOCK(adapter);
   3443 
   3444 	return true;
   3445 } /* ixgbe_resume */
   3446 
   3447 /*
   3448  * Set the various hardware offload abilities.
   3449  *
   3450  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3451  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3452  * mbuf offload flags the driver will understand.
   3453  */
   3454 static void
   3455 ixgbe_set_if_hwassist(struct adapter *adapter)
   3456 {
   3457 	/* XXX */
   3458 }
   3459 
   3460 /************************************************************************
   3461  * ixgbe_init_locked - Init entry point
   3462  *
    3463  *   Used in two ways: by the stack as the init entry point of the
    3464  *   network interface structure, and by the driver as a hw/sw
    3465  *   initialization routine to get to a consistent state.
    3466  *
    3467  *   Returns nothing; on failure the adapter is stopped and an
    3468  *   error is logged.
   3469  ************************************************************************/
   3470 static void
   3471 ixgbe_init_locked(struct adapter *adapter)
   3472 {
   3473 	struct ifnet   *ifp = adapter->ifp;
   3474 	device_t 	dev = adapter->dev;
   3475 	struct ixgbe_hw *hw = &adapter->hw;
   3476 	struct tx_ring  *txr;
   3477 	struct rx_ring  *rxr;
   3478 	u32		txdctl, mhadd;
   3479 	u32		rxdctl, rxctrl;
   3480 	u32             ctrl_ext;
   3481 	int             err = 0;
   3482 
   3483 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3484 
   3485 	KASSERT(mutex_owned(&adapter->core_mtx));
   3486 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3487 
   3488 	hw->adapter_stopped = FALSE;
   3489 	ixgbe_stop_adapter(hw);
    3490 	callout_stop(&adapter->timer);
   3491 
   3492 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3493 	adapter->max_frame_size =
   3494 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3495 
   3496 	/* Queue indices may change with IOV mode */
   3497 	ixgbe_align_all_queue_indices(adapter);
   3498 
   3499 	/* reprogram the RAR[0] in case user changed it. */
   3500 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3501 
   3502 	/* Get the latest mac address, User can use a LAA */
   3503 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3504 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3505 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3506 	hw->addr_ctrl.rar_used_count = 1;
   3507 
   3508 	/* Set hardware offload abilities from ifnet flags */
   3509 	ixgbe_set_if_hwassist(adapter);
   3510 
   3511 	/* Prepare transmit descriptors and buffers */
   3512 	if (ixgbe_setup_transmit_structures(adapter)) {
   3513 		device_printf(dev, "Could not setup transmit structures\n");
   3514 		ixgbe_stop(adapter);
   3515 		return;
   3516 	}
   3517 
   3518 	ixgbe_init_hw(hw);
   3519 	ixgbe_initialize_iov(adapter);
   3520 	ixgbe_initialize_transmit_units(adapter);
   3521 
   3522 	/* Setup Multicast table */
   3523 	ixgbe_set_multi(adapter);
   3524 
   3525 	/* Determine the correct mbuf pool, based on frame size */
   3526 	if (adapter->max_frame_size <= MCLBYTES)
   3527 		adapter->rx_mbuf_sz = MCLBYTES;
   3528 	else
   3529 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3530 
   3531 	/* Prepare receive descriptors and buffers */
   3532 	if (ixgbe_setup_receive_structures(adapter)) {
   3533 		device_printf(dev, "Could not setup receive structures\n");
   3534 		ixgbe_stop(adapter);
   3535 		return;
   3536 	}
   3537 
   3538 	/* Configure RX settings */
   3539 	ixgbe_initialize_receive_units(adapter);
   3540 
   3541 	/* Enable SDP & MSI-X interrupts based on adapter */
   3542 	ixgbe_config_gpie(adapter);
   3543 
   3544 	/* Set MTU size */
   3545 	if (ifp->if_mtu > ETHERMTU) {
   3546 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3547 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3548 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3549 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3550 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3551 	}
   3552 
   3553 	/* Now enable all the queues */
   3554 	for (int i = 0; i < adapter->num_queues; i++) {
   3555 		txr = &adapter->tx_rings[i];
   3556 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3557 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3558 		/* Set WTHRESH to 8, burst writeback */
   3559 		txdctl |= (8 << 16);
   3560 		/*
   3561 		 * When the internal queue falls below PTHRESH (32),
   3562 		 * start prefetching as long as there are at least
   3563 		 * HTHRESH (1) buffers ready. The values are taken
   3564 		 * from the Intel linux driver 3.8.21.
   3565 		 * Prefetching enables tx line rate even with 1 queue.
   3566 		 */
   3567 		txdctl |= (32 << 0) | (1 << 8);
   3568 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3569 	}
   3570 
   3571 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3572 		rxr = &adapter->rx_rings[i];
   3573 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3574 		if (hw->mac.type == ixgbe_mac_82598EB) {
    3575 			/*
    3576 			 * PTHRESH = 0x20 (32)
    3577 			 * HTHRESH = 0x04
    3578 			 * WTHRESH = 0x08
    3579 			 */
   3580 			rxdctl &= ~0x3FFFFF;
   3581 			rxdctl |= 0x080420;
   3582 		}
   3583 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3584 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3585 		for (; j < 10; j++) {
   3586 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3587 			    IXGBE_RXDCTL_ENABLE)
   3588 				break;
   3589 			else
   3590 				msec_delay(1);
   3591 		}
   3592 		wmb();
   3593 
   3594 		/*
   3595 		 * In netmap mode, we must preserve the buffers made
   3596 		 * available to userspace before the if_init()
   3597 		 * (this is true by default on the TX side, because
   3598 		 * init makes all buffers available to userspace).
   3599 		 *
   3600 		 * netmap_reset() and the device specific routines
   3601 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3602 		 * buffers at the end of the NIC ring, so here we
   3603 		 * must set the RDT (tail) register to make sure
   3604 		 * they are not overwritten.
   3605 		 *
   3606 		 * In this driver the NIC ring starts at RDH = 0,
   3607 		 * RDT points to the last slot available for reception (?),
   3608 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3609 		 */
   3610 #ifdef DEV_NETMAP
   3611 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3612 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3613 			struct netmap_adapter *na = NA(adapter->ifp);
   3614 			struct netmap_kring *kring = &na->rx_rings[i];
   3615 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3616 
   3617 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3618 		} else
   3619 #endif /* DEV_NETMAP */
   3620 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3621 			    adapter->num_rx_desc - 1);
   3622 	}
   3623 
   3624 	/* Enable Receive engine */
   3625 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3626 	if (hw->mac.type == ixgbe_mac_82598EB)
   3627 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3628 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3629 	ixgbe_enable_rx_dma(hw, rxctrl);
   3630 
   3631 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3632 
   3633 	/* Set up MSI-X routing */
   3634 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3635 		ixgbe_configure_ivars(adapter);
   3636 		/* Set up auto-mask */
   3637 		if (hw->mac.type == ixgbe_mac_82598EB)
   3638 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3639 		else {
   3640 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3641 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3642 		}
   3643 	} else {  /* Simple settings for Legacy/MSI */
   3644 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3645 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3646 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3647 	}
   3648 
   3649 	ixgbe_init_fdir(adapter);
   3650 
   3651 	/*
   3652 	 * Check on any SFP devices that
   3653 	 * need to be kick-started
   3654 	 */
   3655 	if (hw->phy.type == ixgbe_phy_none) {
   3656 		err = hw->phy.ops.identify(hw);
   3657 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3658 			device_printf(dev,
    3659 			    "Unsupported SFP+ module type was detected.\n");
    3660 			return;
    3661 		}
   3662 	}
   3663 
   3664 	/* Set moderation on the Link interrupt */
   3665 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3666 
   3667 	/* Config/Enable Link */
   3668 	ixgbe_config_link(adapter);
   3669 
   3670 	/* Hardware Packet Buffer & Flow Control setup */
   3671 	ixgbe_config_delay_values(adapter);
   3672 
   3673 	/* Initialize the FC settings */
   3674 	ixgbe_start_hw(hw);
   3675 
   3676 	/* Set up VLAN support and filter */
   3677 	ixgbe_setup_vlan_hw_support(adapter);
   3678 
   3679 	/* Setup DMA Coalescing */
   3680 	ixgbe_config_dmac(adapter);
   3681 
   3682 	/* And now turn on interrupts */
   3683 	ixgbe_enable_intr(adapter);
   3684 
   3685 	/* Enable the use of the MBX by the VF's */
   3686 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3687 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3688 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3689 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3690 	}
   3691 
   3692 	/* Now inform the stack we're ready */
   3693 	ifp->if_flags |= IFF_RUNNING;
   3694 
   3695 	return;
   3696 } /* ixgbe_init_locked */
   3697 
   3698 /************************************************************************
   3699  * ixgbe_init
   3700  ************************************************************************/
   3701 static int
   3702 ixgbe_init(struct ifnet *ifp)
   3703 {
   3704 	struct adapter *adapter = ifp->if_softc;
   3705 
   3706 	IXGBE_CORE_LOCK(adapter);
   3707 	ixgbe_init_locked(adapter);
   3708 	IXGBE_CORE_UNLOCK(adapter);
   3709 
   3710 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3711 } /* ixgbe_init */
   3712 
   3713 /************************************************************************
   3714  * ixgbe_set_ivar
   3715  *
   3716  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3717  *     (yes this is all very magic and confusing :)
   3718  *    - entry is the register array entry
   3719  *    - vector is the MSI-X vector for this queue
   3720  *    - type is RX/TX/MISC
   3721  ************************************************************************/
   3722 static void
   3723 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3724 {
   3725 	struct ixgbe_hw *hw = &adapter->hw;
   3726 	u32 ivar, index;
   3727 
   3728 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3729 
   3730 	switch (hw->mac.type) {
   3731 
   3732 	case ixgbe_mac_82598EB:
   3733 		if (type == -1)
   3734 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3735 		else
   3736 			entry += (type * 64);
   3737 		index = (entry >> 2) & 0x1F;
   3738 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3739 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3740 		ivar |= (vector << (8 * (entry & 0x3)));
   3741 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3742 		break;
   3743 
   3744 	case ixgbe_mac_82599EB:
   3745 	case ixgbe_mac_X540:
   3746 	case ixgbe_mac_X550:
   3747 	case ixgbe_mac_X550EM_x:
   3748 	case ixgbe_mac_X550EM_a:
   3749 		if (type == -1) { /* MISC IVAR */
   3750 			index = (entry & 1) * 8;
   3751 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3752 			ivar &= ~(0xFF << index);
   3753 			ivar |= (vector << index);
   3754 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3755 		} else {	/* RX/TX IVARS */
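         			/*
         			 * Each IVAR register packs four 8-bit entries
         			 * covering two queues: RX entries in bytes 0/2,
         			 * TX entries in bytes 1/3, selected by queue
         			 * parity and the type argument.
         			 */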
   3756 			index = (16 * (entry & 1)) + (8 * type);
   3757 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3758 			ivar &= ~(0xFF << index);
   3759 			ivar |= (vector << index);
   3760 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    3761 		}
    3762 		break;
   3763 	default:
   3764 		break;
   3765 	}
   3766 } /* ixgbe_set_ivar */
   3767 
   3768 /************************************************************************
   3769  * ixgbe_configure_ivars
   3770  ************************************************************************/
   3771 static void
   3772 ixgbe_configure_ivars(struct adapter *adapter)
   3773 {
   3774 	struct ix_queue *que = adapter->queues;
   3775 	u32             newitr;
   3776 
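         	/*
         	 * Convert the ixgbe_max_interrupt_rate tunable into an EITR
         	 * interval, pre-shifted into bits 3..11 (4000000 == 500000 << 3).
         	 */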
   3777 	if (ixgbe_max_interrupt_rate > 0)
   3778 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3779 	else {
   3780 		/*
   3781 		 * Disable DMA coalescing if interrupt moderation is
   3782 		 * disabled.
   3783 		 */
   3784 		adapter->dmac = 0;
   3785 		newitr = 0;
   3786 	}
   3787 
    3788 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3789 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3790 		struct tx_ring *txr = &adapter->tx_rings[i];
   3791 		/* First the RX queue entry */
    3792 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3793 		/* ... and the TX */
   3794 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3795 		/* Set an Initial EITR value */
   3796 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3797 	}
   3798 
   3799 	/* For the Link interrupt */
    3800 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3801 } /* ixgbe_configure_ivars */
   3802 
   3803 /************************************************************************
   3804  * ixgbe_config_gpie
   3805  ************************************************************************/
   3806 static void
   3807 ixgbe_config_gpie(struct adapter *adapter)
   3808 {
   3809 	struct ixgbe_hw *hw = &adapter->hw;
   3810 	u32             gpie;
   3811 
   3812 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3813 
   3814 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3815 		/* Enable Enhanced MSI-X mode */
   3816 		gpie |= IXGBE_GPIE_MSIX_MODE
   3817 		     |  IXGBE_GPIE_EIAME
   3818 		     |  IXGBE_GPIE_PBA_SUPPORT
   3819 		     |  IXGBE_GPIE_OCD;
   3820 	}
   3821 
   3822 	/* Fan Failure Interrupt */
   3823 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3824 		gpie |= IXGBE_SDP1_GPIEN;
   3825 
   3826 	/* Thermal Sensor Interrupt */
   3827 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3828 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3829 
   3830 	/* Link detection */
   3831 	switch (hw->mac.type) {
   3832 	case ixgbe_mac_82599EB:
   3833 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3834 		break;
   3835 	case ixgbe_mac_X550EM_x:
   3836 	case ixgbe_mac_X550EM_a:
   3837 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3838 		break;
   3839 	default:
   3840 		break;
   3841 	}
   3842 
   3843 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3844 
   3845 	return;
   3846 } /* ixgbe_config_gpie */
   3847 
   3848 /************************************************************************
   3849  * ixgbe_config_delay_values
   3850  *
   3851  *   Requires adapter->max_frame_size to be set.
   3852  ************************************************************************/
   3853 static void
   3854 ixgbe_config_delay_values(struct adapter *adapter)
   3855 {
   3856 	struct ixgbe_hw *hw = &adapter->hw;
   3857 	u32             rxpb, frame, size, tmp;
   3858 
   3859 	frame = adapter->max_frame_size;
   3860 
   3861 	/* Calculate High Water */
   3862 	switch (hw->mac.type) {
   3863 	case ixgbe_mac_X540:
   3864 	case ixgbe_mac_X550:
   3865 	case ixgbe_mac_X550EM_x:
   3866 	case ixgbe_mac_X550EM_a:
   3867 		tmp = IXGBE_DV_X540(frame, frame);
   3868 		break;
   3869 	default:
   3870 		tmp = IXGBE_DV(frame, frame);
   3871 		break;
   3872 	}
   3873 	size = IXGBE_BT2KB(tmp);
   3874 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
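         	/* rxpb and size are both in KB; high water leaves "size" KB of headroom. */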
   3875 	hw->fc.high_water[0] = rxpb - size;
   3876 
   3877 	/* Now calculate Low Water */
   3878 	switch (hw->mac.type) {
   3879 	case ixgbe_mac_X540:
   3880 	case ixgbe_mac_X550:
   3881 	case ixgbe_mac_X550EM_x:
   3882 	case ixgbe_mac_X550EM_a:
   3883 		tmp = IXGBE_LOW_DV_X540(frame);
   3884 		break;
   3885 	default:
   3886 		tmp = IXGBE_LOW_DV(frame);
   3887 		break;
   3888 	}
   3889 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3890 
   3891 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3892 	hw->fc.send_xon = TRUE;
   3893 } /* ixgbe_config_delay_values */
   3894 
   3895 /************************************************************************
   3896  * ixgbe_set_multi - Multicast Update
   3897  *
   3898  *   Called whenever multicast address list is updated.
   3899  ************************************************************************/
   3900 static void
   3901 ixgbe_set_multi(struct adapter *adapter)
   3902 {
   3903 	struct ixgbe_mc_addr	*mta;
   3904 	struct ifnet		*ifp = adapter->ifp;
   3905 	u8			*update_ptr;
   3906 	int			mcnt = 0;
   3907 	u32			fctrl;
   3908 	struct ethercom		*ec = &adapter->osdep.ec;
   3909 	struct ether_multi	*enm;
   3910 	struct ether_multistep	step;
   3911 
   3912 	KASSERT(mutex_owned(&adapter->core_mtx));
   3913 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   3914 
   3915 	mta = adapter->mta;
   3916 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   3917 
   3918 	ifp->if_flags &= ~IFF_ALLMULTI;
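         	/*
         	 * Collect the multicast list; fall back to ALLMULTI if a
         	 * multicast range is configured or the list would overflow
         	 * the hardware filter table.
         	 */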
   3919 	ETHER_LOCK(ec);
   3920 	ETHER_FIRST_MULTI(step, ec, enm);
   3921 	while (enm != NULL) {
   3922 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   3923 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   3924 			ETHER_ADDR_LEN) != 0)) {
   3925 			ifp->if_flags |= IFF_ALLMULTI;
   3926 			break;
   3927 		}
   3928 		bcopy(enm->enm_addrlo,
   3929 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   3930 		mta[mcnt].vmdq = adapter->pool;
   3931 		mcnt++;
   3932 		ETHER_NEXT_MULTI(step, enm);
   3933 	}
   3934 	ETHER_UNLOCK(ec);
   3935 
   3936 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   3937 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3938 	if (ifp->if_flags & IFF_PROMISC)
   3939 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   3940 	else if (ifp->if_flags & IFF_ALLMULTI) {
   3941 		fctrl |= IXGBE_FCTRL_MPE;
   3942 	}
   3943 
   3944 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   3945 
   3946 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   3947 		update_ptr = (u8 *)mta;
   3948 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   3949 		    ixgbe_mc_array_itr, TRUE);
   3950 	}
   3951 
   3952 	return;
   3953 } /* ixgbe_set_multi */
   3954 
   3955 /************************************************************************
   3956  * ixgbe_mc_array_itr
   3957  *
   3958  *   An iterator function needed by the multicast shared code.
   3959  *   It feeds the shared code routine the addresses in the
   3960  *   array of ixgbe_set_multi() one by one.
   3961  ************************************************************************/
   3962 static u8 *
   3963 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   3964 {
   3965 	struct ixgbe_mc_addr *mta;
   3966 
   3967 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   3968 	*vmdq = mta->vmdq;
   3969 
   3970 	*update_ptr = (u8*)(mta + 1);
   3971 
   3972 	return (mta->addr);
   3973 } /* ixgbe_mc_array_itr */
   3974 
   3975 /************************************************************************
   3976  * ixgbe_local_timer - Timer routine
   3977  *
   3978  *   Checks for link status, updates statistics,
   3979  *   and runs the watchdog check.
   3980  ************************************************************************/
   3981 static void
   3982 ixgbe_local_timer(void *arg)
   3983 {
   3984 	struct adapter *adapter = arg;
   3985 
   3986 	IXGBE_CORE_LOCK(adapter);
   3987 	ixgbe_local_timer1(adapter);
   3988 	IXGBE_CORE_UNLOCK(adapter);
   3989 }
   3990 
   3991 static void
   3992 ixgbe_local_timer1(void *arg)
   3993 {
   3994 	struct adapter	*adapter = arg;
   3995 	device_t	dev = adapter->dev;
   3996 	struct ix_queue *que = adapter->queues;
   3997 	u64		queues = 0;
   3998 	int		hung = 0;
   3999 
   4000 	KASSERT(mutex_owned(&adapter->core_mtx));
   4001 
   4002 	/* Check for pluggable optics */
   4003 	if (adapter->sfp_probe)
   4004 		if (!ixgbe_sfp_probe(adapter))
   4005 			goto out; /* Nothing to do */
   4006 
   4007 	ixgbe_update_link_status(adapter);
   4008 	ixgbe_update_stats_counters(adapter);
   4009 
   4010 	/*
   4011 	 * Check the TX queues status
   4012 	 *      - mark hung queues so we don't schedule on them
   4013 	 *      - watchdog only if all queues show hung
   4014 	 */
   4015 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4016 		/* Keep track of queues with work for soft irq */
   4017 		if (que->txr->busy)
   4018 			queues |= ((u64)1 << que->me);
    4019 		/*
    4020 		 * Each time txeof runs without cleaning anything while
    4021 		 * uncleaned descriptors remain, it increments busy; once
    4022 		 * busy reaches the MAX we declare the queue hung.
    4023 		 */
   4024 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4025 			++hung;
   4026 			/* Mark the queue as inactive */
   4027 			adapter->active_queues &= ~((u64)1 << que->me);
   4028 			continue;
   4029 		} else {
   4030 			/* Check if we've come back from hung */
   4031 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4032 				adapter->active_queues |= ((u64)1 << que->me);
   4033 		}
   4034 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4035 			device_printf(dev,
   4036 			    "Warning queue %d appears to be hung!\n", i);
   4037 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4038 			++hung;
   4039 		}
   4040 	}
   4041 
    4042 	/* Only trigger the watchdog if all queues show hung */
   4043 	if (hung == adapter->num_queues)
   4044 		goto watchdog;
   4045 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4046 		ixgbe_rearm_queues(adapter, queues);
   4047 	}
   4048 
   4049 out:
   4050 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4051 	return;
   4052 
   4053 watchdog:
   4054 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4055 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4056 	adapter->watchdog_events.ev_count++;
   4057 	ixgbe_init_locked(adapter);
   4058 } /* ixgbe_local_timer */
   4059 
   4060 /************************************************************************
   4061  * ixgbe_sfp_probe
   4062  *
   4063  *   Determine if a port had optics inserted.
   4064  ************************************************************************/
   4065 static bool
   4066 ixgbe_sfp_probe(struct adapter *adapter)
   4067 {
   4068 	struct ixgbe_hw	*hw = &adapter->hw;
   4069 	device_t	dev = adapter->dev;
   4070 	bool		result = FALSE;
   4071 
   4072 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4073 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4074 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4075 		if (ret)
   4076 			goto out;
   4077 		ret = hw->phy.ops.reset(hw);
   4078 		adapter->sfp_probe = FALSE;
   4079 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4080 			device_printf(dev,"Unsupported SFP+ module detected!");
   4081 			device_printf(dev,
   4082 			    "Reload driver with supported module.\n");
    4083 			goto out;
   4084 		} else
   4085 			device_printf(dev, "SFP+ module detected!\n");
   4086 		/* We now have supported optics */
   4087 		result = TRUE;
   4088 	}
   4089 out:
   4090 
   4091 	return (result);
   4092 } /* ixgbe_sfp_probe */
   4093 
   4094 /************************************************************************
   4095  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4096  ************************************************************************/
   4097 static void
   4098 ixgbe_handle_mod(void *context)
   4099 {
   4100 	struct adapter  *adapter = context;
   4101 	struct ixgbe_hw *hw = &adapter->hw;
   4102 	device_t	dev = adapter->dev;
   4103 	u32             err, cage_full = 0;
   4104 
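         	/*
         	 * On adapters needing the SFP-cage crosstalk workaround, first
         	 * check the cage-present pin in ESDP and ignore the event if
         	 * no module is actually installed.
         	 */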
   4105 	if (adapter->hw.need_crosstalk_fix) {
   4106 		switch (hw->mac.type) {
   4107 		case ixgbe_mac_82599EB:
   4108 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4109 			    IXGBE_ESDP_SDP2;
   4110 			break;
   4111 		case ixgbe_mac_X550EM_x:
   4112 		case ixgbe_mac_X550EM_a:
   4113 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4114 			    IXGBE_ESDP_SDP0;
   4115 			break;
   4116 		default:
   4117 			break;
   4118 		}
   4119 
   4120 		if (!cage_full)
   4121 			return;
   4122 	}
   4123 
   4124 	err = hw->phy.ops.identify_sfp(hw);
   4125 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4126 		device_printf(dev,
   4127 		    "Unsupported SFP+ module type was detected.\n");
   4128 		return;
   4129 	}
   4130 
   4131 	err = hw->mac.ops.setup_sfp(hw);
   4132 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4133 		device_printf(dev,
   4134 		    "Setup failure - unsupported SFP+ module type.\n");
   4135 		return;
   4136 	}
   4137 	softint_schedule(adapter->msf_si);
   4138 } /* ixgbe_handle_mod */
   4139 
   4140 
   4141 /************************************************************************
   4142  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4143  ************************************************************************/
   4144 static void
   4145 ixgbe_handle_msf(void *context)
   4146 {
   4147 	struct adapter  *adapter = context;
   4148 	struct ixgbe_hw *hw = &adapter->hw;
   4149 	u32             autoneg;
   4150 	bool            negotiate;
   4151 
    4152 	/* get_supported_physical_layer will call hw->phy.ops.identify_sfp() */
   4153 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4154 
   4155 	autoneg = hw->phy.autoneg_advertised;
   4156 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4157 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4158 	else
   4159 		negotiate = 0;
   4160 	if (hw->mac.ops.setup_link)
   4161 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4162 
   4163 	/* Adjust media types shown in ifconfig */
   4164 	ifmedia_removeall(&adapter->media);
   4165 	ixgbe_add_media_types(adapter);
   4166 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4167 } /* ixgbe_handle_msf */
   4168 
   4169 /************************************************************************
   4170  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4171  ************************************************************************/
   4172 static void
   4173 ixgbe_handle_phy(void *context)
   4174 {
   4175 	struct adapter  *adapter = context;
   4176 	struct ixgbe_hw *hw = &adapter->hw;
   4177 	int error;
   4178 
   4179 	error = hw->phy.ops.handle_lasi(hw);
   4180 	if (error == IXGBE_ERR_OVERTEMP)
   4181 		device_printf(adapter->dev,
   4182 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4183 		    " PHY will downshift to lower power state!\n");
   4184 	else if (error)
   4185 		device_printf(adapter->dev,
   4186 		    "Error handling LASI interrupt: %d\n", error);
   4187 } /* ixgbe_handle_phy */
   4188 
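         /************************************************************************
          * ixgbe_ifstop - ifnet stop wrapper; takes the core lock and
          *   calls ixgbe_stop()
          ************************************************************************/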
   4189 static void
   4190 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4191 {
   4192 	struct adapter *adapter = ifp->if_softc;
   4193 
   4194 	IXGBE_CORE_LOCK(adapter);
   4195 	ixgbe_stop(adapter);
   4196 	IXGBE_CORE_UNLOCK(adapter);
   4197 }
   4198 
   4199 /************************************************************************
   4200  * ixgbe_stop - Stop the hardware
   4201  *
   4202  *   Disables all traffic on the adapter by issuing a
   4203  *   global reset on the MAC and deallocates TX/RX buffers.
   4204  ************************************************************************/
   4205 static void
   4206 ixgbe_stop(void *arg)
   4207 {
   4208 	struct ifnet    *ifp;
   4209 	struct adapter  *adapter = arg;
   4210 	struct ixgbe_hw *hw = &adapter->hw;
   4211 
   4212 	ifp = adapter->ifp;
   4213 
   4214 	KASSERT(mutex_owned(&adapter->core_mtx));
   4215 
   4216 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4217 	ixgbe_disable_intr(adapter);
   4218 	callout_stop(&adapter->timer);
   4219 
   4220 	/* Let the stack know...*/
   4221 	ifp->if_flags &= ~IFF_RUNNING;
   4222 
   4223 	ixgbe_reset_hw(hw);
   4224 	hw->adapter_stopped = FALSE;
   4225 	ixgbe_stop_adapter(hw);
   4226 	if (hw->mac.type == ixgbe_mac_82599EB)
   4227 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4228 	/* Turn off the laser - noop with no optics */
   4229 	ixgbe_disable_tx_laser(hw);
   4230 
   4231 	/* Update the stack */
   4232 	adapter->link_up = FALSE;
   4233 	ixgbe_update_link_status(adapter);
   4234 
   4235 	/* reprogram the RAR[0] in case user changed it. */
   4236 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4237 
   4238 	return;
   4239 } /* ixgbe_stop */
   4240 
   4241 /************************************************************************
   4242  * ixgbe_update_link_status - Update OS on link state
   4243  *
   4244  * Note: Only updates the OS on the cached link state.
   4245  *       The real check of the hardware only happens with
   4246  *       a link interrupt.
   4247  ************************************************************************/
   4248 static void
   4249 ixgbe_update_link_status(struct adapter *adapter)
   4250 {
   4251 	struct ifnet	*ifp = adapter->ifp;
   4252 	device_t        dev = adapter->dev;
   4253 	struct ixgbe_hw *hw = &adapter->hw;
   4254 
   4255 	if (adapter->link_up) {
   4256 		if (adapter->link_active == FALSE) {
   4257 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
    4258 				/*
    4259 				 * Discard the accumulated MAC Local Fault and
    4260 				 * Remote Fault counts: those registers are
    4261 				 * only valid while the link is up and running
    4262 				 * at 10Gbps.
    4263 				 */
   4264 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4265 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4266 			}
   4267 
   4268 			if (bootverbose) {
   4269 				const char *bpsmsg;
   4270 
   4271 				switch (adapter->link_speed) {
   4272 				case IXGBE_LINK_SPEED_10GB_FULL:
   4273 					bpsmsg = "10 Gbps";
   4274 					break;
   4275 				case IXGBE_LINK_SPEED_5GB_FULL:
   4276 					bpsmsg = "5 Gbps";
   4277 					break;
   4278 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4279 					bpsmsg = "2.5 Gbps";
   4280 					break;
   4281 				case IXGBE_LINK_SPEED_1GB_FULL:
   4282 					bpsmsg = "1 Gbps";
   4283 					break;
   4284 				case IXGBE_LINK_SPEED_100_FULL:
   4285 					bpsmsg = "100 Mbps";
   4286 					break;
   4287 				case IXGBE_LINK_SPEED_10_FULL:
   4288 					bpsmsg = "10 Mbps";
   4289 					break;
   4290 				default:
   4291 					bpsmsg = "unknown speed";
   4292 					break;
   4293 				}
   4294 				device_printf(dev, "Link is up %s %s \n",
   4295 				    bpsmsg, "Full Duplex");
   4296 			}
   4297 			adapter->link_active = TRUE;
   4298 			/* Update any Flow Control changes */
   4299 			ixgbe_fc_enable(&adapter->hw);
   4300 			/* Update DMA coalescing config */
   4301 			ixgbe_config_dmac(adapter);
   4302 			if_link_state_change(ifp, LINK_STATE_UP);
   4303 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4304 				ixgbe_ping_all_vfs(adapter);
   4305 		}
   4306 	} else { /* Link down */
   4307 		if (adapter->link_active == TRUE) {
   4308 			if (bootverbose)
   4309 				device_printf(dev, "Link is Down\n");
   4310 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4311 			adapter->link_active = FALSE;
   4312 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4313 				ixgbe_ping_all_vfs(adapter);
   4314 		}
   4315 	}
   4316 
   4317 	return;
   4318 } /* ixgbe_update_link_status */
   4319 
   4320 /************************************************************************
   4321  * ixgbe_config_dmac - Configure DMA Coalescing
   4322  ************************************************************************/
   4323 static void
   4324 ixgbe_config_dmac(struct adapter *adapter)
   4325 {
   4326 	struct ixgbe_hw *hw = &adapter->hw;
   4327 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4328 
   4329 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4330 		return;
   4331 
    4332 	if (dcfg->watchdog_timer != adapter->dmac ||
    4333 	    dcfg->link_speed != adapter->link_speed) {
   4334 		dcfg->watchdog_timer = adapter->dmac;
   4335 		dcfg->fcoe_en = false;
   4336 		dcfg->link_speed = adapter->link_speed;
   4337 		dcfg->num_tcs = 1;
   4338 
   4339 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4340 		    dcfg->watchdog_timer, dcfg->link_speed);
   4341 
   4342 		hw->mac.ops.dmac_config(hw);
   4343 	}
   4344 } /* ixgbe_config_dmac */
   4345 
   4346 /************************************************************************
   4347  * ixgbe_enable_intr
   4348  ************************************************************************/
   4349 static void
   4350 ixgbe_enable_intr(struct adapter *adapter)
   4351 {
   4352 	struct ixgbe_hw	*hw = &adapter->hw;
   4353 	struct ix_queue	*que = adapter->queues;
   4354 	u32		mask, fwsm;
   4355 
   4356 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4357 
   4358 	switch (adapter->hw.mac.type) {
   4359 	case ixgbe_mac_82599EB:
   4360 		mask |= IXGBE_EIMS_ECC;
   4361 		/* Temperature sensor on some adapters */
   4362 		mask |= IXGBE_EIMS_GPI_SDP0;
   4363 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4364 		mask |= IXGBE_EIMS_GPI_SDP1;
   4365 		mask |= IXGBE_EIMS_GPI_SDP2;
   4366 		break;
   4367 	case ixgbe_mac_X540:
   4368 		/* Detect if Thermal Sensor is enabled */
   4369 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4370 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4371 			mask |= IXGBE_EIMS_TS;
   4372 		mask |= IXGBE_EIMS_ECC;
   4373 		break;
   4374 	case ixgbe_mac_X550:
   4375 		/* MAC thermal sensor is automatically enabled */
   4376 		mask |= IXGBE_EIMS_TS;
   4377 		mask |= IXGBE_EIMS_ECC;
   4378 		break;
   4379 	case ixgbe_mac_X550EM_x:
   4380 	case ixgbe_mac_X550EM_a:
   4381 		/* Some devices use SDP0 for important information */
   4382 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4383 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4384 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4385 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4386 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4387 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4388 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4389 		mask |= IXGBE_EIMS_ECC;
   4390 		break;
   4391 	default:
   4392 		break;
   4393 	}
   4394 
   4395 	/* Enable Fan Failure detection */
   4396 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4397 		mask |= IXGBE_EIMS_GPI_SDP1;
   4398 	/* Enable SR-IOV */
   4399 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4400 		mask |= IXGBE_EIMS_MAILBOX;
   4401 	/* Enable Flow Director */
   4402 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4403 		mask |= IXGBE_EIMS_FLOW_DIR;
   4404 
   4405 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4406 
   4407 	/* With MSI-X we use auto clear */
   4408 	if (adapter->msix_mem) {
   4409 		mask = IXGBE_EIMS_ENABLE_MASK;
   4410 		/* Don't autoclear Link */
   4411 		mask &= ~IXGBE_EIMS_OTHER;
   4412 		mask &= ~IXGBE_EIMS_LSC;
   4413 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4414 			mask &= ~IXGBE_EIMS_MAILBOX;
   4415 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4416 	}
   4417 
   4418 	/*
   4419 	 * Now enable all queues, this is done separately to
   4420 	 * allow for handling the extended (beyond 32) MSI-X
   4421 	 * vectors that can be used by 82599
   4422 	 */
    4423 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4424 		ixgbe_enable_queue(adapter, que->msix);
   4425 
   4426 	IXGBE_WRITE_FLUSH(hw);
   4427 
   4428 	return;
   4429 } /* ixgbe_enable_intr */
   4430 
   4431 /************************************************************************
   4432  * ixgbe_disable_intr
   4433  ************************************************************************/
   4434 static void
   4435 ixgbe_disable_intr(struct adapter *adapter)
   4436 {
   4437 	if (adapter->msix_mem)
   4438 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4439 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4440 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4441 	} else {
   4442 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4443 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4444 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4445 	}
   4446 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4447 
   4448 	return;
   4449 } /* ixgbe_disable_intr */
   4450 
   4451 /************************************************************************
   4452  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4453  ************************************************************************/
   4454 static int
   4455 ixgbe_legacy_irq(void *arg)
   4456 {
   4457 	struct ix_queue *que = arg;
   4458 	struct adapter	*adapter = que->adapter;
   4459 	struct ixgbe_hw	*hw = &adapter->hw;
   4460 	struct ifnet    *ifp = adapter->ifp;
    4461 	struct tx_ring	*txr = adapter->tx_rings;
   4462 	bool		more = false;
   4463 	u32             eicr, eicr_mask;
   4464 
   4465 	/* Silicon errata #26 on 82598 */
   4466 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4467 
   4468 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4469 
   4470 	adapter->stats.pf.legint.ev_count++;
   4471 	++que->irqs.ev_count;
   4472 	if (eicr == 0) {
   4473 		adapter->stats.pf.intzero.ev_count++;
   4474 		if ((ifp->if_flags & IFF_UP) != 0)
   4475 			ixgbe_enable_intr(adapter);
   4476 		return 0;
   4477 	}
   4478 
   4479 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4480 #ifdef __NetBSD__
   4481 		/* Don't run ixgbe_rxeof in interrupt context */
   4482 		more = true;
   4483 #else
   4484 		more = ixgbe_rxeof(que);
   4485 #endif
   4486 
   4487 		IXGBE_TX_LOCK(txr);
   4488 		ixgbe_txeof(txr);
   4489 #ifdef notyet
   4490 		if (!ixgbe_ring_empty(ifp, txr->br))
   4491 			ixgbe_start_locked(ifp, txr);
   4492 #endif
   4493 		IXGBE_TX_UNLOCK(txr);
   4494 	}
   4495 
   4496 	/* Check for fan failure */
   4497 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4498 		ixgbe_check_fan_failure(adapter, eicr, true);
   4499 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4500 	}
   4501 
   4502 	/* Link status change */
   4503 	if (eicr & IXGBE_EICR_LSC)
   4504 		softint_schedule(adapter->link_si);
   4505 
   4506 	if (ixgbe_is_sfp(hw)) {
   4507 		/* Pluggable optics-related interrupt */
   4508 		if (hw->mac.type >= ixgbe_mac_X540)
   4509 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4510 		else
   4511 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4512 
   4513 		if (eicr & eicr_mask) {
   4514 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4515 			softint_schedule(adapter->mod_si);
   4516 		}
   4517 
   4518 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4519 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4520 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4521 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4522 			softint_schedule(adapter->msf_si);
   4523 		}
   4524 	}
   4525 
   4526 	/* External PHY interrupt */
   4527 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4528 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4529 		softint_schedule(adapter->phy_si);
   4530 
   4531 	if (more)
   4532 		softint_schedule(que->que_si);
   4533 	else
   4534 		ixgbe_enable_intr(adapter);
   4535 
   4536 	return 1;
   4537 } /* ixgbe_legacy_irq */
   4538 
   4539 /************************************************************************
   4540  * ixgbe_free_pci_resources
   4541  ************************************************************************/
   4542 static void
   4543 ixgbe_free_pci_resources(struct adapter *adapter)
   4544 {
   4545 	struct ix_queue *que = adapter->queues;
   4546 	int		rid;
   4547 
   4548 	/*
   4549 	 * Release all msix queue resources:
   4550 	 */
   4551 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4552 		if (que->res != NULL)
   4553 			pci_intr_disestablish(adapter->osdep.pc,
   4554 			    adapter->osdep.ihs[i]);
   4555 	}
   4556 
   4557 	/* Clean the Legacy or Link interrupt last */
   4558 	if (adapter->vector) /* we are doing MSIX */
   4559 		rid = adapter->vector;
   4560 	else
   4561 		rid = 0;
   4562 
   4563 	if (adapter->osdep.ihs[rid] != NULL) {
   4564 		pci_intr_disestablish(adapter->osdep.pc,
   4565 		    adapter->osdep.ihs[rid]);
   4566 		adapter->osdep.ihs[rid] = NULL;
   4567 	}
   4568 
   4569 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4570 	    adapter->osdep.nintrs);
   4571 
   4572 	if (adapter->osdep.mem_size != 0) {
   4573 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4574 		    adapter->osdep.mem_bus_space_handle,
   4575 		    adapter->osdep.mem_size);
   4576 	}
   4577 
   4578 	return;
   4579 } /* ixgbe_free_pci_resources */
   4580 
   4581 /************************************************************************
   4582  * ixgbe_set_sysctl_value
   4583  ************************************************************************/
   4584 static void
   4585 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4586     const char *description, int *limit, int value)
   4587 {
   4588 	device_t dev =  adapter->dev;
   4589 	struct sysctllog **log;
   4590 	const struct sysctlnode *rnode, *cnode;
   4591 
   4592 	log = &adapter->sysctllog;
   4593 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4594 		aprint_error_dev(dev, "could not create sysctl root\n");
   4595 		return;
   4596 	}
   4597 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4598 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4599 	    name, SYSCTL_DESCR(description),
   4600 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4601 		aprint_error_dev(dev, "could not create sysctl\n");
   4602 	*limit = value;
   4603 } /* ixgbe_set_sysctl_value */
   4604 
   4605 /************************************************************************
   4606  * ixgbe_sysctl_flowcntl
   4607  *
   4608  *   SYSCTL wrapper around setting Flow Control
   4609  ************************************************************************/
   4610 static int
   4611 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4612 {
   4613 	struct sysctlnode node = *rnode;
   4614 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4615 	int error, fc;
   4616 
   4617 	fc = adapter->hw.fc.current_mode;
   4618 	node.sysctl_data = &fc;
   4619 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4620 	if (error != 0 || newp == NULL)
   4621 		return error;
   4622 
   4623 	/* Don't bother if it's not changed */
   4624 	if (fc == adapter->hw.fc.current_mode)
   4625 		return (0);
   4626 
   4627 	return ixgbe_set_flowcntl(adapter, fc);
   4628 } /* ixgbe_sysctl_flowcntl */
   4629 
   4630 /************************************************************************
   4631  * ixgbe_set_flowcntl - Set flow control
   4632  *
   4633  *   Flow control values:
   4634  *     0 - off
   4635  *     1 - rx pause
   4636  *     2 - tx pause
   4637  *     3 - full
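          *
          *   e.g. writing 3 requests full (rx + tx pause) flow control;
          *   the values follow the ixgbe_fc_* enum checked below.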
   4638  ************************************************************************/
   4639 static int
   4640 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4641 {
   4642 	switch (fc) {
   4643 		case ixgbe_fc_rx_pause:
   4644 		case ixgbe_fc_tx_pause:
   4645 		case ixgbe_fc_full:
   4646 			adapter->hw.fc.requested_mode = fc;
   4647 			if (adapter->num_queues > 1)
   4648 				ixgbe_disable_rx_drop(adapter);
   4649 			break;
   4650 		case ixgbe_fc_none:
   4651 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4652 			if (adapter->num_queues > 1)
   4653 				ixgbe_enable_rx_drop(adapter);
   4654 			break;
   4655 		default:
   4656 			return (EINVAL);
   4657 	}
   4658 
   4659 #if 0 /* XXX NetBSD */
   4660 	/* Don't autoneg if forcing a value */
   4661 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4662 #endif
   4663 	ixgbe_fc_enable(&adapter->hw);
   4664 
   4665 	return (0);
   4666 } /* ixgbe_set_flowcntl */
   4667 
   4668 /************************************************************************
   4669  * ixgbe_enable_rx_drop
   4670  *
   4671  *   Enable the hardware to drop packets when the buffer is
   4672  *   full. This is useful with multiqueue, so that no single
   4673  *   queue being full stalls the entire RX engine. We only
   4674  *   enable this when Multiqueue is enabled AND Flow Control
   4675  *   is disabled.
   4676  ************************************************************************/
   4677 static void
   4678 ixgbe_enable_rx_drop(struct adapter *adapter)
   4679 {
   4680 	struct ixgbe_hw *hw = &adapter->hw;
   4681 	struct rx_ring  *rxr;
   4682 	u32             srrctl;
   4683 
   4684 	for (int i = 0; i < adapter->num_queues; i++) {
   4685 		rxr = &adapter->rx_rings[i];
   4686 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4687 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4688 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4689 	}
   4690 
   4691 	/* enable drop for each vf */
   4692 	for (int i = 0; i < adapter->num_vfs; i++) {
   4693 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4694 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4695 		    IXGBE_QDE_ENABLE));
   4696 	}
   4697 } /* ixgbe_enable_rx_drop */
   4698 
   4699 /************************************************************************
   4700  * ixgbe_disable_rx_drop
   4701  ************************************************************************/
   4702 static void
   4703 ixgbe_disable_rx_drop(struct adapter *adapter)
   4704 {
   4705 	struct ixgbe_hw *hw = &adapter->hw;
   4706 	struct rx_ring  *rxr;
   4707 	u32             srrctl;
   4708 
   4709 	for (int i = 0; i < adapter->num_queues; i++) {
   4710 		rxr = &adapter->rx_rings[i];
    4711 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4712 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4713 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4714 	}
   4715 
   4716 	/* disable drop for each vf */
   4717 	for (int i = 0; i < adapter->num_vfs; i++) {
   4718 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4719 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4720 	}
   4721 } /* ixgbe_disable_rx_drop */
   4722 
   4723 /************************************************************************
   4724  * ixgbe_sysctl_advertise
   4725  *
   4726  *   SYSCTL wrapper around setting advertised speed
   4727  ************************************************************************/
   4728 static int
   4729 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4730 {
   4731 	struct sysctlnode node = *rnode;
   4732 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4733 	int            error = 0, advertise;
   4734 
   4735 	advertise = adapter->advertise;
   4736 	node.sysctl_data = &advertise;
   4737 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4738 	if (error != 0 || newp == NULL)
   4739 		return error;
   4740 
   4741 	return ixgbe_set_advertise(adapter, advertise);
   4742 } /* ixgbe_sysctl_advertise */
   4743 
   4744 /************************************************************************
   4745  * ixgbe_set_advertise - Control advertised link speed
   4746  *
   4747  *   Flags:
   4748  *     0x00 - Default (all capable link speed)
   4749  *     0x01 - advertise 100 Mb
   4750  *     0x02 - advertise 1G
   4751  *     0x04 - advertise 10G
   4752  *     0x08 - advertise 10 Mb
   4753  *     0x10 - advertise 2.5G
   4754  *     0x20 - advertise 5G
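          *
          *     Flags may be OR'd together; e.g. 0x06 (0x02 | 0x04)
          *     advertises both 1G and 10G.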
   4755  ************************************************************************/
   4756 static int
   4757 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4758 {
   4759 	device_t         dev;
   4760 	struct ixgbe_hw  *hw;
   4761 	ixgbe_link_speed speed = 0;
   4762 	ixgbe_link_speed link_caps = 0;
   4763 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4764 	bool             negotiate = FALSE;
   4765 
   4766 	/* Checks to validate new value */
   4767 	if (adapter->advertise == advertise) /* no change */
   4768 		return (0);
   4769 
   4770 	dev = adapter->dev;
   4771 	hw = &adapter->hw;
   4772 
   4773 	/* No speed changes for backplane media */
   4774 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4775 		return (ENODEV);
   4776 
   4777 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4778 	    (hw->phy.multispeed_fiber))) {
   4779 		device_printf(dev,
   4780 		    "Advertised speed can only be set on copper or "
   4781 		    "multispeed fiber media types.\n");
   4782 		return (EINVAL);
   4783 	}
   4784 
    4785 	if (advertise < 0x0 || advertise > 0x3F) {
    4786 		device_printf(dev,
    4787 		    "Invalid advertised speed; valid flags are 0x0 through 0x3F\n");
   4788 		return (EINVAL);
   4789 	}
   4790 
   4791 	if (hw->mac.ops.get_link_capabilities) {
   4792 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4793 		    &negotiate);
   4794 		if (err != IXGBE_SUCCESS) {
   4795 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   4796 			return (ENODEV);
   4797 		}
   4798 	}
   4799 
   4800 	/* Set new value and report new advertised mode */
   4801 	if (advertise & 0x1) {
   4802 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4803 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4804 			return (EINVAL);
   4805 		}
   4806 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4807 	}
   4808 	if (advertise & 0x2) {
   4809 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4810 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4811 			return (EINVAL);
   4812 		}
   4813 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4814 	}
   4815 	if (advertise & 0x4) {
   4816 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4817 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4818 			return (EINVAL);
   4819 		}
   4820 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4821 	}
   4822 	if (advertise & 0x8) {
   4823 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4824 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4825 			return (EINVAL);
   4826 		}
   4827 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4828 	}
   4829 	if (advertise & 0x10) {
   4830 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4831 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4832 			return (EINVAL);
   4833 		}
   4834 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4835 	}
   4836 	if (advertise & 0x20) {
   4837 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4838 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4839 			return (EINVAL);
   4840 		}
   4841 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4842 	}
   4843 	if (advertise == 0)
   4844 		speed = link_caps; /* All capable link speed */
   4845 
   4846 	hw->mac.autotry_restart = TRUE;
   4847 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4848 	adapter->advertise = advertise;
   4849 
   4850 	return (0);
   4851 } /* ixgbe_set_advertise */
   4852 
   4853 /************************************************************************
   4854  * ixgbe_get_advertise - Get current advertised speed settings
   4855  *
   4856  *   Formatted for sysctl usage.
   4857  *   Flags:
   4858  *     0x01 - advertise 100 Mb
   4859  *     0x02 - advertise 1G
   4860  *     0x04 - advertise 10G
   4861  *     0x08 - advertise 10 Mb (yes, Mb)
   4862  *     0x10 - advertise 2.5G
   4863  *     0x20 - advertise 5G
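          *
          *     The result is the OR of the flags above for each speed
          *     the link reports as supported; e.g. 0x07 for a copper
          *     PHY capable of 100Mb, 1G and 10G.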
   4864  ************************************************************************/
   4865 static int
   4866 ixgbe_get_advertise(struct adapter *adapter)
   4867 {
   4868 	struct ixgbe_hw  *hw = &adapter->hw;
   4869 	int              speed;
   4870 	ixgbe_link_speed link_caps = 0;
   4871 	s32              err;
   4872 	bool             negotiate = FALSE;
   4873 
   4874 	/*
   4875 	 * Advertised speed means nothing unless it's copper or
   4876 	 * multi-speed fiber
   4877 	 */
   4878 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4879 	    !(hw->phy.multispeed_fiber))
   4880 		return (0);
   4881 
   4882 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4883 	if (err != IXGBE_SUCCESS)
   4884 		return (0);
   4885 
   4886 	speed =
   4887 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   4888 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   4889 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   4890 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   4891 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   4892 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   4893 
   4894 	return speed;
   4895 } /* ixgbe_get_advertise */
   4896 
   4897 /************************************************************************
   4898  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   4899  *
   4900  *   Control values:
   4901  *     0/1 - off / on (use default value of 1000)
   4902  *
   4903  *     Legal timer values are:
   4904  *     50,100,250,500,1000,2000,5000,10000
   4905  *
   4906  *     Turning off interrupt moderation will also turn this off.
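          *
          *     e.g. writing 1 enables coalescing with the default
          *     timer value of 1000, while writing 250 selects that
          *     timer value explicitly.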
   4907  ************************************************************************/
   4908 static int
   4909 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   4910 {
   4911 	struct sysctlnode node = *rnode;
   4912 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4913 	struct ifnet   *ifp = adapter->ifp;
   4914 	int            error;
   4915 	int            newval;
   4916 
   4917 	newval = adapter->dmac;
   4918 	node.sysctl_data = &newval;
   4919 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4920 	if ((error) || (newp == NULL))
   4921 		return (error);
   4922 
   4923 	switch (newval) {
   4924 	case 0:
   4925 		/* Disabled */
   4926 		adapter->dmac = 0;
   4927 		break;
   4928 	case 1:
   4929 		/* Enable and use default */
   4930 		adapter->dmac = 1000;
   4931 		break;
   4932 	case 50:
   4933 	case 100:
   4934 	case 250:
   4935 	case 500:
   4936 	case 1000:
   4937 	case 2000:
   4938 	case 5000:
   4939 	case 10000:
   4940 		/* Legal values - allow */
   4941 		adapter->dmac = newval;
   4942 		break;
   4943 	default:
   4944 		/* Do nothing, illegal value */
   4945 		return (EINVAL);
   4946 	}
   4947 
   4948 	/* Re-initialize hardware if it's already running */
   4949 	if (ifp->if_flags & IFF_RUNNING)
   4950 		ixgbe_init(ifp);
   4951 
   4952 	return (0);
   4953 }
   4954 
   4955 #ifdef IXGBE_DEBUG
   4956 /************************************************************************
   4957  * ixgbe_sysctl_power_state
   4958  *
   4959  *   Sysctl to test power states
   4960  *   Values:
   4961  *     0      - set device to D0
   4962  *     3      - set device to D3
   4963  *     (none) - get current device power state
   4964  ************************************************************************/
   4965 static int
   4966 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   4967 {
   4968 #ifdef notyet
   4969 	struct sysctlnode node = *rnode;
   4970 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4971 	device_t       dev =  adapter->dev;
   4972 	int            curr_ps, new_ps, error = 0;
   4973 
   4974 	curr_ps = new_ps = pci_get_powerstate(dev);
   4975 
   4976 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4977 	if ((error) || (req->newp == NULL))
   4978 		return (error);
   4979 
   4980 	if (new_ps == curr_ps)
   4981 		return (0);
   4982 
   4983 	if (new_ps == 3 && curr_ps == 0)
   4984 		error = DEVICE_SUSPEND(dev);
   4985 	else if (new_ps == 0 && curr_ps == 3)
   4986 		error = DEVICE_RESUME(dev);
   4987 	else
   4988 		return (EINVAL);
   4989 
   4990 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   4991 
   4992 	return (error);
   4993 #else
   4994 	return 0;
   4995 #endif
   4996 } /* ixgbe_sysctl_power_state */
   4997 #endif
   4998 
   4999 /************************************************************************
   5000  * ixgbe_sysctl_wol_enable
   5001  *
   5002  *   Sysctl to enable/disable the WoL capability,
   5003  *   if supported by the adapter.
   5004  *
   5005  *   Values:
   5006  *     0 - disabled
   5007  *     1 - enabled
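          *
          *   Enabling is only accepted when WoL support was detected at
          *   attach time (adapter->wol_support); otherwise ENODEV is
          *   returned.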
   5008  ************************************************************************/
   5009 static int
   5010 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5011 {
   5012 	struct sysctlnode node = *rnode;
   5013 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5014 	struct ixgbe_hw *hw = &adapter->hw;
   5015 	bool            new_wol_enabled;
   5016 	int             error = 0;
   5017 
   5018 	new_wol_enabled = hw->wol_enabled;
   5019 	node.sysctl_data = &new_wol_enabled;
   5020 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5021 	if ((error) || (newp == NULL))
   5022 		return (error);
   5023 	if (new_wol_enabled == hw->wol_enabled)
   5024 		return (0);
   5025 
   5026 	if (new_wol_enabled && !adapter->wol_support)
   5027 		return (ENODEV);
   5028 	else
   5029 		hw->wol_enabled = new_wol_enabled;
   5030 
   5031 	return (0);
   5032 } /* ixgbe_sysctl_wol_enable */
   5033 
   5034 /************************************************************************
   5035  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5036  *
   5037  *   Sysctl to enable/disable the types of packets that the
   5038  *   adapter will wake up on upon receipt.
   5039  *   Flags:
   5040  *     0x1  - Link Status Change
   5041  *     0x2  - Magic Packet
   5042  *     0x4  - Direct Exact
   5043  *     0x8  - Directed Multicast
   5044  *     0x10 - Broadcast
   5045  *     0x20 - ARP/IPv4 Request Packet
   5046  *     0x40 - Direct IPv4 Packet
   5047  *     0x80 - Direct IPv6 Packet
   5048  *
   5049  *   Settings not listed above will cause the sysctl to return an error.
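          *
          *   Flags may be OR'd together; e.g. 0x3 wakes the adapter on
          *   either a Link Status Change or a Magic Packet.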
   5050  ************************************************************************/
   5051 static int
   5052 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5053 {
   5054 	struct sysctlnode node = *rnode;
   5055 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5056 	int error = 0;
   5057 	u32 new_wufc;
   5058 
   5059 	new_wufc = adapter->wufc;
   5060 	node.sysctl_data = &new_wufc;
   5061 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5062 	if ((error) || (newp == NULL))
   5063 		return (error);
   5064 	if (new_wufc == adapter->wufc)
   5065 		return (0);
   5066 
   5067 	if (new_wufc & 0xffffff00)
   5068 		return (EINVAL);
   5069 
   5070 	new_wufc &= 0xff;
    5071 	new_wufc |= (0xffffff00 & adapter->wufc);
   5072 	adapter->wufc = new_wufc;
   5073 
   5074 	return (0);
   5075 } /* ixgbe_sysctl_wufc */
   5076 
   5077 #ifdef IXGBE_DEBUG
   5078 /************************************************************************
   5079  * ixgbe_sysctl_print_rss_config
   5080  ************************************************************************/
   5081 static int
   5082 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5083 {
   5084 #ifdef notyet
   5085 	struct sysctlnode node = *rnode;
   5086 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5087 	struct ixgbe_hw *hw = &adapter->hw;
   5088 	device_t        dev = adapter->dev;
   5089 	struct sbuf     *buf;
   5090 	int             error = 0, reta_size;
   5091 	u32             reg;
   5092 
   5093 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5094 	if (!buf) {
   5095 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5096 		return (ENOMEM);
   5097 	}
   5098 
   5099 	// TODO: use sbufs to make a string to print out
   5100 	/* Set multiplier for RETA setup and table size based on MAC */
   5101 	switch (adapter->hw.mac.type) {
   5102 	case ixgbe_mac_X550:
   5103 	case ixgbe_mac_X550EM_x:
   5104 	case ixgbe_mac_X550EM_a:
   5105 		reta_size = 128;
   5106 		break;
   5107 	default:
   5108 		reta_size = 32;
   5109 		break;
   5110 	}
   5111 
   5112 	/* Print out the redirection table */
   5113 	sbuf_cat(buf, "\n");
   5114 	for (int i = 0; i < reta_size; i++) {
   5115 		if (i < 32) {
   5116 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5117 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5118 		} else {
   5119 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5120 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5121 		}
   5122 	}
   5123 
   5124 	// TODO: print more config
   5125 
   5126 	error = sbuf_finish(buf);
   5127 	if (error)
   5128 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5129 
   5130 	sbuf_delete(buf);
   5131 #endif
   5132 	return (0);
   5133 } /* ixgbe_sysctl_print_rss_config */
   5134 #endif /* IXGBE_DEBUG */
   5135 
   5136 /************************************************************************
   5137  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5138  *
   5139  *   For X552/X557-AT devices using an external PHY
   5140  ************************************************************************/
   5141 static int
   5142 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5143 {
   5144 	struct sysctlnode node = *rnode;
   5145 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5146 	struct ixgbe_hw *hw = &adapter->hw;
   5147 	int val;
   5148 	u16 reg;
   5149 	int		error;
   5150 
   5151 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5152 		device_printf(adapter->dev,
   5153 		    "Device has no supported external thermal sensor.\n");
   5154 		return (ENODEV);
   5155 	}
   5156 
   5157 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5158 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5159 		device_printf(adapter->dev,
   5160 		    "Error reading from PHY's current temperature register\n");
   5161 		return (EAGAIN);
   5162 	}
   5163 
   5164 	node.sysctl_data = &val;
   5165 
   5166 	/* Shift temp for output */
   5167 	val = reg >> 8;
   5168 
   5169 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5170 	if ((error) || (newp == NULL))
   5171 		return (error);
   5172 
   5173 	return (0);
   5174 } /* ixgbe_sysctl_phy_temp */
   5175 
   5176 /************************************************************************
   5177  * ixgbe_sysctl_phy_overtemp_occurred
   5178  *
   5179  *   Reports (directly from the PHY) whether the current PHY
   5180  *   temperature is over the overtemp threshold.
   5181  ************************************************************************/
   5182 static int
   5183 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5184 {
   5185 	struct sysctlnode node = *rnode;
   5186 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5187 	struct ixgbe_hw *hw = &adapter->hw;
   5188 	int val, error;
   5189 	u16 reg;
   5190 
   5191 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5192 		device_printf(adapter->dev,
   5193 		    "Device has no supported external thermal sensor.\n");
   5194 		return (ENODEV);
   5195 	}
   5196 
   5197 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5198 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5199 		device_printf(adapter->dev,
   5200 		    "Error reading from PHY's temperature status register\n");
   5201 		return (EAGAIN);
   5202 	}
   5203 
   5204 	node.sysctl_data = &val;
   5205 
   5206 	/* Get occurrence bit */
   5207 	val = !!(reg & 0x4000);
   5208 
   5209 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5210 	if ((error) || (newp == NULL))
   5211 		return (error);
   5212 
   5213 	return (0);
   5214 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5215 
   5216 /************************************************************************
   5217  * ixgbe_sysctl_eee_state
   5218  *
   5219  *   Sysctl to set EEE power saving feature
   5220  *   Values:
   5221  *     0      - disable EEE
   5222  *     1      - enable EEE
   5223  *     (none) - get current device EEE state
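          *
          *     Changing the value re-runs ixgbe_init() so that
          *     auto-negotiation restarts with the new EEE setting.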
   5224  ************************************************************************/
   5225 static int
   5226 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5227 {
   5228 	struct sysctlnode node = *rnode;
   5229 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5230 	struct ifnet   *ifp = adapter->ifp;
   5231 	device_t       dev = adapter->dev;
   5232 	int            curr_eee, new_eee, error = 0;
   5233 	s32            retval;
   5234 
   5235 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5236 	node.sysctl_data = &new_eee;
   5237 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5238 	if ((error) || (newp == NULL))
   5239 		return (error);
   5240 
   5241 	/* Nothing to do */
   5242 	if (new_eee == curr_eee)
   5243 		return (0);
   5244 
   5245 	/* Not supported */
   5246 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5247 		return (EINVAL);
   5248 
   5249 	/* Bounds checking */
   5250 	if ((new_eee < 0) || (new_eee > 1))
   5251 		return (EINVAL);
   5252 
   5253 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5254 	if (retval) {
   5255 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5256 		return (EINVAL);
   5257 	}
   5258 
   5259 	/* Restart auto-neg */
   5260 	ixgbe_init(ifp);
   5261 
   5262 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5263 
   5264 	/* Cache new value */
   5265 	if (new_eee)
   5266 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5267 	else
   5268 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5269 
   5270 	return (error);
   5271 } /* ixgbe_sysctl_eee_state */
   5272 
   5273 /************************************************************************
   5274  * ixgbe_init_device_features
   5275  ************************************************************************/
   5276 static void
   5277 ixgbe_init_device_features(struct adapter *adapter)
   5278 {
   5279 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5280 	                  | IXGBE_FEATURE_RSS
   5281 	                  | IXGBE_FEATURE_MSI
   5282 	                  | IXGBE_FEATURE_MSIX
   5283 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5284 	                  | IXGBE_FEATURE_LEGACY_TX;
   5285 
   5286 	/* Set capabilities first... */
   5287 	switch (adapter->hw.mac.type) {
   5288 	case ixgbe_mac_82598EB:
   5289 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5290 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5291 		break;
   5292 	case ixgbe_mac_X540:
   5293 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5294 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5295 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5296 		    (adapter->hw.bus.func == 0))
   5297 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5298 		break;
   5299 	case ixgbe_mac_X550:
   5300 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5301 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5302 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5303 		break;
   5304 	case ixgbe_mac_X550EM_x:
   5305 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5306 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5307 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5308 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5309 		break;
   5310 	case ixgbe_mac_X550EM_a:
   5311 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5312 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5313 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5314 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5315 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5316 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5317 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5318 		}
   5319 		break;
   5320 	case ixgbe_mac_82599EB:
   5321 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5322 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5323 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5324 		    (adapter->hw.bus.func == 0))
   5325 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5326 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5327 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5328 		break;
   5329 	default:
   5330 		break;
   5331 	}
   5332 
   5333 	/* Enabled by default... */
   5334 	/* Fan failure detection */
   5335 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5336 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5337 	/* Netmap */
   5338 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5339 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5340 	/* EEE */
   5341 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5342 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5343 	/* Thermal Sensor */
   5344 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5345 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5346 
   5347 	/* Enabled via global sysctl... */
   5348 	/* Flow Director */
   5349 	if (ixgbe_enable_fdir) {
   5350 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5351 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5352 		else
    5353 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5354 	}
   5355 	/* Legacy (single queue) transmit */
   5356 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5357 	    ixgbe_enable_legacy_tx)
   5358 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5359 	/*
   5360 	 * Message Signal Interrupts - Extended (MSI-X)
   5361 	 * Normal MSI is only enabled if MSI-X calls fail.
   5362 	 */
   5363 	if (!ixgbe_enable_msix)
   5364 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5365 	/* Receive-Side Scaling (RSS) */
   5366 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5367 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5368 
   5369 	/* Disable features with unmet dependencies... */
   5370 	/* No MSI-X */
   5371 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5372 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5373 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5374 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5375 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5376 	}
   5377 } /* ixgbe_init_device_features */
   5378 
   5379 /************************************************************************
   5380  * ixgbe_probe - Device identification routine
   5381  *
    5382  *   Determines if the driver should be loaded on the
    5383  *   adapter based on its PCI vendor/device ID.
    5384  *
    5385  *   return 1 on a match, 0 otherwise
   5386  ************************************************************************/
   5387 static int
   5388 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5389 {
   5390 	const struct pci_attach_args *pa = aux;
   5391 
   5392 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5393 }
   5394 
   5395 static ixgbe_vendor_info_t *
   5396 ixgbe_lookup(const struct pci_attach_args *pa)
   5397 {
   5398 	ixgbe_vendor_info_t *ent;
   5399 	pcireg_t subid;
   5400 
   5401 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5402 
   5403 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5404 		return NULL;
   5405 
   5406 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5407 
   5408 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5409 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5410 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5411 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5412 			(ent->subvendor_id == 0)) &&
   5413 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5414 			(ent->subdevice_id == 0))) {
   5415 			++ixgbe_total_ports;
   5416 			return ent;
   5417 		}
   5418 	}
   5419 	return NULL;
   5420 }
   5421 
   5422 static int
   5423 ixgbe_ifflags_cb(struct ethercom *ec)
   5424 {
   5425 	struct ifnet *ifp = &ec->ec_if;
   5426 	struct adapter *adapter = ifp->if_softc;
   5427 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5428 
   5429 	IXGBE_CORE_LOCK(adapter);
   5430 
   5431 	if (change != 0)
   5432 		adapter->if_flags = ifp->if_flags;
   5433 
   5434 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5435 		rc = ENETRESET;
   5436 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5437 		ixgbe_set_promisc(adapter);
   5438 
   5439 	/* Set up VLAN support and filter */
   5440 	ixgbe_setup_vlan_hw_support(adapter);
   5441 
   5442 	IXGBE_CORE_UNLOCK(adapter);
   5443 
   5444 	return rc;
   5445 }
   5446 
   5447 /************************************************************************
   5448  * ixgbe_ioctl - Ioctl entry point
   5449  *
   5450  *   Called when the user wants to configure the interface.
   5451  *
   5452  *   return 0 on success, positive on failure
   5453  ************************************************************************/
   5454 static int
   5455 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5456 {
   5457 	struct adapter	*adapter = ifp->if_softc;
   5458 	struct ixgbe_hw *hw = &adapter->hw;
   5459 	struct ifcapreq *ifcr = data;
   5460 	struct ifreq	*ifr = data;
   5461 	int             error = 0;
   5462 	int l4csum_en;
   5463 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5464 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5465 
   5466 	switch (command) {
   5467 	case SIOCSIFFLAGS:
   5468 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5469 		break;
   5470 	case SIOCADDMULTI:
   5471 	case SIOCDELMULTI:
   5472 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5473 		break;
   5474 	case SIOCSIFMEDIA:
   5475 	case SIOCGIFMEDIA:
   5476 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5477 		break;
   5478 	case SIOCSIFCAP:
   5479 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5480 		break;
   5481 	case SIOCSIFMTU:
   5482 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5483 		break;
   5484 #ifdef __NetBSD__
   5485 	case SIOCINITIFADDR:
   5486 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5487 		break;
   5488 	case SIOCGIFFLAGS:
   5489 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5490 		break;
   5491 	case SIOCGIFAFLAG_IN:
   5492 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5493 		break;
   5494 	case SIOCGIFADDR:
   5495 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5496 		break;
   5497 	case SIOCGIFMTU:
   5498 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5499 		break;
   5500 	case SIOCGIFCAP:
   5501 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5502 		break;
   5503 	case SIOCGETHERCAP:
   5504 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5505 		break;
   5506 	case SIOCGLIFADDR:
   5507 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5508 		break;
   5509 	case SIOCZIFDATA:
   5510 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5511 		hw->mac.ops.clear_hw_cntrs(hw);
   5512 		ixgbe_clear_evcnt(adapter);
   5513 		break;
   5514 	case SIOCAIFADDR:
   5515 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5516 		break;
   5517 #endif
   5518 	default:
   5519 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5520 		break;
   5521 	}
   5522 
   5523 	switch (command) {
   5524 	case SIOCSIFMEDIA:
   5525 	case SIOCGIFMEDIA:
   5526 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5527 	case SIOCGI2C:
   5528 	{
   5529 		struct ixgbe_i2c_req	i2c;
   5530 
   5531 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5532 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5533 		if (error != 0)
   5534 			break;
   5535 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5536 			error = EINVAL;
   5537 			break;
   5538 		}
   5539 		if (i2c.len > sizeof(i2c.data)) {
   5540 			error = EINVAL;
   5541 			break;
   5542 		}
   5543 
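         		/* XXX Only the first byte is read, regardless of i2c.len */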
   5544 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5545 		    i2c.dev_addr, i2c.data);
   5546 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5547 		break;
   5548 	}
   5549 	case SIOCSIFCAP:
   5550 		/* Layer-4 Rx checksum offload has to be turned on and
   5551 		 * off as a unit.
   5552 		 */
   5553 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5554 		if (l4csum_en != l4csum && l4csum_en != 0)
   5555 			return EINVAL;
   5556 		/*FALLTHROUGH*/
   5557 	case SIOCADDMULTI:
   5558 	case SIOCDELMULTI:
   5559 	case SIOCSIFFLAGS:
   5560 	case SIOCSIFMTU:
   5561 	default:
   5562 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5563 			return error;
   5564 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5565 			;
   5566 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5567 			IXGBE_CORE_LOCK(adapter);
   5568 			ixgbe_init_locked(adapter);
   5569 			ixgbe_recalculate_max_frame(adapter);
   5570 			IXGBE_CORE_UNLOCK(adapter);
   5571 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5572 			/*
   5573 			 * Multicast list has changed; set the hardware filter
   5574 			 * accordingly.
   5575 			 */
   5576 			IXGBE_CORE_LOCK(adapter);
   5577 			ixgbe_disable_intr(adapter);
   5578 			ixgbe_set_multi(adapter);
   5579 			ixgbe_enable_intr(adapter);
   5580 			IXGBE_CORE_UNLOCK(adapter);
   5581 		}
   5582 		return 0;
   5583 	}
   5584 
   5585 	return error;
   5586 } /* ixgbe_ioctl */
   5587 
   5588 /************************************************************************
   5589  * ixgbe_check_fan_failure
   5590  ************************************************************************/
   5591 static void
   5592 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5593 {
   5594 	u32 mask;
   5595 
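         	/*
         	 * 'reg' holds the EICR contents when called from the
         	 * interrupt path and is expected to hold the ESDP register
         	 * contents otherwise; pick the matching SDP1 bit.
         	 */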
   5596 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5597 	    IXGBE_ESDP_SDP1;
   5598 
   5599 	if (reg & mask)
   5600 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5601 } /* ixgbe_check_fan_failure */
   5602 
   5603 /************************************************************************
   5604  * ixgbe_handle_que
   5605  ************************************************************************/
   5606 static void
   5607 ixgbe_handle_que(void *context)
   5608 {
   5609 	struct ix_queue *que = context;
   5610 	struct adapter  *adapter = que->adapter;
   5611 	struct tx_ring  *txr = que->txr;
   5612 	struct ifnet    *ifp = adapter->ifp;
   5613 
   5614 	adapter->handleq.ev_count++;
   5615 
   5616 	if (ifp->if_flags & IFF_RUNNING) {
   5617 		ixgbe_rxeof(que);
   5618 		IXGBE_TX_LOCK(txr);
   5619 		ixgbe_txeof(txr);
   5620 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5621 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5622 				ixgbe_mq_start_locked(ifp, txr);
   5623 		/* Only for queue 0 */
   5624 		/* NetBSD still needs this for CBQ */
   5625 		if ((&adapter->queues[0] == que)
   5626 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5627 			ixgbe_legacy_start_locked(ifp, txr);
   5628 		IXGBE_TX_UNLOCK(txr);
   5629 	}
   5630 
   5631 	/* Re-enable this interrupt */
   5632 	if (que->res != NULL)
   5633 		ixgbe_enable_queue(adapter, que->msix);
   5634 	else
   5635 		ixgbe_enable_intr(adapter);
   5636 
   5637 	return;
   5638 } /* ixgbe_handle_que */
   5639 
   5640 /************************************************************************
   5641  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5642  ************************************************************************/
   5643 static int
   5644 ixgbe_allocate_legacy(struct adapter *adapter,
   5645     const struct pci_attach_args *pa)
   5646 {
   5647 	device_t	dev = adapter->dev;
   5648 	struct ix_queue *que = adapter->queues;
   5649 	struct tx_ring  *txr = adapter->tx_rings;
   5650 	int		counts[PCI_INTR_TYPE_SIZE];
   5651 	pci_intr_type_t intr_type, max_type;
   5652 	char            intrbuf[PCI_INTRSTR_LEN];
   5653 	const char	*intrstr = NULL;
   5654 
   5655 	/* We allocate a single interrupt resource */
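         	/*
         	 * MSI is tried first when enabled; if establishing the MSI
         	 * handler fails it is released and we retry with INTx via
         	 * alloc_retry.  MSI-X is never used on this path.
         	 */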
   5656 	max_type = PCI_INTR_TYPE_MSI;
   5657 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5658 	counts[PCI_INTR_TYPE_MSI] =
   5659 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5660 	counts[PCI_INTR_TYPE_INTX] =
   5661 	    (adapter->feat_en & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5662 
   5663 alloc_retry:
   5664 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5665 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5666 		return ENXIO;
   5667 	}
   5668 	adapter->osdep.nintrs = 1;
   5669 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5670 	    intrbuf, sizeof(intrbuf));
   5671 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5672 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5673 	    device_xname(dev));
   5674 	if (adapter->osdep.ihs[0] == NULL) {
   5675 		intr_type = pci_intr_type(adapter->osdep.pc,
   5676 		    adapter->osdep.intrs[0]);
   5677 		aprint_error_dev(dev,"unable to establish %s\n",
   5678 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5679 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5680 		switch (intr_type) {
   5681 		case PCI_INTR_TYPE_MSI:
   5682 			/* The next try is for INTx: Disable MSI */
   5683 			max_type = PCI_INTR_TYPE_INTX;
   5684 			counts[PCI_INTR_TYPE_INTX] = 1;
   5685 			goto alloc_retry;
   5686 		case PCI_INTR_TYPE_INTX:
   5687 		default:
   5688 			/* See below */
   5689 			break;
   5690 		}
   5691 	}
   5692 	if (adapter->osdep.ihs[0] == NULL) {
   5693 		aprint_error_dev(dev,
   5694 		    "couldn't establish interrupt%s%s\n",
   5695 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5696 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5697 		return ENXIO;
   5698 	}
   5699 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5700 	/*
   5701 	 * Try allocating a fast interrupt and the associated deferred
   5702 	 * processing contexts.
   5703 	 */
   5704 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5705 		txr->txr_si =
   5706 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5707 			ixgbe_deferred_mq_start, txr);
   5708 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5709 	    ixgbe_handle_que, que);
   5710 
   5711 	/* Tasklets for Link, SFP and Multispeed Fiber */
   5712 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   5713 	    ixgbe_handle_link, adapter);
   5714 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5715 	    ixgbe_handle_mod, adapter);
   5716 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5717 	    ixgbe_handle_msf, adapter);
   5718 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5719 	    ixgbe_handle_phy, adapter);
   5720 
   5721 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5722 		adapter->fdir_si =
   5723 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5724 			ixgbe_reinit_fdir, adapter);
   5725 
    5726 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) &&
    5727 		(txr->txr_si == NULL)) ||
    5728 	    que->que_si == NULL ||
    5729 	    adapter->link_si == NULL ||
    5730 	    adapter->mod_si == NULL ||
    5731 	    ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
    5732 		(adapter->fdir_si == NULL)) ||
   5733 	    adapter->msf_si == NULL) {
   5734 		aprint_error_dev(dev,
   5735 		    "could not establish software interrupts\n");
   5736 
   5737 		return ENXIO;
   5738 	}
   5739 	/* For simplicity in the handlers */
   5740 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5741 
   5742 	return (0);
   5743 } /* ixgbe_allocate_legacy */
   5744 
   5745 
   5746 /************************************************************************
   5747  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5748  ************************************************************************/
   5749 static int
   5750 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5751 {
   5752 	device_t        dev = adapter->dev;
    5753 	struct ix_queue	*que = adapter->queues;
    5754 	struct tx_ring	*txr = adapter->tx_rings;
   5755 	pci_chipset_tag_t pc;
   5756 	char		intrbuf[PCI_INTRSTR_LEN];
   5757 	char		intr_xname[32];
   5758 	const char	*intrstr = NULL;
   5759 	int 		error, vector = 0;
   5760 	int		cpu_id = 0;
   5761 	kcpuset_t	*affinity;
   5762 #ifdef RSS
   5763 	unsigned int    rss_buckets = 0;
   5764 	kcpuset_t	cpu_mask;
   5765 #endif
   5766 
   5767 	pc = adapter->osdep.pc;
   5768 #ifdef	RSS
   5769 	/*
   5770 	 * If we're doing RSS, the number of queues needs to
   5771 	 * match the number of RSS buckets that are configured.
   5772 	 *
   5773 	 * + If there's more queues than RSS buckets, we'll end
   5774 	 *   up with queues that get no traffic.
   5775 	 *
   5776 	 * + If there's more RSS buckets than queues, we'll end
   5777 	 *   up having multiple RSS buckets map to the same queue,
   5778 	 *   so there'll be some contention.
   5779 	 */
   5780 	rss_buckets = rss_getnumbuckets();
   5781 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5782 	    (adapter->num_queues != rss_buckets)) {
   5783 		device_printf(dev,
   5784 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5785 		    "; performance will be impacted.\n",
   5786 		    __func__, adapter->num_queues, rss_buckets);
   5787 	}
   5788 #endif
   5789 
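         	/* One vector per queue plus one for link/admin events */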
   5790 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5791 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5792 	    adapter->osdep.nintrs) != 0) {
   5793 		aprint_error_dev(dev,
   5794 		    "failed to allocate MSI-X interrupt\n");
   5795 		return (ENXIO);
   5796 	}
   5797 
   5798 	kcpuset_create(&affinity, false);
   5799 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5800 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5801 		    device_xname(dev), i);
   5802 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5803 		    sizeof(intrbuf));
   5804 #ifdef IXGBE_MPSAFE
   5805 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5806 		    true);
   5807 #endif
   5808 		/* Set the handler function */
   5809 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5810 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5811 		    intr_xname);
   5812 		if (que->res == NULL) {
   5813 			pci_intr_release(pc, adapter->osdep.intrs,
   5814 			    adapter->osdep.nintrs);
   5815 			aprint_error_dev(dev,
   5816 			    "Failed to register QUE handler\n");
   5817 			kcpuset_destroy(affinity);
   5818 			return ENXIO;
   5819 		}
   5820 		que->msix = vector;
   5821 		adapter->active_queues |= (u64)(1 << que->msix);
   5822 
   5823 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5824 #ifdef	RSS
   5825 			/*
   5826 			 * The queue ID is used as the RSS layer bucket ID.
   5827 			 * We look up the queue ID -> RSS CPU ID and select
   5828 			 * that.
   5829 			 */
   5830 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5831 			CPU_SETOF(cpu_id, &cpu_mask);
   5832 #endif
   5833 		} else {
   5834 			/*
   5835 			 * Bind the MSI-X vector, and thus the
   5836 			 * rings to the corresponding CPU.
   5837 			 *
   5838 			 * This just happens to match the default RSS
   5839 			 * round-robin bucket -> queue -> CPU allocation.
   5840 			 */
   5841 			if (adapter->num_queues > 1)
   5842 				cpu_id = i;
   5843 		}
   5844 		/* Round-robin affinity */
   5845 		kcpuset_zero(affinity);
   5846 		kcpuset_set(affinity, cpu_id % ncpu);
   5847 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5848 		    NULL);
   5849 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5850 		    intrstr);
   5851 		if (error == 0) {
   5852 #if 1 /* def IXGBE_DEBUG */
   5853 #ifdef	RSS
   5854 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   5855 			    cpu_id % ncpu);
   5856 #else
   5857 			aprint_normal(", bound queue %d to cpu %d", i,
   5858 			    cpu_id % ncpu);
   5859 #endif
   5860 #endif /* IXGBE_DEBUG */
   5861 		}
   5862 		aprint_normal("\n");
   5863 
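        		/*
        		 * Defer the bulk of TX/RX processing to software
        		 * interrupts so the MSI-X handler itself stays short.
        		 */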
   5864 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5865 			txr->txr_si = softint_establish(
   5866 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5867 				ixgbe_deferred_mq_start, txr);
   5868 		que->que_si
   5869 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5870 			ixgbe_handle_que, que);
   5871 		if (que->que_si == NULL) {
   5872 			aprint_error_dev(dev,
   5873 			    "could not establish software interrupt\n");
   5874 		}
   5875 	}
   5876 
   5877 	/* and Link */
   5878 	cpu_id++;
   5879 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5880 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5881 	    sizeof(intrbuf));
   5882 #ifdef IXGBE_MPSAFE
   5883 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5884 	    true);
   5885 #endif
   5886 	/* Set the link handler function */
   5887 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5888 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   5889 	    intr_xname);
   5890 	if (adapter->osdep.ihs[vector] == NULL) {
   5891 		adapter->res = NULL;
   5892 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   5893 		kcpuset_destroy(affinity);
   5894 		return (ENXIO);
   5895 	}
   5896 	/* Round-robin affinity */
   5897 	kcpuset_zero(affinity);
   5898 	kcpuset_set(affinity, cpu_id % ncpu);
   5899 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
        	    NULL);
   5900 
   5901 	aprint_normal_dev(dev,
   5902 	    "for link, interrupting at %s", intrstr);
   5903 	if (error == 0)
   5904 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   5905 	else
   5906 		aprint_normal("\n");
   5907 
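        	/* Remember which vector is used for link interrupts */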
   5908 	adapter->vector = vector;
   5909 	/* Softints for link, SFP, MSF, mailbox, PHY and FDIR events */
   5910 	adapter->link_si = softint_establish(
   5911 	    SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, ixgbe_handle_link, adapter);
   5912 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5913 	    ixgbe_handle_mod, adapter);
   5914 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5915 	    ixgbe_handle_msf, adapter);
   5916 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   5917 		adapter->mbx_si =
   5918 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5919 			ixgbe_handle_mbx, adapter);
   5920 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5921 		ixgbe_handle_phy, adapter);
   5922 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   5923 		adapter->fdir_si =
   5924 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5925 			ixgbe_reinit_fdir, adapter);
   5926 
   5927 	kcpuset_destroy(affinity);
   5928 
   5929 	return (0);
   5930 } /* ixgbe_allocate_msix */
   5931 
   5932 /************************************************************************
   5933  * ixgbe_configure_interrupts
   5934  *
   5935  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   5936  *   This will also depend on user settings.
   5937  ************************************************************************/
   5938 static int
   5939 ixgbe_configure_interrupts(struct adapter *adapter)
   5940 {
   5941 	device_t dev = adapter->dev;
   5942 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   5943 	int want, queues, msgs;
   5944 
   5945 	/* Default to 1 queue if MSI-X setup fails */
   5946 	adapter->num_queues = 1;
   5947 
   5948 	/* Override by tuneable */
   5949 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   5950 		goto msi;
   5951 
   5952 	/* First try MSI-X */
   5953 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   5954 	msgs = MIN(msgs, IXG_MAX_NINTR);
   5955 	if (msgs < 2)
   5956 		goto msi;
   5957 
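        	/*
        	 * XXX On FreeBSD, msix_mem holds the mapped MSI-X table BAR;
        	 * here it appears to serve only as a flag that MSI-X is in
        	 * use, hence the dummy non-NULL value.
        	 */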
   5958 	adapter->msix_mem = (void *)1; /* XXX */
   5959 
   5960 	/* Default: one queue per CPU, keeping one vector free for link */
   5961 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   5962 
   5963 #ifdef	RSS
   5964 	/* If we're doing RSS, clamp at the number of RSS buckets */
   5965 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   5966 		queues = min(queues, rss_getnumbuckets());
   5967 #endif
   5968 	if (ixgbe_num_queues > queues) {
   5969 		aprint_error_dev(adapter->dev,
        		    "ixgbe_num_queues (%d) is too large, "
        		    "using reduced amount (%d).\n", ixgbe_num_queues, queues);
   5970 		ixgbe_num_queues = queues;
   5971 	}
   5972 
   5973 	if (ixgbe_num_queues != 0)
   5974 		queues = ixgbe_num_queues;
   5975 	else
   5976 		/* When autoconfiguring, clamp at the hardware queue limit */
   5977 		queues = min(queues,
   5978 		    min(mac->max_tx_queues, mac->max_rx_queues));
   5979 
   5980 	/* reflect correct sysctl value */
   5981 	ixgbe_num_queues = queues;
   5982 
   5983 	/*
   5984 	 * Want one vector (RX/TX pair) per queue
   5985 	 * plus an additional for Link.
   5986 	 */
   5987 	want = queues + 1;
   5988 	if (msgs >= want)
   5989 		msgs = want;
   5990 	else {
   5991 		aprint_error_dev(dev, "MSI-X configuration problem: "
   5992 		    "%d vectors available but %d wanted!\n",
   5993 		    msgs, want);
   5994 		goto msi;
   5995 	}
   5996 	device_printf(dev,
   5997 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   5998 	adapter->num_queues = queues;
   5999 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6000 	return (0);
   6001 
   6002 	/*
   6003 	 * MSI-X allocation failed or provided us with
   6004 	 * fewer vectors than needed, so fall back and
   6005 	 * try enabling MSI instead.
   6006 	 */
   6007 msi:
   6008 	/* Without MSI-X, some features are no longer supported */
   6009 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6010 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6011 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6012 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6013 
   6014 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6015 	adapter->msix_mem = NULL; /* XXX */
   6018 	if (msgs != 0) {
   6019 		msgs = 1;
   6020 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6021 		aprint_normal_dev(dev, "Using an MSI interrupt\n");
   6022 		return (0);
   6023 	}
   6024 
   6025 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6026 		aprint_error_dev(dev,
   6027 		    "Device does not support legacy interrupts.\n");
   6028 		return 1;
   6029 	}
   6030 
   6031 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6032 	aprint_normal_dev(dev, "Using a Legacy interrupt\n");
   6033 
   6034 	return (0);
   6035 } /* ixgbe_configure_interrupts */
   6036 
   6037 
   6038 /************************************************************************
   6039  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6040  *
   6041  *   Done outside of interrupt context since the driver might sleep
   6042  ************************************************************************/
   6043 static void
   6044 ixgbe_handle_link(void *context)
   6045 {
   6046 	struct adapter  *adapter = context;
   6047 	struct ixgbe_hw *hw = &adapter->hw;
   6048 
   6049 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6050 	ixgbe_update_link_status(adapter);
   6051 
   6052 	/* Re-enable link interrupts */
   6053 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6054 } /* ixgbe_handle_link */
   6055 
   6056 /************************************************************************
   6057  * ixgbe_rearm_queues
   6058  ************************************************************************/
   6059 static void
   6060 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6061 {
   6062 	u32 mask;
   6063 
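        	/*
        	 * Setting a queue's bit in EICS makes that queue's interrupt
        	 * fire again.  The 82598 only has the 32-bit EICS register;
        	 * newer MACs split the 64-bit queue mask across EICS_EX(0)
        	 * (queues 0-31) and EICS_EX(1) (queues 32-63).
        	 */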
   6064 	switch (adapter->hw.mac.type) {
   6065 	case ixgbe_mac_82598EB:
   6066 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6067 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6068 		break;
   6069 	case ixgbe_mac_82599EB:
   6070 	case ixgbe_mac_X540:
   6071 	case ixgbe_mac_X550:
   6072 	case ixgbe_mac_X550EM_x:
   6073 	case ixgbe_mac_X550EM_a:
   6074 		mask = (queues & 0xFFFFFFFF);
   6075 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6076 		mask = (queues >> 32);
   6077 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6078 		break;
   6079 	default:
   6080 		break;
   6081 	}
   6082 } /* ixgbe_rearm_queues */
   6083