      1 /* $NetBSD: ixgbe.c,v 1.124 2018/02/20 07:24:37 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
     88  *   Used by probe to select devices to load on
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
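/*
 * Illustrative sketch only (not compiled into the driver): how a probe
 * routine typically walks the table above, stopping at the all-zero
 * terminator.  The real lookup is done by ixgbe_lookup(), declared later
 * in this file; the function below is a simplified, hypothetical stand-in.
 */
#if 0
static ixgbe_vendor_info_t *
ixgbe_lookup_sketch(pcireg_t id)
{
	ixgbe_vendor_info_t *ent;

	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++)
		if (PCI_VENDOR(id) == ent->vendor_id &&
		    PCI_PRODUCT(id) == ent->device_id)
			return ent;

	return NULL;	/* not a device this driver attaches to */
}
#endif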
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void      ixgbe_free_softint(struct adapter *);
    176 static void	ixgbe_get_slot_info(struct adapter *);
    177 static int      ixgbe_allocate_msix(struct adapter *,
    178 		    const struct pci_attach_args *);
    179 static int      ixgbe_allocate_legacy(struct adapter *,
    180 		    const struct pci_attach_args *);
    181 static int      ixgbe_configure_interrupts(struct adapter *);
    182 static void	ixgbe_free_pciintr_resources(struct adapter *);
    183 static void	ixgbe_free_pci_resources(struct adapter *);
    184 static void	ixgbe_local_timer(void *);
    185 static void	ixgbe_local_timer1(void *);
    186 static int	ixgbe_setup_interface(device_t, struct adapter *);
    187 static void	ixgbe_config_gpie(struct adapter *);
    188 static void	ixgbe_config_dmac(struct adapter *);
    189 static void	ixgbe_config_delay_values(struct adapter *);
    190 static void	ixgbe_config_link(struct adapter *);
    191 static void	ixgbe_check_wol_support(struct adapter *);
    192 static int	ixgbe_setup_low_power_mode(struct adapter *);
    193 static void	ixgbe_rearm_queues(struct adapter *, u64);
    194 
    195 static void     ixgbe_initialize_transmit_units(struct adapter *);
    196 static void     ixgbe_initialize_receive_units(struct adapter *);
    197 static void	ixgbe_enable_rx_drop(struct adapter *);
    198 static void	ixgbe_disable_rx_drop(struct adapter *);
    199 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    200 
    201 static void     ixgbe_enable_intr(struct adapter *);
    202 static void     ixgbe_disable_intr(struct adapter *);
    203 static void     ixgbe_update_stats_counters(struct adapter *);
    204 static void     ixgbe_set_promisc(struct adapter *);
    205 static void     ixgbe_set_multi(struct adapter *);
    206 static void     ixgbe_update_link_status(struct adapter *);
    207 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    208 static void	ixgbe_configure_ivars(struct adapter *);
    209 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    210 static void	ixgbe_eitr_write(struct ix_queue *, uint32_t);
    211 
    212 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    213 #if 0
    214 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    215 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    216 #endif
    217 
    218 static void	ixgbe_add_device_sysctls(struct adapter *);
    219 static void     ixgbe_add_hw_stats(struct adapter *);
    220 static void	ixgbe_clear_evcnt(struct adapter *);
    221 static int	ixgbe_set_flowcntl(struct adapter *, int);
    222 static int	ixgbe_set_advertise(struct adapter *, int);
    223 static int      ixgbe_get_advertise(struct adapter *);
    224 
    225 /* Sysctl handlers */
    226 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    227 		     const char *, int *, int);
    228 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    230 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    231 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    232 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    234 #ifdef IXGBE_DEBUG
    235 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    236 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    237 #endif
    238 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    240 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    241 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    242 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    243 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    244 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    245 
    246 /* Support for pluggable optic modules */
    247 static bool	ixgbe_sfp_probe(struct adapter *);
    248 
    249 /* Legacy (single vector) interrupt handler */
    250 static int	ixgbe_legacy_irq(void *);
    251 
    252 /* The MSI/MSI-X Interrupt handlers */
    253 static int	ixgbe_msix_que(void *);
    254 static int	ixgbe_msix_link(void *);
    255 
    256 /* Software interrupts for deferred work */
    257 static void	ixgbe_handle_que(void *);
    258 static void	ixgbe_handle_link(void *);
    259 static void	ixgbe_handle_msf(void *);
    260 static void	ixgbe_handle_mod(void *);
    261 static void	ixgbe_handle_phy(void *);
    262 
    263 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    264 
    265 /************************************************************************
    266  *  NetBSD Device Interface Entry Points
    267  ************************************************************************/
    268 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    269     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    270     DVF_DETACH_SHUTDOWN);
    271 
    272 #if 0
    273 devclass_t ix_devclass;
    274 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    275 
    276 MODULE_DEPEND(ix, pci, 1, 1, 1);
    277 MODULE_DEPEND(ix, ether, 1, 1, 1);
    278 #ifdef DEV_NETMAP
    279 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    280 #endif
    281 #endif
    282 
    283 /*
    284  * TUNEABLE PARAMETERS:
    285  */
    286 
     287 /*
     288  * AIM: Adaptive Interrupt Moderation,
     289  * which means that the interrupt rate
     290  * is varied over time based on the
     291  * traffic seen on that interrupt vector.
     292  */
    293 static bool ixgbe_enable_aim = true;
    294 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
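/*
 * Note: on NetBSD this empty macro compiles out the FreeBSD-style
 * SYSCTL_INT() declarations below, so they serve as documentation only.
 * NetBSD sysctl nodes are created at attach time via
 * ixgbe_add_device_sysctls() and ixgbe_set_sysctl_value().
 */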
    295 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    296     "Enable adaptive interrupt moderation");
    297 
    298 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    299 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    300     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
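/*
 * Example: assuming IXGBE_LOW_LATENCY is 128 (its usual definition in
 * ixgbe.h), the default above works out to 4000000 / 128 = 31250
 * interrupts per second per vector.
 */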
    301 
    302 /* How many packets rxeof tries to clean at a time */
    303 static int ixgbe_rx_process_limit = 256;
    304 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    305     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    306 
    307 /* How many packets txeof tries to clean at a time */
    308 static int ixgbe_tx_process_limit = 256;
    309 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    310     &ixgbe_tx_process_limit, 0,
    311     "Maximum number of sent packets to process at a time, -1 means unlimited");
    312 
    313 /* Flow control setting, default to full */
    314 static int ixgbe_flow_control = ixgbe_fc_full;
    315 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    316     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    317 
     318 /*
     319  * Smart speed setting, default to on.
     320  * This only works as a compile option
     321  * right now as it is applied during attach;
     322  * set this to 'ixgbe_smart_speed_off' to
     323  * disable.
     324  */
    325 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    326 
    327 /*
    328  * MSI-X should be the default for best performance,
    329  * but this allows it to be forced off for testing.
    330  */
    331 static int ixgbe_enable_msix = 1;
    332 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    333     "Enable MSI-X interrupts");
    334 
     335 /*
     336  * Number of queues; can be set to 0,
     337  * in which case it autoconfigures based on the
     338  * number of cpus with a max of 8. This
     339  * can be overridden manually here.
     340  */
    341 static int ixgbe_num_queues = 0;
    342 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    343     "Number of queues to configure, 0 indicates autoconfigure");
    344 
     345 /*
     346  * Number of TX descriptors per ring;
     347  * set higher than RX as this seems
     348  * the better performing choice.
     349  */
    350 static int ixgbe_txd = PERFORM_TXD;
    351 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    352     "Number of transmit descriptors per queue");
    353 
    354 /* Number of RX descriptors per ring */
    355 static int ixgbe_rxd = PERFORM_RXD;
    356 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    357     "Number of receive descriptors per queue");
    358 
     359 /*
     360  * Turning this on will allow the use
     361  * of unsupported SFP+ modules; note that
     362  * in doing so you are on your own :)
     363  */
    364 static int allow_unsupported_sfp = false;
    365 #define TUNABLE_INT(__x, __y)
    366 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    367 
    368 /*
    369  * Not sure if Flow Director is fully baked,
    370  * so we'll default to turning it off.
    371  */
    372 static int ixgbe_enable_fdir = 0;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    374     "Enable Flow Director");
    375 
    376 /* Legacy Transmit (single queue) */
    377 static int ixgbe_enable_legacy_tx = 0;
    378 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    379     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    380 
    381 /* Receive-Side Scaling */
    382 static int ixgbe_enable_rss = 1;
    383 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    384     "Enable Receive-Side Scaling (RSS)");
    385 
     386 /* Keep a running tab on them for sanity checks */
    387 static int ixgbe_total_ports;
    388 
    389 #if 0
    390 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    391 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    392 #endif
    393 
    394 #ifdef NET_MPSAFE
    395 #define IXGBE_MPSAFE		1
    396 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    397 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    398 #else
    399 #define IXGBE_CALLOUT_FLAGS	0
    400 #define IXGBE_SOFTINFT_FLAGS	0
    401 #endif
    402 
    403 /************************************************************************
    404  * ixgbe_initialize_rss_mapping
    405  ************************************************************************/
    406 static void
    407 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    408 {
    409 	struct ixgbe_hw	*hw = &adapter->hw;
    410 	u32             reta = 0, mrqc, rss_key[10];
    411 	int             queue_id, table_size, index_mult;
    412 	int             i, j;
    413 	u32             rss_hash_config;
    414 
    415 	/* force use default RSS key. */
    416 #ifdef __NetBSD__
    417 	rss_getkey((uint8_t *) &rss_key);
    418 #else
    419 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    420 		/* Fetch the configured RSS key */
    421 		rss_getkey((uint8_t *) &rss_key);
    422 	} else {
    423 		/* set up random bits */
    424 		cprng_fast(&rss_key, sizeof(rss_key));
    425 	}
    426 #endif
    427 
    428 	/* Set multiplier for RETA setup and table size based on MAC */
    429 	index_mult = 0x1;
    430 	table_size = 128;
    431 	switch (adapter->hw.mac.type) {
    432 	case ixgbe_mac_82598EB:
    433 		index_mult = 0x11;
    434 		break;
    435 	case ixgbe_mac_X550:
    436 	case ixgbe_mac_X550EM_x:
    437 	case ixgbe_mac_X550EM_a:
    438 		table_size = 512;
    439 		break;
    440 	default:
    441 		break;
    442 	}
    443 
    444 	/* Set up the redirection table */
    445 	for (i = 0, j = 0; i < table_size; i++, j++) {
    446 		if (j == adapter->num_queues)
    447 			j = 0;
    448 
    449 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    450 			/*
    451 			 * Fetch the RSS bucket id for the given indirection
    452 			 * entry. Cap it at the number of configured buckets
    453 			 * (which is num_queues.)
    454 			 */
    455 			queue_id = rss_get_indirection_to_bucket(i);
    456 			queue_id = queue_id % adapter->num_queues;
    457 		} else
    458 			queue_id = (j * index_mult);
    459 
    460 		/*
    461 		 * The low 8 bits are for hash value (n+0);
    462 		 * The next 8 bits are for hash value (n+1), etc.
    463 		 */
    464 		reta = reta >> 8;
    465 		reta = reta | (((uint32_t) queue_id) << 24);
    466 		if ((i & 3) == 3) {
    467 			if (i < 128)
    468 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    469 			else
    470 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    471 				    reta);
    472 			reta = 0;
    473 		}
    474 	}
    475 
    476 	/* Now fill our hash function seeds */
    477 	for (i = 0; i < 10; i++)
    478 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    479 
    480 	/* Perform hash on these packet types */
    481 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    482 		rss_hash_config = rss_gethashconfig();
    483 	else {
    484 		/*
    485 		 * Disable UDP - IP fragments aren't currently being handled
    486 		 * and so we end up with a mix of 2-tuple and 4-tuple
    487 		 * traffic.
    488 		 */
    489 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    490 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    491 		                | RSS_HASHTYPE_RSS_IPV6
    492 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    493 		                | RSS_HASHTYPE_RSS_IPV6_EX
    494 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    495 	}
    496 
    497 	mrqc = IXGBE_MRQC_RSSEN;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    503 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    504 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    505 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    506 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    507 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    508 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    509 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    510 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    511 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    512 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    513 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    514 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    515 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    516 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    517 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    518 } /* ixgbe_initialize_rss_mapping */
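/*
 * Illustrative sketch only (not compiled into the driver): how the loop
 * above packs four consecutive indirection entries into one 32-bit RETA
 * register.  Entry n ends up in the low byte and entry n+3 in the high
 * byte, so with four queues and an index multiplier of 1 the first
 * register packs to 0x03020100.  The function name below is hypothetical.
 */
#if 0
static uint32_t
ixgbe_reta_pack_sketch(int num_queues)
{
	uint32_t reta = 0;

	for (int i = 0; i < 4; i++) {
		int queue_id = i % num_queues;

		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
	}

	return reta;	/* 0x03020100 when num_queues == 4 */
}
#endif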
    519 
    520 /************************************************************************
    521  * ixgbe_initialize_receive_units - Setup receive registers and features.
    522  ************************************************************************/
    523 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    524 
    525 static void
    526 ixgbe_initialize_receive_units(struct adapter *adapter)
    527 {
    528 	struct	rx_ring	*rxr = adapter->rx_rings;
    529 	struct ixgbe_hw	*hw = &adapter->hw;
    530 	struct ifnet    *ifp = adapter->ifp;
    531 	int             i, j;
    532 	u32		bufsz, fctrl, srrctl, rxcsum;
    533 	u32		hlreg;
    534 
    535 	/*
    536 	 * Make sure receives are disabled while
    537 	 * setting up the descriptor ring
    538 	 */
    539 	ixgbe_disable_rx(hw);
    540 
    541 	/* Enable broadcasts */
    542 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    543 	fctrl |= IXGBE_FCTRL_BAM;
    544 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    545 		fctrl |= IXGBE_FCTRL_DPF;
    546 		fctrl |= IXGBE_FCTRL_PMCF;
    547 	}
    548 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    549 
    550 	/* Set for Jumbo Frames? */
    551 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    552 	if (ifp->if_mtu > ETHERMTU)
    553 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    554 	else
    555 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    556 
    557 #ifdef DEV_NETMAP
    558 	/* CRC stripping is conditional in Netmap */
    559 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    560 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    561 	    !ix_crcstrip)
    562 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    563 	else
    564 #endif /* DEV_NETMAP */
    565 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    566 
    567 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    568 
    569 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    570 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    571 
    572 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    573 		u64 rdba = rxr->rxdma.dma_paddr;
    574 		u32 tqsmreg, reg;
    575 		int regnum = i / 4;	/* 1 register per 4 queues */
    576 		int regshift = i % 4;	/* 4 bits per 1 queue */
    577 		j = rxr->me;
    578 
    579 		/* Setup the Base and Length of the Rx Descriptor Ring */
    580 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    581 		    (rdba & 0x00000000ffffffffULL));
    582 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    583 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    584 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    585 
    586 		/* Set up the SRRCTL register */
    587 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    588 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    589 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    590 		srrctl |= bufsz;
    591 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    592 
    593 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    594 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    595 		reg &= ~(0x000000ff << (regshift * 8));
    596 		reg |= i << (regshift * 8);
    597 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    598 
     599 		/*
     600 		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
     601 		 * Register locations for queues 0...7 differ between
     602 		 * 82598 and newer.
     603 		 */
    604 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    605 			tqsmreg = IXGBE_TQSMR(regnum);
    606 		else
    607 			tqsmreg = IXGBE_TQSM(regnum);
    608 		reg = IXGBE_READ_REG(hw, tqsmreg);
    609 		reg &= ~(0x000000ff << (regshift * 8));
    610 		reg |= i << (regshift * 8);
    611 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    612 
    613 		/*
    614 		 * Set DROP_EN iff we have no flow control and >1 queue.
    615 		 * Note that srrctl was cleared shortly before during reset,
    616 		 * so we do not need to clear the bit, but do it just in case
    617 		 * this code is moved elsewhere.
    618 		 */
    619 		if (adapter->num_queues > 1 &&
    620 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    621 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    622 		} else {
    623 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    624 		}
    625 
    626 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    627 
    628 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    629 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    630 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    631 
    632 		/* Set the driver rx tail address */
    633 		rxr->tail =  IXGBE_RDT(rxr->me);
    634 	}
    635 
    636 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    637 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    638 		            | IXGBE_PSRTYPE_UDPHDR
    639 		            | IXGBE_PSRTYPE_IPV4HDR
    640 		            | IXGBE_PSRTYPE_IPV6HDR;
    641 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    642 	}
    643 
    644 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    645 
    646 	ixgbe_initialize_rss_mapping(adapter);
    647 
    648 	if (adapter->num_queues > 1) {
    649 		/* RSS and RX IPP Checksum are mutually exclusive */
    650 		rxcsum |= IXGBE_RXCSUM_PCSD;
    651 	}
    652 
    653 	if (ifp->if_capenable & IFCAP_RXCSUM)
    654 		rxcsum |= IXGBE_RXCSUM_PCSD;
    655 
    656 	/* This is useful for calculating UDP/IP fragment checksums */
    657 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    658 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    659 
    660 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    661 
    662 	return;
    663 } /* ixgbe_initialize_receive_units */
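/*
 * Illustrative sketch only (not compiled into the driver): the
 * statistic-mapping arithmetic used above for the RQSMR/TQSMR registers.
 * Each 32-bit register maps four queues, eight bits per queue, so e.g.
 * queue 5 lands in bits 8..15 of register 1.  The names below are
 * hypothetical.
 */
#if 0
static void
ixgbe_stat_map_sketch(int queue, int *regnum, int *regshift)
{
	*regnum = queue / 4;	/* 1 register per 4 queues */
	*regshift = queue % 4;	/* 8 bits (one byte) per queue */
	/* queue 5 -> regnum 1, regshift 1, i.e. bits 8..15 of reg 1 */
}
#endif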
    664 
    665 /************************************************************************
    666  * ixgbe_initialize_transmit_units - Enable transmit units.
    667  ************************************************************************/
    668 static void
    669 ixgbe_initialize_transmit_units(struct adapter *adapter)
    670 {
    671 	struct tx_ring  *txr = adapter->tx_rings;
    672 	struct ixgbe_hw	*hw = &adapter->hw;
    673 
    674 	/* Setup the Base and Length of the Tx Descriptor Ring */
    675 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    676 		u64 tdba = txr->txdma.dma_paddr;
    677 		u32 txctrl = 0;
    678 		int j = txr->me;
    679 
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    681 		    (tdba & 0x00000000ffffffffULL));
    682 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    683 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    684 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    685 
    686 		/* Setup the HW Tx Head and Tail descriptor pointers */
    687 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    688 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    689 
    690 		/* Cache the tail address */
    691 		txr->tail = IXGBE_TDT(j);
    692 
    693 		/* Disable Head Writeback */
    694 		/*
    695 		 * Note: for X550 series devices, these registers are actually
     696 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    697 		 * fields remain the same.
    698 		 */
    699 		switch (hw->mac.type) {
    700 		case ixgbe_mac_82598EB:
    701 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    702 			break;
    703 		default:
    704 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    705 			break;
    706 		}
    707 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    708 		switch (hw->mac.type) {
    709 		case ixgbe_mac_82598EB:
    710 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    711 			break;
    712 		default:
    713 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    714 			break;
    715 		}
    716 
    717 	}
    718 
    719 	if (hw->mac.type != ixgbe_mac_82598EB) {
    720 		u32 dmatxctl, rttdcs;
    721 
    722 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    723 		dmatxctl |= IXGBE_DMATXCTL_TE;
    724 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    725 		/* Disable arbiter to set MTQC */
    726 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    727 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    728 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    729 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    730 		    ixgbe_get_mtqc(adapter->iov_mode));
    731 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    732 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    733 	}
    734 
    735 	return;
    736 } /* ixgbe_initialize_transmit_units */
    737 
    738 /************************************************************************
    739  * ixgbe_attach - Device initialization routine
    740  *
    741  *   Called when the driver is being loaded.
    742  *   Identifies the type of hardware, allocates all resources
    743  *   and initializes the hardware.
    744  *
     745  *   Failures are reported via aprint_error_dev(); nothing is returned.
    746  ************************************************************************/
    747 static void
    748 ixgbe_attach(device_t parent, device_t dev, void *aux)
    749 {
    750 	struct adapter  *adapter;
    751 	struct ixgbe_hw *hw;
    752 	int             error = -1;
    753 	u32		ctrl_ext;
    754 	u16		high, low, nvmreg;
    755 	pcireg_t	id, subid;
    756 	ixgbe_vendor_info_t *ent;
    757 	struct pci_attach_args *pa = aux;
    758 	const char *str;
    759 	char buf[256];
    760 
    761 	INIT_DEBUGOUT("ixgbe_attach: begin");
    762 
    763 	/* Allocate, clear, and link in our adapter structure */
    764 	adapter = device_private(dev);
    765 	adapter->hw.back = adapter;
    766 	adapter->dev = dev;
    767 	hw = &adapter->hw;
    768 	adapter->osdep.pc = pa->pa_pc;
    769 	adapter->osdep.tag = pa->pa_tag;
    770 	if (pci_dma64_available(pa))
    771 		adapter->osdep.dmat = pa->pa_dmat64;
    772 	else
    773 		adapter->osdep.dmat = pa->pa_dmat;
    774 	adapter->osdep.attached = false;
    775 
    776 	ent = ixgbe_lookup(pa);
    777 
    778 	KASSERT(ent != NULL);
    779 
    780 	aprint_normal(": %s, Version - %s\n",
    781 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    782 
    783 	/* Core Lock Init*/
     784 	/* Core Lock Init */
    785 
    786 	/* Set up the timer callout */
    787 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    788 
    789 	/* Determine hardware revision */
    790 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    791 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    792 
    793 	hw->vendor_id = PCI_VENDOR(id);
    794 	hw->device_id = PCI_PRODUCT(id);
    795 	hw->revision_id =
    796 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    797 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    798 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    799 
    800 	/*
    801 	 * Make sure BUSMASTER is set
    802 	 */
    803 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    804 
    805 	/* Do base PCI setup - map BAR0 */
    806 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    807 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    808 		error = ENXIO;
    809 		goto err_out;
    810 	}
    811 
    812 	/* let hardware know driver is loaded */
    813 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    814 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    815 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    816 
    817 	/*
    818 	 * Initialize the shared code
    819 	 */
    820 	if (ixgbe_init_shared_code(hw)) {
    821 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    822 		error = ENXIO;
    823 		goto err_out;
    824 	}
    825 
    826 	switch (hw->mac.type) {
    827 	case ixgbe_mac_82598EB:
    828 		str = "82598EB";
    829 		break;
    830 	case ixgbe_mac_82599EB:
    831 		str = "82599EB";
    832 		break;
    833 	case ixgbe_mac_X540:
    834 		str = "X540";
    835 		break;
    836 	case ixgbe_mac_X550:
    837 		str = "X550";
    838 		break;
    839 	case ixgbe_mac_X550EM_x:
    840 		str = "X550EM";
    841 		break;
    842 	case ixgbe_mac_X550EM_a:
    843 		str = "X550EM A";
    844 		break;
    845 	default:
    846 		str = "Unknown";
    847 		break;
    848 	}
    849 	aprint_normal_dev(dev, "device %s\n", str);
    850 
    851 	if (hw->mbx.ops.init_params)
    852 		hw->mbx.ops.init_params(hw);
    853 
    854 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    855 
    856 	/* Pick up the 82599 settings */
    857 	if (hw->mac.type != ixgbe_mac_82598EB) {
    858 		hw->phy.smart_speed = ixgbe_smart_speed;
    859 		adapter->num_segs = IXGBE_82599_SCATTER;
    860 	} else
    861 		adapter->num_segs = IXGBE_82598_SCATTER;
    862 
    863 	hw->mac.ops.set_lan_id(hw);
    864 	ixgbe_init_device_features(adapter);
    865 
    866 	if (ixgbe_configure_interrupts(adapter)) {
    867 		error = ENXIO;
    868 		goto err_out;
    869 	}
    870 
    871 	/* Allocate multicast array memory. */
    872 	adapter->mta = malloc(sizeof(*adapter->mta) *
    873 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    874 	if (adapter->mta == NULL) {
    875 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    876 		error = ENOMEM;
    877 		goto err_out;
    878 	}
    879 
    880 	/* Enable WoL (if supported) */
    881 	ixgbe_check_wol_support(adapter);
    882 
    883 	/* Verify adapter fan is still functional (if applicable) */
    884 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    885 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    886 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    887 	}
    888 
    889 	/* Ensure SW/FW semaphore is free */
    890 	ixgbe_init_swfw_semaphore(hw);
    891 
    892 	/* Enable EEE power saving */
    893 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    894 		hw->mac.ops.setup_eee(hw, TRUE);
    895 
    896 	/* Set an initial default flow control value */
    897 	hw->fc.requested_mode = ixgbe_flow_control;
    898 
    899 	/* Sysctls for limiting the amount of work done in the taskqueues */
    900 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    901 	    "max number of rx packets to process",
    902 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    903 
    904 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    905 	    "max number of tx packets to process",
    906 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    907 
    908 	/* Do descriptor calc and sanity checks */
    909 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    910 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    911 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    912 		adapter->num_tx_desc = DEFAULT_TXD;
    913 	} else
    914 		adapter->num_tx_desc = ixgbe_txd;
    915 
    916 	/*
    917 	 * With many RX rings it is easy to exceed the
    918 	 * system mbuf allocation. Tuning nmbclusters
    919 	 * can alleviate this.
    920 	 */
    921 	if (nmbclusters > 0) {
    922 		int s;
    923 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    924 		if (s > nmbclusters) {
    925 			aprint_error_dev(dev, "RX Descriptors exceed "
    926 			    "system mbuf max, using default instead!\n");
    927 			ixgbe_rxd = DEFAULT_RXD;
    928 		}
    929 	}
    930 
    931 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    932 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    933 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    934 		adapter->num_rx_desc = DEFAULT_RXD;
    935 	} else
    936 		adapter->num_rx_desc = ixgbe_rxd;
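	/*
	 * Example of the alignment constraint above: assuming DBA_ALIGN is
	 * 128 and the advanced descriptors are 16 bytes each, the ring size
	 * in bytes is a multiple of 128 whenever the descriptor count is a
	 * multiple of 8.
	 */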
    937 
    938 	/* Allocate our TX/RX Queues */
    939 	if (ixgbe_allocate_queues(adapter)) {
    940 		error = ENOMEM;
    941 		goto err_out;
    942 	}
    943 
    944 	hw->phy.reset_if_overtemp = TRUE;
    945 	error = ixgbe_reset_hw(hw);
    946 	hw->phy.reset_if_overtemp = FALSE;
    947 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    948 		/*
    949 		 * No optics in this port, set up
    950 		 * so the timer routine will probe
    951 		 * for later insertion.
    952 		 */
    953 		adapter->sfp_probe = TRUE;
    954 		error = IXGBE_SUCCESS;
    955 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    956 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    957 		error = EIO;
    958 		goto err_late;
    959 	} else if (error) {
    960 		aprint_error_dev(dev, "Hardware initialization failed\n");
    961 		error = EIO;
    962 		goto err_late;
    963 	}
    964 
    965 	/* Make sure we have a good EEPROM before we read from it */
    966 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    967 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    968 		error = EIO;
    969 		goto err_late;
    970 	}
    971 
    972 	aprint_normal("%s:", device_xname(dev));
    973 	/* NVM Image Version */
    974 	switch (hw->mac.type) {
    975 	case ixgbe_mac_X540:
    976 	case ixgbe_mac_X550EM_a:
    977 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    978 		if (nvmreg == 0xffff)
    979 			break;
    980 		high = (nvmreg >> 12) & 0x0f;
    981 		low = (nvmreg >> 4) & 0xff;
    982 		id = nvmreg & 0x0f;
    983 		aprint_normal(" NVM Image Version %u.", high);
    984 		if (hw->mac.type == ixgbe_mac_X540)
    985 			str = "%x";
    986 		else
    987 			str = "%02x";
    988 		aprint_normal(str, low);
    989 		aprint_normal(" ID 0x%x,", id);
    990 		break;
    991 	case ixgbe_mac_X550EM_x:
    992 	case ixgbe_mac_X550:
    993 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    994 		if (nvmreg == 0xffff)
    995 			break;
    996 		high = (nvmreg >> 12) & 0x0f;
    997 		low = nvmreg & 0xff;
    998 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    999 		break;
   1000 	default:
   1001 		break;
   1002 	}
   1003 
   1004 	/* PHY firmware revision */
   1005 	switch (hw->mac.type) {
   1006 	case ixgbe_mac_X540:
   1007 	case ixgbe_mac_X550:
   1008 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1009 		if (nvmreg == 0xffff)
   1010 			break;
   1011 		high = (nvmreg >> 12) & 0x0f;
   1012 		low = (nvmreg >> 4) & 0xff;
   1013 		id = nvmreg & 0x000f;
   1014 		aprint_normal(" PHY FW Revision %u.", high);
   1015 		if (hw->mac.type == ixgbe_mac_X540)
   1016 			str = "%x";
   1017 		else
   1018 			str = "%02x";
   1019 		aprint_normal(str, low);
   1020 		aprint_normal(" ID 0x%x,", id);
   1021 		break;
   1022 	default:
   1023 		break;
   1024 	}
   1025 
   1026 	/* NVM Map version & OEM NVM Image version */
   1027 	switch (hw->mac.type) {
   1028 	case ixgbe_mac_X550:
   1029 	case ixgbe_mac_X550EM_x:
   1030 	case ixgbe_mac_X550EM_a:
   1031 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1032 		if (nvmreg != 0xffff) {
   1033 			high = (nvmreg >> 12) & 0x0f;
   1034 			low = nvmreg & 0x00ff;
   1035 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1036 		}
   1037 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1038 		if (nvmreg != 0xffff) {
   1039 			high = (nvmreg >> 12) & 0x0f;
   1040 			low = nvmreg & 0x00ff;
   1041 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1042 			    low);
   1043 		}
   1044 		break;
   1045 	default:
   1046 		break;
   1047 	}
   1048 
   1049 	/* Print the ETrackID */
   1050 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1051 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1052 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1053 
   1054 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1055 		error = ixgbe_allocate_msix(adapter, pa);
   1056 		if (error) {
   1057 			/* Free allocated queue structures first */
   1058 			ixgbe_free_transmit_structures(adapter);
   1059 			ixgbe_free_receive_structures(adapter);
   1060 			free(adapter->queues, M_DEVBUF);
   1061 
   1062 			/* Fallback to legacy interrupt */
   1063 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1064 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1065 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1066 			adapter->num_queues = 1;
   1067 
   1068 			/* Allocate our TX/RX Queues again */
   1069 			if (ixgbe_allocate_queues(adapter)) {
   1070 				error = ENOMEM;
   1071 				goto err_out;
   1072 			}
   1073 		}
   1074 	}
   1075 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1076 		error = ixgbe_allocate_legacy(adapter, pa);
   1077 	if (error)
   1078 		goto err_late;
   1079 
   1080 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1081 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1082 	    ixgbe_handle_link, adapter);
   1083 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1084 	    ixgbe_handle_mod, adapter);
   1085 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1086 	    ixgbe_handle_msf, adapter);
   1087 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1088 	    ixgbe_handle_phy, adapter);
   1089 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1090 		adapter->fdir_si =
   1091 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1092 			ixgbe_reinit_fdir, adapter);
   1093 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1094 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1095 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1096 		&& (adapter->fdir_si == NULL))) {
   1097 		aprint_error_dev(dev,
    1098 		    "could not establish software interrupts\n");
   1099 		goto err_out;
   1100 	}
   1101 
   1102 	error = ixgbe_start_hw(hw);
   1103 	switch (error) {
   1104 	case IXGBE_ERR_EEPROM_VERSION:
   1105 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1106 		    "LOM.  Please be aware there may be issues associated "
   1107 		    "with your hardware.\nIf you are experiencing problems "
   1108 		    "please contact your Intel or hardware representative "
   1109 		    "who provided you with this hardware.\n");
   1110 		break;
   1111 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1112 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1113 		error = EIO;
   1114 		goto err_late;
   1115 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1116 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1117 		/* falls thru */
   1118 	default:
   1119 		break;
   1120 	}
   1121 
   1122 	/* Setup OS specific network interface */
   1123 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1124 		goto err_late;
   1125 
    1126 	/*
    1127 	 * Print the PHY ID only for copper PHYs. On devices which have an SFP(+)
    1128 	 * cage with a module inserted, phy.id is not an MII PHY id but an SFF 8024 ID.
    1129 	 */
   1130 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1131 		uint16_t id1, id2;
   1132 		int oui, model, rev;
   1133 		const char *descr;
   1134 
   1135 		id1 = hw->phy.id >> 16;
   1136 		id2 = hw->phy.id & 0xffff;
   1137 		oui = MII_OUI(id1, id2);
   1138 		model = MII_MODEL(id2);
   1139 		rev = MII_REV(id2);
   1140 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1141 			aprint_normal_dev(dev,
   1142 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1143 			    descr, oui, model, rev);
   1144 		else
   1145 			aprint_normal_dev(dev,
   1146 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1147 			    oui, model, rev);
   1148 	}
   1149 
   1150 	/* Enable the optics for 82599 SFP+ fiber */
   1151 	ixgbe_enable_tx_laser(hw);
   1152 
   1153 	/* Enable power to the phy. */
   1154 	ixgbe_set_phy_power(hw, TRUE);
   1155 
   1156 	/* Initialize statistics */
   1157 	ixgbe_update_stats_counters(adapter);
   1158 
   1159 	/* Check PCIE slot type/speed/width */
   1160 	ixgbe_get_slot_info(adapter);
   1161 
   1162 	/*
   1163 	 * Do time init and sysctl init here, but
   1164 	 * only on the first port of a bypass adapter.
   1165 	 */
   1166 	ixgbe_bypass_init(adapter);
   1167 
   1168 	/* Set an initial dmac value */
   1169 	adapter->dmac = 0;
   1170 	/* Set initial advertised speeds (if applicable) */
   1171 	adapter->advertise = ixgbe_get_advertise(adapter);
   1172 
   1173 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1174 		ixgbe_define_iov_schemas(dev, &error);
   1175 
   1176 	/* Add sysctls */
   1177 	ixgbe_add_device_sysctls(adapter);
   1178 	ixgbe_add_hw_stats(adapter);
   1179 
   1180 	/* For Netmap */
   1181 	adapter->init_locked = ixgbe_init_locked;
   1182 	adapter->stop_locked = ixgbe_stop;
   1183 
   1184 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1185 		ixgbe_netmap_attach(adapter);
   1186 
   1187 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1188 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1189 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1190 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1191 
   1192 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1193 		pmf_class_network_register(dev, adapter->ifp);
   1194 	else
   1195 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1196 
   1197 	INIT_DEBUGOUT("ixgbe_attach: end");
   1198 	adapter->osdep.attached = true;
   1199 
   1200 	return;
   1201 
   1202 err_late:
   1203 	ixgbe_free_transmit_structures(adapter);
   1204 	ixgbe_free_receive_structures(adapter);
   1205 	free(adapter->queues, M_DEVBUF);
   1206 err_out:
   1207 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1208 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1209 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1210 	ixgbe_free_softint(adapter);
   1211 	ixgbe_free_pci_resources(adapter);
   1212 	if (adapter->mta != NULL)
   1213 		free(adapter->mta, M_DEVBUF);
   1214 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1215 
   1216 	return;
   1217 } /* ixgbe_attach */
   1218 
   1219 /************************************************************************
   1220  * ixgbe_check_wol_support
   1221  *
   1222  *   Checks whether the adapter's ports are capable of
   1223  *   Wake On LAN by reading the adapter's NVM.
   1224  *
   1225  *   Sets each port's hw->wol_enabled value depending
   1226  *   on the value read here.
   1227  ************************************************************************/
   1228 static void
   1229 ixgbe_check_wol_support(struct adapter *adapter)
   1230 {
   1231 	struct ixgbe_hw *hw = &adapter->hw;
   1232 	u16             dev_caps = 0;
   1233 
   1234 	/* Find out WoL support for port */
   1235 	adapter->wol_support = hw->wol_enabled = 0;
   1236 	ixgbe_get_device_caps(hw, &dev_caps);
   1237 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1238 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1239 	     hw->bus.func == 0))
   1240 		adapter->wol_support = hw->wol_enabled = 1;
   1241 
   1242 	/* Save initial wake up filter configuration */
   1243 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1244 
   1245 	return;
   1246 } /* ixgbe_check_wol_support */
   1247 
   1248 /************************************************************************
   1249  * ixgbe_setup_interface
   1250  *
   1251  *   Setup networking device structure and register an interface.
   1252  ************************************************************************/
   1253 static int
   1254 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1255 {
   1256 	struct ethercom *ec = &adapter->osdep.ec;
   1257 	struct ifnet   *ifp;
   1258 	int rv;
   1259 
   1260 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1261 
   1262 	ifp = adapter->ifp = &ec->ec_if;
   1263 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1264 	ifp->if_baudrate = IF_Gbps(10);
   1265 	ifp->if_init = ixgbe_init;
   1266 	ifp->if_stop = ixgbe_ifstop;
   1267 	ifp->if_softc = adapter;
   1268 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1269 #ifdef IXGBE_MPSAFE
   1270 	ifp->if_extflags = IFEF_MPSAFE;
   1271 #endif
   1272 	ifp->if_ioctl = ixgbe_ioctl;
   1273 #if __FreeBSD_version >= 1100045
   1274 	/* TSO parameters */
   1275 	ifp->if_hw_tsomax = 65518;
   1276 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1277 	ifp->if_hw_tsomaxsegsize = 2048;
   1278 #endif
   1279 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1280 #if 0
   1281 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1282 #endif
   1283 	} else {
   1284 		ifp->if_transmit = ixgbe_mq_start;
   1285 #if 0
   1286 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1287 #endif
   1288 	}
   1289 	ifp->if_start = ixgbe_legacy_start;
   1290 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1291 	IFQ_SET_READY(&ifp->if_snd);
   1292 
   1293 	rv = if_initialize(ifp);
   1294 	if (rv != 0) {
   1295 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1296 		return rv;
   1297 	}
   1298 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1299 	ether_ifattach(ifp, adapter->hw.mac.addr);
    1300 	/*
    1301 	 * We use per-TX-queue softints, so if_deferred_start_init() isn't
    1302 	 * used.
    1303 	 */
   1304 	if_register(ifp);
   1305 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1306 
   1307 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1308 
   1309 	/*
   1310 	 * Tell the upper layer(s) we support long frames.
   1311 	 */
   1312 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1313 
   1314 	/* Set capability flags */
   1315 	ifp->if_capabilities |= IFCAP_RXCSUM
   1316 			     |  IFCAP_TXCSUM
   1317 			     |  IFCAP_TSOv4
   1318 			     |  IFCAP_TSOv6
   1319 			     |  IFCAP_LRO;
   1320 	ifp->if_capenable = 0;
   1321 
   1322 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1323 	    		    |  ETHERCAP_VLAN_HWCSUM
   1324 	    		    |  ETHERCAP_JUMBO_MTU
   1325 	    		    |  ETHERCAP_VLAN_MTU;
   1326 
   1327 	/* Enable the above capabilities by default */
   1328 	ec->ec_capenable = ec->ec_capabilities;
   1329 
    1330 	/*
    1331 	 * Don't turn this on by default.  If vlans are
    1332 	 * created on another pseudo device (e.g. lagg),
    1333 	 * then vlan events are not passed through, breaking
    1334 	 * operation, but with HW FILTER off it works.  If
    1335 	 * using vlans directly on the ixgbe driver you can
    1336 	 * enable this and get full hardware tag filtering.
    1337 	 */
   1338 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1339 
   1340 	/*
   1341 	 * Specify the media types supported by this adapter and register
   1342 	 * callbacks to update media and link information
   1343 	 */
   1344 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1345 	    ixgbe_media_status);
   1346 
   1347 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1348 	ixgbe_add_media_types(adapter);
   1349 
   1350 	/* Set autoselect media by default */
   1351 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1352 
   1353 	return (0);
   1354 } /* ixgbe_setup_interface */
   1355 
   1356 /************************************************************************
   1357  * ixgbe_add_media_types
   1358  ************************************************************************/
   1359 static void
   1360 ixgbe_add_media_types(struct adapter *adapter)
   1361 {
   1362 	struct ixgbe_hw *hw = &adapter->hw;
   1363 	device_t        dev = adapter->dev;
   1364 	u64             layer;
   1365 
   1366 	layer = adapter->phy_layer;
   1367 
   1368 #define	ADD(mm, dd)							\
   1369 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1370 
   1371 	/* Media types with matching NetBSD media defines */
   1372 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1373 		ADD(IFM_10G_T | IFM_FDX, 0);
   1374 	}
   1375 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1376 		ADD(IFM_1000_T | IFM_FDX, 0);
   1377 	}
   1378 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1379 		ADD(IFM_100_TX | IFM_FDX, 0);
   1380 	}
   1381 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1382 		ADD(IFM_10_T | IFM_FDX, 0);
   1383 	}
   1384 
   1385 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1386 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1387 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1388 	}
   1389 
   1390 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1391 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1392 		if (hw->phy.multispeed_fiber) {
   1393 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1394 		}
   1395 	}
   1396 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1397 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1398 		if (hw->phy.multispeed_fiber) {
   1399 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1400 		}
   1401 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1402 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1403 	}
   1404 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1405 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1406 	}
   1407 
   1408 #ifdef IFM_ETH_XTYPE
   1409 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1410 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1411 	}
   1412 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1413 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1414 	}
   1415 #else
   1416 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1417 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1418 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1419 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1420 	}
   1421 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1422 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1423 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1424 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1425 	}
   1426 #endif
   1427 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1428 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1429 	}
   1430 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1431 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1432 	}
   1433 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1434 		ADD(IFM_2500_T | IFM_FDX, 0);
   1435 	}
   1436 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1437 		ADD(IFM_5000_T | IFM_FDX, 0);
   1438 	}
   1439 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1440 		device_printf(dev, "Media supported: 1000baseBX\n");
   1441 	/* XXX no ifmedia_set? */
   1442 
   1443 	ADD(IFM_AUTO, 0);
   1444 
   1445 #undef ADD
   1446 } /* ixgbe_add_media_types */
   1447 
   1448 /************************************************************************
   1449  * ixgbe_is_sfp
   1450  ************************************************************************/
   1451 static inline bool
   1452 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1453 {
   1454 	switch (hw->mac.type) {
   1455 	case ixgbe_mac_82598EB:
   1456 		if (hw->phy.type == ixgbe_phy_nl)
   1457 			return TRUE;
   1458 		return FALSE;
   1459 	case ixgbe_mac_82599EB:
   1460 		switch (hw->mac.ops.get_media_type(hw)) {
   1461 		case ixgbe_media_type_fiber:
   1462 		case ixgbe_media_type_fiber_qsfp:
   1463 			return TRUE;
   1464 		default:
   1465 			return FALSE;
   1466 		}
   1467 	case ixgbe_mac_X550EM_x:
   1468 	case ixgbe_mac_X550EM_a:
   1469 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1470 			return TRUE;
   1471 		return FALSE;
   1472 	default:
   1473 		return FALSE;
   1474 	}
   1475 } /* ixgbe_is_sfp */
   1476 
   1477 /************************************************************************
   1478  * ixgbe_config_link
   1479  ************************************************************************/
   1480 static void
   1481 ixgbe_config_link(struct adapter *adapter)
   1482 {
   1483 	struct ixgbe_hw *hw = &adapter->hw;
   1484 	u32             autoneg, err = 0;
   1485 	bool            sfp, negotiate = false;
   1486 
   1487 	sfp = ixgbe_is_sfp(hw);
   1488 
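         	/*
         	 * SFP ports are brought up asynchronously: multispeed-fiber
         	 * modules get their SFP setup done here and the multispeed
         	 * fiber softint scheduled, while other modules are handled
         	 * entirely by the module-insertion softint.  Non-SFP ports
         	 * negotiate the link directly below.
         	 */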
   1489 	if (sfp) {
   1490 		if (hw->phy.multispeed_fiber) {
   1491 			hw->mac.ops.setup_sfp(hw);
   1492 			ixgbe_enable_tx_laser(hw);
   1493 			kpreempt_disable();
   1494 			softint_schedule(adapter->msf_si);
   1495 			kpreempt_enable();
   1496 		} else {
   1497 			kpreempt_disable();
   1498 			softint_schedule(adapter->mod_si);
   1499 			kpreempt_enable();
   1500 		}
   1501 	} else {
   1502 		if (hw->mac.ops.check_link)
   1503 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1504 			    &adapter->link_up, FALSE);
   1505 		if (err)
   1506 			goto out;
   1507 		autoneg = hw->phy.autoneg_advertised;
   1508 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1509 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1510 			    &negotiate);
   1511 		if (err)
   1512 			goto out;
   1513 		if (hw->mac.ops.setup_link)
    1514 			err = hw->mac.ops.setup_link(hw, autoneg,
   1515 			    adapter->link_up);
   1516 	}
   1517 out:
   1518 
   1519 	return;
   1520 } /* ixgbe_config_link */
   1521 
   1522 /************************************************************************
   1523  * ixgbe_update_stats_counters - Update board statistics counters.
   1524  ************************************************************************/
   1525 static void
   1526 ixgbe_update_stats_counters(struct adapter *adapter)
   1527 {
   1528 	struct ifnet          *ifp = adapter->ifp;
   1529 	struct ixgbe_hw       *hw = &adapter->hw;
   1530 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1531 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1532 	u64                   total_missed_rx = 0;
   1533 	uint64_t              crcerrs, rlec;
   1534 
   1535 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1536 	stats->crcerrs.ev_count += crcerrs;
   1537 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1538 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1539 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1540 	if (hw->mac.type == ixgbe_mac_X550)
   1541 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1542 
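         	/*
         	 * The MAC exposes a fixed bank of per-queue packet/descriptor
         	 * counter registers; read every slot and fold the counts onto
         	 * the queues actually in use (i % num_queues) so nothing is
         	 * lost when fewer queues are configured than the hardware has.
         	 */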
   1543 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1544 		int j = i % adapter->num_queues;
   1545 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1546 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1547 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1548 	}
   1549 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1550 		uint32_t mp;
   1551 		int j = i % adapter->num_queues;
   1552 
   1553 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
    1554 		/* per-queue missed packet total */
   1555 		stats->mpc[j].ev_count += mp;
   1556 		/* running comprehensive total for stats display */
   1557 		total_missed_rx += mp;
   1558 
   1559 		if (hw->mac.type == ixgbe_mac_82598EB)
   1560 			stats->rnbc[j].ev_count
   1561 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1562 
   1563 	}
   1564 	stats->mpctotal.ev_count += total_missed_rx;
   1565 
    1566 	/* The datasheet says M[LR]FC are valid only when link is up at 10Gbps */
   1567 	if ((adapter->link_active == TRUE)
   1568 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1569 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1570 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1571 	}
   1572 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1573 	stats->rlec.ev_count += rlec;
   1574 
   1575 	/* Hardware workaround, gprc counts missed packets */
   1576 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1577 
   1578 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1579 	stats->lxontxc.ev_count += lxon;
   1580 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1581 	stats->lxofftxc.ev_count += lxoff;
   1582 	total = lxon + lxoff;
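         	/*
         	 * lxon + lxoff is the number of flow-control pause frames sent
         	 * by the MAC itself; it is subtracted below from the transmit
         	 * packet counters (and, at ETHER_MIN_LEN octets per frame, from
         	 * the octet counters), presumably so they reflect data traffic
         	 * only.
         	 */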
   1583 
   1584 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1585 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1586 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1587 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1588 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1589 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1590 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1591 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1592 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1593 	} else {
   1594 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1595 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1596 		/* 82598 only has a counter in the high register */
   1597 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1598 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1599 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1600 	}
   1601 
   1602 	/*
   1603 	 * Workaround: mprc hardware is incorrectly counting
   1604 	 * broadcasts, so for now we subtract those.
   1605 	 */
   1606 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1607 	stats->bprc.ev_count += bprc;
   1608 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1609 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1610 
   1611 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1612 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1613 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1614 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1615 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1616 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1617 
   1618 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1619 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1620 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1621 
   1622 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1623 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1624 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1625 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1626 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1627 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1628 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1629 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1630 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1631 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1632 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1633 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1634 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1635 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1636 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1637 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1638 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1639 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1640 	/* Only read FCOE on 82599 */
   1641 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1642 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1643 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1644 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1645 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1646 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1647 	}
   1648 
   1649 	/* Fill out the OS statistics structure */
   1650 	/*
   1651 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1652 	 * adapter->stats counters. It's required to make ifconfig -z
    1653 	 * (SIOCZIFDATA) work.
   1654 	 */
   1655 	ifp->if_collisions = 0;
   1656 
   1657 	/* Rx Errors */
   1658 	ifp->if_iqdrops += total_missed_rx;
   1659 	ifp->if_ierrors += crcerrs + rlec;
   1660 } /* ixgbe_update_stats_counters */
   1661 
   1662 /************************************************************************
   1663  * ixgbe_add_hw_stats
   1664  *
   1665  *   Add sysctl variables, one per statistic, to the system.
   1666  ************************************************************************/
   1667 static void
   1668 ixgbe_add_hw_stats(struct adapter *adapter)
   1669 {
   1670 	device_t dev = adapter->dev;
   1671 	const struct sysctlnode *rnode, *cnode;
   1672 	struct sysctllog **log = &adapter->sysctllog;
   1673 	struct tx_ring *txr = adapter->tx_rings;
   1674 	struct rx_ring *rxr = adapter->rx_rings;
   1675 	struct ixgbe_hw *hw = &adapter->hw;
   1676 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1677 	const char *xname = device_xname(dev);
   1678 
   1679 	/* Driver Statistics */
   1680 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1681 	    NULL, xname, "Handled queue in softint");
   1682 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1683 	    NULL, xname, "Requeued in softint");
   1684 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1685 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1686 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1687 	    NULL, xname, "m_defrag() failed");
   1688 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1689 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1690 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1691 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1692 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1693 	    NULL, xname, "Driver tx dma hard fail other");
   1694 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1695 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1696 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1697 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1698 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1699 	    NULL, xname, "Watchdog timeouts");
   1700 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1701 	    NULL, xname, "TSO errors");
   1702 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1703 	    NULL, xname, "Link MSI-X IRQ Handled");
   1704 
   1705 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1706 		snprintf(adapter->queues[i].evnamebuf,
   1707 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1708 		    xname, i);
   1709 		snprintf(adapter->queues[i].namebuf,
   1710 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1711 
   1712 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1713 			aprint_error_dev(dev, "could not create sysctl root\n");
   1714 			break;
   1715 		}
   1716 
   1717 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1718 		    0, CTLTYPE_NODE,
   1719 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1720 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1721 			break;
   1722 
   1723 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1724 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1725 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1726 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1727 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1728 			break;
   1729 
   1730 #if 0 /* XXX msaitoh */
   1731 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1732 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1733 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1734 			NULL, 0, &(adapter->queues[i].irqs),
   1735 		    0, CTL_CREATE, CTL_EOL) != 0)
   1736 			break;
   1737 #endif
   1738 
   1739 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1740 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1741 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1742 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1743 		    0, CTL_CREATE, CTL_EOL) != 0)
   1744 			break;
   1745 
   1746 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1747 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1748 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1749 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1750 		    0, CTL_CREATE, CTL_EOL) != 0)
   1751 			break;
   1752 
   1753 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1754 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1755 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1756 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1757 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1758 		    NULL, adapter->queues[i].evnamebuf,
   1759 		    "Queue No Descriptor Available");
   1760 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1761 		    NULL, adapter->queues[i].evnamebuf,
   1762 		    "Queue Packets Transmitted");
   1763 #ifndef IXGBE_LEGACY_TX
   1764 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1765 		    NULL, adapter->queues[i].evnamebuf,
   1766 		    "Packets dropped in pcq");
   1767 #endif
   1768 
   1769 #ifdef LRO
   1770 		struct lro_ctrl *lro = &rxr->lro;
   1771 #endif /* LRO */
   1772 
   1773 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1774 		    CTLFLAG_READONLY,
   1775 		    CTLTYPE_INT,
   1776 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1777 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1778 		    CTL_CREATE, CTL_EOL) != 0)
   1779 			break;
   1780 
   1781 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1782 		    CTLFLAG_READONLY,
   1783 		    CTLTYPE_INT,
   1784 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1785 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1786 		    CTL_CREATE, CTL_EOL) != 0)
   1787 			break;
   1788 
   1789 		if (i < __arraycount(stats->mpc)) {
   1790 			evcnt_attach_dynamic(&stats->mpc[i],
   1791 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1792 			    "RX Missed Packet Count");
   1793 			if (hw->mac.type == ixgbe_mac_82598EB)
   1794 				evcnt_attach_dynamic(&stats->rnbc[i],
   1795 				    EVCNT_TYPE_MISC, NULL,
   1796 				    adapter->queues[i].evnamebuf,
   1797 				    "Receive No Buffers");
   1798 		}
   1799 		if (i < __arraycount(stats->pxontxc)) {
   1800 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1801 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1802 			    "pxontxc");
   1803 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1804 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1805 			    "pxonrxc");
   1806 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1807 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1808 			    "pxofftxc");
   1809 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1810 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1811 			    "pxoffrxc");
   1812 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1813 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1814 			    "pxon2offc");
   1815 		}
   1816 		if (i < __arraycount(stats->qprc)) {
   1817 			evcnt_attach_dynamic(&stats->qprc[i],
   1818 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1819 			    "qprc");
   1820 			evcnt_attach_dynamic(&stats->qptc[i],
   1821 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1822 			    "qptc");
   1823 			evcnt_attach_dynamic(&stats->qbrc[i],
   1824 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1825 			    "qbrc");
   1826 			evcnt_attach_dynamic(&stats->qbtc[i],
   1827 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1828 			    "qbtc");
   1829 			evcnt_attach_dynamic(&stats->qprdc[i],
   1830 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1831 			    "qprdc");
   1832 		}
   1833 
   1834 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1835 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1836 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1837 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1838 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1839 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1840 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1841 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1842 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1843 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1844 #ifdef LRO
   1845 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1846 				CTLFLAG_RD, &lro->lro_queued, 0,
   1847 				"LRO Queued");
   1848 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1849 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1850 				"LRO Flushed");
   1851 #endif /* LRO */
   1852 	}
   1853 
   1854 	/* MAC stats get their own sub node */
   1855 
   1856 	snprintf(stats->namebuf,
   1857 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1858 
   1859 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1860 	    stats->namebuf, "rx csum offload - IP");
   1861 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1862 	    stats->namebuf, "rx csum offload - L4");
   1863 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1864 	    stats->namebuf, "rx csum offload - IP bad");
   1865 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1866 	    stats->namebuf, "rx csum offload - L4 bad");
   1867 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1868 	    stats->namebuf, "Interrupt conditions zero");
   1869 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1870 	    stats->namebuf, "Legacy interrupts");
   1871 
   1872 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1873 	    stats->namebuf, "CRC Errors");
   1874 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1875 	    stats->namebuf, "Illegal Byte Errors");
   1876 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1877 	    stats->namebuf, "Byte Errors");
   1878 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1879 	    stats->namebuf, "MAC Short Packets Discarded");
   1880 	if (hw->mac.type >= ixgbe_mac_X550)
   1881 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1882 		    stats->namebuf, "Bad SFD");
   1883 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1884 	    stats->namebuf, "Total Packets Missed");
   1885 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1886 	    stats->namebuf, "MAC Local Faults");
   1887 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1888 	    stats->namebuf, "MAC Remote Faults");
   1889 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1890 	    stats->namebuf, "Receive Length Errors");
   1891 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1892 	    stats->namebuf, "Link XON Transmitted");
   1893 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1894 	    stats->namebuf, "Link XON Received");
   1895 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "Link XOFF Transmitted");
   1897 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "Link XOFF Received");
   1899 
   1900 	/* Packet Reception Stats */
   1901 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Total Octets Received");
   1903 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "Good Octets Received");
   1905 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "Total Packets Received");
   1907 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "Good Packets Received");
   1909 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1910 	    stats->namebuf, "Multicast Packets Received");
   1911 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1912 	    stats->namebuf, "Broadcast Packets Received");
   1913 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "64 byte frames received ");
   1915 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1916 	    stats->namebuf, "65-127 byte frames received");
   1917 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1918 	    stats->namebuf, "128-255 byte frames received");
   1919 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1920 	    stats->namebuf, "256-511 byte frames received");
   1921 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1922 	    stats->namebuf, "512-1023 byte frames received");
   1923 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    1924 	    stats->namebuf, "1024-1522 byte frames received");
   1925 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1926 	    stats->namebuf, "Receive Undersized");
   1927 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1928 	    stats->namebuf, "Fragmented Packets Received ");
   1929 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1930 	    stats->namebuf, "Oversized Packets Received");
   1931 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1932 	    stats->namebuf, "Received Jabber");
   1933 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1934 	    stats->namebuf, "Management Packets Received");
   1935 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1936 	    stats->namebuf, "Management Packets Dropped");
   1937 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1938 	    stats->namebuf, "Checksum Errors");
   1939 
   1940 	/* Packet Transmission Stats */
   1941 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "Good Octets Transmitted");
   1943 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "Total Packets Transmitted");
   1945 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "Good Packets Transmitted");
   1947 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "Broadcast Packets Transmitted");
   1949 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1950 	    stats->namebuf, "Multicast Packets Transmitted");
   1951 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1952 	    stats->namebuf, "Management Packets Transmitted");
   1953 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1954 	    stats->namebuf, "64 byte frames transmitted ");
   1955 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1956 	    stats->namebuf, "65-127 byte frames transmitted");
   1957 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1958 	    stats->namebuf, "128-255 byte frames transmitted");
   1959 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1960 	    stats->namebuf, "256-511 byte frames transmitted");
   1961 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1962 	    stats->namebuf, "512-1023 byte frames transmitted");
   1963 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1964 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1965 } /* ixgbe_add_hw_stats */
   1966 
   1967 static void
   1968 ixgbe_clear_evcnt(struct adapter *adapter)
   1969 {
   1970 	struct tx_ring *txr = adapter->tx_rings;
   1971 	struct rx_ring *rxr = adapter->rx_rings;
   1972 	struct ixgbe_hw *hw = &adapter->hw;
   1973 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1974 
   1975 	adapter->handleq.ev_count = 0;
   1976 	adapter->req.ev_count = 0;
   1977 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1978 	adapter->mbuf_defrag_failed.ev_count = 0;
   1979 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1980 	adapter->einval_tx_dma_setup.ev_count = 0;
   1981 	adapter->other_tx_dma_setup.ev_count = 0;
   1982 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1983 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1984 	adapter->watchdog_events.ev_count = 0;
   1985 	adapter->tso_err.ev_count = 0;
   1986 	adapter->link_irq.ev_count = 0;
   1987 
   1988 	txr = adapter->tx_rings;
   1989 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1990 		adapter->queues[i].irqs.ev_count = 0;
   1991 		txr->no_desc_avail.ev_count = 0;
   1992 		txr->total_packets.ev_count = 0;
   1993 		txr->tso_tx.ev_count = 0;
   1994 #ifndef IXGBE_LEGACY_TX
   1995 		txr->pcq_drops.ev_count = 0;
   1996 #endif
   1997 
   1998 		if (i < __arraycount(stats->mpc)) {
   1999 			stats->mpc[i].ev_count = 0;
   2000 			if (hw->mac.type == ixgbe_mac_82598EB)
   2001 				stats->rnbc[i].ev_count = 0;
   2002 		}
   2003 		if (i < __arraycount(stats->pxontxc)) {
   2004 			stats->pxontxc[i].ev_count = 0;
   2005 			stats->pxonrxc[i].ev_count = 0;
   2006 			stats->pxofftxc[i].ev_count = 0;
   2007 			stats->pxoffrxc[i].ev_count = 0;
   2008 			stats->pxon2offc[i].ev_count = 0;
   2009 		}
   2010 		if (i < __arraycount(stats->qprc)) {
   2011 			stats->qprc[i].ev_count = 0;
   2012 			stats->qptc[i].ev_count = 0;
   2013 			stats->qbrc[i].ev_count = 0;
   2014 			stats->qbtc[i].ev_count = 0;
   2015 			stats->qprdc[i].ev_count = 0;
   2016 		}
   2017 
   2018 		rxr->rx_packets.ev_count = 0;
   2019 		rxr->rx_bytes.ev_count = 0;
   2020 		rxr->rx_copies.ev_count = 0;
   2021 		rxr->no_jmbuf.ev_count = 0;
   2022 		rxr->rx_discarded.ev_count = 0;
   2023 	}
   2024 	stats->ipcs.ev_count = 0;
   2025 	stats->l4cs.ev_count = 0;
   2026 	stats->ipcs_bad.ev_count = 0;
   2027 	stats->l4cs_bad.ev_count = 0;
   2028 	stats->intzero.ev_count = 0;
   2029 	stats->legint.ev_count = 0;
   2030 	stats->crcerrs.ev_count = 0;
   2031 	stats->illerrc.ev_count = 0;
   2032 	stats->errbc.ev_count = 0;
   2033 	stats->mspdc.ev_count = 0;
   2034 	stats->mbsdc.ev_count = 0;
   2035 	stats->mpctotal.ev_count = 0;
   2036 	stats->mlfc.ev_count = 0;
   2037 	stats->mrfc.ev_count = 0;
   2038 	stats->rlec.ev_count = 0;
   2039 	stats->lxontxc.ev_count = 0;
   2040 	stats->lxonrxc.ev_count = 0;
   2041 	stats->lxofftxc.ev_count = 0;
   2042 	stats->lxoffrxc.ev_count = 0;
   2043 
   2044 	/* Packet Reception Stats */
   2045 	stats->tor.ev_count = 0;
   2046 	stats->gorc.ev_count = 0;
   2047 	stats->tpr.ev_count = 0;
   2048 	stats->gprc.ev_count = 0;
   2049 	stats->mprc.ev_count = 0;
   2050 	stats->bprc.ev_count = 0;
   2051 	stats->prc64.ev_count = 0;
   2052 	stats->prc127.ev_count = 0;
   2053 	stats->prc255.ev_count = 0;
   2054 	stats->prc511.ev_count = 0;
   2055 	stats->prc1023.ev_count = 0;
   2056 	stats->prc1522.ev_count = 0;
   2057 	stats->ruc.ev_count = 0;
   2058 	stats->rfc.ev_count = 0;
   2059 	stats->roc.ev_count = 0;
   2060 	stats->rjc.ev_count = 0;
   2061 	stats->mngprc.ev_count = 0;
   2062 	stats->mngpdc.ev_count = 0;
   2063 	stats->xec.ev_count = 0;
   2064 
   2065 	/* Packet Transmission Stats */
   2066 	stats->gotc.ev_count = 0;
   2067 	stats->tpt.ev_count = 0;
   2068 	stats->gptc.ev_count = 0;
   2069 	stats->bptc.ev_count = 0;
   2070 	stats->mptc.ev_count = 0;
   2071 	stats->mngptc.ev_count = 0;
   2072 	stats->ptc64.ev_count = 0;
   2073 	stats->ptc127.ev_count = 0;
   2074 	stats->ptc255.ev_count = 0;
   2075 	stats->ptc511.ev_count = 0;
   2076 	stats->ptc1023.ev_count = 0;
   2077 	stats->ptc1522.ev_count = 0;
   2078 }
   2079 
   2080 /************************************************************************
   2081  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2082  *
   2083  *   Retrieves the TDH value from the hardware
   2084  ************************************************************************/
   2085 static int
   2086 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2087 {
   2088 	struct sysctlnode node = *rnode;
   2089 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2090 	uint32_t val;
   2091 
   2092 	if (!txr)
   2093 		return (0);
   2094 
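         	/*
         	 * Standard NetBSD read-only sysctl idiom: copy the node, point
         	 * sysctl_data at a local variable holding the register value
         	 * and let sysctl_lookup() copy it out to the caller.
         	 */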
   2095 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2096 	node.sysctl_data = &val;
   2097 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2098 } /* ixgbe_sysctl_tdh_handler */
   2099 
   2100 /************************************************************************
   2101  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2102  *
   2103  *   Retrieves the TDT value from the hardware
   2104  ************************************************************************/
   2105 static int
   2106 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2107 {
   2108 	struct sysctlnode node = *rnode;
   2109 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2110 	uint32_t val;
   2111 
   2112 	if (!txr)
   2113 		return (0);
   2114 
   2115 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2116 	node.sysctl_data = &val;
   2117 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2118 } /* ixgbe_sysctl_tdt_handler */
   2119 
   2120 /************************************************************************
   2121  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2122  *
   2123  *   Retrieves the RDH value from the hardware
   2124  ************************************************************************/
   2125 static int
   2126 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2127 {
   2128 	struct sysctlnode node = *rnode;
   2129 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2130 	uint32_t val;
   2131 
   2132 	if (!rxr)
   2133 		return (0);
   2134 
   2135 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2136 	node.sysctl_data = &val;
   2137 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2138 } /* ixgbe_sysctl_rdh_handler */
   2139 
   2140 /************************************************************************
   2141  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2142  *
   2143  *   Retrieves the RDT value from the hardware
   2144  ************************************************************************/
   2145 static int
   2146 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2147 {
   2148 	struct sysctlnode node = *rnode;
   2149 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2150 	uint32_t val;
   2151 
   2152 	if (!rxr)
   2153 		return (0);
   2154 
   2155 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2156 	node.sysctl_data = &val;
   2157 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2158 } /* ixgbe_sysctl_rdt_handler */
   2159 
   2160 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2161 /************************************************************************
   2162  * ixgbe_register_vlan
   2163  *
   2164  *   Run via vlan config EVENT, it enables us to use the
   2165  *   HW Filter table since we can get the vlan id. This
   2166  *   just creates the entry in the soft version of the
   2167  *   VFTA, init will repopulate the real table.
   2168  ************************************************************************/
   2169 static void
   2170 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2171 {
   2172 	struct adapter	*adapter = ifp->if_softc;
   2173 	u16		index, bit;
   2174 
   2175 	if (ifp->if_softc != arg)   /* Not our event */
   2176 		return;
   2177 
   2178 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2179 		return;
   2180 
   2181 	IXGBE_CORE_LOCK(adapter);
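         	/*
         	 * The VFTA is 128 32-bit words covering the 4096 possible VLAN
         	 * IDs: bits [11:5] of the tag select the word and bits [4:0]
         	 * the bit within it (e.g. vtag 100 -> index 3, bit 4).
         	 */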
   2182 	index = (vtag >> 5) & 0x7F;
   2183 	bit = vtag & 0x1F;
   2184 	adapter->shadow_vfta[index] |= (1 << bit);
   2185 	ixgbe_setup_vlan_hw_support(adapter);
   2186 	IXGBE_CORE_UNLOCK(adapter);
   2187 } /* ixgbe_register_vlan */
   2188 
   2189 /************************************************************************
   2190  * ixgbe_unregister_vlan
   2191  *
   2192  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2193  ************************************************************************/
   2194 static void
   2195 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2196 {
   2197 	struct adapter	*adapter = ifp->if_softc;
   2198 	u16		index, bit;
   2199 
   2200 	if (ifp->if_softc != arg)
   2201 		return;
   2202 
   2203 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2204 		return;
   2205 
   2206 	IXGBE_CORE_LOCK(adapter);
   2207 	index = (vtag >> 5) & 0x7F;
   2208 	bit = vtag & 0x1F;
   2209 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2210 	/* Re-init to load the changes */
   2211 	ixgbe_setup_vlan_hw_support(adapter);
   2212 	IXGBE_CORE_UNLOCK(adapter);
   2213 } /* ixgbe_unregister_vlan */
   2214 #endif
   2215 
   2216 static void
   2217 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2218 {
   2219 	struct ethercom *ec = &adapter->osdep.ec;
   2220 	struct ixgbe_hw *hw = &adapter->hw;
   2221 	struct rx_ring	*rxr;
   2222 	int             i;
   2223 	u32		ctrl;
   2224 
   2225 
    2226 	/*
    2227 	 * We get here via init_locked, which means a soft
    2228 	 * reset has already cleared the VFTA and other
    2229 	 * state.  If no vlans have been registered, there
    2230 	 * is nothing to do.
    2231 	 */
   2232 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2233 		return;
   2234 
   2235 	/* Setup the queues for vlans */
   2236 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2237 		for (i = 0; i < adapter->num_queues; i++) {
   2238 			rxr = &adapter->rx_rings[i];
   2239 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2240 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2241 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2242 				ctrl |= IXGBE_RXDCTL_VME;
   2243 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2244 			}
   2245 			rxr->vtag_strip = TRUE;
   2246 		}
   2247 	}
   2248 
   2249 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2250 		return;
   2251 	/*
    2252 	 * A soft reset zeroes out the VFTA, so
   2253 	 * we need to repopulate it now.
   2254 	 */
   2255 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2256 		if (adapter->shadow_vfta[i] != 0)
   2257 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2258 			    adapter->shadow_vfta[i]);
   2259 
   2260 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2261 	/* Enable the Filter Table if enabled */
   2262 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2263 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2264 		ctrl |= IXGBE_VLNCTRL_VFE;
   2265 	}
   2266 	if (hw->mac.type == ixgbe_mac_82598EB)
   2267 		ctrl |= IXGBE_VLNCTRL_VME;
   2268 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2269 } /* ixgbe_setup_vlan_hw_support */
   2270 
   2271 /************************************************************************
   2272  * ixgbe_get_slot_info
   2273  *
   2274  *   Get the width and transaction speed of
   2275  *   the slot this adapter is plugged into.
   2276  ************************************************************************/
   2277 static void
   2278 ixgbe_get_slot_info(struct adapter *adapter)
   2279 {
   2280 	device_t		dev = adapter->dev;
   2281 	struct ixgbe_hw		*hw = &adapter->hw;
   2282 	u32                   offset;
   2283 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2284 	u16			link;
   2285 	int                   bus_info_valid = TRUE;
   2286 
   2287 	/* Some devices are behind an internal bridge */
   2288 	switch (hw->device_id) {
   2289 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2290 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2291 		goto get_parent_info;
   2292 	default:
   2293 		break;
   2294 	}
   2295 
   2296 	ixgbe_get_bus_info(hw);
   2297 
    2298 	/*
    2299 	 * Some devices don't use PCI-E; there is no point in
    2300 	 * displaying "Unknown" for their bus speed and width.
    2301 	 */
   2302 	switch (hw->mac.type) {
   2303 	case ixgbe_mac_X550EM_x:
   2304 	case ixgbe_mac_X550EM_a:
   2305 		return;
   2306 	default:
   2307 		goto display;
   2308 	}
   2309 
   2310 get_parent_info:
   2311 	/*
   2312 	 * For the Quad port adapter we need to parse back
   2313 	 * up the PCI tree to find the speed of the expansion
   2314 	 * slot into which this adapter is plugged. A bit more work.
   2315 	 */
   2316 	dev = device_parent(device_parent(dev));
   2317 #if 0
   2318 #ifdef IXGBE_DEBUG
   2319 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2320 	    pci_get_slot(dev), pci_get_function(dev));
   2321 #endif
   2322 	dev = device_parent(device_parent(dev));
   2323 #ifdef IXGBE_DEBUG
   2324 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2325 	    pci_get_slot(dev), pci_get_function(dev));
   2326 #endif
   2327 #endif
   2328 	/* Now get the PCI Express Capabilities offset */
   2329 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2330 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2331 		/*
   2332 		 * Hmm...can't get PCI-Express capabilities.
   2333 		 * Falling back to default method.
   2334 		 */
   2335 		bus_info_valid = FALSE;
   2336 		ixgbe_get_bus_info(hw);
   2337 		goto display;
   2338 	}
   2339 	/* ...and read the Link Status Register */
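         	/*
         	 * (PCIE_LCSR holds Link Control in the low 16 bits and Link
         	 * Status in the high 16 bits, hence the ">> 16" below.)
         	 */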
   2340 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2341 	    offset + PCIE_LCSR) >> 16;
   2342 	ixgbe_set_pci_config_data_generic(hw, link);
   2343 
   2344 display:
   2345 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2346 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2347 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2348 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2349 	     "Unknown"),
   2350 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2351 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2352 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2353 	     "Unknown"));
   2354 
   2355 	if (bus_info_valid) {
   2356 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2357 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2358 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2359 			device_printf(dev, "PCI-Express bandwidth available"
   2360 			    " for this card\n     is not sufficient for"
   2361 			    " optimal performance.\n");
   2362 			device_printf(dev, "For optimal performance a x8 "
   2363 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2364 		}
   2365 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2366 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2367 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2368 			device_printf(dev, "PCI-Express bandwidth available"
   2369 			    " for this card\n     is not sufficient for"
   2370 			    " optimal performance.\n");
   2371 			device_printf(dev, "For optimal performance a x8 "
   2372 			    "PCIE Gen3 slot is required.\n");
   2373 		}
   2374 	} else
    2375 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
   2376 
   2377 	return;
   2378 } /* ixgbe_get_slot_info */
   2379 
   2380 /************************************************************************
   2381  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2382  ************************************************************************/
   2383 static inline void
   2384 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2385 {
   2386 	struct ixgbe_hw *hw = &adapter->hw;
   2387 	u64             queue = (u64)(1ULL << vector);
   2388 	u32             mask;
   2389 
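         	/*
         	 * 'queue' is a 64-bit mask with the bit for this MSI-X vector
         	 * set.  On the 82598 the per-queue bits live directly in EIMS
         	 * (masked with IXGBE_EIMS_RTX_QUEUE); later MACs split the mask
         	 * across EIMS_EX(0) (vectors 0-31) and EIMS_EX(1) (vectors
         	 * 32-63), e.g. vector 35 sets bit 3 of EIMS_EX(1).
         	 */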
   2390 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2391 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2392 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2393 	} else {
   2394 		mask = (queue & 0xFFFFFFFF);
   2395 		if (mask)
   2396 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2397 		mask = (queue >> 32);
   2398 		if (mask)
   2399 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2400 	}
   2401 } /* ixgbe_enable_queue */
   2402 
   2403 /************************************************************************
   2404  * ixgbe_disable_queue
   2405  ************************************************************************/
   2406 static inline void
   2407 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2408 {
   2409 	struct ixgbe_hw *hw = &adapter->hw;
   2410 	u64             queue = (u64)(1ULL << vector);
   2411 	u32             mask;
   2412 
   2413 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2414 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2415 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2416 	} else {
   2417 		mask = (queue & 0xFFFFFFFF);
   2418 		if (mask)
   2419 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2420 		mask = (queue >> 32);
   2421 		if (mask)
   2422 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2423 	}
   2424 } /* ixgbe_disable_queue */
   2425 
   2426 /************************************************************************
   2427  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2428  ************************************************************************/
   2429 static int
   2430 ixgbe_msix_que(void *arg)
   2431 {
   2432 	struct ix_queue	*que = arg;
   2433 	struct adapter  *adapter = que->adapter;
   2434 	struct ifnet    *ifp = adapter->ifp;
   2435 	struct tx_ring	*txr = que->txr;
   2436 	struct rx_ring	*rxr = que->rxr;
   2437 	bool		more;
   2438 	u32		newitr = 0;
   2439 
   2440 	/* Protect against spurious interrupts */
   2441 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2442 		return 0;
   2443 
   2444 	ixgbe_disable_queue(adapter, que->msix);
   2445 	++que->irqs.ev_count;
   2446 
   2447 #ifdef __NetBSD__
   2448 	/* Don't run ixgbe_rxeof in interrupt context */
   2449 	more = true;
   2450 #else
   2451 	more = ixgbe_rxeof(que);
   2452 #endif
   2453 
   2454 	IXGBE_TX_LOCK(txr);
   2455 	ixgbe_txeof(txr);
   2456 	IXGBE_TX_UNLOCK(txr);
   2457 
   2458 	/* Do AIM now? */
   2459 
   2460 	if (adapter->enable_aim == false)
   2461 		goto no_calc;
   2462 	/*
   2463 	 * Do Adaptive Interrupt Moderation:
   2464 	 *  - Write out last calculated setting
   2465 	 *  - Calculate based on average size over
   2466 	 *    the last interval.
   2467 	 */
   2468 	if (que->eitr_setting)
   2469 		ixgbe_eitr_write(que, que->eitr_setting);
   2470 
   2471 	que->eitr_setting = 0;
   2472 
   2473 	/* Idle, do nothing */
    2474 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2475 		goto no_calc;
   2476 
   2477 	if ((txr->bytes) && (txr->packets))
   2478 		newitr = txr->bytes/txr->packets;
   2479 	if ((rxr->bytes) && (rxr->packets))
   2480 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2481 	newitr += 24; /* account for hardware frame, crc */
   2482 
   2483 	/* set an upper boundary */
   2484 	newitr = min(newitr, 3000);
   2485 
   2486 	/* Be nice to the mid range */
   2487 	if ((newitr > 300) && (newitr < 1200))
   2488 		newitr = (newitr / 3);
   2489 	else
   2490 		newitr = (newitr / 2);
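         	/*
         	 * Illustrative example: an average frame of 1500 bytes gives
         	 * 1500 + 24 = 1524, which stays under the 3000 cap and, being
         	 * outside the 300-1200 mid range, is halved to 762; that value
         	 * is written to EITR on the next interrupt.
         	 */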
   2491 
    2492 	/*
    2493 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
    2494 	 * We currently use 2us for RSC_DELAY.  On 100M (and presumably 10M,
    2495 	 * though that is not documented) the minimum interval already
    2496 	 * exceeds 2us, but on 1G and faster it does not, so enforce it here.
    2497 	 */
   2498 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2499 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2500 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
   2501 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
   2502 	}
   2503 
    2504 	/* save for next interrupt */
    2505 	que->eitr_setting = newitr;
   2506 
   2507 	/* Reset state */
   2508 	txr->bytes = 0;
   2509 	txr->packets = 0;
   2510 	rxr->bytes = 0;
   2511 	rxr->packets = 0;
   2512 
   2513 no_calc:
   2514 	if (more)
   2515 		softint_schedule(que->que_si);
   2516 	else
   2517 		ixgbe_enable_queue(adapter, que->msix);
   2518 
   2519 	return 1;
   2520 } /* ixgbe_msix_que */
   2521 
   2522 /************************************************************************
   2523  * ixgbe_media_status - Media Ioctl callback
   2524  *
   2525  *   Called whenever the user queries the status of
   2526  *   the interface using ifconfig.
   2527  ************************************************************************/
   2528 static void
   2529 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2530 {
   2531 	struct adapter *adapter = ifp->if_softc;
   2532 	struct ixgbe_hw *hw = &adapter->hw;
   2533 	int layer;
   2534 
   2535 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2536 	IXGBE_CORE_LOCK(adapter);
   2537 	ixgbe_update_link_status(adapter);
   2538 
   2539 	ifmr->ifm_status = IFM_AVALID;
   2540 	ifmr->ifm_active = IFM_ETHER;
   2541 
   2542 	if (!adapter->link_active) {
   2543 		ifmr->ifm_active |= IFM_NONE;
   2544 		IXGBE_CORE_UNLOCK(adapter);
   2545 		return;
   2546 	}
   2547 
   2548 	ifmr->ifm_status |= IFM_ACTIVE;
   2549 	layer = adapter->phy_layer;
   2550 
   2551 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2552 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2553 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2554 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2555 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2556 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2557 		switch (adapter->link_speed) {
   2558 		case IXGBE_LINK_SPEED_10GB_FULL:
   2559 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2560 			break;
   2561 		case IXGBE_LINK_SPEED_5GB_FULL:
   2562 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2563 			break;
   2564 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2565 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2566 			break;
   2567 		case IXGBE_LINK_SPEED_1GB_FULL:
   2568 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2569 			break;
   2570 		case IXGBE_LINK_SPEED_100_FULL:
   2571 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2572 			break;
   2573 		case IXGBE_LINK_SPEED_10_FULL:
   2574 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2575 			break;
   2576 		}
   2577 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2578 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2579 		switch (adapter->link_speed) {
   2580 		case IXGBE_LINK_SPEED_10GB_FULL:
   2581 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2582 			break;
   2583 		}
   2584 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2585 		switch (adapter->link_speed) {
   2586 		case IXGBE_LINK_SPEED_10GB_FULL:
   2587 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2588 			break;
   2589 		case IXGBE_LINK_SPEED_1GB_FULL:
   2590 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2591 			break;
   2592 		}
   2593 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2594 		switch (adapter->link_speed) {
   2595 		case IXGBE_LINK_SPEED_10GB_FULL:
   2596 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2597 			break;
   2598 		case IXGBE_LINK_SPEED_1GB_FULL:
   2599 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2600 			break;
   2601 		}
   2602 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2603 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2604 		switch (adapter->link_speed) {
   2605 		case IXGBE_LINK_SPEED_10GB_FULL:
   2606 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2607 			break;
   2608 		case IXGBE_LINK_SPEED_1GB_FULL:
   2609 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2610 			break;
   2611 		}
   2612 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2613 		switch (adapter->link_speed) {
   2614 		case IXGBE_LINK_SPEED_10GB_FULL:
   2615 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2616 			break;
   2617 		}
   2618 	/*
   2619 	 * XXX: These need to use the proper media types once
   2620 	 * they're added.
   2621 	 */
   2622 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2623 		switch (adapter->link_speed) {
   2624 		case IXGBE_LINK_SPEED_10GB_FULL:
   2625 #ifndef IFM_ETH_XTYPE
   2626 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2627 #else
   2628 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2629 #endif
   2630 			break;
   2631 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2632 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2633 			break;
   2634 		case IXGBE_LINK_SPEED_1GB_FULL:
   2635 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2636 			break;
   2637 		}
   2638 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2639 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2640 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2641 		switch (adapter->link_speed) {
   2642 		case IXGBE_LINK_SPEED_10GB_FULL:
   2643 #ifndef IFM_ETH_XTYPE
   2644 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2645 #else
   2646 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2647 #endif
   2648 			break;
   2649 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2650 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2651 			break;
   2652 		case IXGBE_LINK_SPEED_1GB_FULL:
   2653 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2654 			break;
   2655 		}
   2656 
   2657 	/* If nothing is recognized... */
   2658 #if 0
   2659 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2660 		ifmr->ifm_active |= IFM_UNKNOWN;
   2661 #endif
   2662 
   2663 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2664 
   2665 	/* Display current flow control setting used on link */
   2666 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2667 	    hw->fc.current_mode == ixgbe_fc_full)
   2668 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2669 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2670 	    hw->fc.current_mode == ixgbe_fc_full)
   2671 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2672 
   2673 	IXGBE_CORE_UNLOCK(adapter);
   2674 
   2675 	return;
   2676 } /* ixgbe_media_status */
   2677 
   2678 /************************************************************************
   2679  * ixgbe_media_change - Media Ioctl callback
   2680  *
   2681  *   Called when the user changes speed/duplex using
    2682  *   the media/mediaopt options with ifconfig.
   2683  ************************************************************************/
   2684 static int
   2685 ixgbe_media_change(struct ifnet *ifp)
   2686 {
   2687 	struct adapter   *adapter = ifp->if_softc;
   2688 	struct ifmedia   *ifm = &adapter->media;
   2689 	struct ixgbe_hw  *hw = &adapter->hw;
   2690 	ixgbe_link_speed speed = 0;
   2691 	ixgbe_link_speed link_caps = 0;
   2692 	bool negotiate = false;
   2693 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2694 
   2695 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2696 
   2697 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2698 		return (EINVAL);
   2699 
   2700 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2701 		return (ENODEV);
   2702 
   2703 	/*
   2704 	 * We don't actually need to check against the supported
   2705 	 * media types of the adapter; ifmedia will take care of
   2706 	 * that for us.
   2707 	 */
   2708 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2709 	case IFM_AUTO:
   2710 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2711 		    &negotiate);
   2712 		if (err != IXGBE_SUCCESS) {
   2713 			device_printf(adapter->dev, "Unable to determine "
   2714 			    "supported advertise speeds\n");
   2715 			return (ENODEV);
   2716 		}
   2717 		speed |= link_caps;
   2718 		break;
   2719 	case IFM_10G_T:
   2720 	case IFM_10G_LRM:
   2721 	case IFM_10G_LR:
   2722 	case IFM_10G_TWINAX:
   2723 #ifndef IFM_ETH_XTYPE
   2724 	case IFM_10G_SR: /* KR, too */
   2725 	case IFM_10G_CX4: /* KX4 */
   2726 #else
   2727 	case IFM_10G_KR:
   2728 	case IFM_10G_KX4:
   2729 #endif
   2730 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2731 		break;
   2732 	case IFM_5000_T:
   2733 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2734 		break;
   2735 	case IFM_2500_T:
   2736 	case IFM_2500_KX:
   2737 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2738 		break;
   2739 	case IFM_1000_T:
   2740 	case IFM_1000_LX:
   2741 	case IFM_1000_SX:
   2742 	case IFM_1000_KX:
   2743 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2744 		break;
   2745 	case IFM_100_TX:
   2746 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2747 		break;
   2748 	case IFM_10_T:
   2749 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2750 		break;
   2751 	default:
   2752 		goto invalid;
   2753 	}
   2754 
   2755 	hw->mac.autotry_restart = TRUE;
   2756 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2757 	adapter->advertise = 0;
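         	/*
         	 * Record the selected speeds in adapter->advertise using the
         	 * bit encoding established below: 0x1 = 100M, 0x2 = 1G,
         	 * 0x4 = 10G, 0x8 = 10M, 0x10 = 2.5G, 0x20 = 5G.
         	 */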
   2758 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2759 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2760 			adapter->advertise |= 1 << 2;
   2761 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2762 			adapter->advertise |= 1 << 1;
   2763 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2764 			adapter->advertise |= 1 << 0;
   2765 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2766 			adapter->advertise |= 1 << 3;
   2767 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2768 			adapter->advertise |= 1 << 4;
   2769 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2770 			adapter->advertise |= 1 << 5;
   2771 	}
   2772 
   2773 	return (0);
   2774 
   2775 invalid:
   2776 	device_printf(adapter->dev, "Invalid media type!\n");
   2777 
   2778 	return (EINVAL);
   2779 } /* ixgbe_media_change */
   2780 
   2781 /************************************************************************
   2782  * ixgbe_set_promisc
   2783  ************************************************************************/
   2784 static void
   2785 ixgbe_set_promisc(struct adapter *adapter)
   2786 {
   2787 	struct ifnet *ifp = adapter->ifp;
   2788 	int          mcnt = 0;
   2789 	u32          rctl;
   2790 	struct ether_multi *enm;
   2791 	struct ether_multistep step;
   2792 	struct ethercom *ec = &adapter->osdep.ec;
   2793 
   2794 	KASSERT(mutex_owned(&adapter->core_mtx));
   2795 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2796 	rctl &= (~IXGBE_FCTRL_UPE);
   2797 	if (ifp->if_flags & IFF_ALLMULTI)
   2798 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2799 	else {
   2800 		ETHER_LOCK(ec);
   2801 		ETHER_FIRST_MULTI(step, ec, enm);
   2802 		while (enm != NULL) {
   2803 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2804 				break;
   2805 			mcnt++;
   2806 			ETHER_NEXT_MULTI(step, enm);
   2807 		}
   2808 		ETHER_UNLOCK(ec);
   2809 	}
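         	/*
         	 * MPE (multicast promiscuous) is cleared only when the whole
         	 * multicast list fits in the hardware filter; the PROMISC and
         	 * ALLMULTI cases below turn it back on as needed.
         	 */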
   2810 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2811 		rctl &= (~IXGBE_FCTRL_MPE);
   2812 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2813 
   2814 	if (ifp->if_flags & IFF_PROMISC) {
   2815 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2816 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2817 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2818 		rctl |= IXGBE_FCTRL_MPE;
   2819 		rctl &= ~IXGBE_FCTRL_UPE;
   2820 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2821 	}
   2822 } /* ixgbe_set_promisc */
   2823 
   2824 /************************************************************************
   2825  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2826  ************************************************************************/
   2827 static int
   2828 ixgbe_msix_link(void *arg)
   2829 {
   2830 	struct adapter	*adapter = arg;
   2831 	struct ixgbe_hw *hw = &adapter->hw;
   2832 	u32		eicr, eicr_mask;
   2833 	s32             retval;
   2834 
   2835 	++adapter->link_irq.ev_count;
   2836 
   2837 	/* Pause other interrupts */
   2838 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2839 
   2840 	/* First get the cause */
   2841 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2842 	/* Be sure the queue bits are not cleared */
   2843 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2844 	/* Clear interrupt with write */
   2845 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2846 
   2847 	/* Link status change */
   2848 	if (eicr & IXGBE_EICR_LSC) {
   2849 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2850 		softint_schedule(adapter->link_si);
   2851 	}
   2852 
   2853 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2854 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2855 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2856 			/* This is probably overkill :) */
   2857 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   2858 				return 1;
   2859 			/* Disable the interrupt */
   2860 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2861 			softint_schedule(adapter->fdir_si);
   2862 		}
   2863 
   2864 		if (eicr & IXGBE_EICR_ECC) {
   2865 			device_printf(adapter->dev,
   2866 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2867 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2868 		}
   2869 
   2870 		/* Check for over temp condition */
   2871 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2872 			switch (adapter->hw.mac.type) {
   2873 			case ixgbe_mac_X550EM_a:
   2874 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2875 					break;
   2876 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2877 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2878 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2879 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2880 				retval = hw->phy.ops.check_overtemp(hw);
   2881 				if (retval != IXGBE_ERR_OVERTEMP)
   2882 					break;
   2883 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2884 				device_printf(adapter->dev, "System shutdown required!\n");
   2885 				break;
   2886 			default:
   2887 				if (!(eicr & IXGBE_EICR_TS))
   2888 					break;
   2889 				retval = hw->phy.ops.check_overtemp(hw);
   2890 				if (retval != IXGBE_ERR_OVERTEMP)
   2891 					break;
   2892 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2893 				device_printf(adapter->dev, "System shutdown required!\n");
   2894 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2895 				break;
   2896 			}
   2897 		}
   2898 
   2899 		/* Check for VF message */
   2900 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2901 		    (eicr & IXGBE_EICR_MAILBOX))
   2902 			softint_schedule(adapter->mbx_si);
   2903 	}
   2904 
   2905 	if (ixgbe_is_sfp(hw)) {
   2906 		/* Pluggable optics-related interrupt */
   2907 		if (hw->mac.type >= ixgbe_mac_X540)
   2908 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2909 		else
   2910 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2911 
   2912 		if (eicr & eicr_mask) {
   2913 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2914 			softint_schedule(adapter->mod_si);
   2915 		}
   2916 
   2917 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2918 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2919 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2920 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2921 			softint_schedule(adapter->msf_si);
   2922 		}
   2923 	}
   2924 
   2925 	/* Check for fan failure */
   2926 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2927 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2928 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2929 	}
   2930 
   2931 	/* External PHY interrupt */
   2932 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2933 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2934 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2935 		softint_schedule(adapter->phy_si);
    2936 	}
   2937 
   2938 	/* Re-enable other interrupts */
   2939 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2940 	return 1;
   2941 } /* ixgbe_msix_link */
   2942 
   2943 static void
   2944 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
   2945 {
   2946 	struct adapter *adapter = que->adapter;
   2947 
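         	/*
         	 * 82598 appears to expect the interval replicated into the upper
         	 * half of EITR; newer MACs instead take IXGBE_EITR_CNT_WDIS so
         	 * that rewriting the interval does not also reset the internal
         	 * throttle counter.
         	 */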
    2948 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2949 		itr |= itr << 16;
    2950 	else
    2951 		itr |= IXGBE_EITR_CNT_WDIS;
   2952 
   2953 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2954 	    itr);
   2955 }
   2956 
   2957 
   2958 /************************************************************************
   2959  * ixgbe_sysctl_interrupt_rate_handler
   2960  ************************************************************************/
   2961 static int
   2962 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2963 {
   2964 	struct sysctlnode node = *rnode;
   2965 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2966 	struct adapter  *adapter = que->adapter;
   2967 	uint32_t reg, usec, rate;
   2968 	int error;
   2969 
   2970 	if (que == NULL)
   2971 		return 0;
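         	/*
         	 * The EITR interval field occupies bits 3..11 and counts in 2us
         	 * units, so the corresponding rate is 1s / (2us * interval),
         	 * i.e. 500000 / interval interrupts per second.
         	 */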
   2972 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2973 	usec = ((reg & 0x0FF8) >> 3);
   2974 	if (usec > 0)
   2975 		rate = 500000 / usec;
   2976 	else
   2977 		rate = 0;
   2978 	node.sysctl_data = &rate;
   2979 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2980 	if (error || newp == NULL)
   2981 		return error;
   2982 	reg &= ~0xfff; /* default, no limitation */
   2983 	if (rate > 0 && rate < 500000) {
   2984 		if (rate < 1000)
   2985 			rate = 1000;
   2986 		reg |= ((4000000/rate) & 0xff8);
   2987 		/*
    2988 		 * When RSC is used, the ITR interval must be larger than
    2989 		 * RSC_DELAY (currently 2us). The minimum interval is always
    2990 		 * greater than 2us at 100M (and presumably 10M, though that
    2991 		 * is not documented), but not at 1G and higher.
   2992 		 */
   2993 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2994 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2995 			if ((adapter->num_queues > 1)
   2996 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   2997 				return EINVAL;
   2998 		}
   2999 		ixgbe_max_interrupt_rate = rate;
   3000 	} else
   3001 		ixgbe_max_interrupt_rate = 0;
   3002 	ixgbe_eitr_write(que, reg);
   3003 
   3004 	return (0);
   3005 } /* ixgbe_sysctl_interrupt_rate_handler */
   3006 
   3007 const struct sysctlnode *
   3008 ixgbe_sysctl_instance(struct adapter *adapter)
   3009 {
   3010 	const char *dvname;
   3011 	struct sysctllog **log;
   3012 	int rc;
   3013 	const struct sysctlnode *rnode;
   3014 
   3015 	if (adapter->sysctltop != NULL)
   3016 		return adapter->sysctltop;
   3017 
   3018 	log = &adapter->sysctllog;
   3019 	dvname = device_xname(adapter->dev);
   3020 
   3021 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3022 	    0, CTLTYPE_NODE, dvname,
   3023 	    SYSCTL_DESCR("ixgbe information and settings"),
   3024 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3025 		goto err;
   3026 
   3027 	return rnode;
   3028 err:
   3029 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3030 	return NULL;
   3031 }
   3032 
   3033 /************************************************************************
   3034  * ixgbe_add_device_sysctls
   3035  ************************************************************************/
   3036 static void
   3037 ixgbe_add_device_sysctls(struct adapter *adapter)
   3038 {
   3039 	device_t               dev = adapter->dev;
   3040 	struct ixgbe_hw        *hw = &adapter->hw;
   3041 	struct sysctllog **log;
   3042 	const struct sysctlnode *rnode, *cnode;
   3043 
   3044 	log = &adapter->sysctllog;
   3045 
   3046 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3047 		aprint_error_dev(dev, "could not create sysctl root\n");
   3048 		return;
   3049 	}
   3050 
   3051 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3052 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3053 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3054 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3055 		aprint_error_dev(dev, "could not create sysctl\n");
   3056 
   3057 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3058 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3059 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3060 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3061 		aprint_error_dev(dev, "could not create sysctl\n");
   3062 
   3063 	/* Sysctls for all devices */
   3064 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3065 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3066 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3067 	    CTL_EOL) != 0)
   3068 		aprint_error_dev(dev, "could not create sysctl\n");
   3069 
   3070 	adapter->enable_aim = ixgbe_enable_aim;
   3071 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3072 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3073 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3074 		aprint_error_dev(dev, "could not create sysctl\n");
   3075 
   3076 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3077 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3078 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3079 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3080 	    CTL_EOL) != 0)
   3081 		aprint_error_dev(dev, "could not create sysctl\n");
   3082 
   3083 #ifdef IXGBE_DEBUG
   3084 	/* testing sysctls (for all devices) */
   3085 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3086 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3087 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3088 	    CTL_EOL) != 0)
   3089 		aprint_error_dev(dev, "could not create sysctl\n");
   3090 
   3091 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3092 	    CTLTYPE_STRING, "print_rss_config",
   3093 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3094 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3095 	    CTL_EOL) != 0)
   3096 		aprint_error_dev(dev, "could not create sysctl\n");
   3097 #endif
   3098 	/* for X550 series devices */
   3099 	if (hw->mac.type >= ixgbe_mac_X550)
   3100 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3101 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3102 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3103 		    CTL_EOL) != 0)
   3104 			aprint_error_dev(dev, "could not create sysctl\n");
   3105 
   3106 	/* for WoL-capable devices */
   3107 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3108 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3109 		    CTLTYPE_BOOL, "wol_enable",
   3110 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3111 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3112 		    CTL_EOL) != 0)
   3113 			aprint_error_dev(dev, "could not create sysctl\n");
   3114 
   3115 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3116 		    CTLTYPE_INT, "wufc",
   3117 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3118 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3119 		    CTL_EOL) != 0)
   3120 			aprint_error_dev(dev, "could not create sysctl\n");
   3121 	}
   3122 
   3123 	/* for X552/X557-AT devices */
   3124 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3125 		const struct sysctlnode *phy_node;
   3126 
   3127 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3128 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3129 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3130 			aprint_error_dev(dev, "could not create sysctl\n");
   3131 			return;
   3132 		}
   3133 
   3134 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3135 		    CTLTYPE_INT, "temp",
   3136 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3137 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3138 		    CTL_EOL) != 0)
   3139 			aprint_error_dev(dev, "could not create sysctl\n");
   3140 
   3141 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3142 		    CTLTYPE_INT, "overtemp_occurred",
   3143 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3144 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3145 		    CTL_CREATE, CTL_EOL) != 0)
   3146 			aprint_error_dev(dev, "could not create sysctl\n");
   3147 	}
   3148 
   3149 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3150 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3151 		    CTLTYPE_INT, "eee_state",
   3152 		    SYSCTL_DESCR("EEE Power Save State"),
   3153 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3154 		    CTL_EOL) != 0)
   3155 			aprint_error_dev(dev, "could not create sysctl\n");
   3156 	}
   3157 } /* ixgbe_add_device_sysctls */
   3158 
   3159 /************************************************************************
   3160  * ixgbe_allocate_pci_resources
   3161  ************************************************************************/
   3162 static int
   3163 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3164     const struct pci_attach_args *pa)
   3165 {
   3166 	pcireg_t	memtype;
   3167 	device_t dev = adapter->dev;
   3168 	bus_addr_t addr;
   3169 	int flags;
   3170 
   3171 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3172 	switch (memtype) {
   3173 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3174 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3175 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3176 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3177 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3178 			goto map_err;
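         		/*
         		 * The register BAR may be advertised as prefetchable, but
         		 * we map it non-prefetchable, presumably so register reads
         		 * are not prefetched or merged.
         		 */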
   3179 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3180 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3181 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3182 		}
   3183 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3184 		     adapter->osdep.mem_size, flags,
   3185 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3186 map_err:
   3187 			adapter->osdep.mem_size = 0;
   3188 			aprint_error_dev(dev, "unable to map BAR0\n");
   3189 			return ENXIO;
   3190 		}
   3191 		break;
   3192 	default:
   3193 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3194 		return ENXIO;
   3195 	}
   3196 
   3197 	return (0);
   3198 } /* ixgbe_allocate_pci_resources */
   3199 
   3200 static void
   3201 ixgbe_free_softint(struct adapter *adapter)
   3202 {
   3203 	struct ix_queue *que = adapter->queues;
   3204 	struct tx_ring *txr = adapter->tx_rings;
   3205 	int i;
   3206 
   3207 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3208 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3209 			if (txr->txr_si != NULL)
   3210 				softint_disestablish(txr->txr_si);
   3211 		}
   3212 		if (que->que_si != NULL)
   3213 			softint_disestablish(que->que_si);
   3214 	}
   3215 
   3216 	/* Drain the Link queue */
   3217 	if (adapter->link_si != NULL) {
   3218 		softint_disestablish(adapter->link_si);
   3219 		adapter->link_si = NULL;
   3220 	}
   3221 	if (adapter->mod_si != NULL) {
   3222 		softint_disestablish(adapter->mod_si);
   3223 		adapter->mod_si = NULL;
   3224 	}
   3225 	if (adapter->msf_si != NULL) {
   3226 		softint_disestablish(adapter->msf_si);
   3227 		adapter->msf_si = NULL;
   3228 	}
   3229 	if (adapter->phy_si != NULL) {
   3230 		softint_disestablish(adapter->phy_si);
   3231 		adapter->phy_si = NULL;
   3232 	}
   3233 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3234 		if (adapter->fdir_si != NULL) {
   3235 			softint_disestablish(adapter->fdir_si);
   3236 			adapter->fdir_si = NULL;
   3237 		}
   3238 	}
   3239 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3240 		if (adapter->mbx_si != NULL) {
   3241 			softint_disestablish(adapter->mbx_si);
   3242 			adapter->mbx_si = NULL;
   3243 		}
   3244 	}
   3245 } /* ixgbe_free_softint */
   3246 
   3247 /************************************************************************
   3248  * ixgbe_detach - Device removal routine
   3249  *
   3250  *   Called when the driver is being removed.
   3251  *   Stops the adapter and deallocates all the resources
   3252  *   that were allocated for driver operation.
   3253  *
   3254  *   return 0 on success, positive on failure
   3255  ************************************************************************/
   3256 static int
   3257 ixgbe_detach(device_t dev, int flags)
   3258 {
   3259 	struct adapter *adapter = device_private(dev);
   3260 	struct rx_ring *rxr = adapter->rx_rings;
   3261 	struct tx_ring *txr = adapter->tx_rings;
   3262 	struct ixgbe_hw *hw = &adapter->hw;
   3263 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3264 	u32	ctrl_ext;
   3265 
   3266 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3267 	if (adapter->osdep.attached == false)
   3268 		return 0;
   3269 
   3270 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3271 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3272 		return (EBUSY);
   3273 	}
   3274 
   3275 	/* Stop the interface. Callouts are stopped in it. */
   3276 	ixgbe_ifstop(adapter->ifp, 1);
   3277 #if NVLAN > 0
   3278 	/* Make sure VLANs are not using driver */
   3279 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3280 		;	/* nothing to do: no VLANs */
   3281 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3282 		vlan_ifdetach(adapter->ifp);
   3283 	else {
   3284 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3285 		return (EBUSY);
   3286 	}
   3287 #endif
   3288 
   3289 	pmf_device_deregister(dev);
   3290 
   3291 	ether_ifdetach(adapter->ifp);
   3292 	/* Stop the adapter */
   3293 	IXGBE_CORE_LOCK(adapter);
   3294 	ixgbe_setup_low_power_mode(adapter);
   3295 	IXGBE_CORE_UNLOCK(adapter);
   3296 
   3297 	ixgbe_free_softint(adapter);
   3298 
   3299 	/* let hardware know driver is unloading */
   3300 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3301 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3302 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3303 
   3304 	callout_halt(&adapter->timer, NULL);
   3305 
   3306 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3307 		netmap_detach(adapter->ifp);
   3308 
   3309 	ixgbe_free_pci_resources(adapter);
   3310 #if 0	/* XXX the NetBSD port is probably missing something here */
   3311 	bus_generic_detach(dev);
   3312 #endif
   3313 	if_detach(adapter->ifp);
   3314 	if_percpuq_destroy(adapter->ipq);
   3315 
   3316 	sysctl_teardown(&adapter->sysctllog);
   3317 	evcnt_detach(&adapter->handleq);
   3318 	evcnt_detach(&adapter->req);
   3319 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3320 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3321 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3322 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3323 	evcnt_detach(&adapter->other_tx_dma_setup);
   3324 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3325 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3326 	evcnt_detach(&adapter->watchdog_events);
   3327 	evcnt_detach(&adapter->tso_err);
   3328 	evcnt_detach(&adapter->link_irq);
   3329 
   3330 	txr = adapter->tx_rings;
   3331 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3332 		evcnt_detach(&adapter->queues[i].irqs);
   3333 		evcnt_detach(&txr->no_desc_avail);
   3334 		evcnt_detach(&txr->total_packets);
   3335 		evcnt_detach(&txr->tso_tx);
   3336 #ifndef IXGBE_LEGACY_TX
   3337 		evcnt_detach(&txr->pcq_drops);
   3338 #endif
   3339 
   3340 		if (i < __arraycount(stats->mpc)) {
   3341 			evcnt_detach(&stats->mpc[i]);
   3342 			if (hw->mac.type == ixgbe_mac_82598EB)
   3343 				evcnt_detach(&stats->rnbc[i]);
   3344 		}
   3345 		if (i < __arraycount(stats->pxontxc)) {
   3346 			evcnt_detach(&stats->pxontxc[i]);
   3347 			evcnt_detach(&stats->pxonrxc[i]);
   3348 			evcnt_detach(&stats->pxofftxc[i]);
   3349 			evcnt_detach(&stats->pxoffrxc[i]);
   3350 			evcnt_detach(&stats->pxon2offc[i]);
   3351 		}
   3352 		if (i < __arraycount(stats->qprc)) {
   3353 			evcnt_detach(&stats->qprc[i]);
   3354 			evcnt_detach(&stats->qptc[i]);
   3355 			evcnt_detach(&stats->qbrc[i]);
   3356 			evcnt_detach(&stats->qbtc[i]);
   3357 			evcnt_detach(&stats->qprdc[i]);
   3358 		}
   3359 
   3360 		evcnt_detach(&rxr->rx_packets);
   3361 		evcnt_detach(&rxr->rx_bytes);
   3362 		evcnt_detach(&rxr->rx_copies);
   3363 		evcnt_detach(&rxr->no_jmbuf);
   3364 		evcnt_detach(&rxr->rx_discarded);
   3365 	}
   3366 	evcnt_detach(&stats->ipcs);
   3367 	evcnt_detach(&stats->l4cs);
   3368 	evcnt_detach(&stats->ipcs_bad);
   3369 	evcnt_detach(&stats->l4cs_bad);
   3370 	evcnt_detach(&stats->intzero);
   3371 	evcnt_detach(&stats->legint);
   3372 	evcnt_detach(&stats->crcerrs);
   3373 	evcnt_detach(&stats->illerrc);
   3374 	evcnt_detach(&stats->errbc);
   3375 	evcnt_detach(&stats->mspdc);
   3376 	if (hw->mac.type >= ixgbe_mac_X550)
   3377 		evcnt_detach(&stats->mbsdc);
   3378 	evcnt_detach(&stats->mpctotal);
   3379 	evcnt_detach(&stats->mlfc);
   3380 	evcnt_detach(&stats->mrfc);
   3381 	evcnt_detach(&stats->rlec);
   3382 	evcnt_detach(&stats->lxontxc);
   3383 	evcnt_detach(&stats->lxonrxc);
   3384 	evcnt_detach(&stats->lxofftxc);
   3385 	evcnt_detach(&stats->lxoffrxc);
   3386 
   3387 	/* Packet Reception Stats */
   3388 	evcnt_detach(&stats->tor);
   3389 	evcnt_detach(&stats->gorc);
   3390 	evcnt_detach(&stats->tpr);
   3391 	evcnt_detach(&stats->gprc);
   3392 	evcnt_detach(&stats->mprc);
   3393 	evcnt_detach(&stats->bprc);
   3394 	evcnt_detach(&stats->prc64);
   3395 	evcnt_detach(&stats->prc127);
   3396 	evcnt_detach(&stats->prc255);
   3397 	evcnt_detach(&stats->prc511);
   3398 	evcnt_detach(&stats->prc1023);
   3399 	evcnt_detach(&stats->prc1522);
   3400 	evcnt_detach(&stats->ruc);
   3401 	evcnt_detach(&stats->rfc);
   3402 	evcnt_detach(&stats->roc);
   3403 	evcnt_detach(&stats->rjc);
   3404 	evcnt_detach(&stats->mngprc);
   3405 	evcnt_detach(&stats->mngpdc);
   3406 	evcnt_detach(&stats->xec);
   3407 
   3408 	/* Packet Transmission Stats */
   3409 	evcnt_detach(&stats->gotc);
   3410 	evcnt_detach(&stats->tpt);
   3411 	evcnt_detach(&stats->gptc);
   3412 	evcnt_detach(&stats->bptc);
   3413 	evcnt_detach(&stats->mptc);
   3414 	evcnt_detach(&stats->mngptc);
   3415 	evcnt_detach(&stats->ptc64);
   3416 	evcnt_detach(&stats->ptc127);
   3417 	evcnt_detach(&stats->ptc255);
   3418 	evcnt_detach(&stats->ptc511);
   3419 	evcnt_detach(&stats->ptc1023);
   3420 	evcnt_detach(&stats->ptc1522);
   3421 
   3422 	ixgbe_free_transmit_structures(adapter);
   3423 	ixgbe_free_receive_structures(adapter);
   3424 	free(adapter->queues, M_DEVBUF);
   3425 	free(adapter->mta, M_DEVBUF);
   3426 
   3427 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3428 
   3429 	return (0);
   3430 } /* ixgbe_detach */
   3431 
   3432 /************************************************************************
   3433  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3434  *
   3435  *   Prepare the adapter/port for LPLU and/or WoL
   3436  ************************************************************************/
   3437 static int
   3438 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3439 {
   3440 	struct ixgbe_hw *hw = &adapter->hw;
   3441 	device_t        dev = adapter->dev;
   3442 	s32             error = 0;
   3443 
   3444 	KASSERT(mutex_owned(&adapter->core_mtx));
   3445 
   3446 	/* Limit power management flow to X550EM baseT */
   3447 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3448 	    hw->phy.ops.enter_lplu) {
   3449 		/* X550EM baseT adapters need a special LPLU flow */
   3450 		hw->phy.reset_disable = true;
   3451 		ixgbe_stop(adapter);
   3452 		error = hw->phy.ops.enter_lplu(hw);
   3453 		if (error)
   3454 			device_printf(dev,
   3455 			    "Error entering LPLU: %d\n", error);
   3456 		hw->phy.reset_disable = false;
   3457 	} else {
   3458 		/* Just stop for other adapters */
   3459 		ixgbe_stop(adapter);
   3460 	}
   3461 
   3462 	if (!hw->wol_enabled) {
   3463 		ixgbe_set_phy_power(hw, FALSE);
   3464 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3465 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3466 	} else {
   3467 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3468 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3469 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3470 
   3471 		/*
   3472 		 * Clear Wake Up Status register to prevent any previous wakeup
   3473 		 * events from waking us up immediately after we suspend.
   3474 		 */
   3475 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3476 
   3477 		/*
   3478 		 * Program the Wakeup Filter Control register with user filter
   3479 		 * settings
   3480 		 */
   3481 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3482 
   3483 		/* Enable wakeups and power management in Wakeup Control */
   3484 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3485 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3486 
   3487 	}
   3488 
   3489 	return error;
   3490 } /* ixgbe_setup_low_power_mode */
   3491 
   3492 /************************************************************************
   3493  * ixgbe_shutdown - Shutdown entry point
   3494  ************************************************************************/
   3495 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3496 static int
   3497 ixgbe_shutdown(device_t dev)
   3498 {
   3499 	struct adapter *adapter = device_private(dev);
   3500 	int error = 0;
   3501 
   3502 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3503 
   3504 	IXGBE_CORE_LOCK(adapter);
   3505 	error = ixgbe_setup_low_power_mode(adapter);
   3506 	IXGBE_CORE_UNLOCK(adapter);
   3507 
   3508 	return (error);
   3509 } /* ixgbe_shutdown */
   3510 #endif
   3511 
   3512 /************************************************************************
   3513  * ixgbe_suspend
   3514  *
   3515  *   From D0 to D3
   3516  ************************************************************************/
   3517 static bool
   3518 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3519 {
   3520 	struct adapter *adapter = device_private(dev);
   3521 	int            error = 0;
   3522 
   3523 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3524 
   3525 	IXGBE_CORE_LOCK(adapter);
   3526 
   3527 	error = ixgbe_setup_low_power_mode(adapter);
   3528 
   3529 	IXGBE_CORE_UNLOCK(adapter);
   3530 
    3531 	return (error == 0);	/* pmf expects true on success */
   3532 } /* ixgbe_suspend */
   3533 
   3534 /************************************************************************
   3535  * ixgbe_resume
   3536  *
   3537  *   From D3 to D0
   3538  ************************************************************************/
   3539 static bool
   3540 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3541 {
   3542 	struct adapter  *adapter = device_private(dev);
   3543 	struct ifnet    *ifp = adapter->ifp;
   3544 	struct ixgbe_hw *hw = &adapter->hw;
   3545 	u32             wus;
   3546 
   3547 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3548 
   3549 	IXGBE_CORE_LOCK(adapter);
   3550 
   3551 	/* Read & clear WUS register */
   3552 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3553 	if (wus)
   3554 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3555 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3556 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3557 	/* And clear WUFC until next low-power transition */
   3558 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3559 
   3560 	/*
   3561 	 * Required after D3->D0 transition;
   3562 	 * will re-advertise all previous advertised speeds
   3563 	 */
   3564 	if (ifp->if_flags & IFF_UP)
   3565 		ixgbe_init_locked(adapter);
   3566 
   3567 	IXGBE_CORE_UNLOCK(adapter);
   3568 
   3569 	return true;
   3570 } /* ixgbe_resume */
   3571 
   3572 /*
   3573  * Set the various hardware offload abilities.
   3574  *
   3575  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3576  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3577  * mbuf offload flags the driver will understand.
   3578  */
   3579 static void
   3580 ixgbe_set_if_hwassist(struct adapter *adapter)
   3581 {
   3582 	/* XXX */
   3583 }
   3584 
   3585 /************************************************************************
   3586  * ixgbe_init_locked - Init entry point
   3587  *
   3588  *   Used in two ways: It is used by the stack as an init
   3589  *   entry point in network interface structure. It is also
   3590  *   used by the driver as a hw/sw initialization routine to
   3591  *   get to a consistent state.
   3592  *
    3593  *   Callers must hold the core lock; on failure the adapter is stopped.
   3594  ************************************************************************/
   3595 static void
   3596 ixgbe_init_locked(struct adapter *adapter)
   3597 {
   3598 	struct ifnet   *ifp = adapter->ifp;
   3599 	device_t 	dev = adapter->dev;
   3600 	struct ixgbe_hw *hw = &adapter->hw;
   3601 	struct tx_ring  *txr;
   3602 	struct rx_ring  *rxr;
   3603 	u32		txdctl, mhadd;
   3604 	u32		rxdctl, rxctrl;
   3605 	u32             ctrl_ext;
   3606 	int             err = 0;
   3607 
   3608 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3609 
   3610 	KASSERT(mutex_owned(&adapter->core_mtx));
   3611 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3612 
   3613 	hw->adapter_stopped = FALSE;
   3614 	ixgbe_stop_adapter(hw);
    3615 	callout_stop(&adapter->timer);
   3616 
   3617 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3618 	adapter->max_frame_size =
   3619 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3620 
   3621 	/* Queue indices may change with IOV mode */
   3622 	ixgbe_align_all_queue_indices(adapter);
   3623 
   3624 	/* reprogram the RAR[0] in case user changed it. */
   3625 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3626 
   3627 	/* Get the latest mac address, User can use a LAA */
   3628 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3629 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3630 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3631 	hw->addr_ctrl.rar_used_count = 1;
   3632 
   3633 	/* Set hardware offload abilities from ifnet flags */
   3634 	ixgbe_set_if_hwassist(adapter);
   3635 
   3636 	/* Prepare transmit descriptors and buffers */
   3637 	if (ixgbe_setup_transmit_structures(adapter)) {
   3638 		device_printf(dev, "Could not setup transmit structures\n");
   3639 		ixgbe_stop(adapter);
   3640 		return;
   3641 	}
   3642 
   3643 	ixgbe_init_hw(hw);
   3644 	ixgbe_initialize_iov(adapter);
   3645 	ixgbe_initialize_transmit_units(adapter);
   3646 
   3647 	/* Setup Multicast table */
   3648 	ixgbe_set_multi(adapter);
   3649 
   3650 	/* Determine the correct mbuf pool, based on frame size */
   3651 	if (adapter->max_frame_size <= MCLBYTES)
   3652 		adapter->rx_mbuf_sz = MCLBYTES;
   3653 	else
   3654 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3655 
   3656 	/* Prepare receive descriptors and buffers */
   3657 	if (ixgbe_setup_receive_structures(adapter)) {
   3658 		device_printf(dev, "Could not setup receive structures\n");
   3659 		ixgbe_stop(adapter);
   3660 		return;
   3661 	}
   3662 
   3663 	/* Configure RX settings */
   3664 	ixgbe_initialize_receive_units(adapter);
   3665 
   3666 	/* Enable SDP & MSI-X interrupts based on adapter */
   3667 	ixgbe_config_gpie(adapter);
   3668 
   3669 	/* Set MTU size */
   3670 	if (ifp->if_mtu > ETHERMTU) {
   3671 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3672 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3673 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3674 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3675 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3676 	}
   3677 
   3678 	/* Now enable all the queues */
   3679 	for (int i = 0; i < adapter->num_queues; i++) {
   3680 		txr = &adapter->tx_rings[i];
   3681 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3682 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3683 		/* Set WTHRESH to 8, burst writeback */
   3684 		txdctl |= (8 << 16);
   3685 		/*
   3686 		 * When the internal queue falls below PTHRESH (32),
   3687 		 * start prefetching as long as there are at least
   3688 		 * HTHRESH (1) buffers ready. The values are taken
   3689 		 * from the Intel linux driver 3.8.21.
   3690 		 * Prefetching enables tx line rate even with 1 queue.
   3691 		 */
   3692 		txdctl |= (32 << 0) | (1 << 8);
   3693 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3694 	}
   3695 
   3696 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3697 		rxr = &adapter->rx_rings[i];
   3698 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3699 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3700 			/*
   3701 			 * PTHRESH = 21
   3702 			 * HTHRESH = 4
   3703 			 * WTHRESH = 8
   3704 			 */
   3705 			rxdctl &= ~0x3FFFFF;
   3706 			rxdctl |= 0x080420;
   3707 		}
   3708 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3709 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
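         		/*
         		 * Wait for the hardware to acknowledge the enable bit.
         		 * Note that j is not reset per queue, so the total wait
         		 * across all rings is bounded to roughly 10ms.
         		 */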
   3710 		for (; j < 10; j++) {
   3711 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3712 			    IXGBE_RXDCTL_ENABLE)
   3713 				break;
   3714 			else
   3715 				msec_delay(1);
   3716 		}
   3717 		wmb();
   3718 
   3719 		/*
   3720 		 * In netmap mode, we must preserve the buffers made
   3721 		 * available to userspace before the if_init()
   3722 		 * (this is true by default on the TX side, because
   3723 		 * init makes all buffers available to userspace).
   3724 		 *
   3725 		 * netmap_reset() and the device specific routines
   3726 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3727 		 * buffers at the end of the NIC ring, so here we
   3728 		 * must set the RDT (tail) register to make sure
   3729 		 * they are not overwritten.
   3730 		 *
   3731 		 * In this driver the NIC ring starts at RDH = 0,
   3732 		 * RDT points to the last slot available for reception (?),
   3733 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3734 		 */
   3735 #ifdef DEV_NETMAP
   3736 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3737 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3738 			struct netmap_adapter *na = NA(adapter->ifp);
   3739 			struct netmap_kring *kring = &na->rx_rings[i];
   3740 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3741 
   3742 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3743 		} else
   3744 #endif /* DEV_NETMAP */
   3745 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3746 			    adapter->num_rx_desc - 1);
   3747 	}
   3748 
   3749 	/* Enable Receive engine */
   3750 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3751 	if (hw->mac.type == ixgbe_mac_82598EB)
   3752 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3753 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3754 	ixgbe_enable_rx_dma(hw, rxctrl);
   3755 
   3756 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3757 
   3758 	/* Set up MSI-X routing */
   3759 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3760 		ixgbe_configure_ivars(adapter);
   3761 		/* Set up auto-mask */
   3762 		if (hw->mac.type == ixgbe_mac_82598EB)
   3763 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3764 		else {
   3765 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3766 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3767 		}
   3768 	} else {  /* Simple settings for Legacy/MSI */
   3769 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3770 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3771 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3772 	}
   3773 
   3774 	ixgbe_init_fdir(adapter);
   3775 
   3776 	/*
   3777 	 * Check on any SFP devices that
   3778 	 * need to be kick-started
   3779 	 */
   3780 	if (hw->phy.type == ixgbe_phy_none) {
   3781 		err = hw->phy.ops.identify(hw);
   3782 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3783 			device_printf(dev,
    3784 			    "Unsupported SFP+ module type was detected.\n");
    3785 			return;
    3786 		}
   3787 	}
   3788 
   3789 	/* Set moderation on the Link interrupt */
   3790 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3791 
   3792 	/* Config/Enable Link */
   3793 	ixgbe_config_link(adapter);
   3794 
   3795 	/* Hardware Packet Buffer & Flow Control setup */
   3796 	ixgbe_config_delay_values(adapter);
   3797 
   3798 	/* Initialize the FC settings */
   3799 	ixgbe_start_hw(hw);
   3800 
   3801 	/* Set up VLAN support and filter */
   3802 	ixgbe_setup_vlan_hw_support(adapter);
   3803 
   3804 	/* Setup DMA Coalescing */
   3805 	ixgbe_config_dmac(adapter);
   3806 
   3807 	/* And now turn on interrupts */
   3808 	ixgbe_enable_intr(adapter);
   3809 
   3810 	/* Enable the use of the MBX by the VF's */
   3811 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3812 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3813 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3814 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3815 	}
   3816 
   3817 	/* Update saved flags. See ixgbe_ifflags_cb() */
   3818 	adapter->if_flags = ifp->if_flags;
   3819 
   3820 	/* Now inform the stack we're ready */
   3821 	ifp->if_flags |= IFF_RUNNING;
   3822 
   3823 	return;
   3824 } /* ixgbe_init_locked */
   3825 
   3826 /************************************************************************
   3827  * ixgbe_init
   3828  ************************************************************************/
   3829 static int
   3830 ixgbe_init(struct ifnet *ifp)
   3831 {
   3832 	struct adapter *adapter = ifp->if_softc;
   3833 
   3834 	IXGBE_CORE_LOCK(adapter);
   3835 	ixgbe_init_locked(adapter);
   3836 	IXGBE_CORE_UNLOCK(adapter);
   3837 
   3838 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3839 } /* ixgbe_init */
   3840 
   3841 /************************************************************************
   3842  * ixgbe_set_ivar
   3843  *
   3844  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3845  *     (yes this is all very magic and confusing :)
   3846  *    - entry is the register array entry
   3847  *    - vector is the MSI-X vector for this queue
   3848  *    - type is RX/TX/MISC
   3849  ************************************************************************/
   3850 static void
   3851 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3852 {
   3853 	struct ixgbe_hw *hw = &adapter->hw;
   3854 	u32 ivar, index;
   3855 
   3856 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3857 
   3858 	switch (hw->mac.type) {
   3859 
   3860 	case ixgbe_mac_82598EB:
   3861 		if (type == -1)
   3862 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3863 		else
   3864 			entry += (type * 64);
   3865 		index = (entry >> 2) & 0x1F;
   3866 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3867 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3868 		ivar |= (vector << (8 * (entry & 0x3)));
   3869 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3870 		break;
   3871 
   3872 	case ixgbe_mac_82599EB:
   3873 	case ixgbe_mac_X540:
   3874 	case ixgbe_mac_X550:
   3875 	case ixgbe_mac_X550EM_x:
   3876 	case ixgbe_mac_X550EM_a:
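         		/*
         		 * On these MACs each IVAR register covers two queues:
         		 * (entry & 1) selects the 16-bit half and type selects the
         		 * RX (offset 0) or TX (offset 8) byte within it, while
         		 * IVAR_MISC holds the two "other cause" vectors.
         		 */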
   3877 		if (type == -1) { /* MISC IVAR */
   3878 			index = (entry & 1) * 8;
   3879 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3880 			ivar &= ~(0xFF << index);
   3881 			ivar |= (vector << index);
   3882 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3883 		} else {	/* RX/TX IVARS */
   3884 			index = (16 * (entry & 1)) + (8 * type);
   3885 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3886 			ivar &= ~(0xFF << index);
   3887 			ivar |= (vector << index);
   3888 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   3889 		}
    3890 		break;
   3891 	default:
   3892 		break;
   3893 	}
   3894 } /* ixgbe_set_ivar */
   3895 
   3896 /************************************************************************
   3897  * ixgbe_configure_ivars
   3898  ************************************************************************/
   3899 static void
   3900 ixgbe_configure_ivars(struct adapter *adapter)
   3901 {
   3902 	struct ix_queue *que = adapter->queues;
   3903 	u32             newitr;
   3904 
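         	/*
         	 * Convert the global max interrupt rate into an EITR value:
         	 * 4000000 / rate is the interval in 2us units, already shifted
         	 * into the bits 3..11 field, mirroring the conversion done in
         	 * the interrupt rate sysctl handler above.
         	 */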
   3905 	if (ixgbe_max_interrupt_rate > 0)
   3906 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3907 	else {
   3908 		/*
   3909 		 * Disable DMA coalescing if interrupt moderation is
   3910 		 * disabled.
   3911 		 */
   3912 		adapter->dmac = 0;
   3913 		newitr = 0;
   3914 	}
   3915 
    3916 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3917 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3918 		struct tx_ring *txr = &adapter->tx_rings[i];
   3919 		/* First the RX queue entry */
    3920 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3921 		/* ... and the TX */
   3922 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3923 		/* Set an Initial EITR value */
   3924 		ixgbe_eitr_write(que, newitr);
   3925 	}
   3926 
   3927 	/* For the Link interrupt */
    3928 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3929 } /* ixgbe_configure_ivars */
   3930 
   3931 /************************************************************************
   3932  * ixgbe_config_gpie
   3933  ************************************************************************/
   3934 static void
   3935 ixgbe_config_gpie(struct adapter *adapter)
   3936 {
   3937 	struct ixgbe_hw *hw = &adapter->hw;
   3938 	u32             gpie;
   3939 
   3940 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3941 
   3942 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3943 		/* Enable Enhanced MSI-X mode */
   3944 		gpie |= IXGBE_GPIE_MSIX_MODE
   3945 		     |  IXGBE_GPIE_EIAME
   3946 		     |  IXGBE_GPIE_PBA_SUPPORT
   3947 		     |  IXGBE_GPIE_OCD;
   3948 	}
   3949 
   3950 	/* Fan Failure Interrupt */
   3951 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3952 		gpie |= IXGBE_SDP1_GPIEN;
   3953 
   3954 	/* Thermal Sensor Interrupt */
   3955 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3956 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3957 
   3958 	/* Link detection */
   3959 	switch (hw->mac.type) {
   3960 	case ixgbe_mac_82599EB:
   3961 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3962 		break;
   3963 	case ixgbe_mac_X550EM_x:
   3964 	case ixgbe_mac_X550EM_a:
   3965 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3966 		break;
   3967 	default:
   3968 		break;
   3969 	}
   3970 
   3971 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3972 
   3973 	return;
   3974 } /* ixgbe_config_gpie */
   3975 
   3976 /************************************************************************
   3977  * ixgbe_config_delay_values
   3978  *
   3979  *   Requires adapter->max_frame_size to be set.
   3980  ************************************************************************/
   3981 static void
   3982 ixgbe_config_delay_values(struct adapter *adapter)
   3983 {
   3984 	struct ixgbe_hw *hw = &adapter->hw;
   3985 	u32             rxpb, frame, size, tmp;
   3986 
   3987 	frame = adapter->max_frame_size;
   3988 
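         	/*
         	 * The high watermark is the RX packet buffer size (in KB) minus
         	 * the per-MAC delay value for one max-sized frame; the low
         	 * watermark comes from the corresponding "low DV" formula. Only
         	 * traffic class 0 is configured here.
         	 */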
   3989 	/* Calculate High Water */
   3990 	switch (hw->mac.type) {
   3991 	case ixgbe_mac_X540:
   3992 	case ixgbe_mac_X550:
   3993 	case ixgbe_mac_X550EM_x:
   3994 	case ixgbe_mac_X550EM_a:
   3995 		tmp = IXGBE_DV_X540(frame, frame);
   3996 		break;
   3997 	default:
   3998 		tmp = IXGBE_DV(frame, frame);
   3999 		break;
   4000 	}
   4001 	size = IXGBE_BT2KB(tmp);
   4002 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4003 	hw->fc.high_water[0] = rxpb - size;
   4004 
   4005 	/* Now calculate Low Water */
   4006 	switch (hw->mac.type) {
   4007 	case ixgbe_mac_X540:
   4008 	case ixgbe_mac_X550:
   4009 	case ixgbe_mac_X550EM_x:
   4010 	case ixgbe_mac_X550EM_a:
   4011 		tmp = IXGBE_LOW_DV_X540(frame);
   4012 		break;
   4013 	default:
   4014 		tmp = IXGBE_LOW_DV(frame);
   4015 		break;
   4016 	}
   4017 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4018 
   4019 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4020 	hw->fc.send_xon = TRUE;
   4021 } /* ixgbe_config_delay_values */
   4022 
   4023 /************************************************************************
   4024  * ixgbe_set_multi - Multicast Update
   4025  *
   4026  *   Called whenever multicast address list is updated.
   4027  ************************************************************************/
   4028 static void
   4029 ixgbe_set_multi(struct adapter *adapter)
   4030 {
   4031 	struct ixgbe_mc_addr	*mta;
   4032 	struct ifnet		*ifp = adapter->ifp;
   4033 	u8			*update_ptr;
   4034 	int			mcnt = 0;
   4035 	u32			fctrl;
   4036 	struct ethercom		*ec = &adapter->osdep.ec;
   4037 	struct ether_multi	*enm;
   4038 	struct ether_multistep	step;
   4039 
   4040 	KASSERT(mutex_owned(&adapter->core_mtx));
   4041 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4042 
   4043 	mta = adapter->mta;
   4044 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4045 
   4046 	ifp->if_flags &= ~IFF_ALLMULTI;
   4047 	ETHER_LOCK(ec);
   4048 	ETHER_FIRST_MULTI(step, ec, enm);
   4049 	while (enm != NULL) {
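         		/*
         		 * A range of addresses (addrlo != addrhi) cannot be
         		 * represented in the MTA, so treat it like an overflowing
         		 * list and fall back to ALLMULTI.
         		 */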
   4050 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4051 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4052 			ETHER_ADDR_LEN) != 0)) {
   4053 			ifp->if_flags |= IFF_ALLMULTI;
   4054 			break;
   4055 		}
   4056 		bcopy(enm->enm_addrlo,
   4057 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4058 		mta[mcnt].vmdq = adapter->pool;
   4059 		mcnt++;
   4060 		ETHER_NEXT_MULTI(step, enm);
   4061 	}
   4062 	ETHER_UNLOCK(ec);
   4063 
   4064 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4065 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4066 	if (ifp->if_flags & IFF_PROMISC)
   4067 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4068 	else if (ifp->if_flags & IFF_ALLMULTI) {
   4069 		fctrl |= IXGBE_FCTRL_MPE;
   4070 	}
   4071 
   4072 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4073 
   4074 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4075 		update_ptr = (u8 *)mta;
   4076 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4077 		    ixgbe_mc_array_itr, TRUE);
   4078 	}
   4079 
   4080 	return;
   4081 } /* ixgbe_set_multi */
   4082 
   4083 /************************************************************************
   4084  * ixgbe_mc_array_itr
   4085  *
   4086  *   An iterator function needed by the multicast shared code.
   4087  *   It feeds the shared code routine the addresses in the
   4088  *   array of ixgbe_set_multi() one by one.
   4089  ************************************************************************/
   4090 static u8 *
   4091 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4092 {
   4093 	struct ixgbe_mc_addr *mta;
   4094 
   4095 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4096 	*vmdq = mta->vmdq;
   4097 
   4098 	*update_ptr = (u8*)(mta + 1);
   4099 
   4100 	return (mta->addr);
   4101 } /* ixgbe_mc_array_itr */
   4102 
   4103 /************************************************************************
   4104  * ixgbe_local_timer - Timer routine
   4105  *
   4106  *   Checks for link status, updates statistics,
   4107  *   and runs the watchdog check.
   4108  ************************************************************************/
   4109 static void
   4110 ixgbe_local_timer(void *arg)
   4111 {
   4112 	struct adapter *adapter = arg;
   4113 
   4114 	IXGBE_CORE_LOCK(adapter);
   4115 	ixgbe_local_timer1(adapter);
   4116 	IXGBE_CORE_UNLOCK(adapter);
   4117 }
   4118 
   4119 static void
   4120 ixgbe_local_timer1(void *arg)
   4121 {
   4122 	struct adapter	*adapter = arg;
   4123 	device_t	dev = adapter->dev;
   4124 	struct ix_queue *que = adapter->queues;
   4125 	u64		queues = 0;
   4126 	int		hung = 0;
   4127 
   4128 	KASSERT(mutex_owned(&adapter->core_mtx));
   4129 
   4130 	/* Check for pluggable optics */
   4131 	if (adapter->sfp_probe)
   4132 		if (!ixgbe_sfp_probe(adapter))
   4133 			goto out; /* Nothing to do */
   4134 
   4135 	ixgbe_update_link_status(adapter);
   4136 	ixgbe_update_stats_counters(adapter);
   4137 
   4138 	/*
   4139 	 * Check the TX queues status
   4140 	 *      - mark hung queues so we don't schedule on them
   4141 	 *      - watchdog only if all queues show hung
   4142 	 */
   4143 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4144 		/* Keep track of queues with work for soft irq */
   4145 		if (que->txr->busy)
   4146 			queues |= ((u64)1 << que->me);
   4147 		/*
   4148 		 * Each time txeof runs without cleaning, but there
   4149 		 * are uncleaned descriptors it increments busy. If
   4150 		 * we get to the MAX we declare it hung.
   4151 		 */
   4152 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4153 			++hung;
   4154 			/* Mark the queue as inactive */
   4155 			adapter->active_queues &= ~((u64)1 << que->me);
   4156 			continue;
   4157 		} else {
   4158 			/* Check if we've come back from hung */
   4159 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4160 				adapter->active_queues |= ((u64)1 << que->me);
   4161 		}
   4162 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4163 			device_printf(dev,
   4164 			    "Warning queue %d appears to be hung!\n", i);
   4165 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4166 			++hung;
   4167 		}
   4168 	}
   4169 
    4170 	/* Only truly watchdog if all queues show hung */
   4171 	if (hung == adapter->num_queues)
   4172 		goto watchdog;
   4173 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4174 		ixgbe_rearm_queues(adapter, queues);
   4175 	}
   4176 
   4177 out:
   4178 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4179 	return;
   4180 
   4181 watchdog:
   4182 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4183 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4184 	adapter->watchdog_events.ev_count++;
   4185 	ixgbe_init_locked(adapter);
   4186 } /* ixgbe_local_timer */
   4187 
   4188 /************************************************************************
   4189  * ixgbe_sfp_probe
   4190  *
   4191  *   Determine if a port had optics inserted.
   4192  ************************************************************************/
   4193 static bool
   4194 ixgbe_sfp_probe(struct adapter *adapter)
   4195 {
   4196 	struct ixgbe_hw	*hw = &adapter->hw;
   4197 	device_t	dev = adapter->dev;
   4198 	bool		result = FALSE;
   4199 
   4200 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4201 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4202 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4203 		if (ret)
   4204 			goto out;
   4205 		ret = hw->phy.ops.reset(hw);
   4206 		adapter->sfp_probe = FALSE;
   4207 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4208 			device_printf(dev, "Unsupported SFP+ module detected!\n");
    4209 			device_printf(dev,
    4210 			    "Reload driver with supported module.\n");
    4211 			goto out;
   4212 		} else
   4213 			device_printf(dev, "SFP+ module detected!\n");
   4214 		/* We now have supported optics */
   4215 		result = TRUE;
   4216 	}
   4217 out:
   4218 
   4219 	return (result);
   4220 } /* ixgbe_sfp_probe */
   4221 
   4222 /************************************************************************
   4223  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4224  ************************************************************************/
   4225 static void
   4226 ixgbe_handle_mod(void *context)
   4227 {
   4228 	struct adapter  *adapter = context;
   4229 	struct ixgbe_hw *hw = &adapter->hw;
   4230 	device_t	dev = adapter->dev;
   4231 	u32             err, cage_full = 0;
   4232 
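         	/*
         	 * Adapters flagged with need_crosstalk_fix can see spurious
         	 * module-insertion interrupts (presumably from crosstalk on the
         	 * SDP pins, hence the name), so check the ESDP "cage full" bit
         	 * first and bail out if no module is actually present.
         	 */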
   4233 	if (adapter->hw.need_crosstalk_fix) {
   4234 		switch (hw->mac.type) {
   4235 		case ixgbe_mac_82599EB:
   4236 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4237 			    IXGBE_ESDP_SDP2;
   4238 			break;
   4239 		case ixgbe_mac_X550EM_x:
   4240 		case ixgbe_mac_X550EM_a:
   4241 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4242 			    IXGBE_ESDP_SDP0;
   4243 			break;
   4244 		default:
   4245 			break;
   4246 		}
   4247 
   4248 		if (!cage_full)
   4249 			return;
   4250 	}
   4251 
   4252 	err = hw->phy.ops.identify_sfp(hw);
   4253 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4254 		device_printf(dev,
   4255 		    "Unsupported SFP+ module type was detected.\n");
   4256 		return;
   4257 	}
   4258 
   4259 	err = hw->mac.ops.setup_sfp(hw);
   4260 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4261 		device_printf(dev,
   4262 		    "Setup failure - unsupported SFP+ module type.\n");
   4263 		return;
   4264 	}
   4265 	softint_schedule(adapter->msf_si);
   4266 } /* ixgbe_handle_mod */
   4267 
   4268 
   4269 /************************************************************************
   4270  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4271  ************************************************************************/
   4272 static void
   4273 ixgbe_handle_msf(void *context)
   4274 {
   4275 	struct adapter  *adapter = context;
   4276 	struct ixgbe_hw *hw = &adapter->hw;
   4277 	u32             autoneg;
   4278 	bool            negotiate;
   4279 
   4280 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4281 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4282 
   4283 	autoneg = hw->phy.autoneg_advertised;
   4284 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4285 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4286 	else
   4287 		negotiate = 0;
   4288 	if (hw->mac.ops.setup_link)
   4289 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4290 
   4291 	/* Adjust media types shown in ifconfig */
   4292 	ifmedia_removeall(&adapter->media);
   4293 	ixgbe_add_media_types(adapter);
   4294 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4295 } /* ixgbe_handle_msf */
   4296 
   4297 /************************************************************************
   4298  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4299  ************************************************************************/
   4300 static void
   4301 ixgbe_handle_phy(void *context)
   4302 {
   4303 	struct adapter  *adapter = context;
   4304 	struct ixgbe_hw *hw = &adapter->hw;
   4305 	int error;
   4306 
   4307 	error = hw->phy.ops.handle_lasi(hw);
   4308 	if (error == IXGBE_ERR_OVERTEMP)
   4309 		device_printf(adapter->dev,
    4310 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
    4311 		    "PHY will downshift to lower power state!\n");
   4312 	else if (error)
   4313 		device_printf(adapter->dev,
   4314 		    "Error handling LASI interrupt: %d\n", error);
   4315 } /* ixgbe_handle_phy */
   4316 
   4317 static void
   4318 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4319 {
   4320 	struct adapter *adapter = ifp->if_softc;
   4321 
   4322 	IXGBE_CORE_LOCK(adapter);
   4323 	ixgbe_stop(adapter);
   4324 	IXGBE_CORE_UNLOCK(adapter);
   4325 }
   4326 
   4327 /************************************************************************
   4328  * ixgbe_stop - Stop the hardware
   4329  *
   4330  *   Disables all traffic on the adapter by issuing a
   4331  *   global reset on the MAC and deallocates TX/RX buffers.
   4332  ************************************************************************/
   4333 static void
   4334 ixgbe_stop(void *arg)
   4335 {
   4336 	struct ifnet    *ifp;
   4337 	struct adapter  *adapter = arg;
   4338 	struct ixgbe_hw *hw = &adapter->hw;
   4339 
   4340 	ifp = adapter->ifp;
   4341 
   4342 	KASSERT(mutex_owned(&adapter->core_mtx));
   4343 
   4344 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4345 	ixgbe_disable_intr(adapter);
   4346 	callout_stop(&adapter->timer);
   4347 
    4348 	/* Let the stack know... */
   4349 	ifp->if_flags &= ~IFF_RUNNING;
   4350 
   4351 	ixgbe_reset_hw(hw);
   4352 	hw->adapter_stopped = FALSE;
   4353 	ixgbe_stop_adapter(hw);
   4354 	if (hw->mac.type == ixgbe_mac_82599EB)
   4355 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4356 	/* Turn off the laser - noop with no optics */
   4357 	ixgbe_disable_tx_laser(hw);
   4358 
   4359 	/* Update the stack */
   4360 	adapter->link_up = FALSE;
   4361 	ixgbe_update_link_status(adapter);
   4362 
   4363 	/* reprogram the RAR[0] in case user changed it. */
   4364 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4365 
   4366 	return;
   4367 } /* ixgbe_stop */
   4368 
   4369 /************************************************************************
   4370  * ixgbe_update_link_status - Update OS on link state
   4371  *
   4372  * Note: Only updates the OS on the cached link state.
   4373  *       The real check of the hardware only happens with
   4374  *       a link interrupt.
   4375  ************************************************************************/
   4376 static void
   4377 ixgbe_update_link_status(struct adapter *adapter)
   4378 {
   4379 	struct ifnet	*ifp = adapter->ifp;
   4380 	device_t        dev = adapter->dev;
   4381 	struct ixgbe_hw *hw = &adapter->hw;
   4382 
   4383 	if (adapter->link_up) {
   4384 		if (adapter->link_active == FALSE) {
   4385 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
    4386 				/*
    4387 				 * Discard the counts for both MAC Local Fault
    4388 				 * and Remote Fault because those registers
    4389 				 * are valid only when the link is up and
    4390 				 * running at 10Gbps.
    4391 				 */
   4392 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4393 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4394 			}
   4395 
   4396 			if (bootverbose) {
   4397 				const char *bpsmsg;
   4398 
   4399 				switch (adapter->link_speed) {
   4400 				case IXGBE_LINK_SPEED_10GB_FULL:
   4401 					bpsmsg = "10 Gbps";
   4402 					break;
   4403 				case IXGBE_LINK_SPEED_5GB_FULL:
   4404 					bpsmsg = "5 Gbps";
   4405 					break;
   4406 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4407 					bpsmsg = "2.5 Gbps";
   4408 					break;
   4409 				case IXGBE_LINK_SPEED_1GB_FULL:
   4410 					bpsmsg = "1 Gbps";
   4411 					break;
   4412 				case IXGBE_LINK_SPEED_100_FULL:
   4413 					bpsmsg = "100 Mbps";
   4414 					break;
   4415 				case IXGBE_LINK_SPEED_10_FULL:
   4416 					bpsmsg = "10 Mbps";
   4417 					break;
   4418 				default:
   4419 					bpsmsg = "unknown speed";
   4420 					break;
   4421 				}
    4422 				device_printf(dev, "Link is up %s %s\n",
   4423 				    bpsmsg, "Full Duplex");
   4424 			}
   4425 			adapter->link_active = TRUE;
   4426 			/* Update any Flow Control changes */
   4427 			ixgbe_fc_enable(&adapter->hw);
   4428 			/* Update DMA coalescing config */
   4429 			ixgbe_config_dmac(adapter);
   4430 			if_link_state_change(ifp, LINK_STATE_UP);
   4431 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4432 				ixgbe_ping_all_vfs(adapter);
   4433 		}
   4434 	} else { /* Link down */
   4435 		if (adapter->link_active == TRUE) {
   4436 			if (bootverbose)
   4437 				device_printf(dev, "Link is Down\n");
   4438 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4439 			adapter->link_active = FALSE;
   4440 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4441 				ixgbe_ping_all_vfs(adapter);
   4442 		}
   4443 	}
   4444 
   4445 	return;
   4446 } /* ixgbe_update_link_status */
   4447 
   4448 /************************************************************************
   4449  * ixgbe_config_dmac - Configure DMA Coalescing
   4450  ************************************************************************/
   4451 static void
   4452 ixgbe_config_dmac(struct adapter *adapter)
   4453 {
   4454 	struct ixgbe_hw *hw = &adapter->hw;
   4455 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4456 
   4457 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4458 		return;
   4459 
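	/* Reprogram only if the watchdog timer or link speed changed */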
   4460 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4461 	    dcfg->link_speed ^ adapter->link_speed) {
   4462 		dcfg->watchdog_timer = adapter->dmac;
   4463 		dcfg->fcoe_en = false;
   4464 		dcfg->link_speed = adapter->link_speed;
   4465 		dcfg->num_tcs = 1;
   4466 
   4467 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4468 		    dcfg->watchdog_timer, dcfg->link_speed);
   4469 
   4470 		hw->mac.ops.dmac_config(hw);
   4471 	}
   4472 } /* ixgbe_config_dmac */
   4473 
   4474 /************************************************************************
   4475  * ixgbe_enable_intr
   4476  ************************************************************************/
   4477 static void
   4478 ixgbe_enable_intr(struct adapter *adapter)
   4479 {
   4480 	struct ixgbe_hw	*hw = &adapter->hw;
   4481 	struct ix_queue	*que = adapter->queues;
   4482 	u32		mask, fwsm;
   4483 
   4484 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4485 
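	/*
	 * Start from the full enable mask minus the per-queue bits; the
	 * queue interrupts are enabled individually at the end.
	 */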
   4486 	switch (adapter->hw.mac.type) {
   4487 	case ixgbe_mac_82599EB:
   4488 		mask |= IXGBE_EIMS_ECC;
   4489 		/* Temperature sensor on some adapters */
   4490 		mask |= IXGBE_EIMS_GPI_SDP0;
   4491 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4492 		mask |= IXGBE_EIMS_GPI_SDP1;
   4493 		mask |= IXGBE_EIMS_GPI_SDP2;
   4494 		break;
   4495 	case ixgbe_mac_X540:
   4496 		/* Detect if Thermal Sensor is enabled */
   4497 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4498 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4499 			mask |= IXGBE_EIMS_TS;
   4500 		mask |= IXGBE_EIMS_ECC;
   4501 		break;
   4502 	case ixgbe_mac_X550:
   4503 		/* MAC thermal sensor is automatically enabled */
   4504 		mask |= IXGBE_EIMS_TS;
   4505 		mask |= IXGBE_EIMS_ECC;
   4506 		break;
   4507 	case ixgbe_mac_X550EM_x:
   4508 	case ixgbe_mac_X550EM_a:
   4509 		/* Some devices use SDP0 for important information */
   4510 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4511 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4512 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4513 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4514 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4515 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4516 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4517 		mask |= IXGBE_EIMS_ECC;
   4518 		break;
   4519 	default:
   4520 		break;
   4521 	}
   4522 
   4523 	/* Enable Fan Failure detection */
   4524 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4525 		mask |= IXGBE_EIMS_GPI_SDP1;
   4526 	/* Enable SR-IOV */
   4527 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4528 		mask |= IXGBE_EIMS_MAILBOX;
   4529 	/* Enable Flow Director */
   4530 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4531 		mask |= IXGBE_EIMS_FLOW_DIR;
   4532 
   4533 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4534 
   4535 	/* With MSI-X we use auto clear */
   4536 	if (adapter->msix_mem) {
   4537 		mask = IXGBE_EIMS_ENABLE_MASK;
   4538 		/* Don't autoclear Link */
   4539 		mask &= ~IXGBE_EIMS_OTHER;
   4540 		mask &= ~IXGBE_EIMS_LSC;
   4541 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4542 			mask &= ~IXGBE_EIMS_MAILBOX;
   4543 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4544 	}
   4545 
   4546 	/*
   4547 	 * Now enable all queues, this is done separately to
   4548 	 * allow for handling the extended (beyond 32) MSI-X
   4549 	 * vectors that can be used by 82599
   4550 	 */
    4551 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4552 		ixgbe_enable_queue(adapter, que->msix);
   4553 
   4554 	IXGBE_WRITE_FLUSH(hw);
   4555 
   4556 	return;
   4557 } /* ixgbe_enable_intr */
   4558 
   4559 /************************************************************************
   4560  * ixgbe_disable_intr
   4561  ************************************************************************/
   4562 static void
   4563 ixgbe_disable_intr(struct adapter *adapter)
   4564 {
   4565 	if (adapter->msix_mem)
   4566 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4567 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4568 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4569 	} else {
   4570 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4571 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4572 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4573 	}
   4574 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4575 
   4576 	return;
   4577 } /* ixgbe_disable_intr */
   4578 
   4579 /************************************************************************
   4580  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4581  ************************************************************************/
   4582 static int
   4583 ixgbe_legacy_irq(void *arg)
   4584 {
   4585 	struct ix_queue *que = arg;
   4586 	struct adapter	*adapter = que->adapter;
   4587 	struct ixgbe_hw	*hw = &adapter->hw;
   4588 	struct ifnet    *ifp = adapter->ifp;
    4589 	struct tx_ring	*txr = adapter->tx_rings;
   4590 	bool		more = false;
   4591 	u32             eicr, eicr_mask;
   4592 
   4593 	/* Silicon errata #26 on 82598 */
   4594 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4595 
   4596 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4597 
   4598 	adapter->stats.pf.legint.ev_count++;
   4599 	++que->irqs.ev_count;
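	/*
	 * eicr == 0 means the interrupt was not ours (shared line) or was
	 * spurious; re-enable interrupts and report it as unhandled.
	 */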
   4600 	if (eicr == 0) {
   4601 		adapter->stats.pf.intzero.ev_count++;
   4602 		if ((ifp->if_flags & IFF_UP) != 0)
   4603 			ixgbe_enable_intr(adapter);
   4604 		return 0;
   4605 	}
   4606 
   4607 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4608 #ifdef __NetBSD__
   4609 		/* Don't run ixgbe_rxeof in interrupt context */
   4610 		more = true;
   4611 #else
   4612 		more = ixgbe_rxeof(que);
   4613 #endif
   4614 
   4615 		IXGBE_TX_LOCK(txr);
   4616 		ixgbe_txeof(txr);
   4617 #ifdef notyet
   4618 		if (!ixgbe_ring_empty(ifp, txr->br))
   4619 			ixgbe_start_locked(ifp, txr);
   4620 #endif
   4621 		IXGBE_TX_UNLOCK(txr);
   4622 	}
   4623 
   4624 	/* Check for fan failure */
   4625 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4626 		ixgbe_check_fan_failure(adapter, eicr, true);
   4627 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4628 	}
   4629 
   4630 	/* Link status change */
   4631 	if (eicr & IXGBE_EICR_LSC)
   4632 		softint_schedule(adapter->link_si);
   4633 
   4634 	if (ixgbe_is_sfp(hw)) {
   4635 		/* Pluggable optics-related interrupt */
   4636 		if (hw->mac.type >= ixgbe_mac_X540)
   4637 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4638 		else
   4639 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4640 
   4641 		if (eicr & eicr_mask) {
   4642 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4643 			softint_schedule(adapter->mod_si);
   4644 		}
   4645 
   4646 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4647 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4648 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4649 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4650 			softint_schedule(adapter->msf_si);
   4651 		}
   4652 	}
   4653 
   4654 	/* External PHY interrupt */
   4655 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4656 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4657 		softint_schedule(adapter->phy_si);
   4658 
   4659 	if (more)
   4660 		softint_schedule(que->que_si);
   4661 	else
   4662 		ixgbe_enable_intr(adapter);
   4663 
   4664 	return 1;
   4665 } /* ixgbe_legacy_irq */
   4666 
   4667 /************************************************************************
   4668  * ixgbe_free_pciintr_resources
   4669  ************************************************************************/
   4670 static void
   4671 ixgbe_free_pciintr_resources(struct adapter *adapter)
   4672 {
   4673 	struct ix_queue *que = adapter->queues;
   4674 	int		rid;
   4675 
   4676 	/*
   4677 	 * Release all msix queue resources:
   4678 	 */
   4679 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4680 		if (que->res != NULL) {
   4681 			pci_intr_disestablish(adapter->osdep.pc,
   4682 			    adapter->osdep.ihs[i]);
   4683 			adapter->osdep.ihs[i] = NULL;
   4684 		}
   4685 	}
   4686 
   4687 	/* Clean the Legacy or Link interrupt last */
   4688 	if (adapter->vector) /* we are doing MSIX */
   4689 		rid = adapter->vector;
   4690 	else
   4691 		rid = 0;
   4692 
   4693 	if (adapter->osdep.ihs[rid] != NULL) {
   4694 		pci_intr_disestablish(adapter->osdep.pc,
   4695 		    adapter->osdep.ihs[rid]);
   4696 		adapter->osdep.ihs[rid] = NULL;
   4697 	}
   4698 
   4699 	if (adapter->osdep.intrs != NULL) {
   4700 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4701 		    adapter->osdep.nintrs);
   4702 		adapter->osdep.intrs = NULL;
   4703 	}
   4704 
   4705 	return;
   4706 } /* ixgbe_free_pciintr_resources */
   4707 
   4708 /************************************************************************
   4709  * ixgbe_free_pci_resources
   4710  ************************************************************************/
   4711 static void
   4712 ixgbe_free_pci_resources(struct adapter *adapter)
   4713 {
   4714 
   4715 	ixgbe_free_pciintr_resources(adapter);
   4716 
   4717 	if (adapter->osdep.mem_size != 0) {
   4718 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4719 		    adapter->osdep.mem_bus_space_handle,
   4720 		    adapter->osdep.mem_size);
   4721 	}
   4722 
   4723 	return;
   4724 } /* ixgbe_free_pci_resources */
   4725 
   4726 /************************************************************************
   4727  * ixgbe_set_sysctl_value
   4728  ************************************************************************/
   4729 static void
   4730 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4731     const char *description, int *limit, int value)
   4732 {
    4733 	device_t dev = adapter->dev;
   4734 	struct sysctllog **log;
   4735 	const struct sysctlnode *rnode, *cnode;
   4736 
   4737 	log = &adapter->sysctllog;
   4738 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4739 		aprint_error_dev(dev, "could not create sysctl root\n");
   4740 		return;
   4741 	}
   4742 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4743 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4744 	    name, SYSCTL_DESCR(description),
    4745 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4746 		aprint_error_dev(dev, "could not create sysctl\n");
   4747 	*limit = value;
   4748 } /* ixgbe_set_sysctl_value */
   4749 
   4750 /************************************************************************
   4751  * ixgbe_sysctl_flowcntl
   4752  *
   4753  *   SYSCTL wrapper around setting Flow Control
   4754  ************************************************************************/
   4755 static int
   4756 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4757 {
   4758 	struct sysctlnode node = *rnode;
   4759 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4760 	int error, fc;
   4761 
   4762 	fc = adapter->hw.fc.current_mode;
   4763 	node.sysctl_data = &fc;
   4764 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4765 	if (error != 0 || newp == NULL)
   4766 		return error;
   4767 
   4768 	/* Don't bother if it's not changed */
   4769 	if (fc == adapter->hw.fc.current_mode)
   4770 		return (0);
   4771 
   4772 	return ixgbe_set_flowcntl(adapter, fc);
   4773 } /* ixgbe_sysctl_flowcntl */
   4774 
   4775 /************************************************************************
   4776  * ixgbe_set_flowcntl - Set flow control
   4777  *
   4778  *   Flow control values:
   4779  *     0 - off
   4780  *     1 - rx pause
   4781  *     2 - tx pause
   4782  *     3 - full
   4783  ************************************************************************/
   4784 static int
   4785 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4786 {
   4787 	switch (fc) {
   4788 		case ixgbe_fc_rx_pause:
   4789 		case ixgbe_fc_tx_pause:
   4790 		case ixgbe_fc_full:
   4791 			adapter->hw.fc.requested_mode = fc;
   4792 			if (adapter->num_queues > 1)
   4793 				ixgbe_disable_rx_drop(adapter);
   4794 			break;
   4795 		case ixgbe_fc_none:
   4796 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4797 			if (adapter->num_queues > 1)
   4798 				ixgbe_enable_rx_drop(adapter);
   4799 			break;
   4800 		default:
   4801 			return (EINVAL);
   4802 	}
   4803 
   4804 #if 0 /* XXX NetBSD */
   4805 	/* Don't autoneg if forcing a value */
   4806 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4807 #endif
   4808 	ixgbe_fc_enable(&adapter->hw);
   4809 
   4810 	return (0);
   4811 } /* ixgbe_set_flowcntl */
   4812 
   4813 /************************************************************************
   4814  * ixgbe_enable_rx_drop
   4815  *
   4816  *   Enable the hardware to drop packets when the buffer is
   4817  *   full. This is useful with multiqueue, so that no single
   4818  *   queue being full stalls the entire RX engine. We only
   4819  *   enable this when Multiqueue is enabled AND Flow Control
   4820  *   is disabled.
   4821  ************************************************************************/
   4822 static void
   4823 ixgbe_enable_rx_drop(struct adapter *adapter)
   4824 {
   4825 	struct ixgbe_hw *hw = &adapter->hw;
   4826 	struct rx_ring  *rxr;
   4827 	u32             srrctl;
   4828 
   4829 	for (int i = 0; i < adapter->num_queues; i++) {
   4830 		rxr = &adapter->rx_rings[i];
   4831 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4832 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4833 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4834 	}
   4835 
   4836 	/* enable drop for each vf */
   4837 	for (int i = 0; i < adapter->num_vfs; i++) {
   4838 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4839 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4840 		    IXGBE_QDE_ENABLE));
   4841 	}
   4842 } /* ixgbe_enable_rx_drop */
   4843 
   4844 /************************************************************************
   4845  * ixgbe_disable_rx_drop
   4846  ************************************************************************/
   4847 static void
   4848 ixgbe_disable_rx_drop(struct adapter *adapter)
   4849 {
   4850 	struct ixgbe_hw *hw = &adapter->hw;
   4851 	struct rx_ring  *rxr;
   4852 	u32             srrctl;
   4853 
   4854 	for (int i = 0; i < adapter->num_queues; i++) {
   4855 		rxr = &adapter->rx_rings[i];
    4856 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4857 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4858 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4859 	}
   4860 
   4861 	/* disable drop for each vf */
   4862 	for (int i = 0; i < adapter->num_vfs; i++) {
   4863 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4864 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4865 	}
   4866 } /* ixgbe_disable_rx_drop */
   4867 
   4868 /************************************************************************
   4869  * ixgbe_sysctl_advertise
   4870  *
   4871  *   SYSCTL wrapper around setting advertised speed
   4872  ************************************************************************/
   4873 static int
   4874 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4875 {
   4876 	struct sysctlnode node = *rnode;
   4877 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4878 	int            error = 0, advertise;
   4879 
   4880 	advertise = adapter->advertise;
   4881 	node.sysctl_data = &advertise;
   4882 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4883 	if (error != 0 || newp == NULL)
   4884 		return error;
   4885 
   4886 	return ixgbe_set_advertise(adapter, advertise);
   4887 } /* ixgbe_sysctl_advertise */
   4888 
   4889 /************************************************************************
   4890  * ixgbe_set_advertise - Control advertised link speed
   4891  *
   4892  *   Flags:
   4893  *     0x00 - Default (all capable link speed)
   4894  *     0x01 - advertise 100 Mb
   4895  *     0x02 - advertise 1G
   4896  *     0x04 - advertise 10G
   4897  *     0x08 - advertise 10 Mb
   4898  *     0x10 - advertise 2.5G
   4899  *     0x20 - advertise 5G
   4900  ************************************************************************/
   4901 static int
   4902 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4903 {
   4904 	device_t         dev;
   4905 	struct ixgbe_hw  *hw;
   4906 	ixgbe_link_speed speed = 0;
   4907 	ixgbe_link_speed link_caps = 0;
   4908 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4909 	bool             negotiate = FALSE;
   4910 
   4911 	/* Checks to validate new value */
   4912 	if (adapter->advertise == advertise) /* no change */
   4913 		return (0);
   4914 
   4915 	dev = adapter->dev;
   4916 	hw = &adapter->hw;
   4917 
   4918 	/* No speed changes for backplane media */
   4919 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4920 		return (ENODEV);
   4921 
   4922 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4923 	    (hw->phy.multispeed_fiber))) {
   4924 		device_printf(dev,
   4925 		    "Advertised speed can only be set on copper or "
   4926 		    "multispeed fiber media types.\n");
   4927 		return (EINVAL);
   4928 	}
   4929 
   4930 	if (advertise < 0x0 || advertise > 0x2f) {
   4931 		device_printf(dev,
    4932 		    "Invalid advertised speed; valid modes are 0x0 through 0x2f\n");
   4933 		return (EINVAL);
   4934 	}
   4935 
   4936 	if (hw->mac.ops.get_link_capabilities) {
   4937 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4938 		    &negotiate);
   4939 		if (err != IXGBE_SUCCESS) {
    4940 			device_printf(dev, "Unable to determine supported advertised speeds\n");
   4941 			return (ENODEV);
   4942 		}
   4943 	}
   4944 
   4945 	/* Set new value and report new advertised mode */
   4946 	if (advertise & 0x1) {
   4947 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4948 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4949 			return (EINVAL);
   4950 		}
   4951 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4952 	}
   4953 	if (advertise & 0x2) {
   4954 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4955 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4956 			return (EINVAL);
   4957 		}
   4958 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4959 	}
   4960 	if (advertise & 0x4) {
   4961 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4962 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4963 			return (EINVAL);
   4964 		}
   4965 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4966 	}
   4967 	if (advertise & 0x8) {
   4968 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4969 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4970 			return (EINVAL);
   4971 		}
   4972 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4973 	}
   4974 	if (advertise & 0x10) {
   4975 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4976 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4977 			return (EINVAL);
   4978 		}
   4979 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4980 	}
   4981 	if (advertise & 0x20) {
   4982 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4983 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4984 			return (EINVAL);
   4985 		}
   4986 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4987 	}
   4988 	if (advertise == 0)
   4989 		speed = link_caps; /* All capable link speed */
   4990 
   4991 	hw->mac.autotry_restart = TRUE;
   4992 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4993 	adapter->advertise = advertise;
   4994 
   4995 	return (0);
   4996 } /* ixgbe_set_advertise */
   4997 
   4998 /************************************************************************
   4999  * ixgbe_get_advertise - Get current advertised speed settings
   5000  *
   5001  *   Formatted for sysctl usage.
   5002  *   Flags:
   5003  *     0x01 - advertise 100 Mb
   5004  *     0x02 - advertise 1G
   5005  *     0x04 - advertise 10G
   5006  *     0x08 - advertise 10 Mb (yes, Mb)
   5007  *     0x10 - advertise 2.5G
   5008  *     0x20 - advertise 5G
   5009  ************************************************************************/
   5010 static int
   5011 ixgbe_get_advertise(struct adapter *adapter)
   5012 {
   5013 	struct ixgbe_hw  *hw = &adapter->hw;
   5014 	int              speed;
   5015 	ixgbe_link_speed link_caps = 0;
   5016 	s32              err;
   5017 	bool             negotiate = FALSE;
   5018 
   5019 	/*
   5020 	 * Advertised speed means nothing unless it's copper or
   5021 	 * multi-speed fiber
   5022 	 */
   5023 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5024 	    !(hw->phy.multispeed_fiber))
   5025 		return (0);
   5026 
   5027 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5028 	if (err != IXGBE_SUCCESS)
   5029 		return (0);
   5030 
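	/* Map the reported link capabilities onto the sysctl flag bits above */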
   5031 	speed =
   5032 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5033 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5034 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5035 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5036 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5037 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5038 
   5039 	return speed;
   5040 } /* ixgbe_get_advertise */
   5041 
   5042 /************************************************************************
   5043  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5044  *
   5045  *   Control values:
   5046  *     0/1 - off / on (use default value of 1000)
   5047  *
   5048  *     Legal timer values are:
   5049  *     50,100,250,500,1000,2000,5000,10000
   5050  *
   5051  *     Turning off interrupt moderation will also turn this off.
   5052  ************************************************************************/
   5053 static int
   5054 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5055 {
   5056 	struct sysctlnode node = *rnode;
   5057 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5058 	struct ifnet   *ifp = adapter->ifp;
   5059 	int            error;
   5060 	int            newval;
   5061 
   5062 	newval = adapter->dmac;
   5063 	node.sysctl_data = &newval;
   5064 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5065 	if ((error) || (newp == NULL))
   5066 		return (error);
   5067 
   5068 	switch (newval) {
   5069 	case 0:
   5070 		/* Disabled */
   5071 		adapter->dmac = 0;
   5072 		break;
   5073 	case 1:
   5074 		/* Enable and use default */
   5075 		adapter->dmac = 1000;
   5076 		break;
   5077 	case 50:
   5078 	case 100:
   5079 	case 250:
   5080 	case 500:
   5081 	case 1000:
   5082 	case 2000:
   5083 	case 5000:
   5084 	case 10000:
   5085 		/* Legal values - allow */
   5086 		adapter->dmac = newval;
   5087 		break;
   5088 	default:
   5089 		/* Do nothing, illegal value */
   5090 		return (EINVAL);
   5091 	}
   5092 
   5093 	/* Re-initialize hardware if it's already running */
   5094 	if (ifp->if_flags & IFF_RUNNING)
   5095 		ixgbe_init(ifp);
   5096 
   5097 	return (0);
    5098 } /* ixgbe_sysctl_dmac */
   5099 
   5100 #ifdef IXGBE_DEBUG
   5101 /************************************************************************
   5102  * ixgbe_sysctl_power_state
   5103  *
   5104  *   Sysctl to test power states
   5105  *   Values:
   5106  *     0      - set device to D0
   5107  *     3      - set device to D3
   5108  *     (none) - get current device power state
   5109  ************************************************************************/
   5110 static int
   5111 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   5112 {
   5113 #ifdef notyet
   5114 	struct sysctlnode node = *rnode;
   5115 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5116 	device_t       dev =  adapter->dev;
   5117 	int            curr_ps, new_ps, error = 0;
   5118 
   5119 	curr_ps = new_ps = pci_get_powerstate(dev);
   5120 
   5121 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    5122 	if ((error) || (newp == NULL))
   5123 		return (error);
   5124 
   5125 	if (new_ps == curr_ps)
   5126 		return (0);
   5127 
   5128 	if (new_ps == 3 && curr_ps == 0)
   5129 		error = DEVICE_SUSPEND(dev);
   5130 	else if (new_ps == 0 && curr_ps == 3)
   5131 		error = DEVICE_RESUME(dev);
   5132 	else
   5133 		return (EINVAL);
   5134 
   5135 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5136 
   5137 	return (error);
   5138 #else
   5139 	return 0;
   5140 #endif
   5141 } /* ixgbe_sysctl_power_state */
   5142 #endif
   5143 
   5144 /************************************************************************
   5145  * ixgbe_sysctl_wol_enable
   5146  *
   5147  *   Sysctl to enable/disable the WoL capability,
   5148  *   if supported by the adapter.
   5149  *
   5150  *   Values:
   5151  *     0 - disabled
   5152  *     1 - enabled
   5153  ************************************************************************/
   5154 static int
   5155 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5156 {
   5157 	struct sysctlnode node = *rnode;
   5158 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5159 	struct ixgbe_hw *hw = &adapter->hw;
   5160 	bool            new_wol_enabled;
   5161 	int             error = 0;
   5162 
   5163 	new_wol_enabled = hw->wol_enabled;
   5164 	node.sysctl_data = &new_wol_enabled;
   5165 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5166 	if ((error) || (newp == NULL))
   5167 		return (error);
   5168 	if (new_wol_enabled == hw->wol_enabled)
   5169 		return (0);
   5170 
   5171 	if (new_wol_enabled && !adapter->wol_support)
   5172 		return (ENODEV);
   5173 	else
   5174 		hw->wol_enabled = new_wol_enabled;
   5175 
   5176 	return (0);
   5177 } /* ixgbe_sysctl_wol_enable */
   5178 
   5179 /************************************************************************
   5180  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5181  *
   5182  *   Sysctl to enable/disable the types of packets that the
   5183  *   adapter will wake up on upon receipt.
   5184  *   Flags:
   5185  *     0x1  - Link Status Change
   5186  *     0x2  - Magic Packet
   5187  *     0x4  - Direct Exact
   5188  *     0x8  - Directed Multicast
   5189  *     0x10 - Broadcast
   5190  *     0x20 - ARP/IPv4 Request Packet
   5191  *     0x40 - Direct IPv4 Packet
   5192  *     0x80 - Direct IPv6 Packet
   5193  *
   5194  *   Settings not listed above will cause the sysctl to return an error.
   5195  ************************************************************************/
   5196 static int
   5197 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5198 {
   5199 	struct sysctlnode node = *rnode;
   5200 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5201 	int error = 0;
   5202 	u32 new_wufc;
   5203 
   5204 	new_wufc = adapter->wufc;
   5205 	node.sysctl_data = &new_wufc;
   5206 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5207 	if ((error) || (newp == NULL))
   5208 		return (error);
   5209 	if (new_wufc == adapter->wufc)
   5210 		return (0);
   5211 
   5212 	if (new_wufc & 0xffffff00)
   5213 		return (EINVAL);
   5214 
   5215 	new_wufc &= 0xff;
   5216 	new_wufc |= (0xffffff & adapter->wufc);
   5217 	adapter->wufc = new_wufc;
   5218 
   5219 	return (0);
   5220 } /* ixgbe_sysctl_wufc */
   5221 
   5222 #ifdef IXGBE_DEBUG
   5223 /************************************************************************
   5224  * ixgbe_sysctl_print_rss_config
   5225  ************************************************************************/
   5226 static int
   5227 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5228 {
   5229 #ifdef notyet
   5230 	struct sysctlnode node = *rnode;
   5231 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5232 	struct ixgbe_hw *hw = &adapter->hw;
   5233 	device_t        dev = adapter->dev;
   5234 	struct sbuf     *buf;
   5235 	int             error = 0, reta_size;
   5236 	u32             reg;
   5237 
   5238 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5239 	if (!buf) {
   5240 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5241 		return (ENOMEM);
   5242 	}
   5243 
   5244 	// TODO: use sbufs to make a string to print out
   5245 	/* Set multiplier for RETA setup and table size based on MAC */
   5246 	switch (adapter->hw.mac.type) {
   5247 	case ixgbe_mac_X550:
   5248 	case ixgbe_mac_X550EM_x:
   5249 	case ixgbe_mac_X550EM_a:
   5250 		reta_size = 128;
   5251 		break;
   5252 	default:
   5253 		reta_size = 32;
   5254 		break;
   5255 	}
   5256 
   5257 	/* Print out the redirection table */
   5258 	sbuf_cat(buf, "\n");
   5259 	for (int i = 0; i < reta_size; i++) {
   5260 		if (i < 32) {
   5261 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5262 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5263 		} else {
   5264 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5265 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5266 		}
   5267 	}
   5268 
   5269 	// TODO: print more config
   5270 
   5271 	error = sbuf_finish(buf);
   5272 	if (error)
   5273 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5274 
   5275 	sbuf_delete(buf);
   5276 #endif
   5277 	return (0);
   5278 } /* ixgbe_sysctl_print_rss_config */
   5279 #endif /* IXGBE_DEBUG */
   5280 
   5281 /************************************************************************
   5282  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5283  *
   5284  *   For X552/X557-AT devices using an external PHY
   5285  ************************************************************************/
   5286 static int
   5287 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5288 {
   5289 	struct sysctlnode node = *rnode;
   5290 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5291 	struct ixgbe_hw *hw = &adapter->hw;
    5292 	int		val;
    5293 	u16		reg;
    5294 	int		error;
   5295 
   5296 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5297 		device_printf(adapter->dev,
   5298 		    "Device has no supported external thermal sensor.\n");
   5299 		return (ENODEV);
   5300 	}
   5301 
   5302 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5303 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5304 		device_printf(adapter->dev,
   5305 		    "Error reading from PHY's current temperature register\n");
   5306 		return (EAGAIN);
   5307 	}
   5308 
   5309 	node.sysctl_data = &val;
   5310 
   5311 	/* Shift temp for output */
   5312 	val = reg >> 8;
   5313 
   5314 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5315 	if ((error) || (newp == NULL))
   5316 		return (error);
   5317 
   5318 	return (0);
   5319 } /* ixgbe_sysctl_phy_temp */
   5320 
   5321 /************************************************************************
   5322  * ixgbe_sysctl_phy_overtemp_occurred
   5323  *
   5324  *   Reports (directly from the PHY) whether the current PHY
   5325  *   temperature is over the overtemp threshold.
   5326  ************************************************************************/
   5327 static int
   5328 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5329 {
   5330 	struct sysctlnode node = *rnode;
   5331 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5332 	struct ixgbe_hw *hw = &adapter->hw;
   5333 	int val, error;
   5334 	u16 reg;
   5335 
   5336 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5337 		device_printf(adapter->dev,
   5338 		    "Device has no supported external thermal sensor.\n");
   5339 		return (ENODEV);
   5340 	}
   5341 
   5342 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5343 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5344 		device_printf(adapter->dev,
   5345 		    "Error reading from PHY's temperature status register\n");
   5346 		return (EAGAIN);
   5347 	}
   5348 
   5349 	node.sysctl_data = &val;
   5350 
   5351 	/* Get occurrence bit */
   5352 	val = !!(reg & 0x4000);
   5353 
   5354 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5355 	if ((error) || (newp == NULL))
   5356 		return (error);
   5357 
   5358 	return (0);
   5359 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5360 
   5361 /************************************************************************
   5362  * ixgbe_sysctl_eee_state
   5363  *
   5364  *   Sysctl to set EEE power saving feature
   5365  *   Values:
   5366  *     0      - disable EEE
   5367  *     1      - enable EEE
   5368  *     (none) - get current device EEE state
   5369  ************************************************************************/
   5370 static int
   5371 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5372 {
   5373 	struct sysctlnode node = *rnode;
   5374 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5375 	struct ifnet   *ifp = adapter->ifp;
   5376 	device_t       dev = adapter->dev;
   5377 	int            curr_eee, new_eee, error = 0;
   5378 	s32            retval;
   5379 
   5380 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5381 	node.sysctl_data = &new_eee;
   5382 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5383 	if ((error) || (newp == NULL))
   5384 		return (error);
   5385 
   5386 	/* Nothing to do */
   5387 	if (new_eee == curr_eee)
   5388 		return (0);
   5389 
   5390 	/* Not supported */
   5391 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5392 		return (EINVAL);
   5393 
   5394 	/* Bounds checking */
   5395 	if ((new_eee < 0) || (new_eee > 1))
   5396 		return (EINVAL);
   5397 
   5398 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5399 	if (retval) {
   5400 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5401 		return (EINVAL);
   5402 	}
   5403 
   5404 	/* Restart auto-neg */
   5405 	ixgbe_init(ifp);
   5406 
   5407 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5408 
   5409 	/* Cache new value */
   5410 	if (new_eee)
   5411 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5412 	else
   5413 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5414 
   5415 	return (error);
   5416 } /* ixgbe_sysctl_eee_state */
   5417 
   5418 /************************************************************************
   5419  * ixgbe_init_device_features
   5420  ************************************************************************/
   5421 static void
   5422 ixgbe_init_device_features(struct adapter *adapter)
   5423 {
   5424 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5425 	                  | IXGBE_FEATURE_RSS
   5426 	                  | IXGBE_FEATURE_MSI
   5427 	                  | IXGBE_FEATURE_MSIX
   5428 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5429 	                  | IXGBE_FEATURE_LEGACY_TX;
   5430 
   5431 	/* Set capabilities first... */
   5432 	switch (adapter->hw.mac.type) {
   5433 	case ixgbe_mac_82598EB:
   5434 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5435 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5436 		break;
   5437 	case ixgbe_mac_X540:
   5438 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5439 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5440 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5441 		    (adapter->hw.bus.func == 0))
   5442 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5443 		break;
   5444 	case ixgbe_mac_X550:
   5445 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5446 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5447 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5448 		break;
   5449 	case ixgbe_mac_X550EM_x:
   5450 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5451 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5452 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5453 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5454 		break;
   5455 	case ixgbe_mac_X550EM_a:
   5456 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5457 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5458 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5459 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5460 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5461 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5462 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5463 		}
   5464 		break;
   5465 	case ixgbe_mac_82599EB:
   5466 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5467 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5468 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5469 		    (adapter->hw.bus.func == 0))
   5470 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5471 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5472 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5473 		break;
   5474 	default:
   5475 		break;
   5476 	}
   5477 
   5478 	/* Enabled by default... */
   5479 	/* Fan failure detection */
   5480 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5481 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5482 	/* Netmap */
   5483 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5484 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5485 	/* EEE */
   5486 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5487 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5488 	/* Thermal Sensor */
   5489 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5490 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5491 
   5492 	/* Enabled via global sysctl... */
   5493 	/* Flow Director */
   5494 	if (ixgbe_enable_fdir) {
   5495 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5496 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5497 		else
    5498 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5499 	}
   5500 	/* Legacy (single queue) transmit */
   5501 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5502 	    ixgbe_enable_legacy_tx)
   5503 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5504 	/*
    5505 	 * Message Signaled Interrupts - Extended (MSI-X)
   5506 	 * Normal MSI is only enabled if MSI-X calls fail.
   5507 	 */
   5508 	if (!ixgbe_enable_msix)
   5509 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5510 	/* Receive-Side Scaling (RSS) */
   5511 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5512 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5513 
   5514 	/* Disable features with unmet dependencies... */
   5515 	/* No MSI-X */
   5516 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5517 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5518 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5519 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5520 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5521 	}
   5522 } /* ixgbe_init_device_features */
   5523 
   5524 /************************************************************************
   5525  * ixgbe_probe - Device identification routine
   5526  *
    5527  *   Determines if the driver should be loaded on
    5528  *   the adapter based on its PCI vendor/device ID.
    5529  *
    5530  *   return 1 if the device is supported, 0 otherwise
   5531  ************************************************************************/
   5532 static int
   5533 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5534 {
   5535 	const struct pci_attach_args *pa = aux;
   5536 
   5537 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5538 }
   5539 
   5540 static ixgbe_vendor_info_t *
   5541 ixgbe_lookup(const struct pci_attach_args *pa)
   5542 {
   5543 	ixgbe_vendor_info_t *ent;
   5544 	pcireg_t subid;
   5545 
   5546 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5547 
   5548 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5549 		return NULL;
   5550 
   5551 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5552 
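	/* A zero subvendor/subdevice ID in the table entry acts as a wildcard */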
   5553 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5554 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5555 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5556 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5557 			(ent->subvendor_id == 0)) &&
   5558 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5559 			(ent->subdevice_id == 0))) {
   5560 			++ixgbe_total_ports;
   5561 			return ent;
   5562 		}
   5563 	}
   5564 	return NULL;
   5565 }
   5566 
   5567 static int
   5568 ixgbe_ifflags_cb(struct ethercom *ec)
   5569 {
   5570 	struct ifnet *ifp = &ec->ec_if;
   5571 	struct adapter *adapter = ifp->if_softc;
   5572 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5573 
   5574 	IXGBE_CORE_LOCK(adapter);
   5575 
   5576 	if (change != 0)
   5577 		adapter->if_flags = ifp->if_flags;
   5578 
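	/*
	 * Returning ENETRESET tells the caller a full reinitialization is
	 * needed; promiscuous/allmulti changes can be applied immediately.
	 */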
   5579 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5580 		rc = ENETRESET;
   5581 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5582 		ixgbe_set_promisc(adapter);
   5583 
   5584 	/* Set up VLAN support and filter */
   5585 	ixgbe_setup_vlan_hw_support(adapter);
   5586 
   5587 	IXGBE_CORE_UNLOCK(adapter);
   5588 
   5589 	return rc;
   5590 }
   5591 
   5592 /************************************************************************
   5593  * ixgbe_ioctl - Ioctl entry point
   5594  *
   5595  *   Called when the user wants to configure the interface.
   5596  *
   5597  *   return 0 on success, positive on failure
   5598  ************************************************************************/
   5599 static int
   5600 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5601 {
   5602 	struct adapter	*adapter = ifp->if_softc;
   5603 	struct ixgbe_hw *hw = &adapter->hw;
   5604 	struct ifcapreq *ifcr = data;
   5605 	struct ifreq	*ifr = data;
   5606 	int             error = 0;
   5607 	int l4csum_en;
    5608 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
    5609 	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   5610 
   5611 	switch (command) {
   5612 	case SIOCSIFFLAGS:
   5613 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5614 		break;
   5615 	case SIOCADDMULTI:
   5616 	case SIOCDELMULTI:
   5617 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5618 		break;
   5619 	case SIOCSIFMEDIA:
   5620 	case SIOCGIFMEDIA:
   5621 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5622 		break;
   5623 	case SIOCSIFCAP:
   5624 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5625 		break;
   5626 	case SIOCSIFMTU:
   5627 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5628 		break;
   5629 #ifdef __NetBSD__
   5630 	case SIOCINITIFADDR:
   5631 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5632 		break;
   5633 	case SIOCGIFFLAGS:
   5634 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5635 		break;
   5636 	case SIOCGIFAFLAG_IN:
   5637 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5638 		break;
   5639 	case SIOCGIFADDR:
   5640 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5641 		break;
   5642 	case SIOCGIFMTU:
   5643 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5644 		break;
   5645 	case SIOCGIFCAP:
   5646 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5647 		break;
   5648 	case SIOCGETHERCAP:
   5649 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5650 		break;
   5651 	case SIOCGLIFADDR:
   5652 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5653 		break;
   5654 	case SIOCZIFDATA:
   5655 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5656 		hw->mac.ops.clear_hw_cntrs(hw);
   5657 		ixgbe_clear_evcnt(adapter);
   5658 		break;
   5659 	case SIOCAIFADDR:
   5660 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5661 		break;
   5662 #endif
   5663 	default:
   5664 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5665 		break;
   5666 	}
   5667 
   5668 	switch (command) {
   5669 	case SIOCSIFMEDIA:
   5670 	case SIOCGIFMEDIA:
   5671 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5672 	case SIOCGI2C:
   5673 	{
   5674 		struct ixgbe_i2c_req	i2c;
   5675 
   5676 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5677 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5678 		if (error != 0)
   5679 			break;
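		/* Only the standard SFP+ ID (0xA0) and diagnostics (0xA2)
		 * I2C addresses are accepted. */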
   5680 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5681 			error = EINVAL;
   5682 			break;
   5683 		}
   5684 		if (i2c.len > sizeof(i2c.data)) {
   5685 			error = EINVAL;
   5686 			break;
   5687 		}
   5688 
   5689 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5690 		    i2c.dev_addr, i2c.data);
   5691 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5692 		break;
   5693 	}
   5694 	case SIOCSIFCAP:
   5695 		/* Layer-4 Rx checksum offload has to be turned on and
   5696 		 * off as a unit.
   5697 		 */
   5698 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5699 		if (l4csum_en != l4csum && l4csum_en != 0)
   5700 			return EINVAL;
   5701 		/*FALLTHROUGH*/
   5702 	case SIOCADDMULTI:
   5703 	case SIOCDELMULTI:
   5704 	case SIOCSIFFLAGS:
   5705 	case SIOCSIFMTU:
   5706 	default:
   5707 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5708 			return error;
   5709 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5710 			;
   5711 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5712 			IXGBE_CORE_LOCK(adapter);
   5713 			ixgbe_init_locked(adapter);
   5714 			ixgbe_recalculate_max_frame(adapter);
   5715 			IXGBE_CORE_UNLOCK(adapter);
   5716 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5717 			/*
   5718 			 * Multicast list has changed; set the hardware filter
   5719 			 * accordingly.
   5720 			 */
   5721 			IXGBE_CORE_LOCK(adapter);
   5722 			ixgbe_disable_intr(adapter);
   5723 			ixgbe_set_multi(adapter);
   5724 			ixgbe_enable_intr(adapter);
   5725 			IXGBE_CORE_UNLOCK(adapter);
   5726 		}
   5727 		return 0;
   5728 	}
   5729 
   5730 	return error;
   5731 } /* ixgbe_ioctl */
   5732 
   5733 /************************************************************************
   5734  * ixgbe_check_fan_failure
   5735  ************************************************************************/
   5736 static void
   5737 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5738 {
   5739 	u32 mask;
   5740 
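	/*
	 * In interrupt context the fan failure shows up as GPI SDP1 in the
	 * cause register; otherwise check the SDP1 pin in ESDP directly.
	 */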
   5741 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5742 	    IXGBE_ESDP_SDP1;
   5743 
   5744 	if (reg & mask)
   5745 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5746 } /* ixgbe_check_fan_failure */
   5747 
   5748 /************************************************************************
   5749  * ixgbe_handle_que
   5750  ************************************************************************/
   5751 static void
   5752 ixgbe_handle_que(void *context)
   5753 {
   5754 	struct ix_queue *que = context;
   5755 	struct adapter  *adapter = que->adapter;
   5756 	struct tx_ring  *txr = que->txr;
   5757 	struct ifnet    *ifp = adapter->ifp;
   5758 	bool		more = false;
   5759 
   5760 	adapter->handleq.ev_count++;
   5761 
   5762 	if (ifp->if_flags & IFF_RUNNING) {
   5763 		more = ixgbe_rxeof(que);
   5764 		IXGBE_TX_LOCK(txr);
   5765 		ixgbe_txeof(txr);
   5766 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5767 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5768 				ixgbe_mq_start_locked(ifp, txr);
   5769 		/* Only for queue 0 */
   5770 		/* NetBSD still needs this for CBQ */
   5771 		if ((&adapter->queues[0] == que)
   5772 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5773 			ixgbe_legacy_start_locked(ifp, txr);
   5774 		IXGBE_TX_UNLOCK(txr);
   5775 	}
   5776 
   5777 	if (more)
   5778 		softint_schedule(que->que_si);
   5779 	else if (que->res != NULL) {
   5780 		/* Re-enable this interrupt */
   5781 		ixgbe_enable_queue(adapter, que->msix);
   5782 	} else
   5783 		ixgbe_enable_intr(adapter);
   5784 
   5785 	return;
   5786 } /* ixgbe_handle_que */
   5787 
   5788 /************************************************************************
   5789  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5790  ************************************************************************/
   5791 static int
   5792 ixgbe_allocate_legacy(struct adapter *adapter,
   5793     const struct pci_attach_args *pa)
   5794 {
   5795 	device_t	dev = adapter->dev;
   5796 	struct ix_queue *que = adapter->queues;
   5797 	struct tx_ring  *txr = adapter->tx_rings;
   5798 	int		counts[PCI_INTR_TYPE_SIZE];
   5799 	pci_intr_type_t intr_type, max_type;
   5800 	char            intrbuf[PCI_INTRSTR_LEN];
   5801 	const char	*intrstr = NULL;
   5802 
   5803 	/* We allocate a single interrupt resource */
   5804 	max_type = PCI_INTR_TYPE_MSI;
   5805 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5806 	counts[PCI_INTR_TYPE_MSI] =
   5807 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
    5808 	/* Check feat_cap (not feat_en) so we can fall back to INTx */
   5809 	counts[PCI_INTR_TYPE_INTX] =
   5810 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5811 
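	/*
	 * Try MSI first; if establishing the handler fails, release the
	 * allocation and retry with INTx when the hardware supports it.
	 */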
   5812 alloc_retry:
   5813 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5814 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5815 		return ENXIO;
   5816 	}
   5817 	adapter->osdep.nintrs = 1;
   5818 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5819 	    intrbuf, sizeof(intrbuf));
   5820 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5821 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5822 	    device_xname(dev));
   5823 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   5824 	if (adapter->osdep.ihs[0] == NULL) {
    5825 		aprint_error_dev(dev, "unable to establish %s\n",
   5826 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5827 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5828 		adapter->osdep.intrs = NULL;
   5829 		switch (intr_type) {
   5830 		case PCI_INTR_TYPE_MSI:
   5831 			/* The next try is for INTx: Disable MSI */
   5832 			max_type = PCI_INTR_TYPE_INTX;
   5833 			counts[PCI_INTR_TYPE_INTX] = 1;
   5834 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5835 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   5836 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5837 				goto alloc_retry;
   5838 			} else
   5839 				break;
   5840 		case PCI_INTR_TYPE_INTX:
   5841 		default:
   5842 			/* See below */
   5843 			break;
   5844 		}
   5845 	}
   5846 	if (intr_type == PCI_INTR_TYPE_INTX) {
   5847 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5848 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5849 	}
   5850 	if (adapter->osdep.ihs[0] == NULL) {
   5851 		aprint_error_dev(dev,
   5852 		    "couldn't establish interrupt%s%s\n",
   5853 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5854 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5855 		adapter->osdep.intrs = NULL;
   5856 		return ENXIO;
   5857 	}
   5858 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5859 	/*
   5860 	 * Try allocating a fast interrupt and the associated deferred
   5861 	 * processing contexts.
   5862 	 */
   5863 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5864 		txr->txr_si =
   5865 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5866 			ixgbe_deferred_mq_start, txr);
   5867 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5868 	    ixgbe_handle_que, que);
   5869 
    5870 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
    5871 		&& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   5872 		aprint_error_dev(dev,
   5873 		    "could not establish software interrupts\n");
   5874 
   5875 		return ENXIO;
   5876 	}
   5877 	/* For simplicity in the handlers */
   5878 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5879 
   5880 	return (0);
   5881 } /* ixgbe_allocate_legacy */
   5882 
   5883 
   5884 /************************************************************************
   5885  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5886  ************************************************************************/
   5887 static int
   5888 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5889 {
   5890 	device_t        dev = adapter->dev;
    5891 	struct ix_queue	*que = adapter->queues;
    5892 	struct tx_ring	*txr = adapter->tx_rings;
   5893 	pci_chipset_tag_t pc;
   5894 	char		intrbuf[PCI_INTRSTR_LEN];
   5895 	char		intr_xname[32];
   5896 	const char	*intrstr = NULL;
   5897 	int 		error, vector = 0;
   5898 	int		cpu_id = 0;
   5899 	kcpuset_t	*affinity;
   5900 #ifdef RSS
   5901 	unsigned int    rss_buckets = 0;
   5902 	kcpuset_t	cpu_mask;
   5903 #endif
   5904 
   5905 	pc = adapter->osdep.pc;
   5906 #ifdef	RSS
   5907 	/*
   5908 	 * If we're doing RSS, the number of queues needs to
   5909 	 * match the number of RSS buckets that are configured.
   5910 	 *
   5911 	 * + If there's more queues than RSS buckets, we'll end
   5912 	 *   up with queues that get no traffic.
   5913 	 *
   5914 	 * + If there's more RSS buckets than queues, we'll end
   5915 	 *   up having multiple RSS buckets map to the same queue,
   5916 	 *   so there'll be some contention.
   5917 	 */
   5918 	rss_buckets = rss_getnumbuckets();
   5919 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5920 	    (adapter->num_queues != rss_buckets)) {
   5921 		device_printf(dev,
   5922 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5923 		    "; performance will be impacted.\n",
   5924 		    __func__, adapter->num_queues, rss_buckets);
   5925 	}
   5926 #endif
   5927 
   5928 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5929 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5930 	    adapter->osdep.nintrs) != 0) {
   5931 		aprint_error_dev(dev,
   5932 		    "failed to allocate MSI-X interrupt\n");
   5933 		return (ENXIO);
   5934 	}
   5935 
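         	/*
         	 * One MSI-X vector per queue (TX/RX pair): establish a handler
         	 * for each, spread the vectors round-robin across CPUs and set
         	 * up the softints used for deferred per-queue work.
         	 */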
   5936 	kcpuset_create(&affinity, false);
   5937 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5938 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5939 		    device_xname(dev), i);
   5940 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5941 		    sizeof(intrbuf));
   5942 #ifdef IXGBE_MPSAFE
   5943 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5944 		    true);
   5945 #endif
   5946 		/* Set the handler function */
   5947 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5948 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5949 		    intr_xname);
   5950 		if (que->res == NULL) {
   5951 			aprint_error_dev(dev,
   5952 			    "Failed to register QUE handler\n");
   5953 			error = ENXIO;
   5954 			goto err_out;
   5955 		}
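         		/*
         		 * Remember which MSI-X vector drives this queue and mark
         		 * it in the adapter's active-queue bitmask.
         		 */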
   5956 		que->msix = vector;
    5957 		adapter->active_queues |= (u64)1 << que->msix;
   5958 
   5959 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5960 #ifdef	RSS
   5961 			/*
   5962 			 * The queue ID is used as the RSS layer bucket ID.
   5963 			 * We look up the queue ID -> RSS CPU ID and select
   5964 			 * that.
   5965 			 */
   5966 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5967 			CPU_SETOF(cpu_id, &cpu_mask);
   5968 #endif
   5969 		} else {
   5970 			/*
   5971 			 * Bind the MSI-X vector, and thus the
   5972 			 * rings to the corresponding CPU.
   5973 			 *
   5974 			 * This just happens to match the default RSS
   5975 			 * round-robin bucket -> queue -> CPU allocation.
   5976 			 */
   5977 			if (adapter->num_queues > 1)
   5978 				cpu_id = i;
   5979 		}
   5980 		/* Round-robin affinity */
   5981 		kcpuset_zero(affinity);
   5982 		kcpuset_set(affinity, cpu_id % ncpu);
   5983 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5984 		    NULL);
   5985 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5986 		    intrstr);
   5987 		if (error == 0) {
   5988 #if 1 /* def IXGBE_DEBUG */
   5989 #ifdef	RSS
    5990 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   5991 			    cpu_id % ncpu);
   5992 #else
   5993 			aprint_normal(", bound queue %d to cpu %d", i,
   5994 			    cpu_id % ncpu);
   5995 #endif
   5996 #endif /* IXGBE_DEBUG */
   5997 		}
   5998 		aprint_normal("\n");
   5999 
   6000 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6001 			txr->txr_si = softint_establish(
   6002 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6003 				ixgbe_deferred_mq_start, txr);
   6004 			if (txr->txr_si == NULL) {
   6005 				aprint_error_dev(dev,
   6006 				    "couldn't establish software interrupt\n");
   6007 				error = ENXIO;
   6008 				goto err_out;
   6009 			}
   6010 		}
   6011 		que->que_si
   6012 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6013 			ixgbe_handle_que, que);
   6014 		if (que->que_si == NULL) {
   6015 			aprint_error_dev(dev,
   6016 			    "couldn't establish software interrupt\n");
   6017 			error = ENXIO;
   6018 			goto err_out;
   6019 		}
   6020 	}
   6021 
    6022 	/* And the vector for link state and other non-queue interrupts */
   6023 	cpu_id++;
   6024 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6025 	adapter->vector = vector;
   6026 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6027 	    sizeof(intrbuf));
   6028 #ifdef IXGBE_MPSAFE
   6029 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6030 	    true);
   6031 #endif
   6032 	/* Set the link handler function */
   6033 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6034 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6035 	    intr_xname);
   6036 	if (adapter->osdep.ihs[vector] == NULL) {
   6037 		adapter->res = NULL;
   6038 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6039 		error = ENXIO;
   6040 		goto err_out;
   6041 	}
   6042 	/* Round-robin affinity */
   6043 	kcpuset_zero(affinity);
   6044 	kcpuset_set(affinity, cpu_id % ncpu);
   6045 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6046 	    NULL);
   6047 
   6048 	aprint_normal_dev(dev,
   6049 	    "for link, interrupting at %s", intrstr);
   6050 	if (error == 0)
   6051 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6052 	else
   6053 		aprint_normal("\n");
   6054 
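         	/*
         	 * SR-IOV capable devices also get a softint for the PF<->VF
         	 * mailbox, scheduled from the link vector's handler.
         	 */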
   6055 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6056 		adapter->mbx_si =
   6057 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6058 			ixgbe_handle_mbx, adapter);
   6059 		if (adapter->mbx_si == NULL) {
   6060 			aprint_error_dev(dev,
   6061 			    "could not establish software interrupts\n");
   6062 
   6063 			error = ENXIO;
   6064 			goto err_out;
   6065 		}
   6066 	}
   6067 
   6068 	kcpuset_destroy(affinity);
   6069 	aprint_normal_dev(dev,
   6070 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6071 
   6072 	return (0);
   6073 
   6074 err_out:
   6075 	kcpuset_destroy(affinity);
   6076 	ixgbe_free_softint(adapter);
   6077 	ixgbe_free_pciintr_resources(adapter);
   6078 	return (error);
   6079 } /* ixgbe_allocate_msix */
   6080 
   6081 /************************************************************************
   6082  * ixgbe_configure_interrupts
   6083  *
   6084  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6085  *   This will also depend on user settings.
   6086  ************************************************************************/
   6087 static int
   6088 ixgbe_configure_interrupts(struct adapter *adapter)
   6089 {
   6090 	device_t dev = adapter->dev;
   6091 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6092 	int want, queues, msgs;
   6093 
   6094 	/* Default to 1 queue if MSI-X setup fails */
   6095 	adapter->num_queues = 1;
   6096 
   6097 	/* Override by tuneable */
   6098 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6099 		goto msi;
   6100 
   6101 	/*
    6102 	 * NetBSD only: use single-vector MSI when there is only one CPU,
    6103 	 * to save an interrupt slot.
   6104 	 */
   6105 	if (ncpu == 1)
   6106 		goto msi;
   6107 
   6108 	/* First try MSI-X */
   6109 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6110 	msgs = MIN(msgs, IXG_MAX_NINTR);
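         	/* Need at least one vector for a queue plus one for link */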
   6111 	if (msgs < 2)
   6112 		goto msi;
   6113 
   6114 	adapter->msix_mem = (void *)1; /* XXX */
   6115 
    6116 	/* Auto config: one queue per CPU, leaving one vector for link */
   6117 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6118 
   6119 #ifdef	RSS
   6120 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6121 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6122 		queues = min(queues, rss_getnumbuckets());
   6123 #endif
   6124 	if (ixgbe_num_queues > queues) {
    6125 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too "
         		    "large, using reduced amount (%d).\n",
         		    ixgbe_num_queues, queues);
   6126 		ixgbe_num_queues = queues;
   6127 	}
   6128 
   6129 	if (ixgbe_num_queues != 0)
   6130 		queues = ixgbe_num_queues;
   6131 	else
   6132 		queues = min(queues,
   6133 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6134 
   6135 	/* reflect correct sysctl value */
   6136 	ixgbe_num_queues = queues;
   6137 
   6138 	/*
   6139 	 * Want one vector (RX/TX pair) per queue
   6140 	 * plus an additional for Link.
   6141 	 */
   6142 	want = queues + 1;
   6143 	if (msgs >= want)
   6144 		msgs = want;
   6145 	else {
    6146 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6147 		    "%d vectors but %d queues wanted!\n",
   6148 		    msgs, want);
   6149 		goto msi;
   6150 	}
   6151 	adapter->num_queues = queues;
   6152 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6153 	return (0);
   6154 
   6155 	/*
    6156 	 * MSI-X allocation failed or provided fewer vectors than
    6157 	 * needed, so free the MSI-X resources and fall back to
    6158 	 * trying MSI.
   6159 	 */
   6160 msi:
   6161 	/* Without MSI-X, some features are no longer supported */
   6162 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6163 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6164 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6165 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6166 
    6167 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6168 	adapter->msix_mem = NULL; /* XXX */
   6169 	if (msgs > 1)
   6170 		msgs = 1;
   6171 	if (msgs != 0) {
   6172 		msgs = 1;
   6173 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6174 		return (0);
   6175 	}
   6176 
   6177 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6178 		aprint_error_dev(dev,
   6179 		    "Device does not support legacy interrupts.\n");
   6180 		return 1;
   6181 	}
   6182 
   6183 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6184 
   6185 	return (0);
   6186 } /* ixgbe_configure_interrupts */
   6187 
   6188 
   6189 /************************************************************************
   6190  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6191  *
   6192  *   Done outside of interrupt context since the driver might sleep
   6193  ************************************************************************/
   6194 static void
   6195 ixgbe_handle_link(void *context)
   6196 {
   6197 	struct adapter  *adapter = context;
   6198 	struct ixgbe_hw *hw = &adapter->hw;
   6199 
   6200 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6201 	ixgbe_update_link_status(adapter);
   6202 
   6203 	/* Re-enable link interrupts */
   6204 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6205 } /* ixgbe_handle_link */
   6206 
   6207 /************************************************************************
   6208  * ixgbe_rearm_queues
   6209  ************************************************************************/
   6210 static void
   6211 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6212 {
   6213 	u32 mask;
   6214 
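         	/*
         	 * The 82598 has a single 32-bit EICS register; newer MACs split
         	 * the 64-bit queue mask across EICS_EX(0) and EICS_EX(1).
         	 */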
   6215 	switch (adapter->hw.mac.type) {
   6216 	case ixgbe_mac_82598EB:
   6217 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6218 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6219 		break;
   6220 	case ixgbe_mac_82599EB:
   6221 	case ixgbe_mac_X540:
   6222 	case ixgbe_mac_X550:
   6223 	case ixgbe_mac_X550EM_x:
   6224 	case ixgbe_mac_X550EM_a:
   6225 		mask = (queues & 0xFFFFFFFF);
   6226 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6227 		mask = (queues >> 32);
   6228 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6229 		break;
   6230 	default:
   6231 		break;
   6232 	}
   6233 } /* ixgbe_rearm_queues */
   6234