Home | History | Annotate | Line # | Download | only in ixgbe
ixgbe.c revision 1.125
      1 /* $NetBSD: ixgbe.c,v 1.125 2018/02/20 08:49:23 knakahara Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
     88  *   Used by probe to select devices to load on
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void      ixgbe_free_softint(struct adapter *);
    176 static void	ixgbe_get_slot_info(struct adapter *);
    177 static int      ixgbe_allocate_msix(struct adapter *,
    178 		    const struct pci_attach_args *);
    179 static int      ixgbe_allocate_legacy(struct adapter *,
    180 		    const struct pci_attach_args *);
    181 static int      ixgbe_configure_interrupts(struct adapter *);
    182 static void	ixgbe_free_pciintr_resources(struct adapter *);
    183 static void	ixgbe_free_pci_resources(struct adapter *);
    184 static void	ixgbe_local_timer(void *);
    185 static void	ixgbe_local_timer1(void *);
    186 static int	ixgbe_setup_interface(device_t, struct adapter *);
    187 static void	ixgbe_config_gpie(struct adapter *);
    188 static void	ixgbe_config_dmac(struct adapter *);
    189 static void	ixgbe_config_delay_values(struct adapter *);
    190 static void	ixgbe_config_link(struct adapter *);
    191 static void	ixgbe_check_wol_support(struct adapter *);
    192 static int	ixgbe_setup_low_power_mode(struct adapter *);
    193 static void	ixgbe_rearm_queues(struct adapter *, u64);
    194 
    195 static void     ixgbe_initialize_transmit_units(struct adapter *);
    196 static void     ixgbe_initialize_receive_units(struct adapter *);
    197 static void	ixgbe_enable_rx_drop(struct adapter *);
    198 static void	ixgbe_disable_rx_drop(struct adapter *);
    199 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    200 
    201 static void     ixgbe_enable_intr(struct adapter *);
    202 static void     ixgbe_disable_intr(struct adapter *);
    203 static void     ixgbe_update_stats_counters(struct adapter *);
    204 static void     ixgbe_set_promisc(struct adapter *);
    205 static void     ixgbe_set_multi(struct adapter *);
    206 static void     ixgbe_update_link_status(struct adapter *);
    207 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    208 static void	ixgbe_configure_ivars(struct adapter *);
    209 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    210 static void	ixgbe_eitr_write(struct ix_queue *, uint32_t);
    211 
    212 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    213 #if 0
    214 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    215 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    216 #endif
    217 
    218 static void	ixgbe_add_device_sysctls(struct adapter *);
    219 static void     ixgbe_add_hw_stats(struct adapter *);
    220 static void	ixgbe_clear_evcnt(struct adapter *);
    221 static int	ixgbe_set_flowcntl(struct adapter *, int);
    222 static int	ixgbe_set_advertise(struct adapter *, int);
    223 static int      ixgbe_get_advertise(struct adapter *);
    224 
    225 /* Sysctl handlers */
    226 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    227 		     const char *, int *, int);
    228 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    230 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    231 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    232 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    234 #ifdef IXGBE_DEBUG
    235 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    236 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    237 #endif
    238 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    240 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    241 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    242 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    243 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    244 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    245 
    246 /* Support for pluggable optic modules */
    247 static bool	ixgbe_sfp_probe(struct adapter *);
    248 
    249 /* Legacy (single vector) interrupt handler */
    250 static int	ixgbe_legacy_irq(void *);
    251 
    252 /* The MSI/MSI-X Interrupt handlers */
    253 static int	ixgbe_msix_que(void *);
    254 static int	ixgbe_msix_link(void *);
    255 
    256 /* Software interrupts for deferred work */
    257 static void	ixgbe_handle_que(void *);
    258 static void	ixgbe_handle_link(void *);
    259 static void	ixgbe_handle_msf(void *);
    260 static void	ixgbe_handle_mod(void *);
    261 static void	ixgbe_handle_phy(void *);
    262 
    263 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    264 
    265 /************************************************************************
    266  *  NetBSD Device Interface Entry Points
    267  ************************************************************************/
    268 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    269     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    270     DVF_DETACH_SHUTDOWN);
    271 
    272 #if 0
    273 devclass_t ix_devclass;
    274 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    275 
    276 MODULE_DEPEND(ix, pci, 1, 1, 1);
    277 MODULE_DEPEND(ix, ether, 1, 1, 1);
    278 #ifdef DEV_NETMAP
    279 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    280 #endif
    281 #endif
    282 
    283 /*
    284  * TUNEABLE PARAMETERS:
    285  */
    286 
    287 /*
    288  * AIM: Adaptive Interrupt Moderation
    289  * which means that the interrupt rate
    290  * is varied over time based on the
    291  * traffic for that interrupt vector
    292  */
    293 static bool ixgbe_enable_aim = true;
    294 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    295 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    296     "Enable adaptive interrupt moderation");
    297 
    298 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    299 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    300     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    301 
    302 /* How many packets rxeof tries to clean at a time */
    303 static int ixgbe_rx_process_limit = 256;
    304 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    305     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    306 
    307 /* How many packets txeof tries to clean at a time */
    308 static int ixgbe_tx_process_limit = 256;
    309 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    310     &ixgbe_tx_process_limit, 0,
    311     "Maximum number of sent packets to process at a time, -1 means unlimited");
    312 
    313 /* Flow control setting, default to full */
    314 static int ixgbe_flow_control = ixgbe_fc_full;
    315 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    316     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    317 
    318 /*
    319  * Smart speed setting, default to on
    320  * this only works as a compile option
    321  * right now as its during attach, set
    322  * this to 'ixgbe_smart_speed_off' to
    323  * disable.
    324  */
    325 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    326 
    327 /*
    328  * MSI-X should be the default for best performance,
    329  * but this allows it to be forced off for testing.
    330  */
    331 static int ixgbe_enable_msix = 1;
    332 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    333     "Enable MSI-X interrupts");
    334 
    335 /*
    336  * Number of Queues, can be set to 0,
    337  * it then autoconfigures based on the
    338  * number of cpus with a max of 8. This
    339  * can be overriden manually here.
    340  */
    341 static int ixgbe_num_queues = 0;
    342 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    343     "Number of queues to configure, 0 indicates autoconfigure");
    344 
    345 /*
    346  * Number of TX descriptors per ring,
    347  * setting higher than RX as this seems
    348  * the better performing choice.
    349  */
    350 static int ixgbe_txd = PERFORM_TXD;
    351 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    352     "Number of transmit descriptors per queue");
    353 
    354 /* Number of RX descriptors per ring */
    355 static int ixgbe_rxd = PERFORM_RXD;
    356 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    357     "Number of receive descriptors per queue");
    358 
    359 /*
    360  * Defining this on will allow the use
    361  * of unsupported SFP+ modules, note that
    362  * doing so you are on your own :)
    363  */
    364 static int allow_unsupported_sfp = false;
    365 #define TUNABLE_INT(__x, __y)
    366 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    367 
    368 /*
    369  * Not sure if Flow Director is fully baked,
    370  * so we'll default to turning it off.
    371  */
    372 static int ixgbe_enable_fdir = 0;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    374     "Enable Flow Director");
    375 
    376 /* Legacy Transmit (single queue) */
    377 static int ixgbe_enable_legacy_tx = 0;
    378 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    379     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    380 
    381 /* Receive-Side Scaling */
    382 static int ixgbe_enable_rss = 1;
    383 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    384     "Enable Receive-Side Scaling (RSS)");
    385 
    386 /* Keep running tab on them for sanity check */
    387 static int ixgbe_total_ports;
    388 
    389 #if 0
    390 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    391 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    392 #endif
    393 
    394 #ifdef NET_MPSAFE
    395 #define IXGBE_MPSAFE		1
    396 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    397 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    398 #else
    399 #define IXGBE_CALLOUT_FLAGS	0
    400 #define IXGBE_SOFTINFT_FLAGS	0
    401 #endif
    402 
    403 /************************************************************************
    404  * ixgbe_initialize_rss_mapping
    405  ************************************************************************/
    406 static void
    407 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    408 {
    409 	struct ixgbe_hw	*hw = &adapter->hw;
    410 	u32             reta = 0, mrqc, rss_key[10];
    411 	int             queue_id, table_size, index_mult;
    412 	int             i, j;
    413 	u32             rss_hash_config;
    414 
    415 	/* force use default RSS key. */
    416 #ifdef __NetBSD__
    417 	rss_getkey((uint8_t *) &rss_key);
    418 #else
    419 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    420 		/* Fetch the configured RSS key */
    421 		rss_getkey((uint8_t *) &rss_key);
    422 	} else {
    423 		/* set up random bits */
    424 		cprng_fast(&rss_key, sizeof(rss_key));
    425 	}
    426 #endif
    427 
    428 	/* Set multiplier for RETA setup and table size based on MAC */
    429 	index_mult = 0x1;
    430 	table_size = 128;
    431 	switch (adapter->hw.mac.type) {
    432 	case ixgbe_mac_82598EB:
    433 		index_mult = 0x11;
    434 		break;
    435 	case ixgbe_mac_X550:
    436 	case ixgbe_mac_X550EM_x:
    437 	case ixgbe_mac_X550EM_a:
    438 		table_size = 512;
    439 		break;
    440 	default:
    441 		break;
    442 	}
    443 
    444 	/* Set up the redirection table */
    445 	for (i = 0, j = 0; i < table_size; i++, j++) {
    446 		if (j == adapter->num_queues)
    447 			j = 0;
    448 
    449 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    450 			/*
    451 			 * Fetch the RSS bucket id for the given indirection
    452 			 * entry. Cap it at the number of configured buckets
    453 			 * (which is num_queues.)
    454 			 */
    455 			queue_id = rss_get_indirection_to_bucket(i);
    456 			queue_id = queue_id % adapter->num_queues;
    457 		} else
    458 			queue_id = (j * index_mult);
    459 
    460 		/*
    461 		 * The low 8 bits are for hash value (n+0);
    462 		 * The next 8 bits are for hash value (n+1), etc.
    463 		 */
    464 		reta = reta >> 8;
    465 		reta = reta | (((uint32_t) queue_id) << 24);
    466 		if ((i & 3) == 3) {
    467 			if (i < 128)
    468 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    469 			else
    470 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    471 				    reta);
    472 			reta = 0;
    473 		}
    474 	}
    475 
    476 	/* Now fill our hash function seeds */
    477 	for (i = 0; i < 10; i++)
    478 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    479 
    480 	/* Perform hash on these packet types */
    481 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    482 		rss_hash_config = rss_gethashconfig();
    483 	else {
    484 		/*
    485 		 * Disable UDP - IP fragments aren't currently being handled
    486 		 * and so we end up with a mix of 2-tuple and 4-tuple
    487 		 * traffic.
    488 		 */
    489 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    490 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    491 		                | RSS_HASHTYPE_RSS_IPV6
    492 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    493 		                | RSS_HASHTYPE_RSS_IPV6_EX
    494 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    495 	}
    496 
    497 	mrqc = IXGBE_MRQC_RSSEN;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    503 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    504 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    505 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    506 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    507 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    508 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    509 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    510 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    511 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    512 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    513 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    514 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    515 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    516 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    517 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    518 } /* ixgbe_initialize_rss_mapping */
    519 
    520 /************************************************************************
    521  * ixgbe_initialize_receive_units - Setup receive registers and features.
    522  ************************************************************************/
    523 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    524 
    525 static void
    526 ixgbe_initialize_receive_units(struct adapter *adapter)
    527 {
    528 	struct	rx_ring	*rxr = adapter->rx_rings;
    529 	struct ixgbe_hw	*hw = &adapter->hw;
    530 	struct ifnet    *ifp = adapter->ifp;
    531 	int             i, j;
    532 	u32		bufsz, fctrl, srrctl, rxcsum;
    533 	u32		hlreg;
    534 
    535 	/*
    536 	 * Make sure receives are disabled while
    537 	 * setting up the descriptor ring
    538 	 */
    539 	ixgbe_disable_rx(hw);
    540 
    541 	/* Enable broadcasts */
    542 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    543 	fctrl |= IXGBE_FCTRL_BAM;
    544 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    545 		fctrl |= IXGBE_FCTRL_DPF;
    546 		fctrl |= IXGBE_FCTRL_PMCF;
    547 	}
    548 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    549 
    550 	/* Set for Jumbo Frames? */
    551 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    552 	if (ifp->if_mtu > ETHERMTU)
    553 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    554 	else
    555 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    556 
    557 #ifdef DEV_NETMAP
    558 	/* CRC stripping is conditional in Netmap */
    559 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    560 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    561 	    !ix_crcstrip)
    562 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    563 	else
    564 #endif /* DEV_NETMAP */
    565 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    566 
    567 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    568 
    569 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    570 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    571 
    572 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    573 		u64 rdba = rxr->rxdma.dma_paddr;
    574 		u32 tqsmreg, reg;
    575 		int regnum = i / 4;	/* 1 register per 4 queues */
    576 		int regshift = i % 4;	/* 4 bits per 1 queue */
    577 		j = rxr->me;
    578 
    579 		/* Setup the Base and Length of the Rx Descriptor Ring */
    580 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    581 		    (rdba & 0x00000000ffffffffULL));
    582 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    583 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    584 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    585 
    586 		/* Set up the SRRCTL register */
    587 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    588 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    589 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    590 		srrctl |= bufsz;
    591 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    592 
    593 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    594 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    595 		reg &= ~(0x000000ff << (regshift * 8));
    596 		reg |= i << (regshift * 8);
    597 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    598 
    599 		/*
    600 		 * Set RQSMR (Receive Queue Statistic Mapping) register.
    601 		 * Register location for queue 0...7 are different between
    602 		 * 82598 and newer.
    603 		 */
    604 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    605 			tqsmreg = IXGBE_TQSMR(regnum);
    606 		else
    607 			tqsmreg = IXGBE_TQSM(regnum);
    608 		reg = IXGBE_READ_REG(hw, tqsmreg);
    609 		reg &= ~(0x000000ff << (regshift * 8));
    610 		reg |= i << (regshift * 8);
    611 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    612 
    613 		/*
    614 		 * Set DROP_EN iff we have no flow control and >1 queue.
    615 		 * Note that srrctl was cleared shortly before during reset,
    616 		 * so we do not need to clear the bit, but do it just in case
    617 		 * this code is moved elsewhere.
    618 		 */
    619 		if (adapter->num_queues > 1 &&
    620 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    621 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    622 		} else {
    623 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    624 		}
    625 
    626 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    627 
    628 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    629 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    630 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    631 
    632 		/* Set the driver rx tail address */
    633 		rxr->tail =  IXGBE_RDT(rxr->me);
    634 	}
    635 
    636 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    637 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    638 		            | IXGBE_PSRTYPE_UDPHDR
    639 		            | IXGBE_PSRTYPE_IPV4HDR
    640 		            | IXGBE_PSRTYPE_IPV6HDR;
    641 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    642 	}
    643 
    644 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    645 
    646 	ixgbe_initialize_rss_mapping(adapter);
    647 
    648 	if (adapter->num_queues > 1) {
    649 		/* RSS and RX IPP Checksum are mutually exclusive */
    650 		rxcsum |= IXGBE_RXCSUM_PCSD;
    651 	}
    652 
    653 	if (ifp->if_capenable & IFCAP_RXCSUM)
    654 		rxcsum |= IXGBE_RXCSUM_PCSD;
    655 
    656 	/* This is useful for calculating UDP/IP fragment checksums */
    657 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    658 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    659 
    660 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    661 
    662 	return;
    663 } /* ixgbe_initialize_receive_units */
    664 
    665 /************************************************************************
    666  * ixgbe_initialize_transmit_units - Enable transmit units.
    667  ************************************************************************/
    668 static void
    669 ixgbe_initialize_transmit_units(struct adapter *adapter)
    670 {
    671 	struct tx_ring  *txr = adapter->tx_rings;
    672 	struct ixgbe_hw	*hw = &adapter->hw;
    673 
    674 	/* Setup the Base and Length of the Tx Descriptor Ring */
    675 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    676 		u64 tdba = txr->txdma.dma_paddr;
    677 		u32 txctrl = 0;
    678 		int j = txr->me;
    679 
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    681 		    (tdba & 0x00000000ffffffffULL));
    682 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    683 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    684 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    685 
    686 		/* Setup the HW Tx Head and Tail descriptor pointers */
    687 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    688 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    689 
    690 		/* Cache the tail address */
    691 		txr->tail = IXGBE_TDT(j);
    692 
    693 		/* Disable Head Writeback */
    694 		/*
    695 		 * Note: for X550 series devices, these registers are actually
    696 		 * prefixed with TPH_ isntead of DCA_, but the addresses and
    697 		 * fields remain the same.
    698 		 */
    699 		switch (hw->mac.type) {
    700 		case ixgbe_mac_82598EB:
    701 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    702 			break;
    703 		default:
    704 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    705 			break;
    706 		}
    707 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    708 		switch (hw->mac.type) {
    709 		case ixgbe_mac_82598EB:
    710 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    711 			break;
    712 		default:
    713 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    714 			break;
    715 		}
    716 
    717 	}
    718 
    719 	if (hw->mac.type != ixgbe_mac_82598EB) {
    720 		u32 dmatxctl, rttdcs;
    721 
    722 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    723 		dmatxctl |= IXGBE_DMATXCTL_TE;
    724 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    725 		/* Disable arbiter to set MTQC */
    726 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    727 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    728 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    729 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    730 		    ixgbe_get_mtqc(adapter->iov_mode));
    731 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    732 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    733 	}
    734 
    735 	return;
    736 } /* ixgbe_initialize_transmit_units */
    737 
    738 /************************************************************************
    739  * ixgbe_attach - Device initialization routine
    740  *
    741  *   Called when the driver is being loaded.
    742  *   Identifies the type of hardware, allocates all resources
    743  *   and initializes the hardware.
    744  *
    745  *   return 0 on success, positive on failure
    746  ************************************************************************/
    747 static void
    748 ixgbe_attach(device_t parent, device_t dev, void *aux)
    749 {
    750 	struct adapter  *adapter;
    751 	struct ixgbe_hw *hw;
    752 	int             error = -1;
    753 	u32		ctrl_ext;
    754 	u16		high, low, nvmreg;
    755 	pcireg_t	id, subid;
    756 	ixgbe_vendor_info_t *ent;
    757 	struct pci_attach_args *pa = aux;
    758 	const char *str;
    759 	char buf[256];
    760 
    761 	INIT_DEBUGOUT("ixgbe_attach: begin");
    762 
    763 	/* Allocate, clear, and link in our adapter structure */
    764 	adapter = device_private(dev);
    765 	adapter->hw.back = adapter;
    766 	adapter->dev = dev;
    767 	hw = &adapter->hw;
    768 	adapter->osdep.pc = pa->pa_pc;
    769 	adapter->osdep.tag = pa->pa_tag;
    770 	if (pci_dma64_available(pa))
    771 		adapter->osdep.dmat = pa->pa_dmat64;
    772 	else
    773 		adapter->osdep.dmat = pa->pa_dmat;
    774 	adapter->osdep.attached = false;
    775 
    776 	ent = ixgbe_lookup(pa);
    777 
    778 	KASSERT(ent != NULL);
    779 
    780 	aprint_normal(": %s, Version - %s\n",
    781 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    782 
    783 	/* Core Lock Init*/
    784 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    785 
    786 	/* Set up the timer callout */
    787 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    788 
    789 	/* Determine hardware revision */
    790 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    791 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    792 
    793 	hw->vendor_id = PCI_VENDOR(id);
    794 	hw->device_id = PCI_PRODUCT(id);
    795 	hw->revision_id =
    796 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    797 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    798 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    799 
    800 	/*
    801 	 * Make sure BUSMASTER is set
    802 	 */
    803 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    804 
    805 	/* Do base PCI setup - map BAR0 */
    806 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    807 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    808 		error = ENXIO;
    809 		goto err_out;
    810 	}
    811 
    812 	/* let hardware know driver is loaded */
    813 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    814 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    815 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    816 
    817 	/*
    818 	 * Initialize the shared code
    819 	 */
    820 	if (ixgbe_init_shared_code(hw)) {
    821 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    822 		error = ENXIO;
    823 		goto err_out;
    824 	}
    825 
    826 	switch (hw->mac.type) {
    827 	case ixgbe_mac_82598EB:
    828 		str = "82598EB";
    829 		break;
    830 	case ixgbe_mac_82599EB:
    831 		str = "82599EB";
    832 		break;
    833 	case ixgbe_mac_X540:
    834 		str = "X540";
    835 		break;
    836 	case ixgbe_mac_X550:
    837 		str = "X550";
    838 		break;
    839 	case ixgbe_mac_X550EM_x:
    840 		str = "X550EM";
    841 		break;
    842 	case ixgbe_mac_X550EM_a:
    843 		str = "X550EM A";
    844 		break;
    845 	default:
    846 		str = "Unknown";
    847 		break;
    848 	}
    849 	aprint_normal_dev(dev, "device %s\n", str);
    850 
    851 	if (hw->mbx.ops.init_params)
    852 		hw->mbx.ops.init_params(hw);
    853 
    854 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    855 
    856 	/* Pick up the 82599 settings */
    857 	if (hw->mac.type != ixgbe_mac_82598EB) {
    858 		hw->phy.smart_speed = ixgbe_smart_speed;
    859 		adapter->num_segs = IXGBE_82599_SCATTER;
    860 	} else
    861 		adapter->num_segs = IXGBE_82598_SCATTER;
    862 
    863 	hw->mac.ops.set_lan_id(hw);
    864 	ixgbe_init_device_features(adapter);
    865 
    866 	if (ixgbe_configure_interrupts(adapter)) {
    867 		error = ENXIO;
    868 		goto err_out;
    869 	}
    870 
    871 	/* Allocate multicast array memory. */
    872 	adapter->mta = malloc(sizeof(*adapter->mta) *
    873 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    874 	if (adapter->mta == NULL) {
    875 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    876 		error = ENOMEM;
    877 		goto err_out;
    878 	}
    879 
    880 	/* Enable WoL (if supported) */
    881 	ixgbe_check_wol_support(adapter);
    882 
    883 	/* Verify adapter fan is still functional (if applicable) */
    884 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    885 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    886 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    887 	}
    888 
    889 	/* Ensure SW/FW semaphore is free */
    890 	ixgbe_init_swfw_semaphore(hw);
    891 
    892 	/* Enable EEE power saving */
    893 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    894 		hw->mac.ops.setup_eee(hw, TRUE);
    895 
    896 	/* Set an initial default flow control value */
    897 	hw->fc.requested_mode = ixgbe_flow_control;
    898 
    899 	/* Sysctls for limiting the amount of work done in the taskqueues */
    900 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    901 	    "max number of rx packets to process",
    902 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    903 
    904 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    905 	    "max number of tx packets to process",
    906 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    907 
    908 	/* Do descriptor calc and sanity checks */
    909 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    910 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    911 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    912 		adapter->num_tx_desc = DEFAULT_TXD;
    913 	} else
    914 		adapter->num_tx_desc = ixgbe_txd;
    915 
    916 	/*
    917 	 * With many RX rings it is easy to exceed the
    918 	 * system mbuf allocation. Tuning nmbclusters
    919 	 * can alleviate this.
    920 	 */
    921 	if (nmbclusters > 0) {
    922 		int s;
    923 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    924 		if (s > nmbclusters) {
    925 			aprint_error_dev(dev, "RX Descriptors exceed "
    926 			    "system mbuf max, using default instead!\n");
    927 			ixgbe_rxd = DEFAULT_RXD;
    928 		}
    929 	}
    930 
    931 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    932 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    933 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    934 		adapter->num_rx_desc = DEFAULT_RXD;
    935 	} else
    936 		adapter->num_rx_desc = ixgbe_rxd;
    937 
    938 	/* Allocate our TX/RX Queues */
    939 	if (ixgbe_allocate_queues(adapter)) {
    940 		error = ENOMEM;
    941 		goto err_out;
    942 	}
    943 
    944 	hw->phy.reset_if_overtemp = TRUE;
    945 	error = ixgbe_reset_hw(hw);
    946 	hw->phy.reset_if_overtemp = FALSE;
    947 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    948 		/*
    949 		 * No optics in this port, set up
    950 		 * so the timer routine will probe
    951 		 * for later insertion.
    952 		 */
    953 		adapter->sfp_probe = TRUE;
    954 		error = IXGBE_SUCCESS;
    955 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    956 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    957 		error = EIO;
    958 		goto err_late;
    959 	} else if (error) {
    960 		aprint_error_dev(dev, "Hardware initialization failed\n");
    961 		error = EIO;
    962 		goto err_late;
    963 	}
    964 
    965 	/* Make sure we have a good EEPROM before we read from it */
    966 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    967 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    968 		error = EIO;
    969 		goto err_late;
    970 	}
    971 
    972 	aprint_normal("%s:", device_xname(dev));
    973 	/* NVM Image Version */
    974 	switch (hw->mac.type) {
    975 	case ixgbe_mac_X540:
    976 	case ixgbe_mac_X550EM_a:
    977 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    978 		if (nvmreg == 0xffff)
    979 			break;
    980 		high = (nvmreg >> 12) & 0x0f;
    981 		low = (nvmreg >> 4) & 0xff;
    982 		id = nvmreg & 0x0f;
    983 		aprint_normal(" NVM Image Version %u.", high);
    984 		if (hw->mac.type == ixgbe_mac_X540)
    985 			str = "%x";
    986 		else
    987 			str = "%02x";
    988 		aprint_normal(str, low);
    989 		aprint_normal(" ID 0x%x,", id);
    990 		break;
    991 	case ixgbe_mac_X550EM_x:
    992 	case ixgbe_mac_X550:
    993 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    994 		if (nvmreg == 0xffff)
    995 			break;
    996 		high = (nvmreg >> 12) & 0x0f;
    997 		low = nvmreg & 0xff;
    998 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    999 		break;
   1000 	default:
   1001 		break;
   1002 	}
   1003 
   1004 	/* PHY firmware revision */
   1005 	switch (hw->mac.type) {
   1006 	case ixgbe_mac_X540:
   1007 	case ixgbe_mac_X550:
   1008 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1009 		if (nvmreg == 0xffff)
   1010 			break;
   1011 		high = (nvmreg >> 12) & 0x0f;
   1012 		low = (nvmreg >> 4) & 0xff;
   1013 		id = nvmreg & 0x000f;
   1014 		aprint_normal(" PHY FW Revision %u.", high);
   1015 		if (hw->mac.type == ixgbe_mac_X540)
   1016 			str = "%x";
   1017 		else
   1018 			str = "%02x";
   1019 		aprint_normal(str, low);
   1020 		aprint_normal(" ID 0x%x,", id);
   1021 		break;
   1022 	default:
   1023 		break;
   1024 	}
   1025 
   1026 	/* NVM Map version & OEM NVM Image version */
   1027 	switch (hw->mac.type) {
   1028 	case ixgbe_mac_X550:
   1029 	case ixgbe_mac_X550EM_x:
   1030 	case ixgbe_mac_X550EM_a:
   1031 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1032 		if (nvmreg != 0xffff) {
   1033 			high = (nvmreg >> 12) & 0x0f;
   1034 			low = nvmreg & 0x00ff;
   1035 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1036 		}
   1037 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1038 		if (nvmreg != 0xffff) {
   1039 			high = (nvmreg >> 12) & 0x0f;
   1040 			low = nvmreg & 0x00ff;
   1041 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1042 			    low);
   1043 		}
   1044 		break;
   1045 	default:
   1046 		break;
   1047 	}
   1048 
   1049 	/* Print the ETrackID */
   1050 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1051 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1052 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1053 
   1054 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1055 		error = ixgbe_allocate_msix(adapter, pa);
   1056 		if (error) {
   1057 			/* Free allocated queue structures first */
   1058 			ixgbe_free_transmit_structures(adapter);
   1059 			ixgbe_free_receive_structures(adapter);
   1060 			free(adapter->queues, M_DEVBUF);
   1061 
   1062 			/* Fallback to legacy interrupt */
   1063 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1064 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1065 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1066 			adapter->num_queues = 1;
   1067 
   1068 			/* Allocate our TX/RX Queues again */
   1069 			if (ixgbe_allocate_queues(adapter)) {
   1070 				error = ENOMEM;
   1071 				goto err_out;
   1072 			}
   1073 		}
   1074 	}
   1075 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1076 		error = ixgbe_allocate_legacy(adapter, pa);
   1077 	if (error)
   1078 		goto err_late;
   1079 
   1080 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1081 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1082 	    ixgbe_handle_link, adapter);
   1083 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1084 	    ixgbe_handle_mod, adapter);
   1085 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1086 	    ixgbe_handle_msf, adapter);
   1087 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1088 	    ixgbe_handle_phy, adapter);
   1089 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1090 		adapter->fdir_si =
   1091 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1092 			ixgbe_reinit_fdir, adapter);
   1093 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1094 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1095 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1096 		&& (adapter->fdir_si == NULL))) {
   1097 		aprint_error_dev(dev,
   1098 		    "could not establish software interrupts ()\n");
   1099 		goto err_out;
   1100 	}
   1101 
   1102 	error = ixgbe_start_hw(hw);
   1103 	switch (error) {
   1104 	case IXGBE_ERR_EEPROM_VERSION:
   1105 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1106 		    "LOM.  Please be aware there may be issues associated "
   1107 		    "with your hardware.\nIf you are experiencing problems "
   1108 		    "please contact your Intel or hardware representative "
   1109 		    "who provided you with this hardware.\n");
   1110 		break;
   1111 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1112 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1113 		error = EIO;
   1114 		goto err_late;
   1115 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1116 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1117 		/* falls thru */
   1118 	default:
   1119 		break;
   1120 	}
   1121 
   1122 	/* Setup OS specific network interface */
   1123 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1124 		goto err_late;
   1125 
   1126 	/*
   1127 	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
   1128 	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
   1129 	 */
   1130 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1131 		uint16_t id1, id2;
   1132 		int oui, model, rev;
   1133 		const char *descr;
   1134 
   1135 		id1 = hw->phy.id >> 16;
   1136 		id2 = hw->phy.id & 0xffff;
   1137 		oui = MII_OUI(id1, id2);
   1138 		model = MII_MODEL(id2);
   1139 		rev = MII_REV(id2);
   1140 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1141 			aprint_normal_dev(dev,
   1142 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1143 			    descr, oui, model, rev);
   1144 		else
   1145 			aprint_normal_dev(dev,
   1146 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1147 			    oui, model, rev);
   1148 	}
   1149 
   1150 	/* Enable the optics for 82599 SFP+ fiber */
   1151 	ixgbe_enable_tx_laser(hw);
   1152 
   1153 	/* Enable power to the phy. */
   1154 	ixgbe_set_phy_power(hw, TRUE);
   1155 
   1156 	/* Initialize statistics */
   1157 	ixgbe_update_stats_counters(adapter);
   1158 
   1159 	/* Check PCIE slot type/speed/width */
   1160 	ixgbe_get_slot_info(adapter);
   1161 
   1162 	/*
   1163 	 * Do time init and sysctl init here, but
   1164 	 * only on the first port of a bypass adapter.
   1165 	 */
   1166 	ixgbe_bypass_init(adapter);
   1167 
   1168 	/* Set an initial dmac value */
   1169 	adapter->dmac = 0;
   1170 	/* Set initial advertised speeds (if applicable) */
   1171 	adapter->advertise = ixgbe_get_advertise(adapter);
   1172 
   1173 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1174 		ixgbe_define_iov_schemas(dev, &error);
   1175 
   1176 	/* Add sysctls */
   1177 	ixgbe_add_device_sysctls(adapter);
   1178 	ixgbe_add_hw_stats(adapter);
   1179 
   1180 	/* For Netmap */
   1181 	adapter->init_locked = ixgbe_init_locked;
   1182 	adapter->stop_locked = ixgbe_stop;
   1183 
   1184 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1185 		ixgbe_netmap_attach(adapter);
   1186 
   1187 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1188 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1189 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1190 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1191 
   1192 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1193 		pmf_class_network_register(dev, adapter->ifp);
   1194 	else
   1195 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1196 
   1197 	INIT_DEBUGOUT("ixgbe_attach: end");
   1198 	adapter->osdep.attached = true;
   1199 
   1200 	return;
   1201 
   1202 err_late:
   1203 	ixgbe_free_transmit_structures(adapter);
   1204 	ixgbe_free_receive_structures(adapter);
   1205 	free(adapter->queues, M_DEVBUF);
   1206 err_out:
   1207 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1208 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1209 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1210 	ixgbe_free_softint(adapter);
   1211 	ixgbe_free_pci_resources(adapter);
   1212 	if (adapter->mta != NULL)
   1213 		free(adapter->mta, M_DEVBUF);
   1214 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1215 
   1216 	return;
   1217 } /* ixgbe_attach */
   1218 
   1219 /************************************************************************
   1220  * ixgbe_check_wol_support
   1221  *
   1222  *   Checks whether the adapter's ports are capable of
   1223  *   Wake On LAN by reading the adapter's NVM.
   1224  *
   1225  *   Sets each port's hw->wol_enabled value depending
   1226  *   on the value read here.
   1227  ************************************************************************/
   1228 static void
   1229 ixgbe_check_wol_support(struct adapter *adapter)
   1230 {
   1231 	struct ixgbe_hw *hw = &adapter->hw;
   1232 	u16             dev_caps = 0;
   1233 
   1234 	/* Find out WoL support for port */
   1235 	adapter->wol_support = hw->wol_enabled = 0;
   1236 	ixgbe_get_device_caps(hw, &dev_caps);
   1237 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1238 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1239 	     hw->bus.func == 0))
   1240 		adapter->wol_support = hw->wol_enabled = 1;
   1241 
   1242 	/* Save initial wake up filter configuration */
   1243 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1244 
   1245 	return;
   1246 } /* ixgbe_check_wol_support */
   1247 
   1248 /************************************************************************
   1249  * ixgbe_setup_interface
   1250  *
   1251  *   Setup networking device structure and register an interface.
   1252  ************************************************************************/
   1253 static int
   1254 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1255 {
   1256 	struct ethercom *ec = &adapter->osdep.ec;
   1257 	struct ifnet   *ifp;
   1258 	int rv;
   1259 
   1260 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1261 
   1262 	ifp = adapter->ifp = &ec->ec_if;
   1263 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1264 	ifp->if_baudrate = IF_Gbps(10);
   1265 	ifp->if_init = ixgbe_init;
   1266 	ifp->if_stop = ixgbe_ifstop;
   1267 	ifp->if_softc = adapter;
   1268 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1269 #ifdef IXGBE_MPSAFE
   1270 	ifp->if_extflags = IFEF_MPSAFE;
   1271 #endif
   1272 	ifp->if_ioctl = ixgbe_ioctl;
   1273 #if __FreeBSD_version >= 1100045
   1274 	/* TSO parameters */
   1275 	ifp->if_hw_tsomax = 65518;
   1276 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1277 	ifp->if_hw_tsomaxsegsize = 2048;
   1278 #endif
   1279 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1280 #if 0
   1281 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1282 #endif
   1283 	} else {
   1284 		ifp->if_transmit = ixgbe_mq_start;
   1285 #if 0
   1286 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1287 #endif
   1288 	}
   1289 	ifp->if_start = ixgbe_legacy_start;
   1290 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1291 	IFQ_SET_READY(&ifp->if_snd);
   1292 
   1293 	rv = if_initialize(ifp);
   1294 	if (rv != 0) {
   1295 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1296 		return rv;
   1297 	}
   1298 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1299 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1300 	/*
   1301 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1302 	 * used.
   1303 	 */
   1304 	if_register(ifp);
   1305 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1306 
   1307 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1308 
   1309 	/*
   1310 	 * Tell the upper layer(s) we support long frames.
   1311 	 */
   1312 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1313 
   1314 	/* Set capability flags */
   1315 	ifp->if_capabilities |= IFCAP_RXCSUM
   1316 			     |  IFCAP_TXCSUM
   1317 			     |  IFCAP_TSOv4
   1318 			     |  IFCAP_TSOv6
   1319 			     |  IFCAP_LRO;
   1320 	ifp->if_capenable = 0;
   1321 
   1322 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1323 	    		    |  ETHERCAP_VLAN_HWCSUM
   1324 	    		    |  ETHERCAP_JUMBO_MTU
   1325 	    		    |  ETHERCAP_VLAN_MTU;
   1326 
   1327 	/* Enable the above capabilities by default */
   1328 	ec->ec_capenable = ec->ec_capabilities;
   1329 
   1330 	/*
   1331 	 * Don't turn this on by default, if vlans are
   1332 	 * created on another pseudo device (eg. lagg)
   1333 	 * then vlan events are not passed thru, breaking
   1334 	 * operation, but with HW FILTER off it works. If
   1335 	 * using vlans directly on the ixgbe driver you can
   1336 	 * enable this and get full hardware tag filtering.
   1337 	 */
   1338 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1339 
   1340 	/*
   1341 	 * Specify the media types supported by this adapter and register
   1342 	 * callbacks to update media and link information
   1343 	 */
   1344 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1345 	    ixgbe_media_status);
   1346 
   1347 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1348 	ixgbe_add_media_types(adapter);
   1349 
   1350 	/* Set autoselect media by default */
   1351 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1352 
   1353 	return (0);
   1354 } /* ixgbe_setup_interface */
   1355 
   1356 /************************************************************************
   1357  * ixgbe_add_media_types
   1358  ************************************************************************/
   1359 static void
   1360 ixgbe_add_media_types(struct adapter *adapter)
   1361 {
   1362 	struct ixgbe_hw *hw = &adapter->hw;
   1363 	device_t        dev = adapter->dev;
   1364 	u64             layer;
   1365 
   1366 	layer = adapter->phy_layer;
   1367 
   1368 #define	ADD(mm, dd)							\
   1369 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1370 
   1371 	/* Media types with matching NetBSD media defines */
   1372 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1373 		ADD(IFM_10G_T | IFM_FDX, 0);
   1374 	}
   1375 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1376 		ADD(IFM_1000_T | IFM_FDX, 0);
   1377 	}
   1378 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1379 		ADD(IFM_100_TX | IFM_FDX, 0);
   1380 	}
   1381 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1382 		ADD(IFM_10_T | IFM_FDX, 0);
   1383 	}
   1384 
   1385 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1386 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1387 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1388 	}
   1389 
   1390 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1391 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1392 		if (hw->phy.multispeed_fiber) {
   1393 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1394 		}
   1395 	}
   1396 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1397 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1398 		if (hw->phy.multispeed_fiber) {
   1399 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1400 		}
   1401 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1402 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1403 	}
   1404 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1405 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1406 	}
   1407 
   1408 #ifdef IFM_ETH_XTYPE
   1409 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1410 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1411 	}
   1412 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1413 		ADD(AIFM_10G_KX4 | IFM_FDX, 0);
   1414 	}
   1415 #else
   1416 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1417 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1418 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1419 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1420 	}
   1421 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1422 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1423 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1424 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1425 	}
   1426 #endif
   1427 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1428 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1429 	}
   1430 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1431 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1432 	}
   1433 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1434 		ADD(IFM_2500_T | IFM_FDX, 0);
   1435 	}
   1436 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1437 		ADD(IFM_5000_T | IFM_FDX, 0);
   1438 	}
   1439 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1440 		device_printf(dev, "Media supported: 1000baseBX\n");
   1441 	/* XXX no ifmedia_set? */
   1442 
   1443 	ADD(IFM_AUTO, 0);
   1444 
   1445 #undef ADD
   1446 } /* ixgbe_add_media_types */
   1447 
   1448 /************************************************************************
   1449  * ixgbe_is_sfp
   1450  ************************************************************************/
   1451 static inline bool
   1452 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1453 {
   1454 	switch (hw->mac.type) {
   1455 	case ixgbe_mac_82598EB:
   1456 		if (hw->phy.type == ixgbe_phy_nl)
   1457 			return TRUE;
   1458 		return FALSE;
   1459 	case ixgbe_mac_82599EB:
   1460 		switch (hw->mac.ops.get_media_type(hw)) {
   1461 		case ixgbe_media_type_fiber:
   1462 		case ixgbe_media_type_fiber_qsfp:
   1463 			return TRUE;
   1464 		default:
   1465 			return FALSE;
   1466 		}
   1467 	case ixgbe_mac_X550EM_x:
   1468 	case ixgbe_mac_X550EM_a:
   1469 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1470 			return TRUE;
   1471 		return FALSE;
   1472 	default:
   1473 		return FALSE;
   1474 	}
   1475 } /* ixgbe_is_sfp */
   1476 
   1477 /************************************************************************
   1478  * ixgbe_config_link
   1479  ************************************************************************/
   1480 static void
   1481 ixgbe_config_link(struct adapter *adapter)
   1482 {
   1483 	struct ixgbe_hw *hw = &adapter->hw;
   1484 	u32             autoneg, err = 0;
   1485 	bool            sfp, negotiate = false;
   1486 
   1487 	sfp = ixgbe_is_sfp(hw);
   1488 
   1489 	if (sfp) {
   1490 		if (hw->phy.multispeed_fiber) {
   1491 			hw->mac.ops.setup_sfp(hw);
   1492 			ixgbe_enable_tx_laser(hw);
   1493 			kpreempt_disable();
   1494 			softint_schedule(adapter->msf_si);
   1495 			kpreempt_enable();
   1496 		} else {
   1497 			kpreempt_disable();
   1498 			softint_schedule(adapter->mod_si);
   1499 			kpreempt_enable();
   1500 		}
   1501 	} else {
   1502 		if (hw->mac.ops.check_link)
   1503 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1504 			    &adapter->link_up, FALSE);
   1505 		if (err)
   1506 			goto out;
   1507 		autoneg = hw->phy.autoneg_advertised;
   1508 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   1509                 	err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1510 			    &negotiate);
   1511 		if (err)
   1512 			goto out;
   1513 		if (hw->mac.ops.setup_link)
   1514                 	err = hw->mac.ops.setup_link(hw, autoneg,
   1515 			    adapter->link_up);
   1516 	}
   1517 out:
   1518 
   1519 	return;
   1520 } /* ixgbe_config_link */
   1521 
   1522 /************************************************************************
   1523  * ixgbe_update_stats_counters - Update board statistics counters.
   1524  ************************************************************************/
   1525 static void
   1526 ixgbe_update_stats_counters(struct adapter *adapter)
   1527 {
   1528 	struct ifnet          *ifp = adapter->ifp;
   1529 	struct ixgbe_hw       *hw = &adapter->hw;
   1530 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1531 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1532 	u64                   total_missed_rx = 0;
   1533 	uint64_t              crcerrs, rlec;
   1534 
   1535 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1536 	stats->crcerrs.ev_count += crcerrs;
   1537 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1538 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1539 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1540 	if (hw->mac.type == ixgbe_mac_X550)
   1541 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1542 
   1543 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1544 		int j = i % adapter->num_queues;
   1545 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1546 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1547 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1548 	}
   1549 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1550 		uint32_t mp;
   1551 		int j = i % adapter->num_queues;
   1552 
   1553 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1554 		/* global total per queue */
   1555 		stats->mpc[j].ev_count += mp;
   1556 		/* running comprehensive total for stats display */
   1557 		total_missed_rx += mp;
   1558 
   1559 		if (hw->mac.type == ixgbe_mac_82598EB)
   1560 			stats->rnbc[j].ev_count
   1561 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1562 
   1563 	}
   1564 	stats->mpctotal.ev_count += total_missed_rx;
   1565 
   1566 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1567 	if ((adapter->link_active == TRUE)
   1568 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1569 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1570 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1571 	}
   1572 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1573 	stats->rlec.ev_count += rlec;
   1574 
   1575 	/* Hardware workaround, gprc counts missed packets */
   1576 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1577 
   1578 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1579 	stats->lxontxc.ev_count += lxon;
   1580 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1581 	stats->lxofftxc.ev_count += lxoff;
   1582 	total = lxon + lxoff;
   1583 
   1584 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1585 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1586 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1587 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1588 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1589 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1590 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1591 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1592 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1593 	} else {
   1594 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1595 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1596 		/* 82598 only has a counter in the high register */
   1597 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1598 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1599 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1600 	}
   1601 
   1602 	/*
   1603 	 * Workaround: mprc hardware is incorrectly counting
   1604 	 * broadcasts, so for now we subtract those.
   1605 	 */
   1606 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1607 	stats->bprc.ev_count += bprc;
   1608 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1609 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1610 
   1611 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1612 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1613 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1614 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1615 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1616 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1617 
   1618 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1619 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1620 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1621 
   1622 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1623 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1624 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1625 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1626 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1627 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1628 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1629 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1630 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1631 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1632 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1633 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1634 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1635 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1636 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1637 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1638 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1639 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1640 	/* Only read FCOE on 82599 */
   1641 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1642 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1643 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1644 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1645 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1646 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1647 	}
   1648 
   1649 	/* Fill out the OS statistics structure */
   1650 	/*
   1651 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1652 	 * adapter->stats counters. It's required to make ifconfig -z
   1653 	 * (SOICZIFDATA) work.
   1654 	 */
   1655 	ifp->if_collisions = 0;
   1656 
   1657 	/* Rx Errors */
   1658 	ifp->if_iqdrops += total_missed_rx;
   1659 	ifp->if_ierrors += crcerrs + rlec;
   1660 } /* ixgbe_update_stats_counters */
   1661 
   1662 /************************************************************************
   1663  * ixgbe_add_hw_stats
   1664  *
   1665  *   Add sysctl variables, one per statistic, to the system.
   1666  ************************************************************************/
   1667 static void
   1668 ixgbe_add_hw_stats(struct adapter *adapter)
   1669 {
   1670 	device_t dev = adapter->dev;
   1671 	const struct sysctlnode *rnode, *cnode;
   1672 	struct sysctllog **log = &adapter->sysctllog;
   1673 	struct tx_ring *txr = adapter->tx_rings;
   1674 	struct rx_ring *rxr = adapter->rx_rings;
   1675 	struct ixgbe_hw *hw = &adapter->hw;
   1676 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1677 	const char *xname = device_xname(dev);
   1678 
   1679 	/* Driver Statistics */
   1680 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1681 	    NULL, xname, "Handled queue in softint");
   1682 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1683 	    NULL, xname, "Requeued in softint");
   1684 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1685 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1686 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1687 	    NULL, xname, "m_defrag() failed");
   1688 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1689 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1690 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1691 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1692 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1693 	    NULL, xname, "Driver tx dma hard fail other");
   1694 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1695 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1696 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1697 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1698 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1699 	    NULL, xname, "Watchdog timeouts");
   1700 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1701 	    NULL, xname, "TSO errors");
   1702 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1703 	    NULL, xname, "Link MSI-X IRQ Handled");
   1704 
   1705 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1706 		snprintf(adapter->queues[i].evnamebuf,
   1707 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1708 		    xname, i);
   1709 		snprintf(adapter->queues[i].namebuf,
   1710 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1711 
   1712 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1713 			aprint_error_dev(dev, "could not create sysctl root\n");
   1714 			break;
   1715 		}
   1716 
   1717 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1718 		    0, CTLTYPE_NODE,
   1719 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1720 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1721 			break;
   1722 
   1723 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1724 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1725 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1726 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1727 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1728 			break;
   1729 
   1730 #if 0 /* XXX msaitoh */
   1731 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1732 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1733 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1734 			NULL, 0, &(adapter->queues[i].irqs),
   1735 		    0, CTL_CREATE, CTL_EOL) != 0)
   1736 			break;
   1737 #endif
   1738 
   1739 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1740 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1741 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1742 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1743 		    0, CTL_CREATE, CTL_EOL) != 0)
   1744 			break;
   1745 
   1746 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1747 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1748 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1749 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1750 		    0, CTL_CREATE, CTL_EOL) != 0)
   1751 			break;
   1752 
   1753 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1754 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1755 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1756 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1757 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1758 		    NULL, adapter->queues[i].evnamebuf,
   1759 		    "Queue No Descriptor Available");
   1760 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1761 		    NULL, adapter->queues[i].evnamebuf,
   1762 		    "Queue Packets Transmitted");
   1763 #ifndef IXGBE_LEGACY_TX
   1764 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1765 		    NULL, adapter->queues[i].evnamebuf,
   1766 		    "Packets dropped in pcq");
   1767 #endif
   1768 
   1769 #ifdef LRO
   1770 		struct lro_ctrl *lro = &rxr->lro;
   1771 #endif /* LRO */
   1772 
   1773 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1774 		    CTLFLAG_READONLY,
   1775 		    CTLTYPE_INT,
   1776 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1777 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1778 		    CTL_CREATE, CTL_EOL) != 0)
   1779 			break;
   1780 
   1781 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1782 		    CTLFLAG_READONLY,
   1783 		    CTLTYPE_INT,
   1784 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1785 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1786 		    CTL_CREATE, CTL_EOL) != 0)
   1787 			break;
   1788 
   1789 		if (i < __arraycount(stats->mpc)) {
   1790 			evcnt_attach_dynamic(&stats->mpc[i],
   1791 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1792 			    "RX Missed Packet Count");
   1793 			if (hw->mac.type == ixgbe_mac_82598EB)
   1794 				evcnt_attach_dynamic(&stats->rnbc[i],
   1795 				    EVCNT_TYPE_MISC, NULL,
   1796 				    adapter->queues[i].evnamebuf,
   1797 				    "Receive No Buffers");
   1798 		}
   1799 		if (i < __arraycount(stats->pxontxc)) {
   1800 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1801 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1802 			    "pxontxc");
   1803 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1804 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1805 			    "pxonrxc");
   1806 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1807 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1808 			    "pxofftxc");
   1809 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1810 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1811 			    "pxoffrxc");
   1812 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1813 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1814 			    "pxon2offc");
   1815 		}
   1816 		if (i < __arraycount(stats->qprc)) {
   1817 			evcnt_attach_dynamic(&stats->qprc[i],
   1818 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1819 			    "qprc");
   1820 			evcnt_attach_dynamic(&stats->qptc[i],
   1821 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1822 			    "qptc");
   1823 			evcnt_attach_dynamic(&stats->qbrc[i],
   1824 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1825 			    "qbrc");
   1826 			evcnt_attach_dynamic(&stats->qbtc[i],
   1827 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1828 			    "qbtc");
   1829 			evcnt_attach_dynamic(&stats->qprdc[i],
   1830 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1831 			    "qprdc");
   1832 		}
   1833 
   1834 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1835 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1836 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1837 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1838 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1839 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1840 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1841 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1842 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1843 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1844 #ifdef LRO
   1845 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1846 				CTLFLAG_RD, &lro->lro_queued, 0,
   1847 				"LRO Queued");
   1848 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1849 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1850 				"LRO Flushed");
   1851 #endif /* LRO */
   1852 	}
   1853 
   1854 	/* MAC stats get their own sub node */
   1855 
   1856 	snprintf(stats->namebuf,
   1857 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1858 
   1859 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1860 	    stats->namebuf, "rx csum offload - IP");
   1861 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1862 	    stats->namebuf, "rx csum offload - L4");
   1863 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1864 	    stats->namebuf, "rx csum offload - IP bad");
   1865 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1866 	    stats->namebuf, "rx csum offload - L4 bad");
   1867 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1868 	    stats->namebuf, "Interrupt conditions zero");
   1869 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1870 	    stats->namebuf, "Legacy interrupts");
   1871 
   1872 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1873 	    stats->namebuf, "CRC Errors");
   1874 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1875 	    stats->namebuf, "Illegal Byte Errors");
   1876 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1877 	    stats->namebuf, "Byte Errors");
   1878 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1879 	    stats->namebuf, "MAC Short Packets Discarded");
   1880 	if (hw->mac.type >= ixgbe_mac_X550)
   1881 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1882 		    stats->namebuf, "Bad SFD");
   1883 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1884 	    stats->namebuf, "Total Packets Missed");
   1885 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1886 	    stats->namebuf, "MAC Local Faults");
   1887 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1888 	    stats->namebuf, "MAC Remote Faults");
   1889 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1890 	    stats->namebuf, "Receive Length Errors");
   1891 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1892 	    stats->namebuf, "Link XON Transmitted");
   1893 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1894 	    stats->namebuf, "Link XON Received");
   1895 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "Link XOFF Transmitted");
   1897 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "Link XOFF Received");
   1899 
   1900 	/* Packet Reception Stats */
   1901 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Total Octets Received");
   1903 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "Good Octets Received");
   1905 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "Total Packets Received");
   1907 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "Good Packets Received");
   1909 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1910 	    stats->namebuf, "Multicast Packets Received");
   1911 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1912 	    stats->namebuf, "Broadcast Packets Received");
   1913 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "64 byte frames received ");
   1915 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1916 	    stats->namebuf, "65-127 byte frames received");
   1917 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1918 	    stats->namebuf, "128-255 byte frames received");
   1919 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1920 	    stats->namebuf, "256-511 byte frames received");
   1921 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1922 	    stats->namebuf, "512-1023 byte frames received");
   1923 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   1924 	    stats->namebuf, "1023-1522 byte frames received");
   1925 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1926 	    stats->namebuf, "Receive Undersized");
   1927 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1928 	    stats->namebuf, "Fragmented Packets Received ");
   1929 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1930 	    stats->namebuf, "Oversized Packets Received");
   1931 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1932 	    stats->namebuf, "Received Jabber");
   1933 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1934 	    stats->namebuf, "Management Packets Received");
   1935 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1936 	    stats->namebuf, "Management Packets Dropped");
   1937 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1938 	    stats->namebuf, "Checksum Errors");
   1939 
   1940 	/* Packet Transmission Stats */
   1941 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "Good Octets Transmitted");
   1943 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "Total Packets Transmitted");
   1945 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "Good Packets Transmitted");
   1947 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "Broadcast Packets Transmitted");
   1949 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1950 	    stats->namebuf, "Multicast Packets Transmitted");
   1951 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1952 	    stats->namebuf, "Management Packets Transmitted");
   1953 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1954 	    stats->namebuf, "64 byte frames transmitted ");
   1955 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1956 	    stats->namebuf, "65-127 byte frames transmitted");
   1957 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1958 	    stats->namebuf, "128-255 byte frames transmitted");
   1959 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1960 	    stats->namebuf, "256-511 byte frames transmitted");
   1961 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1962 	    stats->namebuf, "512-1023 byte frames transmitted");
   1963 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1964 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1965 } /* ixgbe_add_hw_stats */
   1966 
   1967 static void
   1968 ixgbe_clear_evcnt(struct adapter *adapter)
   1969 {
   1970 	struct tx_ring *txr = adapter->tx_rings;
   1971 	struct rx_ring *rxr = adapter->rx_rings;
   1972 	struct ixgbe_hw *hw = &adapter->hw;
   1973 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1974 
   1975 	adapter->handleq.ev_count = 0;
   1976 	adapter->req.ev_count = 0;
   1977 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1978 	adapter->mbuf_defrag_failed.ev_count = 0;
   1979 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1980 	adapter->einval_tx_dma_setup.ev_count = 0;
   1981 	adapter->other_tx_dma_setup.ev_count = 0;
   1982 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1983 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1984 	adapter->watchdog_events.ev_count = 0;
   1985 	adapter->tso_err.ev_count = 0;
   1986 	adapter->link_irq.ev_count = 0;
   1987 
   1988 	txr = adapter->tx_rings;
   1989 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1990 		adapter->queues[i].irqs.ev_count = 0;
   1991 		txr->no_desc_avail.ev_count = 0;
   1992 		txr->total_packets.ev_count = 0;
   1993 		txr->tso_tx.ev_count = 0;
   1994 #ifndef IXGBE_LEGACY_TX
   1995 		txr->pcq_drops.ev_count = 0;
   1996 #endif
   1997 
   1998 		if (i < __arraycount(stats->mpc)) {
   1999 			stats->mpc[i].ev_count = 0;
   2000 			if (hw->mac.type == ixgbe_mac_82598EB)
   2001 				stats->rnbc[i].ev_count = 0;
   2002 		}
   2003 		if (i < __arraycount(stats->pxontxc)) {
   2004 			stats->pxontxc[i].ev_count = 0;
   2005 			stats->pxonrxc[i].ev_count = 0;
   2006 			stats->pxofftxc[i].ev_count = 0;
   2007 			stats->pxoffrxc[i].ev_count = 0;
   2008 			stats->pxon2offc[i].ev_count = 0;
   2009 		}
   2010 		if (i < __arraycount(stats->qprc)) {
   2011 			stats->qprc[i].ev_count = 0;
   2012 			stats->qptc[i].ev_count = 0;
   2013 			stats->qbrc[i].ev_count = 0;
   2014 			stats->qbtc[i].ev_count = 0;
   2015 			stats->qprdc[i].ev_count = 0;
   2016 		}
   2017 
   2018 		rxr->rx_packets.ev_count = 0;
   2019 		rxr->rx_bytes.ev_count = 0;
   2020 		rxr->rx_copies.ev_count = 0;
   2021 		rxr->no_jmbuf.ev_count = 0;
   2022 		rxr->rx_discarded.ev_count = 0;
   2023 	}
   2024 	stats->ipcs.ev_count = 0;
   2025 	stats->l4cs.ev_count = 0;
   2026 	stats->ipcs_bad.ev_count = 0;
   2027 	stats->l4cs_bad.ev_count = 0;
   2028 	stats->intzero.ev_count = 0;
   2029 	stats->legint.ev_count = 0;
   2030 	stats->crcerrs.ev_count = 0;
   2031 	stats->illerrc.ev_count = 0;
   2032 	stats->errbc.ev_count = 0;
   2033 	stats->mspdc.ev_count = 0;
   2034 	stats->mbsdc.ev_count = 0;
   2035 	stats->mpctotal.ev_count = 0;
   2036 	stats->mlfc.ev_count = 0;
   2037 	stats->mrfc.ev_count = 0;
   2038 	stats->rlec.ev_count = 0;
   2039 	stats->lxontxc.ev_count = 0;
   2040 	stats->lxonrxc.ev_count = 0;
   2041 	stats->lxofftxc.ev_count = 0;
   2042 	stats->lxoffrxc.ev_count = 0;
   2043 
   2044 	/* Packet Reception Stats */
   2045 	stats->tor.ev_count = 0;
   2046 	stats->gorc.ev_count = 0;
   2047 	stats->tpr.ev_count = 0;
   2048 	stats->gprc.ev_count = 0;
   2049 	stats->mprc.ev_count = 0;
   2050 	stats->bprc.ev_count = 0;
   2051 	stats->prc64.ev_count = 0;
   2052 	stats->prc127.ev_count = 0;
   2053 	stats->prc255.ev_count = 0;
   2054 	stats->prc511.ev_count = 0;
   2055 	stats->prc1023.ev_count = 0;
   2056 	stats->prc1522.ev_count = 0;
   2057 	stats->ruc.ev_count = 0;
   2058 	stats->rfc.ev_count = 0;
   2059 	stats->roc.ev_count = 0;
   2060 	stats->rjc.ev_count = 0;
   2061 	stats->mngprc.ev_count = 0;
   2062 	stats->mngpdc.ev_count = 0;
   2063 	stats->xec.ev_count = 0;
   2064 
   2065 	/* Packet Transmission Stats */
   2066 	stats->gotc.ev_count = 0;
   2067 	stats->tpt.ev_count = 0;
   2068 	stats->gptc.ev_count = 0;
   2069 	stats->bptc.ev_count = 0;
   2070 	stats->mptc.ev_count = 0;
   2071 	stats->mngptc.ev_count = 0;
   2072 	stats->ptc64.ev_count = 0;
   2073 	stats->ptc127.ev_count = 0;
   2074 	stats->ptc255.ev_count = 0;
   2075 	stats->ptc511.ev_count = 0;
   2076 	stats->ptc1023.ev_count = 0;
   2077 	stats->ptc1522.ev_count = 0;
   2078 }
   2079 
   2080 /************************************************************************
   2081  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2082  *
   2083  *   Retrieves the TDH value from the hardware
   2084  ************************************************************************/
   2085 static int
   2086 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2087 {
   2088 	struct sysctlnode node = *rnode;
   2089 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2090 	uint32_t val;
   2091 
   2092 	if (!txr)
   2093 		return (0);
   2094 
   2095 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2096 	node.sysctl_data = &val;
   2097 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2098 } /* ixgbe_sysctl_tdh_handler */
   2099 
   2100 /************************************************************************
   2101  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2102  *
   2103  *   Retrieves the TDT value from the hardware
   2104  ************************************************************************/
   2105 static int
   2106 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2107 {
   2108 	struct sysctlnode node = *rnode;
   2109 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2110 	uint32_t val;
   2111 
   2112 	if (!txr)
   2113 		return (0);
   2114 
   2115 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2116 	node.sysctl_data = &val;
   2117 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2118 } /* ixgbe_sysctl_tdt_handler */
   2119 
   2120 /************************************************************************
   2121  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2122  *
   2123  *   Retrieves the RDH value from the hardware
   2124  ************************************************************************/
   2125 static int
   2126 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2127 {
   2128 	struct sysctlnode node = *rnode;
   2129 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2130 	uint32_t val;
   2131 
   2132 	if (!rxr)
   2133 		return (0);
   2134 
   2135 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2136 	node.sysctl_data = &val;
   2137 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2138 } /* ixgbe_sysctl_rdh_handler */
   2139 
   2140 /************************************************************************
   2141  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2142  *
   2143  *   Retrieves the RDT value from the hardware
   2144  ************************************************************************/
   2145 static int
   2146 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2147 {
   2148 	struct sysctlnode node = *rnode;
   2149 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2150 	uint32_t val;
   2151 
   2152 	if (!rxr)
   2153 		return (0);
   2154 
   2155 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2156 	node.sysctl_data = &val;
   2157 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2158 } /* ixgbe_sysctl_rdt_handler */
   2159 
   2160 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2161 /************************************************************************
   2162  * ixgbe_register_vlan
   2163  *
   2164  *   Run via vlan config EVENT, it enables us to use the
   2165  *   HW Filter table since we can get the vlan id. This
   2166  *   just creates the entry in the soft version of the
   2167  *   VFTA, init will repopulate the real table.
   2168  ************************************************************************/
   2169 static void
   2170 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2171 {
   2172 	struct adapter	*adapter = ifp->if_softc;
   2173 	u16		index, bit;
   2174 
   2175 	if (ifp->if_softc != arg)   /* Not our event */
   2176 		return;
   2177 
   2178 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2179 		return;
   2180 
   2181 	IXGBE_CORE_LOCK(adapter);
   2182 	index = (vtag >> 5) & 0x7F;
   2183 	bit = vtag & 0x1F;
   2184 	adapter->shadow_vfta[index] |= (1 << bit);
   2185 	ixgbe_setup_vlan_hw_support(adapter);
   2186 	IXGBE_CORE_UNLOCK(adapter);
   2187 } /* ixgbe_register_vlan */
   2188 
   2189 /************************************************************************
   2190  * ixgbe_unregister_vlan
   2191  *
   2192  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2193  ************************************************************************/
   2194 static void
   2195 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2196 {
   2197 	struct adapter	*adapter = ifp->if_softc;
   2198 	u16		index, bit;
   2199 
   2200 	if (ifp->if_softc != arg)
   2201 		return;
   2202 
   2203 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2204 		return;
   2205 
   2206 	IXGBE_CORE_LOCK(adapter);
   2207 	index = (vtag >> 5) & 0x7F;
   2208 	bit = vtag & 0x1F;
   2209 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2210 	/* Re-init to load the changes */
   2211 	ixgbe_setup_vlan_hw_support(adapter);
   2212 	IXGBE_CORE_UNLOCK(adapter);
   2213 } /* ixgbe_unregister_vlan */
   2214 #endif
   2215 
   2216 static void
   2217 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2218 {
   2219 	struct ethercom *ec = &adapter->osdep.ec;
   2220 	struct ixgbe_hw *hw = &adapter->hw;
   2221 	struct rx_ring	*rxr;
   2222 	int             i;
   2223 	u32		ctrl;
   2224 
   2225 
   2226 	/*
   2227 	 * We get here thru init_locked, meaning
   2228 	 * a soft reset, this has already cleared
   2229 	 * the VFTA and other state, so if there
   2230 	 * have been no vlan's registered do nothing.
   2231 	 */
   2232 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2233 		return;
   2234 
   2235 	/* Setup the queues for vlans */
   2236 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2237 		for (i = 0; i < adapter->num_queues; i++) {
   2238 			rxr = &adapter->rx_rings[i];
   2239 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2240 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2241 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2242 				ctrl |= IXGBE_RXDCTL_VME;
   2243 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2244 			}
   2245 			rxr->vtag_strip = TRUE;
   2246 		}
   2247 	}
   2248 
   2249 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2250 		return;
   2251 	/*
   2252 	 * A soft reset zero's out the VFTA, so
   2253 	 * we need to repopulate it now.
   2254 	 */
   2255 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2256 		if (adapter->shadow_vfta[i] != 0)
   2257 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2258 			    adapter->shadow_vfta[i]);
   2259 
   2260 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2261 	/* Enable the Filter Table if enabled */
   2262 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2263 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2264 		ctrl |= IXGBE_VLNCTRL_VFE;
   2265 	}
   2266 	if (hw->mac.type == ixgbe_mac_82598EB)
   2267 		ctrl |= IXGBE_VLNCTRL_VME;
   2268 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2269 } /* ixgbe_setup_vlan_hw_support */
   2270 
   2271 /************************************************************************
   2272  * ixgbe_get_slot_info
   2273  *
   2274  *   Get the width and transaction speed of
   2275  *   the slot this adapter is plugged into.
   2276  ************************************************************************/
   2277 static void
   2278 ixgbe_get_slot_info(struct adapter *adapter)
   2279 {
   2280 	device_t		dev = adapter->dev;
   2281 	struct ixgbe_hw		*hw = &adapter->hw;
   2282 	u32                   offset;
   2283 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2284 	u16			link;
   2285 	int                   bus_info_valid = TRUE;
   2286 
   2287 	/* Some devices are behind an internal bridge */
   2288 	switch (hw->device_id) {
   2289 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2290 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2291 		goto get_parent_info;
   2292 	default:
   2293 		break;
   2294 	}
   2295 
   2296 	ixgbe_get_bus_info(hw);
   2297 
   2298 	/*
   2299 	 * Some devices don't use PCI-E, but there is no need
   2300 	 * to display "Unknown" for bus speed and width.
   2301 	 */
   2302 	switch (hw->mac.type) {
   2303 	case ixgbe_mac_X550EM_x:
   2304 	case ixgbe_mac_X550EM_a:
   2305 		return;
   2306 	default:
   2307 		goto display;
   2308 	}
   2309 
   2310 get_parent_info:
   2311 	/*
   2312 	 * For the Quad port adapter we need to parse back
   2313 	 * up the PCI tree to find the speed of the expansion
   2314 	 * slot into which this adapter is plugged. A bit more work.
   2315 	 */
   2316 	dev = device_parent(device_parent(dev));
   2317 #if 0
   2318 #ifdef IXGBE_DEBUG
   2319 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2320 	    pci_get_slot(dev), pci_get_function(dev));
   2321 #endif
   2322 	dev = device_parent(device_parent(dev));
   2323 #ifdef IXGBE_DEBUG
   2324 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2325 	    pci_get_slot(dev), pci_get_function(dev));
   2326 #endif
   2327 #endif
   2328 	/* Now get the PCI Express Capabilities offset */
   2329 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2330 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2331 		/*
   2332 		 * Hmm...can't get PCI-Express capabilities.
   2333 		 * Falling back to default method.
   2334 		 */
   2335 		bus_info_valid = FALSE;
   2336 		ixgbe_get_bus_info(hw);
   2337 		goto display;
   2338 	}
   2339 	/* ...and read the Link Status Register */
   2340 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2341 	    offset + PCIE_LCSR) >> 16;
   2342 	ixgbe_set_pci_config_data_generic(hw, link);
   2343 
   2344 display:
   2345 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2346 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2347 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2348 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2349 	     "Unknown"),
   2350 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2351 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2352 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2353 	     "Unknown"));
   2354 
   2355 	if (bus_info_valid) {
   2356 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2357 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2358 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2359 			device_printf(dev, "PCI-Express bandwidth available"
   2360 			    " for this card\n     is not sufficient for"
   2361 			    " optimal performance.\n");
   2362 			device_printf(dev, "For optimal performance a x8 "
   2363 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2364 		}
   2365 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2366 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2367 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2368 			device_printf(dev, "PCI-Express bandwidth available"
   2369 			    " for this card\n     is not sufficient for"
   2370 			    " optimal performance.\n");
   2371 			device_printf(dev, "For optimal performance a x8 "
   2372 			    "PCIE Gen3 slot is required.\n");
   2373 		}
   2374 	} else
   2375 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2376 
   2377 	return;
   2378 } /* ixgbe_get_slot_info */
   2379 
   2380 /************************************************************************
   2381  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2382  ************************************************************************/
   2383 static inline void
   2384 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2385 {
   2386 	struct ixgbe_hw *hw = &adapter->hw;
   2387 	u64             queue = (u64)(1ULL << vector);
   2388 	u32             mask;
   2389 
   2390 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2391 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2392 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2393 	} else {
   2394 		mask = (queue & 0xFFFFFFFF);
   2395 		if (mask)
   2396 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2397 		mask = (queue >> 32);
   2398 		if (mask)
   2399 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2400 	}
   2401 } /* ixgbe_enable_queue */
   2402 
   2403 /************************************************************************
   2404  * ixgbe_disable_queue
   2405  ************************************************************************/
   2406 static inline void
   2407 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2408 {
   2409 	struct ixgbe_hw *hw = &adapter->hw;
   2410 	u64             queue = (u64)(1ULL << vector);
   2411 	u32             mask;
   2412 
   2413 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2414 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2415 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2416 	} else {
   2417 		mask = (queue & 0xFFFFFFFF);
   2418 		if (mask)
   2419 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2420 		mask = (queue >> 32);
   2421 		if (mask)
   2422 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2423 	}
   2424 } /* ixgbe_disable_queue */
   2425 
   2426 /************************************************************************
   2427  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2428  ************************************************************************/
   2429 static int
   2430 ixgbe_msix_que(void *arg)
   2431 {
   2432 	struct ix_queue	*que = arg;
   2433 	struct adapter  *adapter = que->adapter;
   2434 	struct ifnet    *ifp = adapter->ifp;
   2435 	struct tx_ring	*txr = que->txr;
   2436 	struct rx_ring	*rxr = que->rxr;
   2437 	bool		more;
   2438 	u32		newitr = 0;
   2439 
   2440 	/* Protect against spurious interrupts */
   2441 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2442 		return 0;
   2443 
   2444 	ixgbe_disable_queue(adapter, que->msix);
   2445 	++que->irqs.ev_count;
   2446 
   2447 #ifdef __NetBSD__
   2448 	/* Don't run ixgbe_rxeof in interrupt context */
   2449 	more = true;
   2450 #else
   2451 	more = ixgbe_rxeof(que);
   2452 #endif
   2453 
   2454 	IXGBE_TX_LOCK(txr);
   2455 	ixgbe_txeof(txr);
   2456 	IXGBE_TX_UNLOCK(txr);
   2457 
   2458 	/* Do AIM now? */
   2459 
   2460 	if (adapter->enable_aim == false)
   2461 		goto no_calc;
   2462 	/*
   2463 	 * Do Adaptive Interrupt Moderation:
   2464 	 *  - Write out last calculated setting
   2465 	 *  - Calculate based on average size over
   2466 	 *    the last interval.
   2467 	 */
   2468 	if (que->eitr_setting)
   2469 		ixgbe_eitr_write(que, que->eitr_setting);
   2470 
   2471 	que->eitr_setting = 0;
   2472 
   2473 	/* Idle, do nothing */
   2474         if ((txr->bytes == 0) && (rxr->bytes == 0))
   2475                 goto no_calc;
   2476 
   2477 	if ((txr->bytes) && (txr->packets))
   2478 		newitr = txr->bytes/txr->packets;
   2479 	if ((rxr->bytes) && (rxr->packets))
   2480 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2481 	newitr += 24; /* account for hardware frame, crc */
   2482 
   2483 	/* set an upper boundary */
   2484 	newitr = min(newitr, 3000);
   2485 
   2486 	/* Be nice to the mid range */
   2487 	if ((newitr > 300) && (newitr < 1200))
   2488 		newitr = (newitr / 3);
   2489 	else
   2490 		newitr = (newitr / 2);
   2491 
   2492 	/*
   2493 	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
   2494 	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
   2495 	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
   2496 	 * on 1G and higher.
   2497 	 */
   2498 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2499 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2500 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
   2501 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
   2502 	}
   2503 
   2504         /* save for next interrupt */
   2505         que->eitr_setting = newitr;
   2506 
   2507 	/* Reset state */
   2508 	txr->bytes = 0;
   2509 	txr->packets = 0;
   2510 	rxr->bytes = 0;
   2511 	rxr->packets = 0;
   2512 
   2513 no_calc:
   2514 	if (more)
   2515 		softint_schedule(que->que_si);
   2516 	else
   2517 		ixgbe_enable_queue(adapter, que->msix);
   2518 
   2519 	return 1;
   2520 } /* ixgbe_msix_que */
   2521 
   2522 /************************************************************************
   2523  * ixgbe_media_status - Media Ioctl callback
   2524  *
   2525  *   Called whenever the user queries the status of
   2526  *   the interface using ifconfig.
   2527  ************************************************************************/
   2528 static void
   2529 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2530 {
   2531 	struct adapter *adapter = ifp->if_softc;
   2532 	struct ixgbe_hw *hw = &adapter->hw;
   2533 	int layer;
   2534 
   2535 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2536 	IXGBE_CORE_LOCK(adapter);
   2537 	ixgbe_update_link_status(adapter);
   2538 
   2539 	ifmr->ifm_status = IFM_AVALID;
   2540 	ifmr->ifm_active = IFM_ETHER;
   2541 
   2542 	if (!adapter->link_active) {
   2543 		ifmr->ifm_active |= IFM_NONE;
   2544 		IXGBE_CORE_UNLOCK(adapter);
   2545 		return;
   2546 	}
   2547 
   2548 	ifmr->ifm_status |= IFM_ACTIVE;
   2549 	layer = adapter->phy_layer;
   2550 
   2551 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2552 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2553 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2554 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2555 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2556 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2557 		switch (adapter->link_speed) {
   2558 		case IXGBE_LINK_SPEED_10GB_FULL:
   2559 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2560 			break;
   2561 		case IXGBE_LINK_SPEED_5GB_FULL:
   2562 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2563 			break;
   2564 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2565 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2566 			break;
   2567 		case IXGBE_LINK_SPEED_1GB_FULL:
   2568 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2569 			break;
   2570 		case IXGBE_LINK_SPEED_100_FULL:
   2571 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2572 			break;
   2573 		case IXGBE_LINK_SPEED_10_FULL:
   2574 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2575 			break;
   2576 		}
   2577 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2578 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2579 		switch (adapter->link_speed) {
   2580 		case IXGBE_LINK_SPEED_10GB_FULL:
   2581 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2582 			break;
   2583 		}
   2584 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2585 		switch (adapter->link_speed) {
   2586 		case IXGBE_LINK_SPEED_10GB_FULL:
   2587 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2588 			break;
   2589 		case IXGBE_LINK_SPEED_1GB_FULL:
   2590 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2591 			break;
   2592 		}
   2593 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2594 		switch (adapter->link_speed) {
   2595 		case IXGBE_LINK_SPEED_10GB_FULL:
   2596 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2597 			break;
   2598 		case IXGBE_LINK_SPEED_1GB_FULL:
   2599 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2600 			break;
   2601 		}
   2602 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2603 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2604 		switch (adapter->link_speed) {
   2605 		case IXGBE_LINK_SPEED_10GB_FULL:
   2606 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2607 			break;
   2608 		case IXGBE_LINK_SPEED_1GB_FULL:
   2609 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2610 			break;
   2611 		}
   2612 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2613 		switch (adapter->link_speed) {
   2614 		case IXGBE_LINK_SPEED_10GB_FULL:
   2615 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2616 			break;
   2617 		}
   2618 	/*
   2619 	 * XXX: These need to use the proper media types once
   2620 	 * they're added.
   2621 	 */
   2622 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2623 		switch (adapter->link_speed) {
   2624 		case IXGBE_LINK_SPEED_10GB_FULL:
   2625 #ifndef IFM_ETH_XTYPE
   2626 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2627 #else
   2628 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2629 #endif
   2630 			break;
   2631 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2632 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2633 			break;
   2634 		case IXGBE_LINK_SPEED_1GB_FULL:
   2635 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2636 			break;
   2637 		}
   2638 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2639 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2640 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2641 		switch (adapter->link_speed) {
   2642 		case IXGBE_LINK_SPEED_10GB_FULL:
   2643 #ifndef IFM_ETH_XTYPE
   2644 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2645 #else
   2646 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2647 #endif
   2648 			break;
   2649 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2650 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2651 			break;
   2652 		case IXGBE_LINK_SPEED_1GB_FULL:
   2653 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2654 			break;
   2655 		}
   2656 
   2657 	/* If nothing is recognized... */
   2658 #if 0
   2659 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2660 		ifmr->ifm_active |= IFM_UNKNOWN;
   2661 #endif
   2662 
   2663 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2664 
   2665 	/* Display current flow control setting used on link */
   2666 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2667 	    hw->fc.current_mode == ixgbe_fc_full)
   2668 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2669 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2670 	    hw->fc.current_mode == ixgbe_fc_full)
   2671 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2672 
   2673 	IXGBE_CORE_UNLOCK(adapter);
   2674 
   2675 	return;
   2676 } /* ixgbe_media_status */
   2677 
   2678 /************************************************************************
   2679  * ixgbe_media_change - Media Ioctl callback
   2680  *
   2681  *   Called when the user changes speed/duplex using
   2682  *   media/mediopt option with ifconfig.
   2683  ************************************************************************/
   2684 static int
   2685 ixgbe_media_change(struct ifnet *ifp)
   2686 {
   2687 	struct adapter   *adapter = ifp->if_softc;
   2688 	struct ifmedia   *ifm = &adapter->media;
   2689 	struct ixgbe_hw  *hw = &adapter->hw;
   2690 	ixgbe_link_speed speed = 0;
   2691 	ixgbe_link_speed link_caps = 0;
   2692 	bool negotiate = false;
   2693 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2694 
   2695 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2696 
   2697 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2698 		return (EINVAL);
   2699 
   2700 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2701 		return (ENODEV);
   2702 
   2703 	/*
   2704 	 * We don't actually need to check against the supported
   2705 	 * media types of the adapter; ifmedia will take care of
   2706 	 * that for us.
   2707 	 */
   2708 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2709 	case IFM_AUTO:
   2710 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2711 		    &negotiate);
   2712 		if (err != IXGBE_SUCCESS) {
   2713 			device_printf(adapter->dev, "Unable to determine "
   2714 			    "supported advertise speeds\n");
   2715 			return (ENODEV);
   2716 		}
   2717 		speed |= link_caps;
   2718 		break;
   2719 	case IFM_10G_T:
   2720 	case IFM_10G_LRM:
   2721 	case IFM_10G_LR:
   2722 	case IFM_10G_TWINAX:
   2723 #ifndef IFM_ETH_XTYPE
   2724 	case IFM_10G_SR: /* KR, too */
   2725 	case IFM_10G_CX4: /* KX4 */
   2726 #else
   2727 	case IFM_10G_KR:
   2728 	case IFM_10G_KX4:
   2729 #endif
   2730 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2731 		break;
   2732 	case IFM_5000_T:
   2733 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2734 		break;
   2735 	case IFM_2500_T:
   2736 	case IFM_2500_KX:
   2737 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2738 		break;
   2739 	case IFM_1000_T:
   2740 	case IFM_1000_LX:
   2741 	case IFM_1000_SX:
   2742 	case IFM_1000_KX:
   2743 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2744 		break;
   2745 	case IFM_100_TX:
   2746 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2747 		break;
   2748 	case IFM_10_T:
   2749 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2750 		break;
   2751 	default:
   2752 		goto invalid;
   2753 	}
   2754 
   2755 	hw->mac.autotry_restart = TRUE;
   2756 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2757 	adapter->advertise = 0;
   2758 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2759 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2760 			adapter->advertise |= 1 << 2;
   2761 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2762 			adapter->advertise |= 1 << 1;
   2763 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2764 			adapter->advertise |= 1 << 0;
   2765 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2766 			adapter->advertise |= 1 << 3;
   2767 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2768 			adapter->advertise |= 1 << 4;
   2769 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2770 			adapter->advertise |= 1 << 5;
   2771 	}
   2772 
   2773 	return (0);
   2774 
   2775 invalid:
   2776 	device_printf(adapter->dev, "Invalid media type!\n");
   2777 
   2778 	return (EINVAL);
   2779 } /* ixgbe_media_change */
   2780 
   2781 /************************************************************************
   2782  * ixgbe_set_promisc
   2783  ************************************************************************/
   2784 static void
   2785 ixgbe_set_promisc(struct adapter *adapter)
   2786 {
   2787 	struct ifnet *ifp = adapter->ifp;
   2788 	int          mcnt = 0;
   2789 	u32          rctl;
   2790 	struct ether_multi *enm;
   2791 	struct ether_multistep step;
   2792 	struct ethercom *ec = &adapter->osdep.ec;
   2793 
   2794 	KASSERT(mutex_owned(&adapter->core_mtx));
   2795 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2796 	rctl &= (~IXGBE_FCTRL_UPE);
   2797 	if (ifp->if_flags & IFF_ALLMULTI)
   2798 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2799 	else {
   2800 		ETHER_LOCK(ec);
   2801 		ETHER_FIRST_MULTI(step, ec, enm);
   2802 		while (enm != NULL) {
   2803 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2804 				break;
   2805 			mcnt++;
   2806 			ETHER_NEXT_MULTI(step, enm);
   2807 		}
   2808 		ETHER_UNLOCK(ec);
   2809 	}
   2810 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2811 		rctl &= (~IXGBE_FCTRL_MPE);
   2812 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2813 
   2814 	if (ifp->if_flags & IFF_PROMISC) {
   2815 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2816 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2817 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2818 		rctl |= IXGBE_FCTRL_MPE;
   2819 		rctl &= ~IXGBE_FCTRL_UPE;
   2820 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2821 	}
   2822 } /* ixgbe_set_promisc */
   2823 
   2824 /************************************************************************
   2825  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2826  ************************************************************************/
   2827 static int
   2828 ixgbe_msix_link(void *arg)
   2829 {
   2830 	struct adapter	*adapter = arg;
   2831 	struct ixgbe_hw *hw = &adapter->hw;
   2832 	u32		eicr, eicr_mask;
   2833 	s32             retval;
   2834 
   2835 	++adapter->link_irq.ev_count;
   2836 
   2837 	/* Pause other interrupts */
   2838 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2839 
   2840 	/* First get the cause */
   2841 	/*
   2842 	 * The specifications of 82598, 82599, X540 and X550 say EICS register
   2843 	 * is write only. However, Linux says it is a workaround for silicon
   2844 	 * errata to read EICS instead of EICR to get interrupt cause. It seems
   2845 	 * there is a problem about read clear mechanism for EICR register.
   2846 	 */
   2847 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2848 	/* Be sure the queue bits are not cleared */
   2849 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2850 	/* Clear interrupt with write */
   2851 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2852 
   2853 	/* Link status change */
   2854 	if (eicr & IXGBE_EICR_LSC) {
   2855 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2856 		softint_schedule(adapter->link_si);
   2857 	}
   2858 
   2859 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2860 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2861 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2862 			/* This is probably overkill :) */
   2863 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   2864 				return 1;
   2865 			/* Disable the interrupt */
   2866 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2867 			softint_schedule(adapter->fdir_si);
   2868 		}
   2869 
   2870 		if (eicr & IXGBE_EICR_ECC) {
   2871 			device_printf(adapter->dev,
   2872 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2873 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2874 		}
   2875 
   2876 		/* Check for over temp condition */
   2877 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2878 			switch (adapter->hw.mac.type) {
   2879 			case ixgbe_mac_X550EM_a:
   2880 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2881 					break;
   2882 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2883 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2884 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2885 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2886 				retval = hw->phy.ops.check_overtemp(hw);
   2887 				if (retval != IXGBE_ERR_OVERTEMP)
   2888 					break;
   2889 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2890 				device_printf(adapter->dev, "System shutdown required!\n");
   2891 				break;
   2892 			default:
   2893 				if (!(eicr & IXGBE_EICR_TS))
   2894 					break;
   2895 				retval = hw->phy.ops.check_overtemp(hw);
   2896 				if (retval != IXGBE_ERR_OVERTEMP)
   2897 					break;
   2898 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2899 				device_printf(adapter->dev, "System shutdown required!\n");
   2900 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2901 				break;
   2902 			}
   2903 		}
   2904 
   2905 		/* Check for VF message */
   2906 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2907 		    (eicr & IXGBE_EICR_MAILBOX))
   2908 			softint_schedule(adapter->mbx_si);
   2909 	}
   2910 
   2911 	if (ixgbe_is_sfp(hw)) {
   2912 		/* Pluggable optics-related interrupt */
   2913 		if (hw->mac.type >= ixgbe_mac_X540)
   2914 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2915 		else
   2916 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2917 
   2918 		if (eicr & eicr_mask) {
   2919 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2920 			softint_schedule(adapter->mod_si);
   2921 		}
   2922 
   2923 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2924 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2925 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2926 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2927 			softint_schedule(adapter->msf_si);
   2928 		}
   2929 	}
   2930 
   2931 	/* Check for fan failure */
   2932 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2933 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2934 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2935 	}
   2936 
   2937 	/* External PHY interrupt */
   2938 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2939 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2940 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2941 		softint_schedule(adapter->phy_si);
   2942  	}
   2943 
   2944 	/* Re-enable other interrupts */
   2945 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2946 	return 1;
   2947 } /* ixgbe_msix_link */
   2948 
   2949 static void
   2950 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
   2951 {
   2952 	struct adapter *adapter = que->adapter;
   2953 
   2954         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   2955                 itr |= itr << 16;
   2956         else
   2957                 itr |= IXGBE_EITR_CNT_WDIS;
   2958 
   2959 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2960 	    itr);
   2961 }
   2962 
   2963 
   2964 /************************************************************************
   2965  * ixgbe_sysctl_interrupt_rate_handler
   2966  ************************************************************************/
   2967 static int
   2968 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2969 {
   2970 	struct sysctlnode node = *rnode;
   2971 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2972 	struct adapter  *adapter = que->adapter;
   2973 	uint32_t reg, usec, rate;
   2974 	int error;
   2975 
   2976 	if (que == NULL)
   2977 		return 0;
   2978 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2979 	usec = ((reg & 0x0FF8) >> 3);
   2980 	if (usec > 0)
   2981 		rate = 500000 / usec;
   2982 	else
   2983 		rate = 0;
   2984 	node.sysctl_data = &rate;
   2985 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2986 	if (error || newp == NULL)
   2987 		return error;
   2988 	reg &= ~0xfff; /* default, no limitation */
   2989 	if (rate > 0 && rate < 500000) {
   2990 		if (rate < 1000)
   2991 			rate = 1000;
   2992 		reg |= ((4000000/rate) & 0xff8);
   2993 		/*
   2994 		 * When RSC is used, ITR interval must be larger than
   2995 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   2996 		 * The minimum value is always greater than 2us on 100M
   2997 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   2998 		 */
   2999 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3000 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3001 			if ((adapter->num_queues > 1)
   3002 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3003 				return EINVAL;
   3004 		}
   3005 		ixgbe_max_interrupt_rate = rate;
   3006 	} else
   3007 		ixgbe_max_interrupt_rate = 0;
   3008 	ixgbe_eitr_write(que, reg);
   3009 
   3010 	return (0);
   3011 } /* ixgbe_sysctl_interrupt_rate_handler */
   3012 
   3013 const struct sysctlnode *
   3014 ixgbe_sysctl_instance(struct adapter *adapter)
   3015 {
   3016 	const char *dvname;
   3017 	struct sysctllog **log;
   3018 	int rc;
   3019 	const struct sysctlnode *rnode;
   3020 
   3021 	if (adapter->sysctltop != NULL)
   3022 		return adapter->sysctltop;
   3023 
   3024 	log = &adapter->sysctllog;
   3025 	dvname = device_xname(adapter->dev);
   3026 
   3027 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3028 	    0, CTLTYPE_NODE, dvname,
   3029 	    SYSCTL_DESCR("ixgbe information and settings"),
   3030 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3031 		goto err;
   3032 
   3033 	return rnode;
   3034 err:
   3035 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3036 	return NULL;
   3037 }
   3038 
   3039 /************************************************************************
   3040  * ixgbe_add_device_sysctls
   3041  ************************************************************************/
   3042 static void
   3043 ixgbe_add_device_sysctls(struct adapter *adapter)
   3044 {
   3045 	device_t               dev = adapter->dev;
   3046 	struct ixgbe_hw        *hw = &adapter->hw;
   3047 	struct sysctllog **log;
   3048 	const struct sysctlnode *rnode, *cnode;
   3049 
   3050 	log = &adapter->sysctllog;
   3051 
   3052 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3053 		aprint_error_dev(dev, "could not create sysctl root\n");
   3054 		return;
   3055 	}
   3056 
   3057 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3058 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3059 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3060 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3061 		aprint_error_dev(dev, "could not create sysctl\n");
   3062 
   3063 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3064 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3065 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3066 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3067 		aprint_error_dev(dev, "could not create sysctl\n");
   3068 
   3069 	/* Sysctls for all devices */
   3070 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3071 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3072 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3073 	    CTL_EOL) != 0)
   3074 		aprint_error_dev(dev, "could not create sysctl\n");
   3075 
   3076 	adapter->enable_aim = ixgbe_enable_aim;
   3077 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3078 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3079 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3080 		aprint_error_dev(dev, "could not create sysctl\n");
   3081 
   3082 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3083 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3084 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3085 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3086 	    CTL_EOL) != 0)
   3087 		aprint_error_dev(dev, "could not create sysctl\n");
   3088 
   3089 #ifdef IXGBE_DEBUG
   3090 	/* testing sysctls (for all devices) */
   3091 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3092 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3093 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3094 	    CTL_EOL) != 0)
   3095 		aprint_error_dev(dev, "could not create sysctl\n");
   3096 
   3097 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3098 	    CTLTYPE_STRING, "print_rss_config",
   3099 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3100 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3101 	    CTL_EOL) != 0)
   3102 		aprint_error_dev(dev, "could not create sysctl\n");
   3103 #endif
   3104 	/* for X550 series devices */
   3105 	if (hw->mac.type >= ixgbe_mac_X550)
   3106 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3107 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3108 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3109 		    CTL_EOL) != 0)
   3110 			aprint_error_dev(dev, "could not create sysctl\n");
   3111 
   3112 	/* for WoL-capable devices */
   3113 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3114 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3115 		    CTLTYPE_BOOL, "wol_enable",
   3116 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3117 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3118 		    CTL_EOL) != 0)
   3119 			aprint_error_dev(dev, "could not create sysctl\n");
   3120 
   3121 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3122 		    CTLTYPE_INT, "wufc",
   3123 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3124 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3125 		    CTL_EOL) != 0)
   3126 			aprint_error_dev(dev, "could not create sysctl\n");
   3127 	}
   3128 
   3129 	/* for X552/X557-AT devices */
   3130 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3131 		const struct sysctlnode *phy_node;
   3132 
   3133 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3134 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3135 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3136 			aprint_error_dev(dev, "could not create sysctl\n");
   3137 			return;
   3138 		}
   3139 
   3140 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3141 		    CTLTYPE_INT, "temp",
   3142 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3143 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3144 		    CTL_EOL) != 0)
   3145 			aprint_error_dev(dev, "could not create sysctl\n");
   3146 
   3147 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3148 		    CTLTYPE_INT, "overtemp_occurred",
   3149 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3150 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3151 		    CTL_CREATE, CTL_EOL) != 0)
   3152 			aprint_error_dev(dev, "could not create sysctl\n");
   3153 	}
   3154 
   3155 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3156 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3157 		    CTLTYPE_INT, "eee_state",
   3158 		    SYSCTL_DESCR("EEE Power Save State"),
   3159 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3160 		    CTL_EOL) != 0)
   3161 			aprint_error_dev(dev, "could not create sysctl\n");
   3162 	}
   3163 } /* ixgbe_add_device_sysctls */
   3164 
   3165 /************************************************************************
   3166  * ixgbe_allocate_pci_resources
   3167  ************************************************************************/
   3168 static int
   3169 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3170     const struct pci_attach_args *pa)
   3171 {
   3172 	pcireg_t	memtype;
   3173 	device_t dev = adapter->dev;
   3174 	bus_addr_t addr;
   3175 	int flags;
   3176 
   3177 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3178 	switch (memtype) {
   3179 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3180 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3181 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3182 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3183 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3184 			goto map_err;
   3185 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3186 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3187 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3188 		}
   3189 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3190 		     adapter->osdep.mem_size, flags,
   3191 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3192 map_err:
   3193 			adapter->osdep.mem_size = 0;
   3194 			aprint_error_dev(dev, "unable to map BAR0\n");
   3195 			return ENXIO;
   3196 		}
   3197 		break;
   3198 	default:
   3199 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3200 		return ENXIO;
   3201 	}
   3202 
   3203 	return (0);
   3204 } /* ixgbe_allocate_pci_resources */
   3205 
   3206 static void
   3207 ixgbe_free_softint(struct adapter *adapter)
   3208 {
   3209 	struct ix_queue *que = adapter->queues;
   3210 	struct tx_ring *txr = adapter->tx_rings;
   3211 	int i;
   3212 
   3213 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3214 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3215 			if (txr->txr_si != NULL)
   3216 				softint_disestablish(txr->txr_si);
   3217 		}
   3218 		if (que->que_si != NULL)
   3219 			softint_disestablish(que->que_si);
   3220 	}
   3221 
   3222 	/* Drain the Link queue */
   3223 	if (adapter->link_si != NULL) {
   3224 		softint_disestablish(adapter->link_si);
   3225 		adapter->link_si = NULL;
   3226 	}
   3227 	if (adapter->mod_si != NULL) {
   3228 		softint_disestablish(adapter->mod_si);
   3229 		adapter->mod_si = NULL;
   3230 	}
   3231 	if (adapter->msf_si != NULL) {
   3232 		softint_disestablish(adapter->msf_si);
   3233 		adapter->msf_si = NULL;
   3234 	}
   3235 	if (adapter->phy_si != NULL) {
   3236 		softint_disestablish(adapter->phy_si);
   3237 		adapter->phy_si = NULL;
   3238 	}
   3239 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3240 		if (adapter->fdir_si != NULL) {
   3241 			softint_disestablish(adapter->fdir_si);
   3242 			adapter->fdir_si = NULL;
   3243 		}
   3244 	}
   3245 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3246 		if (adapter->mbx_si != NULL) {
   3247 			softint_disestablish(adapter->mbx_si);
   3248 			adapter->mbx_si = NULL;
   3249 		}
   3250 	}
   3251 } /* ixgbe_free_softint */
   3252 
   3253 /************************************************************************
   3254  * ixgbe_detach - Device removal routine
   3255  *
   3256  *   Called when the driver is being removed.
   3257  *   Stops the adapter and deallocates all the resources
   3258  *   that were allocated for driver operation.
   3259  *
   3260  *   return 0 on success, positive on failure
   3261  ************************************************************************/
   3262 static int
   3263 ixgbe_detach(device_t dev, int flags)
   3264 {
   3265 	struct adapter *adapter = device_private(dev);
   3266 	struct rx_ring *rxr = adapter->rx_rings;
   3267 	struct tx_ring *txr = adapter->tx_rings;
   3268 	struct ixgbe_hw *hw = &adapter->hw;
   3269 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3270 	u32	ctrl_ext;
   3271 
   3272 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3273 	if (adapter->osdep.attached == false)
   3274 		return 0;
   3275 
   3276 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3277 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3278 		return (EBUSY);
   3279 	}
   3280 
   3281 	/* Stop the interface. Callouts are stopped in it. */
   3282 	ixgbe_ifstop(adapter->ifp, 1);
   3283 #if NVLAN > 0
   3284 	/* Make sure VLANs are not using driver */
   3285 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3286 		;	/* nothing to do: no VLANs */
   3287 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3288 		vlan_ifdetach(adapter->ifp);
   3289 	else {
   3290 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3291 		return (EBUSY);
   3292 	}
   3293 #endif
   3294 
   3295 	pmf_device_deregister(dev);
   3296 
   3297 	ether_ifdetach(adapter->ifp);
   3298 	/* Stop the adapter */
   3299 	IXGBE_CORE_LOCK(adapter);
   3300 	ixgbe_setup_low_power_mode(adapter);
   3301 	IXGBE_CORE_UNLOCK(adapter);
   3302 
   3303 	ixgbe_free_softint(adapter);
   3304 
   3305 	/* let hardware know driver is unloading */
   3306 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3307 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3308 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3309 
   3310 	callout_halt(&adapter->timer, NULL);
   3311 
   3312 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3313 		netmap_detach(adapter->ifp);
   3314 
   3315 	ixgbe_free_pci_resources(adapter);
   3316 #if 0	/* XXX the NetBSD port is probably missing something here */
   3317 	bus_generic_detach(dev);
   3318 #endif
   3319 	if_detach(adapter->ifp);
   3320 	if_percpuq_destroy(adapter->ipq);
   3321 
   3322 	sysctl_teardown(&adapter->sysctllog);
   3323 	evcnt_detach(&adapter->handleq);
   3324 	evcnt_detach(&adapter->req);
   3325 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3326 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3327 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3328 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3329 	evcnt_detach(&adapter->other_tx_dma_setup);
   3330 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3331 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3332 	evcnt_detach(&adapter->watchdog_events);
   3333 	evcnt_detach(&adapter->tso_err);
   3334 	evcnt_detach(&adapter->link_irq);
   3335 
   3336 	txr = adapter->tx_rings;
   3337 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3338 		evcnt_detach(&adapter->queues[i].irqs);
   3339 		evcnt_detach(&txr->no_desc_avail);
   3340 		evcnt_detach(&txr->total_packets);
   3341 		evcnt_detach(&txr->tso_tx);
   3342 #ifndef IXGBE_LEGACY_TX
   3343 		evcnt_detach(&txr->pcq_drops);
   3344 #endif
   3345 
   3346 		if (i < __arraycount(stats->mpc)) {
   3347 			evcnt_detach(&stats->mpc[i]);
   3348 			if (hw->mac.type == ixgbe_mac_82598EB)
   3349 				evcnt_detach(&stats->rnbc[i]);
   3350 		}
   3351 		if (i < __arraycount(stats->pxontxc)) {
   3352 			evcnt_detach(&stats->pxontxc[i]);
   3353 			evcnt_detach(&stats->pxonrxc[i]);
   3354 			evcnt_detach(&stats->pxofftxc[i]);
   3355 			evcnt_detach(&stats->pxoffrxc[i]);
   3356 			evcnt_detach(&stats->pxon2offc[i]);
   3357 		}
   3358 		if (i < __arraycount(stats->qprc)) {
   3359 			evcnt_detach(&stats->qprc[i]);
   3360 			evcnt_detach(&stats->qptc[i]);
   3361 			evcnt_detach(&stats->qbrc[i]);
   3362 			evcnt_detach(&stats->qbtc[i]);
   3363 			evcnt_detach(&stats->qprdc[i]);
   3364 		}
   3365 
   3366 		evcnt_detach(&rxr->rx_packets);
   3367 		evcnt_detach(&rxr->rx_bytes);
   3368 		evcnt_detach(&rxr->rx_copies);
   3369 		evcnt_detach(&rxr->no_jmbuf);
   3370 		evcnt_detach(&rxr->rx_discarded);
   3371 	}
   3372 	evcnt_detach(&stats->ipcs);
   3373 	evcnt_detach(&stats->l4cs);
   3374 	evcnt_detach(&stats->ipcs_bad);
   3375 	evcnt_detach(&stats->l4cs_bad);
   3376 	evcnt_detach(&stats->intzero);
   3377 	evcnt_detach(&stats->legint);
   3378 	evcnt_detach(&stats->crcerrs);
   3379 	evcnt_detach(&stats->illerrc);
   3380 	evcnt_detach(&stats->errbc);
   3381 	evcnt_detach(&stats->mspdc);
   3382 	if (hw->mac.type >= ixgbe_mac_X550)
   3383 		evcnt_detach(&stats->mbsdc);
   3384 	evcnt_detach(&stats->mpctotal);
   3385 	evcnt_detach(&stats->mlfc);
   3386 	evcnt_detach(&stats->mrfc);
   3387 	evcnt_detach(&stats->rlec);
   3388 	evcnt_detach(&stats->lxontxc);
   3389 	evcnt_detach(&stats->lxonrxc);
   3390 	evcnt_detach(&stats->lxofftxc);
   3391 	evcnt_detach(&stats->lxoffrxc);
   3392 
   3393 	/* Packet Reception Stats */
   3394 	evcnt_detach(&stats->tor);
   3395 	evcnt_detach(&stats->gorc);
   3396 	evcnt_detach(&stats->tpr);
   3397 	evcnt_detach(&stats->gprc);
   3398 	evcnt_detach(&stats->mprc);
   3399 	evcnt_detach(&stats->bprc);
   3400 	evcnt_detach(&stats->prc64);
   3401 	evcnt_detach(&stats->prc127);
   3402 	evcnt_detach(&stats->prc255);
   3403 	evcnt_detach(&stats->prc511);
   3404 	evcnt_detach(&stats->prc1023);
   3405 	evcnt_detach(&stats->prc1522);
   3406 	evcnt_detach(&stats->ruc);
   3407 	evcnt_detach(&stats->rfc);
   3408 	evcnt_detach(&stats->roc);
   3409 	evcnt_detach(&stats->rjc);
   3410 	evcnt_detach(&stats->mngprc);
   3411 	evcnt_detach(&stats->mngpdc);
   3412 	evcnt_detach(&stats->xec);
   3413 
   3414 	/* Packet Transmission Stats */
   3415 	evcnt_detach(&stats->gotc);
   3416 	evcnt_detach(&stats->tpt);
   3417 	evcnt_detach(&stats->gptc);
   3418 	evcnt_detach(&stats->bptc);
   3419 	evcnt_detach(&stats->mptc);
   3420 	evcnt_detach(&stats->mngptc);
   3421 	evcnt_detach(&stats->ptc64);
   3422 	evcnt_detach(&stats->ptc127);
   3423 	evcnt_detach(&stats->ptc255);
   3424 	evcnt_detach(&stats->ptc511);
   3425 	evcnt_detach(&stats->ptc1023);
   3426 	evcnt_detach(&stats->ptc1522);
   3427 
   3428 	ixgbe_free_transmit_structures(adapter);
   3429 	ixgbe_free_receive_structures(adapter);
   3430 	free(adapter->queues, M_DEVBUF);
   3431 	free(adapter->mta, M_DEVBUF);
   3432 
   3433 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3434 
   3435 	return (0);
   3436 } /* ixgbe_detach */
   3437 
   3438 /************************************************************************
   3439  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3440  *
   3441  *   Prepare the adapter/port for LPLU and/or WoL
   3442  ************************************************************************/
   3443 static int
   3444 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3445 {
   3446 	struct ixgbe_hw *hw = &adapter->hw;
   3447 	device_t        dev = adapter->dev;
   3448 	s32             error = 0;
   3449 
   3450 	KASSERT(mutex_owned(&adapter->core_mtx));
   3451 
   3452 	/* Limit power management flow to X550EM baseT */
   3453 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3454 	    hw->phy.ops.enter_lplu) {
   3455 		/* X550EM baseT adapters need a special LPLU flow */
   3456 		hw->phy.reset_disable = true;
   3457 		ixgbe_stop(adapter);
   3458 		error = hw->phy.ops.enter_lplu(hw);
   3459 		if (error)
   3460 			device_printf(dev,
   3461 			    "Error entering LPLU: %d\n", error);
   3462 		hw->phy.reset_disable = false;
   3463 	} else {
   3464 		/* Just stop for other adapters */
   3465 		ixgbe_stop(adapter);
   3466 	}
   3467 
   3468 	if (!hw->wol_enabled) {
   3469 		ixgbe_set_phy_power(hw, FALSE);
   3470 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3471 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3472 	} else {
   3473 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3474 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3475 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3476 
   3477 		/*
   3478 		 * Clear Wake Up Status register to prevent any previous wakeup
   3479 		 * events from waking us up immediately after we suspend.
   3480 		 */
   3481 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3482 
   3483 		/*
   3484 		 * Program the Wakeup Filter Control register with user filter
   3485 		 * settings
   3486 		 */
   3487 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3488 
   3489 		/* Enable wakeups and power management in Wakeup Control */
   3490 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3491 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3492 
   3493 	}
   3494 
   3495 	return error;
   3496 } /* ixgbe_setup_low_power_mode */
   3497 
   3498 /************************************************************************
   3499  * ixgbe_shutdown - Shutdown entry point
   3500  ************************************************************************/
   3501 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3502 static int
   3503 ixgbe_shutdown(device_t dev)
   3504 {
   3505 	struct adapter *adapter = device_private(dev);
   3506 	int error = 0;
   3507 
   3508 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3509 
   3510 	IXGBE_CORE_LOCK(adapter);
   3511 	error = ixgbe_setup_low_power_mode(adapter);
   3512 	IXGBE_CORE_UNLOCK(adapter);
   3513 
   3514 	return (error);
   3515 } /* ixgbe_shutdown */
   3516 #endif
   3517 
   3518 /************************************************************************
   3519  * ixgbe_suspend
   3520  *
   3521  *   From D0 to D3
   3522  ************************************************************************/
   3523 static bool
   3524 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3525 {
   3526 	struct adapter *adapter = device_private(dev);
   3527 	int            error = 0;
   3528 
   3529 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3530 
   3531 	IXGBE_CORE_LOCK(adapter);
   3532 
   3533 	error = ixgbe_setup_low_power_mode(adapter);
   3534 
   3535 	IXGBE_CORE_UNLOCK(adapter);
   3536 
   3537 	return (error);
   3538 } /* ixgbe_suspend */
   3539 
   3540 /************************************************************************
   3541  * ixgbe_resume
   3542  *
   3543  *   From D3 to D0
   3544  ************************************************************************/
   3545 static bool
   3546 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3547 {
   3548 	struct adapter  *adapter = device_private(dev);
   3549 	struct ifnet    *ifp = adapter->ifp;
   3550 	struct ixgbe_hw *hw = &adapter->hw;
   3551 	u32             wus;
   3552 
   3553 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3554 
   3555 	IXGBE_CORE_LOCK(adapter);
   3556 
   3557 	/* Read & clear WUS register */
   3558 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3559 	if (wus)
   3560 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3561 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3562 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3563 	/* And clear WUFC until next low-power transition */
   3564 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3565 
   3566 	/*
   3567 	 * Required after D3->D0 transition;
   3568 	 * will re-advertise all previous advertised speeds
   3569 	 */
   3570 	if (ifp->if_flags & IFF_UP)
   3571 		ixgbe_init_locked(adapter);
   3572 
   3573 	IXGBE_CORE_UNLOCK(adapter);
   3574 
   3575 	return true;
   3576 } /* ixgbe_resume */
   3577 
   3578 /*
   3579  * Set the various hardware offload abilities.
   3580  *
   3581  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3582  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3583  * mbuf offload flags the driver will understand.
   3584  */
   3585 static void
   3586 ixgbe_set_if_hwassist(struct adapter *adapter)
   3587 {
   3588 	/* XXX */
   3589 }
   3590 
   3591 /************************************************************************
   3592  * ixgbe_init_locked - Init entry point
   3593  *
   3594  *   Used in two ways: It is used by the stack as an init
   3595  *   entry point in network interface structure. It is also
   3596  *   used by the driver as a hw/sw initialization routine to
   3597  *   get to a consistent state.
   3598  *
   3599  *   return 0 on success, positive on failure
   3600  ************************************************************************/
   3601 static void
   3602 ixgbe_init_locked(struct adapter *adapter)
   3603 {
   3604 	struct ifnet   *ifp = adapter->ifp;
   3605 	device_t 	dev = adapter->dev;
   3606 	struct ixgbe_hw *hw = &adapter->hw;
   3607 	struct tx_ring  *txr;
   3608 	struct rx_ring  *rxr;
   3609 	u32		txdctl, mhadd;
   3610 	u32		rxdctl, rxctrl;
   3611 	u32             ctrl_ext;
   3612 	int             err = 0;
   3613 
   3614 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3615 
   3616 	KASSERT(mutex_owned(&adapter->core_mtx));
   3617 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3618 
   3619 	hw->adapter_stopped = FALSE;
   3620 	ixgbe_stop_adapter(hw);
   3621         callout_stop(&adapter->timer);
   3622 
   3623 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3624 	adapter->max_frame_size =
   3625 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3626 
   3627 	/* Queue indices may change with IOV mode */
   3628 	ixgbe_align_all_queue_indices(adapter);
   3629 
   3630 	/* reprogram the RAR[0] in case user changed it. */
   3631 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3632 
   3633 	/* Get the latest mac address, User can use a LAA */
   3634 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3635 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3636 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3637 	hw->addr_ctrl.rar_used_count = 1;
   3638 
   3639 	/* Set hardware offload abilities from ifnet flags */
   3640 	ixgbe_set_if_hwassist(adapter);
   3641 
   3642 	/* Prepare transmit descriptors and buffers */
   3643 	if (ixgbe_setup_transmit_structures(adapter)) {
   3644 		device_printf(dev, "Could not setup transmit structures\n");
   3645 		ixgbe_stop(adapter);
   3646 		return;
   3647 	}
   3648 
   3649 	ixgbe_init_hw(hw);
   3650 	ixgbe_initialize_iov(adapter);
   3651 	ixgbe_initialize_transmit_units(adapter);
   3652 
   3653 	/* Setup Multicast table */
   3654 	ixgbe_set_multi(adapter);
   3655 
   3656 	/* Determine the correct mbuf pool, based on frame size */
   3657 	if (adapter->max_frame_size <= MCLBYTES)
   3658 		adapter->rx_mbuf_sz = MCLBYTES;
   3659 	else
   3660 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3661 
   3662 	/* Prepare receive descriptors and buffers */
   3663 	if (ixgbe_setup_receive_structures(adapter)) {
   3664 		device_printf(dev, "Could not setup receive structures\n");
   3665 		ixgbe_stop(adapter);
   3666 		return;
   3667 	}
   3668 
   3669 	/* Configure RX settings */
   3670 	ixgbe_initialize_receive_units(adapter);
   3671 
   3672 	/* Enable SDP & MSI-X interrupts based on adapter */
   3673 	ixgbe_config_gpie(adapter);
   3674 
   3675 	/* Set MTU size */
   3676 	if (ifp->if_mtu > ETHERMTU) {
   3677 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3678 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3679 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3680 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3681 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3682 	}
   3683 
   3684 	/* Now enable all the queues */
   3685 	for (int i = 0; i < adapter->num_queues; i++) {
   3686 		txr = &adapter->tx_rings[i];
   3687 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3688 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3689 		/* Set WTHRESH to 8, burst writeback */
   3690 		txdctl |= (8 << 16);
   3691 		/*
   3692 		 * When the internal queue falls below PTHRESH (32),
   3693 		 * start prefetching as long as there are at least
   3694 		 * HTHRESH (1) buffers ready. The values are taken
   3695 		 * from the Intel linux driver 3.8.21.
   3696 		 * Prefetching enables tx line rate even with 1 queue.
   3697 		 */
   3698 		txdctl |= (32 << 0) | (1 << 8);
   3699 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3700 	}
   3701 
   3702 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3703 		rxr = &adapter->rx_rings[i];
   3704 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3705 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3706 			/*
   3707 			 * PTHRESH = 21
   3708 			 * HTHRESH = 4
   3709 			 * WTHRESH = 8
   3710 			 */
   3711 			rxdctl &= ~0x3FFFFF;
   3712 			rxdctl |= 0x080420;
   3713 		}
   3714 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3715 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3716 		for (; j < 10; j++) {
   3717 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3718 			    IXGBE_RXDCTL_ENABLE)
   3719 				break;
   3720 			else
   3721 				msec_delay(1);
   3722 		}
   3723 		wmb();
   3724 
   3725 		/*
   3726 		 * In netmap mode, we must preserve the buffers made
   3727 		 * available to userspace before the if_init()
   3728 		 * (this is true by default on the TX side, because
   3729 		 * init makes all buffers available to userspace).
   3730 		 *
   3731 		 * netmap_reset() and the device specific routines
   3732 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3733 		 * buffers at the end of the NIC ring, so here we
   3734 		 * must set the RDT (tail) register to make sure
   3735 		 * they are not overwritten.
   3736 		 *
   3737 		 * In this driver the NIC ring starts at RDH = 0,
   3738 		 * RDT points to the last slot available for reception (?),
   3739 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3740 		 */
   3741 #ifdef DEV_NETMAP
   3742 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3743 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3744 			struct netmap_adapter *na = NA(adapter->ifp);
   3745 			struct netmap_kring *kring = &na->rx_rings[i];
   3746 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3747 
   3748 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3749 		} else
   3750 #endif /* DEV_NETMAP */
   3751 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3752 			    adapter->num_rx_desc - 1);
   3753 	}
   3754 
   3755 	/* Enable Receive engine */
   3756 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3757 	if (hw->mac.type == ixgbe_mac_82598EB)
   3758 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3759 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3760 	ixgbe_enable_rx_dma(hw, rxctrl);
   3761 
   3762 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3763 
   3764 	/* Set up MSI-X routing */
   3765 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3766 		ixgbe_configure_ivars(adapter);
   3767 		/* Set up auto-mask */
   3768 		if (hw->mac.type == ixgbe_mac_82598EB)
   3769 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3770 		else {
   3771 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3772 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3773 		}
   3774 	} else {  /* Simple settings for Legacy/MSI */
   3775 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3776 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3777 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3778 	}
   3779 
   3780 	ixgbe_init_fdir(adapter);
   3781 
   3782 	/*
   3783 	 * Check on any SFP devices that
   3784 	 * need to be kick-started
   3785 	 */
   3786 	if (hw->phy.type == ixgbe_phy_none) {
   3787 		err = hw->phy.ops.identify(hw);
   3788 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   3789                 	device_printf(dev,
   3790 			    "Unsupported SFP+ module type was detected.\n");
   3791 			return;
   3792         	}
   3793 	}
   3794 
   3795 	/* Set moderation on the Link interrupt */
   3796 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3797 
   3798 	/* Config/Enable Link */
   3799 	ixgbe_config_link(adapter);
   3800 
   3801 	/* Hardware Packet Buffer & Flow Control setup */
   3802 	ixgbe_config_delay_values(adapter);
   3803 
   3804 	/* Initialize the FC settings */
   3805 	ixgbe_start_hw(hw);
   3806 
   3807 	/* Set up VLAN support and filter */
   3808 	ixgbe_setup_vlan_hw_support(adapter);
   3809 
   3810 	/* Setup DMA Coalescing */
   3811 	ixgbe_config_dmac(adapter);
   3812 
   3813 	/* And now turn on interrupts */
   3814 	ixgbe_enable_intr(adapter);
   3815 
   3816 	/* Enable the use of the MBX by the VF's */
   3817 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3818 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3819 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3820 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3821 	}
   3822 
   3823 	/* Update saved flags. See ixgbe_ifflags_cb() */
   3824 	adapter->if_flags = ifp->if_flags;
   3825 
   3826 	/* Now inform the stack we're ready */
   3827 	ifp->if_flags |= IFF_RUNNING;
   3828 
   3829 	return;
   3830 } /* ixgbe_init_locked */
   3831 
   3832 /************************************************************************
   3833  * ixgbe_init
   3834  ************************************************************************/
   3835 static int
   3836 ixgbe_init(struct ifnet *ifp)
   3837 {
   3838 	struct adapter *adapter = ifp->if_softc;
   3839 
   3840 	IXGBE_CORE_LOCK(adapter);
   3841 	ixgbe_init_locked(adapter);
   3842 	IXGBE_CORE_UNLOCK(adapter);
   3843 
   3844 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3845 } /* ixgbe_init */
   3846 
   3847 /************************************************************************
   3848  * ixgbe_set_ivar
   3849  *
   3850  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3851  *     (yes this is all very magic and confusing :)
   3852  *    - entry is the register array entry
   3853  *    - vector is the MSI-X vector for this queue
   3854  *    - type is RX/TX/MISC
   3855  ************************************************************************/
   3856 static void
   3857 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3858 {
   3859 	struct ixgbe_hw *hw = &adapter->hw;
   3860 	u32 ivar, index;
   3861 
   3862 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3863 
   3864 	switch (hw->mac.type) {
   3865 
   3866 	case ixgbe_mac_82598EB:
   3867 		if (type == -1)
   3868 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3869 		else
   3870 			entry += (type * 64);
   3871 		index = (entry >> 2) & 0x1F;
   3872 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3873 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3874 		ivar |= (vector << (8 * (entry & 0x3)));
   3875 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3876 		break;
   3877 
   3878 	case ixgbe_mac_82599EB:
   3879 	case ixgbe_mac_X540:
   3880 	case ixgbe_mac_X550:
   3881 	case ixgbe_mac_X550EM_x:
   3882 	case ixgbe_mac_X550EM_a:
   3883 		if (type == -1) { /* MISC IVAR */
   3884 			index = (entry & 1) * 8;
   3885 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3886 			ivar &= ~(0xFF << index);
   3887 			ivar |= (vector << index);
   3888 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3889 		} else {	/* RX/TX IVARS */
   3890 			index = (16 * (entry & 1)) + (8 * type);
   3891 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3892 			ivar &= ~(0xFF << index);
   3893 			ivar |= (vector << index);
   3894 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   3895 		}
   3896 
   3897 	default:
   3898 		break;
   3899 	}
   3900 } /* ixgbe_set_ivar */
   3901 
   3902 /************************************************************************
   3903  * ixgbe_configure_ivars
   3904  ************************************************************************/
   3905 static void
   3906 ixgbe_configure_ivars(struct adapter *adapter)
   3907 {
   3908 	struct ix_queue *que = adapter->queues;
   3909 	u32             newitr;
   3910 
   3911 	if (ixgbe_max_interrupt_rate > 0)
   3912 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3913 	else {
   3914 		/*
   3915 		 * Disable DMA coalescing if interrupt moderation is
   3916 		 * disabled.
   3917 		 */
   3918 		adapter->dmac = 0;
   3919 		newitr = 0;
   3920 	}
   3921 
   3922         for (int i = 0; i < adapter->num_queues; i++, que++) {
   3923 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3924 		struct tx_ring *txr = &adapter->tx_rings[i];
   3925 		/* First the RX queue entry */
   3926                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3927 		/* ... and the TX */
   3928 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3929 		/* Set an Initial EITR value */
   3930 		ixgbe_eitr_write(que, newitr);
   3931 	}
   3932 
   3933 	/* For the Link interrupt */
   3934         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3935 } /* ixgbe_configure_ivars */
   3936 
   3937 /************************************************************************
   3938  * ixgbe_config_gpie
   3939  ************************************************************************/
   3940 static void
   3941 ixgbe_config_gpie(struct adapter *adapter)
   3942 {
   3943 	struct ixgbe_hw *hw = &adapter->hw;
   3944 	u32             gpie;
   3945 
   3946 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3947 
   3948 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3949 		/* Enable Enhanced MSI-X mode */
   3950 		gpie |= IXGBE_GPIE_MSIX_MODE
   3951 		     |  IXGBE_GPIE_EIAME
   3952 		     |  IXGBE_GPIE_PBA_SUPPORT
   3953 		     |  IXGBE_GPIE_OCD;
   3954 	}
   3955 
   3956 	/* Fan Failure Interrupt */
   3957 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3958 		gpie |= IXGBE_SDP1_GPIEN;
   3959 
   3960 	/* Thermal Sensor Interrupt */
   3961 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3962 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3963 
   3964 	/* Link detection */
   3965 	switch (hw->mac.type) {
   3966 	case ixgbe_mac_82599EB:
   3967 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3968 		break;
   3969 	case ixgbe_mac_X550EM_x:
   3970 	case ixgbe_mac_X550EM_a:
   3971 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3972 		break;
   3973 	default:
   3974 		break;
   3975 	}
   3976 
   3977 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3978 
   3979 	return;
   3980 } /* ixgbe_config_gpie */
   3981 
   3982 /************************************************************************
   3983  * ixgbe_config_delay_values
   3984  *
   3985  *   Requires adapter->max_frame_size to be set.
   3986  ************************************************************************/
   3987 static void
   3988 ixgbe_config_delay_values(struct adapter *adapter)
   3989 {
   3990 	struct ixgbe_hw *hw = &adapter->hw;
   3991 	u32             rxpb, frame, size, tmp;
   3992 
   3993 	frame = adapter->max_frame_size;
   3994 
   3995 	/* Calculate High Water */
   3996 	switch (hw->mac.type) {
   3997 	case ixgbe_mac_X540:
   3998 	case ixgbe_mac_X550:
   3999 	case ixgbe_mac_X550EM_x:
   4000 	case ixgbe_mac_X550EM_a:
   4001 		tmp = IXGBE_DV_X540(frame, frame);
   4002 		break;
   4003 	default:
   4004 		tmp = IXGBE_DV(frame, frame);
   4005 		break;
   4006 	}
   4007 	size = IXGBE_BT2KB(tmp);
   4008 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4009 	hw->fc.high_water[0] = rxpb - size;
   4010 
   4011 	/* Now calculate Low Water */
   4012 	switch (hw->mac.type) {
   4013 	case ixgbe_mac_X540:
   4014 	case ixgbe_mac_X550:
   4015 	case ixgbe_mac_X550EM_x:
   4016 	case ixgbe_mac_X550EM_a:
   4017 		tmp = IXGBE_LOW_DV_X540(frame);
   4018 		break;
   4019 	default:
   4020 		tmp = IXGBE_LOW_DV(frame);
   4021 		break;
   4022 	}
   4023 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4024 
   4025 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4026 	hw->fc.send_xon = TRUE;
   4027 } /* ixgbe_config_delay_values */
   4028 
   4029 /************************************************************************
   4030  * ixgbe_set_multi - Multicast Update
   4031  *
   4032  *   Called whenever multicast address list is updated.
   4033  ************************************************************************/
   4034 static void
   4035 ixgbe_set_multi(struct adapter *adapter)
   4036 {
   4037 	struct ixgbe_mc_addr	*mta;
   4038 	struct ifnet		*ifp = adapter->ifp;
   4039 	u8			*update_ptr;
   4040 	int			mcnt = 0;
   4041 	u32			fctrl;
   4042 	struct ethercom		*ec = &adapter->osdep.ec;
   4043 	struct ether_multi	*enm;
   4044 	struct ether_multistep	step;
   4045 
   4046 	KASSERT(mutex_owned(&adapter->core_mtx));
   4047 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4048 
   4049 	mta = adapter->mta;
   4050 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4051 
   4052 	ifp->if_flags &= ~IFF_ALLMULTI;
   4053 	ETHER_LOCK(ec);
   4054 	ETHER_FIRST_MULTI(step, ec, enm);
   4055 	while (enm != NULL) {
   4056 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4057 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4058 			ETHER_ADDR_LEN) != 0)) {
   4059 			ifp->if_flags |= IFF_ALLMULTI;
   4060 			break;
   4061 		}
   4062 		bcopy(enm->enm_addrlo,
   4063 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4064 		mta[mcnt].vmdq = adapter->pool;
   4065 		mcnt++;
   4066 		ETHER_NEXT_MULTI(step, enm);
   4067 	}
   4068 	ETHER_UNLOCK(ec);
   4069 
   4070 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4071 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4072 	if (ifp->if_flags & IFF_PROMISC)
   4073 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4074 	else if (ifp->if_flags & IFF_ALLMULTI) {
   4075 		fctrl |= IXGBE_FCTRL_MPE;
   4076 	}
   4077 
   4078 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4079 
   4080 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4081 		update_ptr = (u8 *)mta;
   4082 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4083 		    ixgbe_mc_array_itr, TRUE);
   4084 	}
   4085 
   4086 	return;
   4087 } /* ixgbe_set_multi */
   4088 
   4089 /************************************************************************
   4090  * ixgbe_mc_array_itr
   4091  *
   4092  *   An iterator function needed by the multicast shared code.
   4093  *   It feeds the shared code routine the addresses in the
   4094  *   array of ixgbe_set_multi() one by one.
   4095  ************************************************************************/
   4096 static u8 *
   4097 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4098 {
   4099 	struct ixgbe_mc_addr *mta;
   4100 
   4101 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4102 	*vmdq = mta->vmdq;
   4103 
   4104 	*update_ptr = (u8*)(mta + 1);
   4105 
   4106 	return (mta->addr);
   4107 } /* ixgbe_mc_array_itr */
   4108 
   4109 /************************************************************************
   4110  * ixgbe_local_timer - Timer routine
   4111  *
   4112  *   Checks for link status, updates statistics,
   4113  *   and runs the watchdog check.
   4114  ************************************************************************/
   4115 static void
   4116 ixgbe_local_timer(void *arg)
   4117 {
   4118 	struct adapter *adapter = arg;
   4119 
   4120 	IXGBE_CORE_LOCK(adapter);
   4121 	ixgbe_local_timer1(adapter);
   4122 	IXGBE_CORE_UNLOCK(adapter);
   4123 }
   4124 
   4125 static void
   4126 ixgbe_local_timer1(void *arg)
   4127 {
   4128 	struct adapter	*adapter = arg;
   4129 	device_t	dev = adapter->dev;
   4130 	struct ix_queue *que = adapter->queues;
   4131 	u64		queues = 0;
   4132 	int		hung = 0;
   4133 
   4134 	KASSERT(mutex_owned(&adapter->core_mtx));
   4135 
   4136 	/* Check for pluggable optics */
   4137 	if (adapter->sfp_probe)
   4138 		if (!ixgbe_sfp_probe(adapter))
   4139 			goto out; /* Nothing to do */
   4140 
   4141 	ixgbe_update_link_status(adapter);
   4142 	ixgbe_update_stats_counters(adapter);
   4143 
   4144 	/*
   4145 	 * Check the TX queues status
   4146 	 *      - mark hung queues so we don't schedule on them
   4147 	 *      - watchdog only if all queues show hung
   4148 	 */
   4149 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4150 		/* Keep track of queues with work for soft irq */
   4151 		if (que->txr->busy)
   4152 			queues |= ((u64)1 << que->me);
   4153 		/*
   4154 		 * Each time txeof runs without cleaning, but there
   4155 		 * are uncleaned descriptors it increments busy. If
   4156 		 * we get to the MAX we declare it hung.
   4157 		 */
   4158 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4159 			++hung;
   4160 			/* Mark the queue as inactive */
   4161 			adapter->active_queues &= ~((u64)1 << que->me);
   4162 			continue;
   4163 		} else {
   4164 			/* Check if we've come back from hung */
   4165 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4166 				adapter->active_queues |= ((u64)1 << que->me);
   4167 		}
   4168 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4169 			device_printf(dev,
   4170 			    "Warning queue %d appears to be hung!\n", i);
   4171 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4172 			++hung;
   4173 		}
   4174 	}
   4175 
   4176 	/* Only truely watchdog if all queues show hung */
   4177 	if (hung == adapter->num_queues)
   4178 		goto watchdog;
   4179 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4180 		ixgbe_rearm_queues(adapter, queues);
   4181 	}
   4182 
   4183 out:
   4184 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4185 	return;
   4186 
   4187 watchdog:
   4188 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4189 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4190 	adapter->watchdog_events.ev_count++;
   4191 	ixgbe_init_locked(adapter);
   4192 } /* ixgbe_local_timer */
   4193 
   4194 /************************************************************************
   4195  * ixgbe_sfp_probe
   4196  *
   4197  *   Determine if a port had optics inserted.
   4198  ************************************************************************/
   4199 static bool
   4200 ixgbe_sfp_probe(struct adapter *adapter)
   4201 {
   4202 	struct ixgbe_hw	*hw = &adapter->hw;
   4203 	device_t	dev = adapter->dev;
   4204 	bool		result = FALSE;
   4205 
   4206 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4207 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4208 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4209 		if (ret)
   4210 			goto out;
   4211 		ret = hw->phy.ops.reset(hw);
   4212 		adapter->sfp_probe = FALSE;
   4213 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4214 			device_printf(dev,"Unsupported SFP+ module detected!");
   4215 			device_printf(dev,
   4216 			    "Reload driver with supported module.\n");
   4217                         goto out;
   4218 		} else
   4219 			device_printf(dev, "SFP+ module detected!\n");
   4220 		/* We now have supported optics */
   4221 		result = TRUE;
   4222 	}
   4223 out:
   4224 
   4225 	return (result);
   4226 } /* ixgbe_sfp_probe */
   4227 
   4228 /************************************************************************
   4229  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4230  ************************************************************************/
   4231 static void
   4232 ixgbe_handle_mod(void *context)
   4233 {
   4234 	struct adapter  *adapter = context;
   4235 	struct ixgbe_hw *hw = &adapter->hw;
   4236 	device_t	dev = adapter->dev;
   4237 	u32             err, cage_full = 0;
   4238 
   4239 	if (adapter->hw.need_crosstalk_fix) {
   4240 		switch (hw->mac.type) {
   4241 		case ixgbe_mac_82599EB:
   4242 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4243 			    IXGBE_ESDP_SDP2;
   4244 			break;
   4245 		case ixgbe_mac_X550EM_x:
   4246 		case ixgbe_mac_X550EM_a:
   4247 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4248 			    IXGBE_ESDP_SDP0;
   4249 			break;
   4250 		default:
   4251 			break;
   4252 		}
   4253 
   4254 		if (!cage_full)
   4255 			return;
   4256 	}
   4257 
   4258 	err = hw->phy.ops.identify_sfp(hw);
   4259 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4260 		device_printf(dev,
   4261 		    "Unsupported SFP+ module type was detected.\n");
   4262 		return;
   4263 	}
   4264 
   4265 	err = hw->mac.ops.setup_sfp(hw);
   4266 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4267 		device_printf(dev,
   4268 		    "Setup failure - unsupported SFP+ module type.\n");
   4269 		return;
   4270 	}
   4271 	softint_schedule(adapter->msf_si);
   4272 } /* ixgbe_handle_mod */
   4273 
   4274 
   4275 /************************************************************************
   4276  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4277  ************************************************************************/
   4278 static void
   4279 ixgbe_handle_msf(void *context)
   4280 {
   4281 	struct adapter  *adapter = context;
   4282 	struct ixgbe_hw *hw = &adapter->hw;
   4283 	u32             autoneg;
   4284 	bool            negotiate;
   4285 
   4286 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4287 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4288 
   4289 	autoneg = hw->phy.autoneg_advertised;
   4290 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4291 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4292 	else
   4293 		negotiate = 0;
   4294 	if (hw->mac.ops.setup_link)
   4295 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4296 
   4297 	/* Adjust media types shown in ifconfig */
   4298 	ifmedia_removeall(&adapter->media);
   4299 	ixgbe_add_media_types(adapter);
   4300 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4301 } /* ixgbe_handle_msf */
   4302 
   4303 /************************************************************************
   4304  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4305  ************************************************************************/
   4306 static void
   4307 ixgbe_handle_phy(void *context)
   4308 {
   4309 	struct adapter  *adapter = context;
   4310 	struct ixgbe_hw *hw = &adapter->hw;
   4311 	int error;
   4312 
   4313 	error = hw->phy.ops.handle_lasi(hw);
   4314 	if (error == IXGBE_ERR_OVERTEMP)
   4315 		device_printf(adapter->dev,
   4316 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4317 		    " PHY will downshift to lower power state!\n");
   4318 	else if (error)
   4319 		device_printf(adapter->dev,
   4320 		    "Error handling LASI interrupt: %d\n", error);
   4321 } /* ixgbe_handle_phy */
   4322 
   4323 static void
   4324 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4325 {
   4326 	struct adapter *adapter = ifp->if_softc;
   4327 
   4328 	IXGBE_CORE_LOCK(adapter);
   4329 	ixgbe_stop(adapter);
   4330 	IXGBE_CORE_UNLOCK(adapter);
   4331 }
   4332 
   4333 /************************************************************************
   4334  * ixgbe_stop - Stop the hardware
   4335  *
   4336  *   Disables all traffic on the adapter by issuing a
   4337  *   global reset on the MAC and deallocates TX/RX buffers.
   4338  ************************************************************************/
   4339 static void
   4340 ixgbe_stop(void *arg)
   4341 {
   4342 	struct ifnet    *ifp;
   4343 	struct adapter  *adapter = arg;
   4344 	struct ixgbe_hw *hw = &adapter->hw;
   4345 
   4346 	ifp = adapter->ifp;
   4347 
   4348 	KASSERT(mutex_owned(&adapter->core_mtx));
   4349 
   4350 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4351 	ixgbe_disable_intr(adapter);
   4352 	callout_stop(&adapter->timer);
   4353 
   4354 	/* Let the stack know...*/
   4355 	ifp->if_flags &= ~IFF_RUNNING;
   4356 
   4357 	ixgbe_reset_hw(hw);
   4358 	hw->adapter_stopped = FALSE;
   4359 	ixgbe_stop_adapter(hw);
   4360 	if (hw->mac.type == ixgbe_mac_82599EB)
   4361 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4362 	/* Turn off the laser - noop with no optics */
   4363 	ixgbe_disable_tx_laser(hw);
   4364 
   4365 	/* Update the stack */
   4366 	adapter->link_up = FALSE;
   4367 	ixgbe_update_link_status(adapter);
   4368 
   4369 	/* reprogram the RAR[0] in case user changed it. */
   4370 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4371 
   4372 	return;
   4373 } /* ixgbe_stop */
   4374 
   4375 /************************************************************************
   4376  * ixgbe_update_link_status - Update OS on link state
   4377  *
   4378  * Note: Only updates the OS on the cached link state.
   4379  *       The real check of the hardware only happens with
   4380  *       a link interrupt.
   4381  ************************************************************************/
   4382 static void
   4383 ixgbe_update_link_status(struct adapter *adapter)
   4384 {
   4385 	struct ifnet	*ifp = adapter->ifp;
   4386 	device_t        dev = adapter->dev;
   4387 	struct ixgbe_hw *hw = &adapter->hw;
   4388 
   4389 	if (adapter->link_up) {
   4390 		if (adapter->link_active == FALSE) {
   4391 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4392 				/*
   4393 				 *  Discard count for both MAC Local Fault and
   4394 				 * Remote Fault because those registers are
   4395 				 * valid only when the link speed is up and
   4396 				 * 10Gbps.
   4397 				 */
   4398 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4399 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4400 			}
   4401 
   4402 			if (bootverbose) {
   4403 				const char *bpsmsg;
   4404 
   4405 				switch (adapter->link_speed) {
   4406 				case IXGBE_LINK_SPEED_10GB_FULL:
   4407 					bpsmsg = "10 Gbps";
   4408 					break;
   4409 				case IXGBE_LINK_SPEED_5GB_FULL:
   4410 					bpsmsg = "5 Gbps";
   4411 					break;
   4412 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4413 					bpsmsg = "2.5 Gbps";
   4414 					break;
   4415 				case IXGBE_LINK_SPEED_1GB_FULL:
   4416 					bpsmsg = "1 Gbps";
   4417 					break;
   4418 				case IXGBE_LINK_SPEED_100_FULL:
   4419 					bpsmsg = "100 Mbps";
   4420 					break;
   4421 				case IXGBE_LINK_SPEED_10_FULL:
   4422 					bpsmsg = "10 Mbps";
   4423 					break;
   4424 				default:
   4425 					bpsmsg = "unknown speed";
   4426 					break;
   4427 				}
   4428 				device_printf(dev, "Link is up %s %s \n",
   4429 				    bpsmsg, "Full Duplex");
   4430 			}
   4431 			adapter->link_active = TRUE;
   4432 			/* Update any Flow Control changes */
   4433 			ixgbe_fc_enable(&adapter->hw);
   4434 			/* Update DMA coalescing config */
   4435 			ixgbe_config_dmac(adapter);
   4436 			if_link_state_change(ifp, LINK_STATE_UP);
   4437 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4438 				ixgbe_ping_all_vfs(adapter);
   4439 		}
   4440 	} else { /* Link down */
   4441 		if (adapter->link_active == TRUE) {
   4442 			if (bootverbose)
   4443 				device_printf(dev, "Link is Down\n");
   4444 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4445 			adapter->link_active = FALSE;
   4446 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4447 				ixgbe_ping_all_vfs(adapter);
   4448 		}
   4449 	}
   4450 
   4451 	return;
   4452 } /* ixgbe_update_link_status */
   4453 
   4454 /************************************************************************
   4455  * ixgbe_config_dmac - Configure DMA Coalescing
   4456  ************************************************************************/
   4457 static void
   4458 ixgbe_config_dmac(struct adapter *adapter)
   4459 {
   4460 	struct ixgbe_hw *hw = &adapter->hw;
   4461 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4462 
   4463 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4464 		return;
   4465 
   4466 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4467 	    dcfg->link_speed ^ adapter->link_speed) {
   4468 		dcfg->watchdog_timer = adapter->dmac;
   4469 		dcfg->fcoe_en = false;
   4470 		dcfg->link_speed = adapter->link_speed;
   4471 		dcfg->num_tcs = 1;
   4472 
   4473 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4474 		    dcfg->watchdog_timer, dcfg->link_speed);
   4475 
   4476 		hw->mac.ops.dmac_config(hw);
   4477 	}
   4478 } /* ixgbe_config_dmac */
   4479 
   4480 /************************************************************************
   4481  * ixgbe_enable_intr
   4482  ************************************************************************/
   4483 static void
   4484 ixgbe_enable_intr(struct adapter *adapter)
   4485 {
   4486 	struct ixgbe_hw	*hw = &adapter->hw;
   4487 	struct ix_queue	*que = adapter->queues;
   4488 	u32		mask, fwsm;
   4489 
   4490 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4491 
   4492 	switch (adapter->hw.mac.type) {
   4493 	case ixgbe_mac_82599EB:
   4494 		mask |= IXGBE_EIMS_ECC;
   4495 		/* Temperature sensor on some adapters */
   4496 		mask |= IXGBE_EIMS_GPI_SDP0;
   4497 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4498 		mask |= IXGBE_EIMS_GPI_SDP1;
   4499 		mask |= IXGBE_EIMS_GPI_SDP2;
   4500 		break;
   4501 	case ixgbe_mac_X540:
   4502 		/* Detect if Thermal Sensor is enabled */
   4503 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4504 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4505 			mask |= IXGBE_EIMS_TS;
   4506 		mask |= IXGBE_EIMS_ECC;
   4507 		break;
   4508 	case ixgbe_mac_X550:
   4509 		/* MAC thermal sensor is automatically enabled */
   4510 		mask |= IXGBE_EIMS_TS;
   4511 		mask |= IXGBE_EIMS_ECC;
   4512 		break;
   4513 	case ixgbe_mac_X550EM_x:
   4514 	case ixgbe_mac_X550EM_a:
   4515 		/* Some devices use SDP0 for important information */
   4516 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4517 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4518 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4519 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4520 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4521 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4522 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4523 		mask |= IXGBE_EIMS_ECC;
   4524 		break;
   4525 	default:
   4526 		break;
   4527 	}
   4528 
   4529 	/* Enable Fan Failure detection */
   4530 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4531 		mask |= IXGBE_EIMS_GPI_SDP1;
   4532 	/* Enable SR-IOV */
   4533 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4534 		mask |= IXGBE_EIMS_MAILBOX;
   4535 	/* Enable Flow Director */
   4536 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4537 		mask |= IXGBE_EIMS_FLOW_DIR;
   4538 
   4539 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4540 
   4541 	/* With MSI-X we use auto clear */
   4542 	if (adapter->msix_mem) {
   4543 		mask = IXGBE_EIMS_ENABLE_MASK;
   4544 		/* Don't autoclear Link */
   4545 		mask &= ~IXGBE_EIMS_OTHER;
   4546 		mask &= ~IXGBE_EIMS_LSC;
   4547 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4548 			mask &= ~IXGBE_EIMS_MAILBOX;
   4549 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4550 	}
   4551 
   4552 	/*
   4553 	 * Now enable all queues, this is done separately to
   4554 	 * allow for handling the extended (beyond 32) MSI-X
   4555 	 * vectors that can be used by 82599
   4556 	 */
   4557         for (int i = 0; i < adapter->num_queues; i++, que++)
   4558                 ixgbe_enable_queue(adapter, que->msix);
   4559 
   4560 	IXGBE_WRITE_FLUSH(hw);
   4561 
   4562 	return;
   4563 } /* ixgbe_enable_intr */
   4564 
   4565 /************************************************************************
   4566  * ixgbe_disable_intr
   4567  ************************************************************************/
   4568 static void
   4569 ixgbe_disable_intr(struct adapter *adapter)
   4570 {
   4571 	if (adapter->msix_mem)
   4572 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4573 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4574 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4575 	} else {
   4576 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4577 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4578 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4579 	}
   4580 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4581 
   4582 	return;
   4583 } /* ixgbe_disable_intr */
   4584 
   4585 /************************************************************************
   4586  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4587  ************************************************************************/
   4588 static int
   4589 ixgbe_legacy_irq(void *arg)
   4590 {
   4591 	struct ix_queue *que = arg;
   4592 	struct adapter	*adapter = que->adapter;
   4593 	struct ixgbe_hw	*hw = &adapter->hw;
   4594 	struct ifnet    *ifp = adapter->ifp;
   4595 	struct 		tx_ring *txr = adapter->tx_rings;
   4596 	bool		more = false;
   4597 	u32             eicr, eicr_mask;
   4598 
   4599 	/* Silicon errata #26 on 82598 */
   4600 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4601 
   4602 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4603 
   4604 	adapter->stats.pf.legint.ev_count++;
   4605 	++que->irqs.ev_count;
   4606 	if (eicr == 0) {
   4607 		adapter->stats.pf.intzero.ev_count++;
   4608 		if ((ifp->if_flags & IFF_UP) != 0)
   4609 			ixgbe_enable_intr(adapter);
   4610 		return 0;
   4611 	}
   4612 
   4613 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4614 #ifdef __NetBSD__
   4615 		/* Don't run ixgbe_rxeof in interrupt context */
   4616 		more = true;
   4617 #else
   4618 		more = ixgbe_rxeof(que);
   4619 #endif
   4620 
   4621 		IXGBE_TX_LOCK(txr);
   4622 		ixgbe_txeof(txr);
   4623 #ifdef notyet
   4624 		if (!ixgbe_ring_empty(ifp, txr->br))
   4625 			ixgbe_start_locked(ifp, txr);
   4626 #endif
   4627 		IXGBE_TX_UNLOCK(txr);
   4628 	}
   4629 
   4630 	/* Check for fan failure */
   4631 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4632 		ixgbe_check_fan_failure(adapter, eicr, true);
   4633 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4634 	}
   4635 
   4636 	/* Link status change */
   4637 	if (eicr & IXGBE_EICR_LSC)
   4638 		softint_schedule(adapter->link_si);
   4639 
   4640 	if (ixgbe_is_sfp(hw)) {
   4641 		/* Pluggable optics-related interrupt */
   4642 		if (hw->mac.type >= ixgbe_mac_X540)
   4643 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4644 		else
   4645 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4646 
   4647 		if (eicr & eicr_mask) {
   4648 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4649 			softint_schedule(adapter->mod_si);
   4650 		}
   4651 
   4652 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4653 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4654 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4655 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4656 			softint_schedule(adapter->msf_si);
   4657 		}
   4658 	}
   4659 
   4660 	/* External PHY interrupt */
   4661 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4662 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4663 		softint_schedule(adapter->phy_si);
   4664 
   4665 	if (more)
   4666 		softint_schedule(que->que_si);
   4667 	else
   4668 		ixgbe_enable_intr(adapter);
   4669 
   4670 	return 1;
   4671 } /* ixgbe_legacy_irq */
   4672 
   4673 /************************************************************************
   4674  * ixgbe_free_pciintr_resources
   4675  ************************************************************************/
   4676 static void
   4677 ixgbe_free_pciintr_resources(struct adapter *adapter)
   4678 {
   4679 	struct ix_queue *que = adapter->queues;
   4680 	int		rid;
   4681 
   4682 	/*
   4683 	 * Release all msix queue resources:
   4684 	 */
   4685 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4686 		if (que->res != NULL) {
   4687 			pci_intr_disestablish(adapter->osdep.pc,
   4688 			    adapter->osdep.ihs[i]);
   4689 			adapter->osdep.ihs[i] = NULL;
   4690 		}
   4691 	}
   4692 
   4693 	/* Clean the Legacy or Link interrupt last */
   4694 	if (adapter->vector) /* we are doing MSIX */
   4695 		rid = adapter->vector;
   4696 	else
   4697 		rid = 0;
   4698 
   4699 	if (adapter->osdep.ihs[rid] != NULL) {
   4700 		pci_intr_disestablish(adapter->osdep.pc,
   4701 		    adapter->osdep.ihs[rid]);
   4702 		adapter->osdep.ihs[rid] = NULL;
   4703 	}
   4704 
   4705 	if (adapter->osdep.intrs != NULL) {
   4706 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4707 		    adapter->osdep.nintrs);
   4708 		adapter->osdep.intrs = NULL;
   4709 	}
   4710 
   4711 	return;
   4712 } /* ixgbe_free_pciintr_resources */
   4713 
   4714 /************************************************************************
   4715  * ixgbe_free_pci_resources
   4716  ************************************************************************/
   4717 static void
   4718 ixgbe_free_pci_resources(struct adapter *adapter)
   4719 {
   4720 
   4721 	ixgbe_free_pciintr_resources(adapter);
   4722 
   4723 	if (adapter->osdep.mem_size != 0) {
   4724 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4725 		    adapter->osdep.mem_bus_space_handle,
   4726 		    adapter->osdep.mem_size);
   4727 	}
   4728 
   4729 	return;
   4730 } /* ixgbe_free_pci_resources */
   4731 
   4732 /************************************************************************
   4733  * ixgbe_set_sysctl_value
   4734  ************************************************************************/
   4735 static void
   4736 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4737     const char *description, int *limit, int value)
   4738 {
   4739 	device_t dev =  adapter->dev;
   4740 	struct sysctllog **log;
   4741 	const struct sysctlnode *rnode, *cnode;
   4742 
   4743 	log = &adapter->sysctllog;
   4744 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4745 		aprint_error_dev(dev, "could not create sysctl root\n");
   4746 		return;
   4747 	}
   4748 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4749 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4750 	    name, SYSCTL_DESCR(description),
   4751 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4752 		aprint_error_dev(dev, "could not create sysctl\n");
   4753 	*limit = value;
   4754 } /* ixgbe_set_sysctl_value */
   4755 
   4756 /************************************************************************
   4757  * ixgbe_sysctl_flowcntl
   4758  *
   4759  *   SYSCTL wrapper around setting Flow Control
   4760  ************************************************************************/
   4761 static int
   4762 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4763 {
   4764 	struct sysctlnode node = *rnode;
   4765 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4766 	int error, fc;
   4767 
   4768 	fc = adapter->hw.fc.current_mode;
   4769 	node.sysctl_data = &fc;
   4770 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4771 	if (error != 0 || newp == NULL)
   4772 		return error;
   4773 
   4774 	/* Don't bother if it's not changed */
   4775 	if (fc == adapter->hw.fc.current_mode)
   4776 		return (0);
   4777 
   4778 	return ixgbe_set_flowcntl(adapter, fc);
   4779 } /* ixgbe_sysctl_flowcntl */
   4780 
   4781 /************************************************************************
   4782  * ixgbe_set_flowcntl - Set flow control
   4783  *
   4784  *   Flow control values:
   4785  *     0 - off
   4786  *     1 - rx pause
   4787  *     2 - tx pause
   4788  *     3 - full
   4789  ************************************************************************/
   4790 static int
   4791 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4792 {
   4793 	switch (fc) {
   4794 		case ixgbe_fc_rx_pause:
   4795 		case ixgbe_fc_tx_pause:
   4796 		case ixgbe_fc_full:
   4797 			adapter->hw.fc.requested_mode = fc;
   4798 			if (adapter->num_queues > 1)
   4799 				ixgbe_disable_rx_drop(adapter);
   4800 			break;
   4801 		case ixgbe_fc_none:
   4802 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4803 			if (adapter->num_queues > 1)
   4804 				ixgbe_enable_rx_drop(adapter);
   4805 			break;
   4806 		default:
   4807 			return (EINVAL);
   4808 	}
   4809 
   4810 #if 0 /* XXX NetBSD */
   4811 	/* Don't autoneg if forcing a value */
   4812 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4813 #endif
   4814 	ixgbe_fc_enable(&adapter->hw);
   4815 
   4816 	return (0);
   4817 } /* ixgbe_set_flowcntl */
   4818 
   4819 /************************************************************************
   4820  * ixgbe_enable_rx_drop
   4821  *
   4822  *   Enable the hardware to drop packets when the buffer is
   4823  *   full. This is useful with multiqueue, so that no single
   4824  *   queue being full stalls the entire RX engine. We only
   4825  *   enable this when Multiqueue is enabled AND Flow Control
   4826  *   is disabled.
   4827  ************************************************************************/
   4828 static void
   4829 ixgbe_enable_rx_drop(struct adapter *adapter)
   4830 {
   4831 	struct ixgbe_hw *hw = &adapter->hw;
   4832 	struct rx_ring  *rxr;
   4833 	u32             srrctl;
   4834 
   4835 	for (int i = 0; i < adapter->num_queues; i++) {
   4836 		rxr = &adapter->rx_rings[i];
   4837 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4838 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4839 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4840 	}
   4841 
   4842 	/* enable drop for each vf */
   4843 	for (int i = 0; i < adapter->num_vfs; i++) {
   4844 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4845 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4846 		    IXGBE_QDE_ENABLE));
   4847 	}
   4848 } /* ixgbe_enable_rx_drop */
   4849 
   4850 /************************************************************************
   4851  * ixgbe_disable_rx_drop
   4852  ************************************************************************/
   4853 static void
   4854 ixgbe_disable_rx_drop(struct adapter *adapter)
   4855 {
   4856 	struct ixgbe_hw *hw = &adapter->hw;
   4857 	struct rx_ring  *rxr;
   4858 	u32             srrctl;
   4859 
   4860 	for (int i = 0; i < adapter->num_queues; i++) {
   4861 		rxr = &adapter->rx_rings[i];
   4862         	srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4863         	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   4864         	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4865 	}
   4866 
   4867 	/* disable drop for each vf */
   4868 	for (int i = 0; i < adapter->num_vfs; i++) {
   4869 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4870 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4871 	}
   4872 } /* ixgbe_disable_rx_drop */
   4873 
   4874 /************************************************************************
   4875  * ixgbe_sysctl_advertise
   4876  *
   4877  *   SYSCTL wrapper around setting advertised speed
   4878  ************************************************************************/
   4879 static int
   4880 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4881 {
   4882 	struct sysctlnode node = *rnode;
   4883 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4884 	int            error = 0, advertise;
   4885 
   4886 	advertise = adapter->advertise;
   4887 	node.sysctl_data = &advertise;
   4888 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4889 	if (error != 0 || newp == NULL)
   4890 		return error;
   4891 
   4892 	return ixgbe_set_advertise(adapter, advertise);
   4893 } /* ixgbe_sysctl_advertise */
   4894 
   4895 /************************************************************************
   4896  * ixgbe_set_advertise - Control advertised link speed
   4897  *
   4898  *   Flags:
   4899  *     0x00 - Default (all capable link speed)
   4900  *     0x01 - advertise 100 Mb
   4901  *     0x02 - advertise 1G
   4902  *     0x04 - advertise 10G
   4903  *     0x08 - advertise 10 Mb
   4904  *     0x10 - advertise 2.5G
   4905  *     0x20 - advertise 5G
   4906  ************************************************************************/
   4907 static int
   4908 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4909 {
   4910 	device_t         dev;
   4911 	struct ixgbe_hw  *hw;
   4912 	ixgbe_link_speed speed = 0;
   4913 	ixgbe_link_speed link_caps = 0;
   4914 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4915 	bool             negotiate = FALSE;
   4916 
   4917 	/* Checks to validate new value */
   4918 	if (adapter->advertise == advertise) /* no change */
   4919 		return (0);
   4920 
   4921 	dev = adapter->dev;
   4922 	hw = &adapter->hw;
   4923 
   4924 	/* No speed changes for backplane media */
   4925 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4926 		return (ENODEV);
   4927 
   4928 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4929 	    (hw->phy.multispeed_fiber))) {
   4930 		device_printf(dev,
   4931 		    "Advertised speed can only be set on copper or "
   4932 		    "multispeed fiber media types.\n");
   4933 		return (EINVAL);
   4934 	}
   4935 
   4936 	if (advertise < 0x0 || advertise > 0x2f) {
   4937 		device_printf(dev,
   4938 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   4939 		return (EINVAL);
   4940 	}
   4941 
   4942 	if (hw->mac.ops.get_link_capabilities) {
   4943 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4944 		    &negotiate);
   4945 		if (err != IXGBE_SUCCESS) {
   4946 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   4947 			return (ENODEV);
   4948 		}
   4949 	}
   4950 
   4951 	/* Set new value and report new advertised mode */
   4952 	if (advertise & 0x1) {
   4953 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4954 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4955 			return (EINVAL);
   4956 		}
   4957 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4958 	}
   4959 	if (advertise & 0x2) {
   4960 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4961 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4962 			return (EINVAL);
   4963 		}
   4964 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4965 	}
   4966 	if (advertise & 0x4) {
   4967 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4968 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4969 			return (EINVAL);
   4970 		}
   4971 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4972 	}
   4973 	if (advertise & 0x8) {
   4974 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4975 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4976 			return (EINVAL);
   4977 		}
   4978 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4979 	}
   4980 	if (advertise & 0x10) {
   4981 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4982 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4983 			return (EINVAL);
   4984 		}
   4985 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4986 	}
   4987 	if (advertise & 0x20) {
   4988 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4989 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4990 			return (EINVAL);
   4991 		}
   4992 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4993 	}
   4994 	if (advertise == 0)
   4995 		speed = link_caps; /* All capable link speed */
   4996 
   4997 	hw->mac.autotry_restart = TRUE;
   4998 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4999 	adapter->advertise = advertise;
   5000 
   5001 	return (0);
   5002 } /* ixgbe_set_advertise */
   5003 
   5004 /************************************************************************
   5005  * ixgbe_get_advertise - Get current advertised speed settings
   5006  *
   5007  *   Formatted for sysctl usage.
   5008  *   Flags:
   5009  *     0x01 - advertise 100 Mb
   5010  *     0x02 - advertise 1G
   5011  *     0x04 - advertise 10G
   5012  *     0x08 - advertise 10 Mb (yes, Mb)
   5013  *     0x10 - advertise 2.5G
   5014  *     0x20 - advertise 5G
   5015  ************************************************************************/
   5016 static int
   5017 ixgbe_get_advertise(struct adapter *adapter)
   5018 {
   5019 	struct ixgbe_hw  *hw = &adapter->hw;
   5020 	int              speed;
   5021 	ixgbe_link_speed link_caps = 0;
   5022 	s32              err;
   5023 	bool             negotiate = FALSE;
   5024 
   5025 	/*
   5026 	 * Advertised speed means nothing unless it's copper or
   5027 	 * multi-speed fiber
   5028 	 */
   5029 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5030 	    !(hw->phy.multispeed_fiber))
   5031 		return (0);
   5032 
   5033 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5034 	if (err != IXGBE_SUCCESS)
   5035 		return (0);
   5036 
   5037 	speed =
   5038 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5039 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5040 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5041 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5042 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5043 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5044 
   5045 	return speed;
   5046 } /* ixgbe_get_advertise */
   5047 
   5048 /************************************************************************
   5049  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5050  *
   5051  *   Control values:
   5052  *     0/1 - off / on (use default value of 1000)
   5053  *
   5054  *     Legal timer values are:
   5055  *     50,100,250,500,1000,2000,5000,10000
   5056  *
   5057  *     Turning off interrupt moderation will also turn this off.
   5058  ************************************************************************/
   5059 static int
   5060 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5061 {
   5062 	struct sysctlnode node = *rnode;
   5063 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5064 	struct ifnet   *ifp = adapter->ifp;
   5065 	int            error;
   5066 	int            newval;
   5067 
   5068 	newval = adapter->dmac;
   5069 	node.sysctl_data = &newval;
   5070 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5071 	if ((error) || (newp == NULL))
   5072 		return (error);
   5073 
   5074 	switch (newval) {
   5075 	case 0:
   5076 		/* Disabled */
   5077 		adapter->dmac = 0;
   5078 		break;
   5079 	case 1:
   5080 		/* Enable and use default */
   5081 		adapter->dmac = 1000;
   5082 		break;
   5083 	case 50:
   5084 	case 100:
   5085 	case 250:
   5086 	case 500:
   5087 	case 1000:
   5088 	case 2000:
   5089 	case 5000:
   5090 	case 10000:
   5091 		/* Legal values - allow */
   5092 		adapter->dmac = newval;
   5093 		break;
   5094 	default:
   5095 		/* Do nothing, illegal value */
   5096 		return (EINVAL);
   5097 	}
   5098 
   5099 	/* Re-initialize hardware if it's already running */
   5100 	if (ifp->if_flags & IFF_RUNNING)
   5101 		ixgbe_init(ifp);
   5102 
   5103 	return (0);
   5104 }
   5105 
   5106 #ifdef IXGBE_DEBUG
   5107 /************************************************************************
   5108  * ixgbe_sysctl_power_state
   5109  *
   5110  *   Sysctl to test power states
   5111  *   Values:
   5112  *     0      - set device to D0
   5113  *     3      - set device to D3
   5114  *     (none) - get current device power state
   5115  ************************************************************************/
   5116 static int
   5117 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   5118 {
   5119 #ifdef notyet
   5120 	struct sysctlnode node = *rnode;
   5121 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5122 	device_t       dev =  adapter->dev;
   5123 	int            curr_ps, new_ps, error = 0;
   5124 
   5125 	curr_ps = new_ps = pci_get_powerstate(dev);
   5126 
   5127 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5128 	if ((error) || (req->newp == NULL))
   5129 		return (error);
   5130 
   5131 	if (new_ps == curr_ps)
   5132 		return (0);
   5133 
   5134 	if (new_ps == 3 && curr_ps == 0)
   5135 		error = DEVICE_SUSPEND(dev);
   5136 	else if (new_ps == 0 && curr_ps == 3)
   5137 		error = DEVICE_RESUME(dev);
   5138 	else
   5139 		return (EINVAL);
   5140 
   5141 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5142 
   5143 	return (error);
   5144 #else
   5145 	return 0;
   5146 #endif
   5147 } /* ixgbe_sysctl_power_state */
   5148 #endif
   5149 
   5150 /************************************************************************
   5151  * ixgbe_sysctl_wol_enable
   5152  *
   5153  *   Sysctl to enable/disable the WoL capability,
   5154  *   if supported by the adapter.
   5155  *
   5156  *   Values:
   5157  *     0 - disabled
   5158  *     1 - enabled
   5159  ************************************************************************/
   5160 static int
   5161 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5162 {
   5163 	struct sysctlnode node = *rnode;
   5164 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5165 	struct ixgbe_hw *hw = &adapter->hw;
   5166 	bool            new_wol_enabled;
   5167 	int             error = 0;
   5168 
   5169 	new_wol_enabled = hw->wol_enabled;
   5170 	node.sysctl_data = &new_wol_enabled;
   5171 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5172 	if ((error) || (newp == NULL))
   5173 		return (error);
   5174 	if (new_wol_enabled == hw->wol_enabled)
   5175 		return (0);
   5176 
   5177 	if (new_wol_enabled && !adapter->wol_support)
   5178 		return (ENODEV);
   5179 	else
   5180 		hw->wol_enabled = new_wol_enabled;
   5181 
   5182 	return (0);
   5183 } /* ixgbe_sysctl_wol_enable */
   5184 
   5185 /************************************************************************
   5186  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5187  *
   5188  *   Sysctl to enable/disable the types of packets that the
   5189  *   adapter will wake up on upon receipt.
   5190  *   Flags:
   5191  *     0x1  - Link Status Change
   5192  *     0x2  - Magic Packet
   5193  *     0x4  - Direct Exact
   5194  *     0x8  - Directed Multicast
   5195  *     0x10 - Broadcast
   5196  *     0x20 - ARP/IPv4 Request Packet
   5197  *     0x40 - Direct IPv4 Packet
   5198  *     0x80 - Direct IPv6 Packet
   5199  *
   5200  *   Settings not listed above will cause the sysctl to return an error.
   5201  ************************************************************************/
   5202 static int
   5203 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5204 {
   5205 	struct sysctlnode node = *rnode;
   5206 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5207 	int error = 0;
   5208 	u32 new_wufc;
   5209 
   5210 	new_wufc = adapter->wufc;
   5211 	node.sysctl_data = &new_wufc;
   5212 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5213 	if ((error) || (newp == NULL))
   5214 		return (error);
   5215 	if (new_wufc == adapter->wufc)
   5216 		return (0);
   5217 
   5218 	if (new_wufc & 0xffffff00)
   5219 		return (EINVAL);
   5220 
   5221 	new_wufc &= 0xff;
   5222 	new_wufc |= (0xffffff & adapter->wufc);
   5223 	adapter->wufc = new_wufc;
   5224 
   5225 	return (0);
   5226 } /* ixgbe_sysctl_wufc */
   5227 
   5228 #ifdef IXGBE_DEBUG
   5229 /************************************************************************
   5230  * ixgbe_sysctl_print_rss_config
   5231  ************************************************************************/
   5232 static int
   5233 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5234 {
   5235 #ifdef notyet
   5236 	struct sysctlnode node = *rnode;
   5237 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5238 	struct ixgbe_hw *hw = &adapter->hw;
   5239 	device_t        dev = adapter->dev;
   5240 	struct sbuf     *buf;
   5241 	int             error = 0, reta_size;
   5242 	u32             reg;
   5243 
   5244 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5245 	if (!buf) {
   5246 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5247 		return (ENOMEM);
   5248 	}
   5249 
   5250 	// TODO: use sbufs to make a string to print out
   5251 	/* Set multiplier for RETA setup and table size based on MAC */
   5252 	switch (adapter->hw.mac.type) {
   5253 	case ixgbe_mac_X550:
   5254 	case ixgbe_mac_X550EM_x:
   5255 	case ixgbe_mac_X550EM_a:
   5256 		reta_size = 128;
   5257 		break;
   5258 	default:
   5259 		reta_size = 32;
   5260 		break;
   5261 	}
   5262 
   5263 	/* Print out the redirection table */
   5264 	sbuf_cat(buf, "\n");
   5265 	for (int i = 0; i < reta_size; i++) {
   5266 		if (i < 32) {
   5267 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5268 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5269 		} else {
   5270 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5271 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5272 		}
   5273 	}
   5274 
   5275 	// TODO: print more config
   5276 
   5277 	error = sbuf_finish(buf);
   5278 	if (error)
   5279 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5280 
   5281 	sbuf_delete(buf);
   5282 #endif
   5283 	return (0);
   5284 } /* ixgbe_sysctl_print_rss_config */
   5285 #endif /* IXGBE_DEBUG */
   5286 
   5287 /************************************************************************
   5288  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5289  *
   5290  *   For X552/X557-AT devices using an external PHY
   5291  ************************************************************************/
   5292 static int
   5293 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5294 {
   5295 	struct sysctlnode node = *rnode;
   5296 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5297 	struct ixgbe_hw *hw = &adapter->hw;
   5298 	int val;
   5299 	u16 reg;
   5300 	int		error;
   5301 
   5302 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5303 		device_printf(adapter->dev,
   5304 		    "Device has no supported external thermal sensor.\n");
   5305 		return (ENODEV);
   5306 	}
   5307 
   5308 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5309 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5310 		device_printf(adapter->dev,
   5311 		    "Error reading from PHY's current temperature register\n");
   5312 		return (EAGAIN);
   5313 	}
   5314 
   5315 	node.sysctl_data = &val;
   5316 
   5317 	/* Shift temp for output */
   5318 	val = reg >> 8;
   5319 
   5320 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5321 	if ((error) || (newp == NULL))
   5322 		return (error);
   5323 
   5324 	return (0);
   5325 } /* ixgbe_sysctl_phy_temp */
   5326 
   5327 /************************************************************************
   5328  * ixgbe_sysctl_phy_overtemp_occurred
   5329  *
   5330  *   Reports (directly from the PHY) whether the current PHY
   5331  *   temperature is over the overtemp threshold.
   5332  ************************************************************************/
   5333 static int
   5334 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5335 {
   5336 	struct sysctlnode node = *rnode;
   5337 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5338 	struct ixgbe_hw *hw = &adapter->hw;
   5339 	int val, error;
   5340 	u16 reg;
   5341 
   5342 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5343 		device_printf(adapter->dev,
   5344 		    "Device has no supported external thermal sensor.\n");
   5345 		return (ENODEV);
   5346 	}
   5347 
   5348 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5349 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5350 		device_printf(adapter->dev,
   5351 		    "Error reading from PHY's temperature status register\n");
   5352 		return (EAGAIN);
   5353 	}
   5354 
   5355 	node.sysctl_data = &val;
   5356 
   5357 	/* Get occurrence bit */
   5358 	val = !!(reg & 0x4000);
   5359 
   5360 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5361 	if ((error) || (newp == NULL))
   5362 		return (error);
   5363 
   5364 	return (0);
   5365 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5366 
   5367 /************************************************************************
   5368  * ixgbe_sysctl_eee_state
   5369  *
   5370  *   Sysctl to set EEE power saving feature
   5371  *   Values:
   5372  *     0      - disable EEE
   5373  *     1      - enable EEE
   5374  *     (none) - get current device EEE state
   5375  ************************************************************************/
   5376 static int
   5377 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5378 {
   5379 	struct sysctlnode node = *rnode;
   5380 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5381 	struct ifnet   *ifp = adapter->ifp;
   5382 	device_t       dev = adapter->dev;
   5383 	int            curr_eee, new_eee, error = 0;
   5384 	s32            retval;
   5385 
   5386 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5387 	node.sysctl_data = &new_eee;
   5388 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5389 	if ((error) || (newp == NULL))
   5390 		return (error);
   5391 
   5392 	/* Nothing to do */
   5393 	if (new_eee == curr_eee)
   5394 		return (0);
   5395 
   5396 	/* Not supported */
   5397 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5398 		return (EINVAL);
   5399 
   5400 	/* Bounds checking */
   5401 	if ((new_eee < 0) || (new_eee > 1))
   5402 		return (EINVAL);
   5403 
   5404 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5405 	if (retval) {
   5406 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5407 		return (EINVAL);
   5408 	}
   5409 
   5410 	/* Restart auto-neg */
   5411 	ixgbe_init(ifp);
   5412 
   5413 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5414 
   5415 	/* Cache new value */
   5416 	if (new_eee)
   5417 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5418 	else
   5419 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5420 
   5421 	return (error);
   5422 } /* ixgbe_sysctl_eee_state */
   5423 
   5424 /************************************************************************
   5425  * ixgbe_init_device_features
   5426  ************************************************************************/
   5427 static void
   5428 ixgbe_init_device_features(struct adapter *adapter)
   5429 {
   5430 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5431 	                  | IXGBE_FEATURE_RSS
   5432 	                  | IXGBE_FEATURE_MSI
   5433 	                  | IXGBE_FEATURE_MSIX
   5434 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5435 	                  | IXGBE_FEATURE_LEGACY_TX;
   5436 
   5437 	/* Set capabilities first... */
   5438 	switch (adapter->hw.mac.type) {
   5439 	case ixgbe_mac_82598EB:
   5440 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5441 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5442 		break;
   5443 	case ixgbe_mac_X540:
   5444 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5445 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5446 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5447 		    (adapter->hw.bus.func == 0))
   5448 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5449 		break;
   5450 	case ixgbe_mac_X550:
   5451 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5452 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5453 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5454 		break;
   5455 	case ixgbe_mac_X550EM_x:
   5456 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5457 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5458 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5459 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5460 		break;
   5461 	case ixgbe_mac_X550EM_a:
   5462 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5463 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5464 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5465 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5466 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5467 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5468 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5469 		}
   5470 		break;
   5471 	case ixgbe_mac_82599EB:
   5472 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5473 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5474 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5475 		    (adapter->hw.bus.func == 0))
   5476 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5477 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5478 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5479 		break;
   5480 	default:
   5481 		break;
   5482 	}
   5483 
   5484 	/* Enabled by default... */
   5485 	/* Fan failure detection */
   5486 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5487 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5488 	/* Netmap */
   5489 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5490 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5491 	/* EEE */
   5492 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5493 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5494 	/* Thermal Sensor */
   5495 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5496 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5497 
   5498 	/* Enabled via global sysctl... */
   5499 	/* Flow Director */
   5500 	if (ixgbe_enable_fdir) {
   5501 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5502 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5503 		else
   5504 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   5505 	}
   5506 	/* Legacy (single queue) transmit */
   5507 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5508 	    ixgbe_enable_legacy_tx)
   5509 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5510 	/*
   5511 	 * Message Signal Interrupts - Extended (MSI-X)
   5512 	 * Normal MSI is only enabled if MSI-X calls fail.
   5513 	 */
   5514 	if (!ixgbe_enable_msix)
   5515 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5516 	/* Receive-Side Scaling (RSS) */
   5517 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5518 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5519 
   5520 	/* Disable features with unmet dependencies... */
   5521 	/* No MSI-X */
   5522 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5523 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5524 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5525 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5526 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5527 	}
   5528 } /* ixgbe_init_device_features */
   5529 
   5530 /************************************************************************
   5531  * ixgbe_probe - Device identification routine
   5532  *
   5533  *   Determines if the driver should be loaded on
   5534  *   adapter based on its PCI vendor/device ID.
   5535  *
   5536  *   return BUS_PROBE_DEFAULT on success, positive on failure
   5537  ************************************************************************/
   5538 static int
   5539 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5540 {
   5541 	const struct pci_attach_args *pa = aux;
   5542 
   5543 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5544 }
   5545 
   5546 static ixgbe_vendor_info_t *
   5547 ixgbe_lookup(const struct pci_attach_args *pa)
   5548 {
   5549 	ixgbe_vendor_info_t *ent;
   5550 	pcireg_t subid;
   5551 
   5552 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5553 
   5554 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5555 		return NULL;
   5556 
   5557 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5558 
   5559 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5560 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5561 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5562 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5563 			(ent->subvendor_id == 0)) &&
   5564 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5565 			(ent->subdevice_id == 0))) {
   5566 			++ixgbe_total_ports;
   5567 			return ent;
   5568 		}
   5569 	}
   5570 	return NULL;
   5571 }
   5572 
   5573 static int
   5574 ixgbe_ifflags_cb(struct ethercom *ec)
   5575 {
   5576 	struct ifnet *ifp = &ec->ec_if;
   5577 	struct adapter *adapter = ifp->if_softc;
   5578 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5579 
   5580 	IXGBE_CORE_LOCK(adapter);
   5581 
   5582 	if (change != 0)
   5583 		adapter->if_flags = ifp->if_flags;
   5584 
   5585 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5586 		rc = ENETRESET;
   5587 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5588 		ixgbe_set_promisc(adapter);
   5589 
   5590 	/* Set up VLAN support and filter */
   5591 	ixgbe_setup_vlan_hw_support(adapter);
   5592 
   5593 	IXGBE_CORE_UNLOCK(adapter);
   5594 
   5595 	return rc;
   5596 }
   5597 
   5598 /************************************************************************
   5599  * ixgbe_ioctl - Ioctl entry point
   5600  *
   5601  *   Called when the user wants to configure the interface.
   5602  *
   5603  *   return 0 on success, positive on failure
   5604  ************************************************************************/
   5605 static int
   5606 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5607 {
   5608 	struct adapter	*adapter = ifp->if_softc;
   5609 	struct ixgbe_hw *hw = &adapter->hw;
   5610 	struct ifcapreq *ifcr = data;
   5611 	struct ifreq	*ifr = data;
   5612 	int             error = 0;
   5613 	int l4csum_en;
   5614 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5615 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5616 
   5617 	switch (command) {
   5618 	case SIOCSIFFLAGS:
   5619 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5620 		break;
   5621 	case SIOCADDMULTI:
   5622 	case SIOCDELMULTI:
   5623 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5624 		break;
   5625 	case SIOCSIFMEDIA:
   5626 	case SIOCGIFMEDIA:
   5627 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5628 		break;
   5629 	case SIOCSIFCAP:
   5630 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5631 		break;
   5632 	case SIOCSIFMTU:
   5633 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5634 		break;
   5635 #ifdef __NetBSD__
   5636 	case SIOCINITIFADDR:
   5637 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5638 		break;
   5639 	case SIOCGIFFLAGS:
   5640 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5641 		break;
   5642 	case SIOCGIFAFLAG_IN:
   5643 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5644 		break;
   5645 	case SIOCGIFADDR:
   5646 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5647 		break;
   5648 	case SIOCGIFMTU:
   5649 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5650 		break;
   5651 	case SIOCGIFCAP:
   5652 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5653 		break;
   5654 	case SIOCGETHERCAP:
   5655 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5656 		break;
   5657 	case SIOCGLIFADDR:
   5658 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5659 		break;
   5660 	case SIOCZIFDATA:
   5661 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5662 		hw->mac.ops.clear_hw_cntrs(hw);
   5663 		ixgbe_clear_evcnt(adapter);
   5664 		break;
   5665 	case SIOCAIFADDR:
   5666 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5667 		break;
   5668 #endif
   5669 	default:
   5670 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5671 		break;
   5672 	}
   5673 
   5674 	switch (command) {
   5675 	case SIOCSIFMEDIA:
   5676 	case SIOCGIFMEDIA:
   5677 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5678 	case SIOCGI2C:
   5679 	{
   5680 		struct ixgbe_i2c_req	i2c;
   5681 
   5682 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5683 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5684 		if (error != 0)
   5685 			break;
   5686 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5687 			error = EINVAL;
   5688 			break;
   5689 		}
   5690 		if (i2c.len > sizeof(i2c.data)) {
   5691 			error = EINVAL;
   5692 			break;
   5693 		}
   5694 
   5695 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5696 		    i2c.dev_addr, i2c.data);
   5697 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5698 		break;
   5699 	}
   5700 	case SIOCSIFCAP:
   5701 		/* Layer-4 Rx checksum offload has to be turned on and
   5702 		 * off as a unit.
   5703 		 */
   5704 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5705 		if (l4csum_en != l4csum && l4csum_en != 0)
   5706 			return EINVAL;
   5707 		/*FALLTHROUGH*/
   5708 	case SIOCADDMULTI:
   5709 	case SIOCDELMULTI:
   5710 	case SIOCSIFFLAGS:
   5711 	case SIOCSIFMTU:
   5712 	default:
   5713 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5714 			return error;
   5715 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5716 			;
   5717 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5718 			IXGBE_CORE_LOCK(adapter);
   5719 			ixgbe_init_locked(adapter);
   5720 			ixgbe_recalculate_max_frame(adapter);
   5721 			IXGBE_CORE_UNLOCK(adapter);
   5722 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5723 			/*
   5724 			 * Multicast list has changed; set the hardware filter
   5725 			 * accordingly.
   5726 			 */
   5727 			IXGBE_CORE_LOCK(adapter);
   5728 			ixgbe_disable_intr(adapter);
   5729 			ixgbe_set_multi(adapter);
   5730 			ixgbe_enable_intr(adapter);
   5731 			IXGBE_CORE_UNLOCK(adapter);
   5732 		}
   5733 		return 0;
   5734 	}
   5735 
   5736 	return error;
   5737 } /* ixgbe_ioctl */
   5738 
   5739 /************************************************************************
   5740  * ixgbe_check_fan_failure
   5741  ************************************************************************/
   5742 static void
   5743 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5744 {
   5745 	u32 mask;
   5746 
   5747 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5748 	    IXGBE_ESDP_SDP1;
   5749 
   5750 	if (reg & mask)
   5751 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5752 } /* ixgbe_check_fan_failure */
   5753 
   5754 /************************************************************************
   5755  * ixgbe_handle_que
   5756  ************************************************************************/
   5757 static void
   5758 ixgbe_handle_que(void *context)
   5759 {
   5760 	struct ix_queue *que = context;
   5761 	struct adapter  *adapter = que->adapter;
   5762 	struct tx_ring  *txr = que->txr;
   5763 	struct ifnet    *ifp = adapter->ifp;
   5764 	bool		more = false;
   5765 
   5766 	adapter->handleq.ev_count++;
   5767 
   5768 	if (ifp->if_flags & IFF_RUNNING) {
   5769 		more = ixgbe_rxeof(que);
   5770 		IXGBE_TX_LOCK(txr);
   5771 		ixgbe_txeof(txr);
   5772 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5773 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5774 				ixgbe_mq_start_locked(ifp, txr);
   5775 		/* Only for queue 0 */
   5776 		/* NetBSD still needs this for CBQ */
   5777 		if ((&adapter->queues[0] == que)
   5778 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5779 			ixgbe_legacy_start_locked(ifp, txr);
   5780 		IXGBE_TX_UNLOCK(txr);
   5781 	}
   5782 
   5783 	if (more)
   5784 		softint_schedule(que->que_si);
   5785 	else if (que->res != NULL) {
   5786 		/* Re-enable this interrupt */
   5787 		ixgbe_enable_queue(adapter, que->msix);
   5788 	} else
   5789 		ixgbe_enable_intr(adapter);
   5790 
   5791 	return;
   5792 } /* ixgbe_handle_que */
   5793 
   5794 /************************************************************************
   5795  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5796  ************************************************************************/
   5797 static int
   5798 ixgbe_allocate_legacy(struct adapter *adapter,
   5799     const struct pci_attach_args *pa)
   5800 {
   5801 	device_t	dev = adapter->dev;
   5802 	struct ix_queue *que = adapter->queues;
   5803 	struct tx_ring  *txr = adapter->tx_rings;
   5804 	int		counts[PCI_INTR_TYPE_SIZE];
   5805 	pci_intr_type_t intr_type, max_type;
   5806 	char            intrbuf[PCI_INTRSTR_LEN];
   5807 	const char	*intrstr = NULL;
   5808 
   5809 	/* We allocate a single interrupt resource */
   5810 	max_type = PCI_INTR_TYPE_MSI;
   5811 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5812 	counts[PCI_INTR_TYPE_MSI] =
   5813 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5814 	/* Check not feat_en but feat_cap to fallback to INTx */
   5815 	counts[PCI_INTR_TYPE_INTX] =
   5816 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5817 
   5818 alloc_retry:
   5819 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5820 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5821 		return ENXIO;
   5822 	}
   5823 	adapter->osdep.nintrs = 1;
   5824 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5825 	    intrbuf, sizeof(intrbuf));
   5826 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5827 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5828 	    device_xname(dev));
   5829 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   5830 	if (adapter->osdep.ihs[0] == NULL) {
   5831 		aprint_error_dev(dev,"unable to establish %s\n",
   5832 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5833 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5834 		adapter->osdep.intrs = NULL;
   5835 		switch (intr_type) {
   5836 		case PCI_INTR_TYPE_MSI:
   5837 			/* The next try is for INTx: Disable MSI */
   5838 			max_type = PCI_INTR_TYPE_INTX;
   5839 			counts[PCI_INTR_TYPE_INTX] = 1;
   5840 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5841 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   5842 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5843 				goto alloc_retry;
   5844 			} else
   5845 				break;
   5846 		case PCI_INTR_TYPE_INTX:
   5847 		default:
   5848 			/* See below */
   5849 			break;
   5850 		}
   5851 	}
   5852 	if (intr_type == PCI_INTR_TYPE_INTX) {
   5853 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5854 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5855 	}
   5856 	if (adapter->osdep.ihs[0] == NULL) {
   5857 		aprint_error_dev(dev,
   5858 		    "couldn't establish interrupt%s%s\n",
   5859 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5860 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5861 		adapter->osdep.intrs = NULL;
   5862 		return ENXIO;
   5863 	}
   5864 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5865 	/*
   5866 	 * Try allocating a fast interrupt and the associated deferred
   5867 	 * processing contexts.
   5868 	 */
   5869 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5870 		txr->txr_si =
   5871 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5872 			ixgbe_deferred_mq_start, txr);
   5873 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5874 	    ixgbe_handle_que, que);
   5875 
   5876 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   5877 		& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   5878 		aprint_error_dev(dev,
   5879 		    "could not establish software interrupts\n");
   5880 
   5881 		return ENXIO;
   5882 	}
   5883 	/* For simplicity in the handlers */
   5884 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5885 
   5886 	return (0);
   5887 } /* ixgbe_allocate_legacy */
   5888 
   5889 
   5890 /************************************************************************
   5891  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5892  ************************************************************************/
   5893 static int
   5894 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5895 {
   5896 	device_t        dev = adapter->dev;
   5897 	struct 		ix_queue *que = adapter->queues;
   5898 	struct  	tx_ring *txr = adapter->tx_rings;
   5899 	pci_chipset_tag_t pc;
   5900 	char		intrbuf[PCI_INTRSTR_LEN];
   5901 	char		intr_xname[32];
   5902 	const char	*intrstr = NULL;
   5903 	int 		error, vector = 0;
   5904 	int		cpu_id = 0;
   5905 	kcpuset_t	*affinity;
   5906 #ifdef RSS
   5907 	unsigned int    rss_buckets = 0;
   5908 	kcpuset_t	cpu_mask;
   5909 #endif
   5910 
   5911 	pc = adapter->osdep.pc;
   5912 #ifdef	RSS
   5913 	/*
   5914 	 * If we're doing RSS, the number of queues needs to
   5915 	 * match the number of RSS buckets that are configured.
   5916 	 *
   5917 	 * + If there's more queues than RSS buckets, we'll end
   5918 	 *   up with queues that get no traffic.
   5919 	 *
   5920 	 * + If there's more RSS buckets than queues, we'll end
   5921 	 *   up having multiple RSS buckets map to the same queue,
   5922 	 *   so there'll be some contention.
   5923 	 */
   5924 	rss_buckets = rss_getnumbuckets();
   5925 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5926 	    (adapter->num_queues != rss_buckets)) {
   5927 		device_printf(dev,
   5928 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5929 		    "; performance will be impacted.\n",
   5930 		    __func__, adapter->num_queues, rss_buckets);
   5931 	}
   5932 #endif
   5933 
   5934 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5935 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5936 	    adapter->osdep.nintrs) != 0) {
   5937 		aprint_error_dev(dev,
   5938 		    "failed to allocate MSI-X interrupt\n");
   5939 		return (ENXIO);
   5940 	}
   5941 
   5942 	kcpuset_create(&affinity, false);
   5943 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5944 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5945 		    device_xname(dev), i);
   5946 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5947 		    sizeof(intrbuf));
   5948 #ifdef IXGBE_MPSAFE
   5949 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5950 		    true);
   5951 #endif
   5952 		/* Set the handler function */
   5953 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5954 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5955 		    intr_xname);
   5956 		if (que->res == NULL) {
   5957 			aprint_error_dev(dev,
   5958 			    "Failed to register QUE handler\n");
   5959 			error = ENXIO;
   5960 			goto err_out;
   5961 		}
   5962 		que->msix = vector;
   5963 		adapter->active_queues |= (u64)(1 << que->msix);
   5964 
   5965 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5966 #ifdef	RSS
   5967 			/*
   5968 			 * The queue ID is used as the RSS layer bucket ID.
   5969 			 * We look up the queue ID -> RSS CPU ID and select
   5970 			 * that.
   5971 			 */
   5972 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5973 			CPU_SETOF(cpu_id, &cpu_mask);
   5974 #endif
   5975 		} else {
   5976 			/*
   5977 			 * Bind the MSI-X vector, and thus the
   5978 			 * rings to the corresponding CPU.
   5979 			 *
   5980 			 * This just happens to match the default RSS
   5981 			 * round-robin bucket -> queue -> CPU allocation.
   5982 			 */
   5983 			if (adapter->num_queues > 1)
   5984 				cpu_id = i;
   5985 		}
   5986 		/* Round-robin affinity */
   5987 		kcpuset_zero(affinity);
   5988 		kcpuset_set(affinity, cpu_id % ncpu);
   5989 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5990 		    NULL);
   5991 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5992 		    intrstr);
   5993 		if (error == 0) {
   5994 #if 1 /* def IXGBE_DEBUG */
   5995 #ifdef	RSS
   5996 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   5997 			    cpu_id % ncpu);
   5998 #else
   5999 			aprint_normal(", bound queue %d to cpu %d", i,
   6000 			    cpu_id % ncpu);
   6001 #endif
   6002 #endif /* IXGBE_DEBUG */
   6003 		}
   6004 		aprint_normal("\n");
   6005 
   6006 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6007 			txr->txr_si = softint_establish(
   6008 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6009 				ixgbe_deferred_mq_start, txr);
   6010 			if (txr->txr_si == NULL) {
   6011 				aprint_error_dev(dev,
   6012 				    "couldn't establish software interrupt\n");
   6013 				error = ENXIO;
   6014 				goto err_out;
   6015 			}
   6016 		}
   6017 		que->que_si
   6018 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6019 			ixgbe_handle_que, que);
   6020 		if (que->que_si == NULL) {
   6021 			aprint_error_dev(dev,
   6022 			    "couldn't establish software interrupt\n");
   6023 			error = ENXIO;
   6024 			goto err_out;
   6025 		}
   6026 	}
   6027 
   6028 	/* and Link */
   6029 	cpu_id++;
   6030 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6031 	adapter->vector = vector;
   6032 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6033 	    sizeof(intrbuf));
   6034 #ifdef IXGBE_MPSAFE
   6035 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6036 	    true);
   6037 #endif
   6038 	/* Set the link handler function */
   6039 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6040 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6041 	    intr_xname);
   6042 	if (adapter->osdep.ihs[vector] == NULL) {
   6043 		adapter->res = NULL;
   6044 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6045 		error = ENXIO;
   6046 		goto err_out;
   6047 	}
   6048 	/* Round-robin affinity */
   6049 	kcpuset_zero(affinity);
   6050 	kcpuset_set(affinity, cpu_id % ncpu);
   6051 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6052 	    NULL);
   6053 
   6054 	aprint_normal_dev(dev,
   6055 	    "for link, interrupting at %s", intrstr);
   6056 	if (error == 0)
   6057 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6058 	else
   6059 		aprint_normal("\n");
   6060 
   6061 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6062 		adapter->mbx_si =
   6063 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6064 			ixgbe_handle_mbx, adapter);
   6065 		if (adapter->mbx_si == NULL) {
   6066 			aprint_error_dev(dev,
   6067 			    "could not establish software interrupts\n");
   6068 
   6069 			error = ENXIO;
   6070 			goto err_out;
   6071 		}
   6072 	}
   6073 
   6074 	kcpuset_destroy(affinity);
   6075 	aprint_normal_dev(dev,
   6076 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6077 
   6078 	return (0);
   6079 
   6080 err_out:
   6081 	kcpuset_destroy(affinity);
   6082 	ixgbe_free_softint(adapter);
   6083 	ixgbe_free_pciintr_resources(adapter);
   6084 	return (error);
   6085 } /* ixgbe_allocate_msix */
   6086 
   6087 /************************************************************************
   6088  * ixgbe_configure_interrupts
   6089  *
   6090  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6091  *   This will also depend on user settings.
   6092  ************************************************************************/
   6093 static int
   6094 ixgbe_configure_interrupts(struct adapter *adapter)
   6095 {
   6096 	device_t dev = adapter->dev;
   6097 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6098 	int want, queues, msgs;
   6099 
   6100 	/* Default to 1 queue if MSI-X setup fails */
   6101 	adapter->num_queues = 1;
   6102 
   6103 	/* Override by tuneable */
   6104 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6105 		goto msi;
   6106 
   6107 	/*
   6108 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6109 	 * interrupt slot.
   6110 	 */
   6111 	if (ncpu == 1)
   6112 		goto msi;
   6113 
   6114 	/* First try MSI-X */
   6115 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6116 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6117 	if (msgs < 2)
   6118 		goto msi;
   6119 
   6120 	adapter->msix_mem = (void *)1; /* XXX */
   6121 
   6122 	/* Figure out a reasonable auto config value */
   6123 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6124 
   6125 #ifdef	RSS
   6126 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6127 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6128 		queues = min(queues, rss_getnumbuckets());
   6129 #endif
   6130 	if (ixgbe_num_queues > queues) {
   6131 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6132 		ixgbe_num_queues = queues;
   6133 	}
   6134 
   6135 	if (ixgbe_num_queues != 0)
   6136 		queues = ixgbe_num_queues;
   6137 	else
   6138 		queues = min(queues,
   6139 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6140 
   6141 	/* reflect correct sysctl value */
   6142 	ixgbe_num_queues = queues;
   6143 
   6144 	/*
   6145 	 * Want one vector (RX/TX pair) per queue
   6146 	 * plus an additional for Link.
   6147 	 */
   6148 	want = queues + 1;
   6149 	if (msgs >= want)
   6150 		msgs = want;
   6151 	else {
   6152                	aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6153 		    "%d vectors but %d queues wanted!\n",
   6154 		    msgs, want);
   6155 		goto msi;
   6156 	}
   6157 	adapter->num_queues = queues;
   6158 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6159 	return (0);
   6160 
   6161 	/*
   6162 	 * MSI-X allocation failed or provided us with
   6163 	 * less vectors than needed. Free MSI-X resources
   6164 	 * and we'll try enabling MSI.
   6165 	 */
   6166 msi:
   6167 	/* Without MSI-X, some features are no longer supported */
   6168 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6169 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6170 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6171 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6172 
   6173        	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6174 	adapter->msix_mem = NULL; /* XXX */
   6175 	if (msgs > 1)
   6176 		msgs = 1;
   6177 	if (msgs != 0) {
   6178 		msgs = 1;
   6179 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6180 		return (0);
   6181 	}
   6182 
   6183 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6184 		aprint_error_dev(dev,
   6185 		    "Device does not support legacy interrupts.\n");
   6186 		return 1;
   6187 	}
   6188 
   6189 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6190 
   6191 	return (0);
   6192 } /* ixgbe_configure_interrupts */
   6193 
   6194 
   6195 /************************************************************************
   6196  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6197  *
   6198  *   Done outside of interrupt context since the driver might sleep
   6199  ************************************************************************/
   6200 static void
   6201 ixgbe_handle_link(void *context)
   6202 {
   6203 	struct adapter  *adapter = context;
   6204 	struct ixgbe_hw *hw = &adapter->hw;
   6205 
   6206 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6207 	ixgbe_update_link_status(adapter);
   6208 
   6209 	/* Re-enable link interrupts */
   6210 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6211 } /* ixgbe_handle_link */
   6212 
   6213 /************************************************************************
   6214  * ixgbe_rearm_queues
   6215  ************************************************************************/
   6216 static void
   6217 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6218 {
   6219 	u32 mask;
   6220 
   6221 	switch (adapter->hw.mac.type) {
   6222 	case ixgbe_mac_82598EB:
   6223 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6224 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6225 		break;
   6226 	case ixgbe_mac_82599EB:
   6227 	case ixgbe_mac_X540:
   6228 	case ixgbe_mac_X550:
   6229 	case ixgbe_mac_X550EM_x:
   6230 	case ixgbe_mac_X550EM_a:
   6231 		mask = (queues & 0xFFFFFFFF);
   6232 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6233 		mask = (queues >> 32);
   6234 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6235 		break;
   6236 	default:
   6237 		break;
   6238 	}
   6239 } /* ixgbe_rearm_queues */
   6240