/* ixgbe.c -- NetBSD ixgbe(4) driver source, revision 1.138 */
      1 /* $NetBSD: ixgbe.c,v 1.138 2018/03/30 03:56:38 knakahara Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
/************************************************************************
 * Driver version
 *
 *   Version string exported by the driver.
 ************************************************************************/
char ixgbe_driver_version[] = "3.2.12-k";
     84 
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
     95 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     96 {
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    140 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    141 	/* required last entry */
    142 	{0, 0, 0, 0, 0}
    143 };
    144 
/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last field of ixgbe_vendor_info_t entries.
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
    151 
    152 /************************************************************************
    153  * Function prototypes
    154  ************************************************************************/
    155 static int      ixgbe_probe(device_t, cfdata_t, void *);
    156 static void     ixgbe_attach(device_t, device_t, void *);
    157 static int      ixgbe_detach(device_t, int);
    158 #if 0
    159 static int      ixgbe_shutdown(device_t);
    160 #endif
    161 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    162 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    163 static int	ixgbe_ifflags_cb(struct ethercom *);
    164 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    165 static void	ixgbe_ifstop(struct ifnet *, int);
    166 static int	ixgbe_init(struct ifnet *);
    167 static void	ixgbe_init_locked(struct adapter *);
    168 static void     ixgbe_stop(void *);
    169 static void     ixgbe_init_device_features(struct adapter *);
    170 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    171 static void	ixgbe_add_media_types(struct adapter *);
    172 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    173 static int      ixgbe_media_change(struct ifnet *);
    174 static int      ixgbe_allocate_pci_resources(struct adapter *,
    175 		    const struct pci_attach_args *);
    176 static void     ixgbe_free_softint(struct adapter *);
    177 static void	ixgbe_get_slot_info(struct adapter *);
    178 static int      ixgbe_allocate_msix(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_allocate_legacy(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static int      ixgbe_configure_interrupts(struct adapter *);
    183 static void	ixgbe_free_pciintr_resources(struct adapter *);
    184 static void	ixgbe_free_pci_resources(struct adapter *);
    185 static void	ixgbe_local_timer(void *);
    186 static void	ixgbe_local_timer1(void *);
    187 static int	ixgbe_setup_interface(device_t, struct adapter *);
    188 static void	ixgbe_config_gpie(struct adapter *);
    189 static void	ixgbe_config_dmac(struct adapter *);
    190 static void	ixgbe_config_delay_values(struct adapter *);
    191 static void	ixgbe_config_link(struct adapter *);
    192 static void	ixgbe_check_wol_support(struct adapter *);
    193 static int	ixgbe_setup_low_power_mode(struct adapter *);
    194 static void	ixgbe_rearm_queues(struct adapter *, u64);
    195 
    196 static void     ixgbe_initialize_transmit_units(struct adapter *);
    197 static void     ixgbe_initialize_receive_units(struct adapter *);
    198 static void	ixgbe_enable_rx_drop(struct adapter *);
    199 static void	ixgbe_disable_rx_drop(struct adapter *);
    200 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    201 
    202 static void     ixgbe_enable_intr(struct adapter *);
    203 static void     ixgbe_disable_intr(struct adapter *);
    204 static void     ixgbe_update_stats_counters(struct adapter *);
    205 static void     ixgbe_set_promisc(struct adapter *);
    206 static void     ixgbe_set_multi(struct adapter *);
    207 static void     ixgbe_update_link_status(struct adapter *);
    208 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    209 static void	ixgbe_configure_ivars(struct adapter *);
    210 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    211 static void	ixgbe_eitr_write(struct ix_queue *, uint32_t);
    212 
    213 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    214 #if 0
    215 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    216 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    217 #endif
    218 
    219 static void	ixgbe_add_device_sysctls(struct adapter *);
    220 static void     ixgbe_add_hw_stats(struct adapter *);
    221 static void	ixgbe_clear_evcnt(struct adapter *);
    222 static int	ixgbe_set_flowcntl(struct adapter *, int);
    223 static int	ixgbe_set_advertise(struct adapter *, int);
    224 static int      ixgbe_get_advertise(struct adapter *);
    225 
    226 /* Sysctl handlers */
    227 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    228 		     const char *, int *, int);
    229 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    231 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    232 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    234 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    235 #ifdef IXGBE_DEBUG
    236 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    237 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    238 #endif
    239 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    240 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    241 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    242 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    243 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    244 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    245 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    246 
    247 /* Support for pluggable optic modules */
    248 static bool	ixgbe_sfp_probe(struct adapter *);
    249 
    250 /* Legacy (single vector) interrupt handler */
    251 static int	ixgbe_legacy_irq(void *);
    252 
    253 /* The MSI/MSI-X Interrupt handlers */
    254 static int	ixgbe_msix_que(void *);
    255 static int	ixgbe_msix_link(void *);
    256 
    257 /* Software interrupts for deferred work */
    258 static void	ixgbe_handle_que(void *);
    259 static void	ixgbe_handle_link(void *);
    260 static void	ixgbe_handle_msf(void *);
    261 static void	ixgbe_handle_mod(void *);
    262 static void	ixgbe_handle_phy(void *);
    263 
    264 /* Workqueue handler for deferred work */
    265 static void	ixgbe_handle_que_work(struct work *, void *);
    266 
    267 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    268 
    269 /************************************************************************
    270  *  NetBSD Device Interface Entry Points
    271  ************************************************************************/
    272 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    273     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    274     DVF_DETACH_SHUTDOWN);
    275 
    276 #if 0
    277 devclass_t ix_devclass;
    278 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    279 
    280 MODULE_DEPEND(ix, pci, 1, 1, 1);
    281 MODULE_DEPEND(ix, ether, 1, 1, 1);
    282 #ifdef DEV_NETMAP
    283 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    284 #endif
    285 #endif
    286 
    287 /*
    288  * TUNEABLE PARAMETERS:
    289  */
    290 
    291 /*
    292  * AIM: Adaptive Interrupt Moderation
    293  * which means that the interrupt rate
    294  * is varied over time based on the
    295  * traffic for that interrupt vector
    296  */
    297 static bool ixgbe_enable_aim = true;
    298 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    299 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    300     "Enable adaptive interrupt moderation");
    301 
    302 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    303 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    304     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    305 
    306 /* How many packets rxeof tries to clean at a time */
    307 static int ixgbe_rx_process_limit = 256;
    308 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    309     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    310 
    311 /* How many packets txeof tries to clean at a time */
    312 static int ixgbe_tx_process_limit = 256;
    313 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    314     &ixgbe_tx_process_limit, 0,
    315     "Maximum number of sent packets to process at a time, -1 means unlimited");
    316 
    317 /* Flow control setting, default to full */
    318 static int ixgbe_flow_control = ixgbe_fc_full;
    319 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    320     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    321 
     322 /* Whether packet processing uses a workqueue or softint */
    323 static bool ixgbe_txrx_workqueue = false;
    324 
    325 /*
    326  * Smart speed setting, default to on
    327  * this only works as a compile option
    328  * right now as its during attach, set
    329  * this to 'ixgbe_smart_speed_off' to
    330  * disable.
    331  */
    332 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    333 
    334 /*
    335  * MSI-X should be the default for best performance,
    336  * but this allows it to be forced off for testing.
    337  */
    338 static int ixgbe_enable_msix = 1;
    339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    340     "Enable MSI-X interrupts");
    341 
    342 /*
    343  * Number of Queues, can be set to 0,
    344  * it then autoconfigures based on the
    345  * number of cpus with a max of 8. This
     346  * can be overridden manually here.
    347  */
    348 static int ixgbe_num_queues = 0;
    349 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    350     "Number of queues to configure, 0 indicates autoconfigure");
    351 
    352 /*
    353  * Number of TX descriptors per ring,
    354  * setting higher than RX as this seems
    355  * the better performing choice.
    356  */
    357 static int ixgbe_txd = PERFORM_TXD;
    358 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    359     "Number of transmit descriptors per queue");
    360 
    361 /* Number of RX descriptors per ring */
    362 static int ixgbe_rxd = PERFORM_RXD;
    363 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    364     "Number of receive descriptors per queue");
    365 
    366 /*
    367  * Defining this on will allow the use
    368  * of unsupported SFP+ modules, note that
    369  * doing so you are on your own :)
    370  */
    371 static int allow_unsupported_sfp = false;
    372 #define TUNABLE_INT(__x, __y)
    373 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    374 
    375 /*
    376  * Not sure if Flow Director is fully baked,
    377  * so we'll default to turning it off.
    378  */
    379 static int ixgbe_enable_fdir = 0;
    380 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    381     "Enable Flow Director");
    382 
    383 /* Legacy Transmit (single queue) */
    384 static int ixgbe_enable_legacy_tx = 0;
    385 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    386     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    387 
    388 /* Receive-Side Scaling */
    389 static int ixgbe_enable_rss = 1;
    390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    391     "Enable Receive-Side Scaling (RSS)");
    392 
    393 /* Keep running tab on them for sanity check */
    394 static int ixgbe_total_ports;
    395 
    396 #if 0
    397 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    398 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    399 #endif
    400 
    401 #ifdef NET_MPSAFE
    402 #define IXGBE_MPSAFE		1
    403 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    404 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    405 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    406 #else
    407 #define IXGBE_CALLOUT_FLAGS	0
    408 #define IXGBE_SOFTINFT_FLAGS	0
    409 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    410 #endif
    411 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    412 
    413 /************************************************************************
    414  * ixgbe_initialize_rss_mapping
    415  ************************************************************************/
    416 static void
    417 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    418 {
    419 	struct ixgbe_hw	*hw = &adapter->hw;
    420 	u32             reta = 0, mrqc, rss_key[10];
    421 	int             queue_id, table_size, index_mult;
    422 	int             i, j;
    423 	u32             rss_hash_config;
    424 
    425 	/* force use default RSS key. */
    426 #ifdef __NetBSD__
    427 	rss_getkey((uint8_t *) &rss_key);
    428 #else
    429 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    430 		/* Fetch the configured RSS key */
    431 		rss_getkey((uint8_t *) &rss_key);
    432 	} else {
    433 		/* set up random bits */
    434 		cprng_fast(&rss_key, sizeof(rss_key));
    435 	}
    436 #endif
    437 
    438 	/* Set multiplier for RETA setup and table size based on MAC */
    439 	index_mult = 0x1;
    440 	table_size = 128;
    441 	switch (adapter->hw.mac.type) {
    442 	case ixgbe_mac_82598EB:
    443 		index_mult = 0x11;
    444 		break;
    445 	case ixgbe_mac_X550:
    446 	case ixgbe_mac_X550EM_x:
    447 	case ixgbe_mac_X550EM_a:
    448 		table_size = 512;
    449 		break;
    450 	default:
    451 		break;
    452 	}
    453 
    454 	/* Set up the redirection table */
    455 	for (i = 0, j = 0; i < table_size; i++, j++) {
    456 		if (j == adapter->num_queues)
    457 			j = 0;
    458 
    459 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    460 			/*
    461 			 * Fetch the RSS bucket id for the given indirection
    462 			 * entry. Cap it at the number of configured buckets
    463 			 * (which is num_queues.)
    464 			 */
    465 			queue_id = rss_get_indirection_to_bucket(i);
    466 			queue_id = queue_id % adapter->num_queues;
    467 		} else
    468 			queue_id = (j * index_mult);
    469 
    470 		/*
    471 		 * The low 8 bits are for hash value (n+0);
    472 		 * The next 8 bits are for hash value (n+1), etc.
    473 		 */
    474 		reta = reta >> 8;
    475 		reta = reta | (((uint32_t) queue_id) << 24);
    476 		if ((i & 3) == 3) {
    477 			if (i < 128)
    478 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    479 			else
    480 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    481 				    reta);
    482 			reta = 0;
    483 		}
    484 	}
    485 
    486 	/* Now fill our hash function seeds */
    487 	for (i = 0; i < 10; i++)
    488 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    489 
    490 	/* Perform hash on these packet types */
    491 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    492 		rss_hash_config = rss_gethashconfig();
    493 	else {
    494 		/*
    495 		 * Disable UDP - IP fragments aren't currently being handled
    496 		 * and so we end up with a mix of 2-tuple and 4-tuple
    497 		 * traffic.
    498 		 */
    499 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    500 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    501 		                | RSS_HASHTYPE_RSS_IPV6
    502 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    503 		                | RSS_HASHTYPE_RSS_IPV6_EX
    504 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    505 	}
    506 
    507 	mrqc = IXGBE_MRQC_RSSEN;
    508 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    509 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    510 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    511 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    512 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    513 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    514 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    515 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    516 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    517 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    518 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    519 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    520 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    521 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    522 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    523 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    524 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    525 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    526 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    527 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    528 } /* ixgbe_initialize_rss_mapping */
    529 
    530 /************************************************************************
    531  * ixgbe_initialize_receive_units - Setup receive registers and features.
    532  ************************************************************************/
    533 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    534 
    535 static void
    536 ixgbe_initialize_receive_units(struct adapter *adapter)
    537 {
    538 	struct	rx_ring	*rxr = adapter->rx_rings;
    539 	struct ixgbe_hw	*hw = &adapter->hw;
    540 	struct ifnet    *ifp = adapter->ifp;
    541 	int             i, j;
    542 	u32		bufsz, fctrl, srrctl, rxcsum;
    543 	u32		hlreg;
    544 
    545 	/*
    546 	 * Make sure receives are disabled while
    547 	 * setting up the descriptor ring
    548 	 */
    549 	ixgbe_disable_rx(hw);
    550 
    551 	/* Enable broadcasts */
    552 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    553 	fctrl |= IXGBE_FCTRL_BAM;
    554 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    555 		fctrl |= IXGBE_FCTRL_DPF;
    556 		fctrl |= IXGBE_FCTRL_PMCF;
    557 	}
    558 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    559 
    560 	/* Set for Jumbo Frames? */
    561 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    562 	if (ifp->if_mtu > ETHERMTU)
    563 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    564 	else
    565 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    566 
    567 #ifdef DEV_NETMAP
    568 	/* CRC stripping is conditional in Netmap */
    569 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    570 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    571 	    !ix_crcstrip)
    572 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    573 	else
    574 #endif /* DEV_NETMAP */
    575 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    576 
    577 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    578 
    579 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    580 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    581 
    582 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    583 		u64 rdba = rxr->rxdma.dma_paddr;
    584 		u32 tqsmreg, reg;
    585 		int regnum = i / 4;	/* 1 register per 4 queues */
    586 		int regshift = i % 4;	/* 4 bits per 1 queue */
    587 		j = rxr->me;
    588 
    589 		/* Setup the Base and Length of the Rx Descriptor Ring */
    590 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    591 		    (rdba & 0x00000000ffffffffULL));
    592 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    593 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    594 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    595 
    596 		/* Set up the SRRCTL register */
    597 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    598 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    599 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    600 		srrctl |= bufsz;
    601 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    602 
    603 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    604 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    605 		reg &= ~(0x000000ff << (regshift * 8));
    606 		reg |= i << (regshift * 8);
    607 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    608 
    609 		/*
    610 		 * Set RQSMR (Receive Queue Statistic Mapping) register.
    611 		 * Register location for queue 0...7 are different between
    612 		 * 82598 and newer.
    613 		 */
    614 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    615 			tqsmreg = IXGBE_TQSMR(regnum);
    616 		else
    617 			tqsmreg = IXGBE_TQSM(regnum);
    618 		reg = IXGBE_READ_REG(hw, tqsmreg);
    619 		reg &= ~(0x000000ff << (regshift * 8));
    620 		reg |= i << (regshift * 8);
    621 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    622 
    623 		/*
    624 		 * Set DROP_EN iff we have no flow control and >1 queue.
    625 		 * Note that srrctl was cleared shortly before during reset,
    626 		 * so we do not need to clear the bit, but do it just in case
    627 		 * this code is moved elsewhere.
    628 		 */
    629 		if (adapter->num_queues > 1 &&
    630 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    631 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    632 		} else {
    633 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    634 		}
    635 
    636 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    637 
    638 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    639 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    640 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    641 
    642 		/* Set the driver rx tail address */
    643 		rxr->tail =  IXGBE_RDT(rxr->me);
    644 	}
    645 
    646 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    647 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    648 		            | IXGBE_PSRTYPE_UDPHDR
    649 		            | IXGBE_PSRTYPE_IPV4HDR
    650 		            | IXGBE_PSRTYPE_IPV6HDR;
    651 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    652 	}
    653 
    654 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    655 
    656 	ixgbe_initialize_rss_mapping(adapter);
    657 
    658 	if (adapter->num_queues > 1) {
    659 		/* RSS and RX IPP Checksum are mutually exclusive */
    660 		rxcsum |= IXGBE_RXCSUM_PCSD;
    661 	}
    662 
    663 	if (ifp->if_capenable & IFCAP_RXCSUM)
    664 		rxcsum |= IXGBE_RXCSUM_PCSD;
    665 
    666 	/* This is useful for calculating UDP/IP fragment checksums */
    667 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    668 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    669 
    670 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    671 
    672 	return;
    673 } /* ixgbe_initialize_receive_units */
    674 
    675 /************************************************************************
    676  * ixgbe_initialize_transmit_units - Enable transmit units.
    677  ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		int j = txr->me;	/* hardware queue index for this ring */

		/* Program ring base (split low/high) and length in bytes */
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		/*
		 * Read-modify-write: 82598 uses a different DCA_TXCTRL
		 * register offset than 82599 and later.  The bit cleared is
		 * descriptor write-back relaxed ordering (DESC_WRO_EN).
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	/* 82599 and later: enable transmit DMA and program the MTQC mode */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter once MTQC is set */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
    747 
    748 /************************************************************************
    749  * ixgbe_attach - Device initialization routine
    750  *
    751  *   Called when the driver is being loaded.
    752  *   Identifies the type of hardware, allocates all resources
    753  *   and initializes the hardware.
    754  *
 *   NetBSD autoconf attach returns void; failures are reported via
 *   aprint_error_dev() and resources are released before returning.
    756  ************************************************************************/
static void
ixgbe_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter  *adapter;
	struct ixgbe_hw *hw;
	int             error = -1;
	u32		ctrl_ext;
	u16		high, low, nvmreg;
	pcireg_t	id, subid;
	ixgbe_vendor_info_t *ent;
	struct pci_attach_args *pa = aux;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->hw.back = adapter;
	adapter->dev = dev;
	hw = &adapter->hw;
	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the bus supports it */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixgbe_lookup(pa);

	/* A NULL here means match and attach disagree - driver bug */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixgbe_strings[ent->index], ixgbe_driver_version);

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Determine hardware revision */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/*
	 * Make sure BUSMASTER is set
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		/*
		 * NOTE(review): err_out performs an IXGBE_READ_REG/WRITE_REG
		 * on IXGBE_CTRL_EXT, which needs BAR0 mapped; jumping there
		 * when the mapping itself failed looks unsafe - confirm.
		 */
		goto err_out;
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	/*
	 * Initialize the shared code
	 */
	if (ixgbe_init_shared_code(hw)) {
		aprint_error_dev(dev, "Unable to initialize the shared code\n");
		error = ENXIO;
		goto err_out;
	}

	/* Map mac.type (set by the shared code) to a printable name */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		str = "82598EB";
		break;
	case ixgbe_mac_82599EB:
		str = "82599EB";
		break;
	case ixgbe_mac_X540:
		str = "X540";
		break;
	case ixgbe_mac_X550:
		str = "X550";
		break;
	case ixgbe_mac_X550EM_x:
		str = "X550EM";
		break;
	case ixgbe_mac_X550EM_a:
		str = "X550EM A";
		break;
	default:
		str = "Unknown";
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	if (hw->mbx.ops.init_params)
		hw->mbx.ops.init_params(hw);

	hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* Pick up the 82599 settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	hw->mac.ops.set_lan_id(hw);
	ixgbe_init_device_features(adapter);

	/* Decide between MSI-X / MSI / legacy interrupts */
	if (ixgbe_configure_interrupts(adapter)) {
		error = ENXIO;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Enable WoL (if supported) */
	ixgbe_check_wol_support(adapter);

	/* Verify adapter fan is still functional (if applicable) */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		ixgbe_check_fan_failure(adapter, esdp, FALSE);
	}

	/* Ensure SW/FW semaphore is free */
	ixgbe_init_swfw_semaphore(hw);

	/* Enable EEE power saving */
	if (adapter->feat_en & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw, TRUE);

	/* Set an initial default flow control value */
	hw->fc.requested_mode = ixgbe_flow_control;

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixgbe_rx_process_limit);

	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixgbe_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	 * With many RX rings it is easy to exceed the
	 * system mbuf allocation. Tuning nmbclusters
	 * can alleviate this.
	 */
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			aprint_error_dev(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Reset the hardware; tolerate an over-temperature PHY here */
	hw->phy.reset_if_overtemp = TRUE;
	error = ixgbe_reset_hw(hw);
	hw->phy.reset_if_overtemp = FALSE;
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		 * No optics in this port, set up
		 * so the timer routine will probe
		 * for later insertion.
		 */
		adapter->sfp_probe = TRUE;
		error = IXGBE_SUCCESS;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		aprint_error_dev(dev, "Hardware initialization failed\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	aprint_normal("%s:", device_xname(dev));
	/* NVM Image Version */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		/* 'id' (pcireg_t) is reused here for the 4-bit NVM image ID */
		id = nvmreg & 0x0f;
		aprint_normal(" NVM Image Version %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = nvmreg & 0xff;
		aprint_normal(" NVM Image Version %u.%02x,", high, low);
		break;
	default:
		break;
	}

	/* PHY firmware revision */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
		if (nvmreg == 0xffff)
			break;
		high = (nvmreg >> 12) & 0x0f;
		low = (nvmreg >> 4) & 0xff;
		id = nvmreg & 0x000f;
		aprint_normal(" PHY FW Revision %u.", high);
		if (hw->mac.type == ixgbe_mac_X540)
			str = "%x";
		else
			str = "%02x";
		aprint_normal(str, low);
		aprint_normal(" ID 0x%x,", id);
		break;
	default:
		break;
	}

	/* NVM Map version & OEM NVM Image version */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_normal(" NVM Map version %u.%02x,", high, low);
		}
		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
		if (nvmreg != 0xffff) {
			high = (nvmreg >> 12) & 0x0f;
			low = nvmreg & 0x00ff;
			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
			    low);
		}
		break;
	default:
		break;
	}

	/* Print the ETrackID */
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		error = ixgbe_allocate_msix(adapter, pa);
		if (error) {
			/* Free allocated queue structures first */
			ixgbe_free_transmit_structures(adapter);
			ixgbe_free_receive_structures(adapter);
			free(adapter->queues, M_DEVBUF);

			/* Fallback to legacy interrupt */
			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
				adapter->feat_en |= IXGBE_FEATURE_MSI;
			adapter->num_queues = 1;

			/* Allocate our TX/RX Queues again */
			if (ixgbe_allocate_queues(adapter)) {
				error = ENOMEM;
				goto err_out;
			}
		}
	}
	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
		error = ixgbe_allocate_legacy(adapter, pa);
	if (error)
		goto err_late;

	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_link, adapter);
	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_mod, adapter);
	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_msf, adapter);
	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
	    ixgbe_handle_phy, adapter);
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		adapter->fdir_si =
		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
			ixgbe_reinit_fdir, adapter);
	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
		&& (adapter->fdir_si == NULL))) {
		aprint_error_dev(dev,
		    "could not establish software interrupts ()\n");
		goto err_out;
	}

	error = ixgbe_start_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		aprint_error_dev(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\nIf you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		aprint_error_dev(dev, "No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/*
	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
	 */
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		uint16_t id1, id2;
		int oui, model, rev;
		const char *descr;

		id1 = hw->phy.id >> 16;
		id2 = hw->phy.id & 0xffff;
		oui = MII_OUI(id1, id2);
		model = MII_MODEL(id2);
		rev = MII_REV(id2);
		if ((descr = mii_get_descr(oui, model)) != NULL)
			aprint_normal_dev(dev,
			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
			    descr, oui, model, rev);
		else
			aprint_normal_dev(dev,
			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
			    oui, model, rev);
	}

	/* Enable the optics for 82599 SFP+ fiber */
	ixgbe_enable_tx_laser(hw);

	/* Enable power to the phy. */
	ixgbe_set_phy_power(hw, TRUE);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(adapter);

	/*
	 * Do time init and sysctl init here, but
	 * only on the first port of a bypass adapter.
	 */
	ixgbe_bypass_init(adapter);

	/* Set an initial dmac value */
	adapter->dmac = 0;
	/* Set initial advertised speeds (if applicable) */
	adapter->advertise = ixgbe_get_advertise(adapter);

	/* NOTE(review): 'error' set here is never checked afterwards */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		ixgbe_define_iov_schemas(dev, &error);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* For Netmap */
	adapter->init_locked = ixgbe_init_locked;
	adapter->stop_locked = ixgbe_stop;

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report the enabled/capable feature bits */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
		pmf_class_network_register(dev, adapter->ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");

	INIT_DEBUGOUT("ixgbe_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	/* Queues were allocated; tear them down before common cleanup */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	/* Clear the DRV_LOAD indication and release everything else */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
	ixgbe_free_softint(adapter);
	ixgbe_free_pci_resources(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixgbe_attach */
   1228 
   1229 /************************************************************************
   1230  * ixgbe_check_wol_support
   1231  *
   1232  *   Checks whether the adapter's ports are capable of
   1233  *   Wake On LAN by reading the adapter's NVM.
   1234  *
   1235  *   Sets each port's hw->wol_enabled value depending
   1236  *   on the value read here.
   1237  ************************************************************************/
   1238 static void
   1239 ixgbe_check_wol_support(struct adapter *adapter)
   1240 {
   1241 	struct ixgbe_hw *hw = &adapter->hw;
   1242 	u16             dev_caps = 0;
   1243 
   1244 	/* Find out WoL support for port */
   1245 	adapter->wol_support = hw->wol_enabled = 0;
   1246 	ixgbe_get_device_caps(hw, &dev_caps);
   1247 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1248 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1249 	     hw->bus.func == 0))
   1250 		adapter->wol_support = hw->wol_enabled = 1;
   1251 
   1252 	/* Save initial wake up filter configuration */
   1253 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1254 
   1255 	return;
   1256 } /* ixgbe_check_wol_support */
   1257 
   1258 /************************************************************************
   1259  * ixgbe_setup_interface
   1260  *
   1261  *   Setup networking device structure and register an interface.
   1262  ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet lives inside the ethercom; wire up the driver hooks */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters (compiled only when building against FreeBSD 11+) */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit unless the legacy-TX feature is forced on */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     |  IFCAP_TXCSUM
			     |  IFCAP_TSOv4
			     |  IFCAP_TSOv6
			     |  IFCAP_LRO;
	/* Checksum/TSO offloads are advertised but start disabled */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    		    |  ETHERCAP_VLAN_HWCSUM
	    		    |  ETHERCAP_JUMBO_MTU
	    		    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
   1365 
   1366 /************************************************************************
   1367  * ixgbe_add_media_types
   1368  ************************************************************************/
   1369 static void
   1370 ixgbe_add_media_types(struct adapter *adapter)
   1371 {
   1372 	struct ixgbe_hw *hw = &adapter->hw;
   1373 	device_t        dev = adapter->dev;
   1374 	u64             layer;
   1375 
   1376 	layer = adapter->phy_layer;
   1377 
   1378 #define	ADD(mm, dd)							\
   1379 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1380 
   1381 	/* Media types with matching NetBSD media defines */
   1382 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1383 		ADD(IFM_10G_T | IFM_FDX, 0);
   1384 	}
   1385 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1386 		ADD(IFM_1000_T | IFM_FDX, 0);
   1387 	}
   1388 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1389 		ADD(IFM_100_TX | IFM_FDX, 0);
   1390 	}
   1391 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1392 		ADD(IFM_10_T | IFM_FDX, 0);
   1393 	}
   1394 
   1395 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1396 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1397 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1398 	}
   1399 
   1400 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1401 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1402 		if (hw->phy.multispeed_fiber) {
   1403 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1404 		}
   1405 	}
   1406 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1407 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1408 		if (hw->phy.multispeed_fiber) {
   1409 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1410 		}
   1411 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1412 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1413 	}
   1414 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1415 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1416 	}
   1417 
   1418 #ifdef IFM_ETH_XTYPE
   1419 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1420 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1421 	}
   1422 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1423 		ADD(AIFM_10G_KX4 | IFM_FDX, 0);
   1424 	}
   1425 #else
   1426 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1427 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1428 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1429 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1430 	}
   1431 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1432 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1433 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1434 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1435 	}
   1436 #endif
   1437 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1438 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1439 	}
   1440 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1441 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1442 	}
   1443 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1444 		ADD(IFM_2500_T | IFM_FDX, 0);
   1445 	}
   1446 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1447 		ADD(IFM_5000_T | IFM_FDX, 0);
   1448 	}
   1449 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1450 		device_printf(dev, "Media supported: 1000baseBX\n");
   1451 	/* XXX no ifmedia_set? */
   1452 
   1453 	ADD(IFM_AUTO, 0);
   1454 
   1455 #undef ADD
   1456 } /* ixgbe_add_media_types */
   1457 
   1458 /************************************************************************
   1459  * ixgbe_is_sfp
   1460  ************************************************************************/
   1461 static inline bool
   1462 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1463 {
   1464 	switch (hw->mac.type) {
   1465 	case ixgbe_mac_82598EB:
   1466 		if (hw->phy.type == ixgbe_phy_nl)
   1467 			return TRUE;
   1468 		return FALSE;
   1469 	case ixgbe_mac_82599EB:
   1470 		switch (hw->mac.ops.get_media_type(hw)) {
   1471 		case ixgbe_media_type_fiber:
   1472 		case ixgbe_media_type_fiber_qsfp:
   1473 			return TRUE;
   1474 		default:
   1475 			return FALSE;
   1476 		}
   1477 	case ixgbe_mac_X550EM_x:
   1478 	case ixgbe_mac_X550EM_a:
   1479 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1480 			return TRUE;
   1481 		return FALSE;
   1482 	default:
   1483 		return FALSE;
   1484 	}
   1485 } /* ixgbe_is_sfp */
   1486 
   1487 /************************************************************************
   1488  * ixgbe_config_link
   1489  ************************************************************************/
   1490 static void
   1491 ixgbe_config_link(struct adapter *adapter)
   1492 {
   1493 	struct ixgbe_hw *hw = &adapter->hw;
   1494 	u32             autoneg, err = 0;
   1495 	bool            sfp, negotiate = false;
   1496 
   1497 	sfp = ixgbe_is_sfp(hw);
   1498 
   1499 	if (sfp) {
   1500 		if (hw->phy.multispeed_fiber) {
   1501 			hw->mac.ops.setup_sfp(hw);
   1502 			ixgbe_enable_tx_laser(hw);
   1503 			kpreempt_disable();
   1504 			softint_schedule(adapter->msf_si);
   1505 			kpreempt_enable();
   1506 		} else {
   1507 			kpreempt_disable();
   1508 			softint_schedule(adapter->mod_si);
   1509 			kpreempt_enable();
   1510 		}
   1511 	} else {
   1512 		if (hw->mac.ops.check_link)
   1513 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1514 			    &adapter->link_up, FALSE);
   1515 		if (err)
   1516 			goto out;
   1517 		autoneg = hw->phy.autoneg_advertised;
   1518 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   1519                 	err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1520 			    &negotiate);
   1521 		if (err)
   1522 			goto out;
   1523 		if (hw->mac.ops.setup_link)
   1524                 	err = hw->mac.ops.setup_link(hw, autoneg,
   1525 			    adapter->link_up);
   1526 	}
   1527 out:
   1528 
   1529 	return;
   1530 } /* ixgbe_config_link */
   1531 
   1532 /************************************************************************
   1533  * ixgbe_update_stats_counters - Update board statistics counters.
   1534  ************************************************************************/
   1535 static void
   1536 ixgbe_update_stats_counters(struct adapter *adapter)
   1537 {
   1538 	struct ifnet          *ifp = adapter->ifp;
   1539 	struct ixgbe_hw       *hw = &adapter->hw;
   1540 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1541 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1542 	u64                   total_missed_rx = 0;
   1543 	uint64_t              crcerrs, rlec;
   1544 
   1545 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1546 	stats->crcerrs.ev_count += crcerrs;
   1547 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1548 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1549 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1550 	if (hw->mac.type == ixgbe_mac_X550)
   1551 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1552 
   1553 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1554 		int j = i % adapter->num_queues;
   1555 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1556 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1557 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1558 	}
   1559 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1560 		uint32_t mp;
   1561 		int j = i % adapter->num_queues;
   1562 
   1563 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1564 		/* global total per queue */
   1565 		stats->mpc[j].ev_count += mp;
   1566 		/* running comprehensive total for stats display */
   1567 		total_missed_rx += mp;
   1568 
   1569 		if (hw->mac.type == ixgbe_mac_82598EB)
   1570 			stats->rnbc[j].ev_count
   1571 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1572 
   1573 	}
   1574 	stats->mpctotal.ev_count += total_missed_rx;
   1575 
   1576 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1577 	if ((adapter->link_active == TRUE)
   1578 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1579 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1580 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1581 	}
   1582 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1583 	stats->rlec.ev_count += rlec;
   1584 
   1585 	/* Hardware workaround, gprc counts missed packets */
   1586 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1587 
   1588 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1589 	stats->lxontxc.ev_count += lxon;
   1590 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1591 	stats->lxofftxc.ev_count += lxoff;
   1592 	total = lxon + lxoff;
   1593 
   1594 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1595 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1596 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1597 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1598 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1599 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1600 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1601 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1602 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1603 	} else {
   1604 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1605 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1606 		/* 82598 only has a counter in the high register */
   1607 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1608 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1609 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1610 	}
   1611 
   1612 	/*
   1613 	 * Workaround: mprc hardware is incorrectly counting
   1614 	 * broadcasts, so for now we subtract those.
   1615 	 */
   1616 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1617 	stats->bprc.ev_count += bprc;
   1618 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1619 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1620 
   1621 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1622 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1623 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1624 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1625 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1626 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1627 
   1628 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1629 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1630 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1631 
   1632 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1633 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1634 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1635 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1636 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1637 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1638 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1639 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1640 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1641 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1642 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1643 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1644 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1645 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1646 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1647 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1648 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1649 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1650 	/* Only read FCOE on 82599 */
   1651 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1652 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1653 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1654 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1655 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1656 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1657 	}
   1658 
   1659 	/* Fill out the OS statistics structure */
   1660 	/*
   1661 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1662 	 * adapter->stats counters. It's required to make ifconfig -z
   1663 	 * (SOICZIFDATA) work.
   1664 	 */
   1665 	ifp->if_collisions = 0;
   1666 
   1667 	/* Rx Errors */
   1668 	ifp->if_iqdrops += total_missed_rx;
   1669 	ifp->if_ierrors += crcerrs + rlec;
   1670 } /* ixgbe_update_stats_counters */
   1671 
   1672 /************************************************************************
   1673  * ixgbe_add_hw_stats
   1674  *
   1675  *   Add sysctl variables, one per statistic, to the system.
   1676  ************************************************************************/
   1677 static void
   1678 ixgbe_add_hw_stats(struct adapter *adapter)
   1679 {
   1680 	device_t dev = adapter->dev;
   1681 	const struct sysctlnode *rnode, *cnode;
   1682 	struct sysctllog **log = &adapter->sysctllog;
   1683 	struct tx_ring *txr = adapter->tx_rings;
   1684 	struct rx_ring *rxr = adapter->rx_rings;
   1685 	struct ixgbe_hw *hw = &adapter->hw;
   1686 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1687 	const char *xname = device_xname(dev);
   1688 
   1689 	/* Driver Statistics */
   1690 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1691 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1692 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1693 	    NULL, xname, "m_defrag() failed");
   1694 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1695 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1696 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1697 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1698 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1699 	    NULL, xname, "Driver tx dma hard fail other");
   1700 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1701 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1702 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1703 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1704 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1705 	    NULL, xname, "Watchdog timeouts");
   1706 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1707 	    NULL, xname, "TSO errors");
   1708 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1709 	    NULL, xname, "Link MSI-X IRQ Handled");
   1710 	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
   1711 	    NULL, xname, "Link softint");
   1712 	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
   1713 	    NULL, xname, "module softint");
   1714 	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
   1715 	    NULL, xname, "multimode softint");
   1716 	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
   1717 	    NULL, xname, "external PHY softint");
   1718 
   1719 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1720 #ifdef LRO
   1721 		struct lro_ctrl *lro = &rxr->lro;
   1722 #endif /* LRO */
   1723 
   1724 		snprintf(adapter->queues[i].evnamebuf,
   1725 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1726 		    xname, i);
   1727 		snprintf(adapter->queues[i].namebuf,
   1728 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1729 
   1730 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1731 			aprint_error_dev(dev, "could not create sysctl root\n");
   1732 			break;
   1733 		}
   1734 
   1735 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1736 		    0, CTLTYPE_NODE,
   1737 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1738 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1739 			break;
   1740 
   1741 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1742 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1743 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1744 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1745 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1746 			break;
   1747 
   1748 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1749 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1750 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1751 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1752 		    0, CTL_CREATE, CTL_EOL) != 0)
   1753 			break;
   1754 
   1755 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1756 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1757 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1758 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1759 		    0, CTL_CREATE, CTL_EOL) != 0)
   1760 			break;
   1761 
   1762 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1763 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1764 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   1765 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1766 		    "Handled queue in softint");
   1767 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   1768 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   1769 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1770 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1771 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1772 		    NULL, adapter->queues[i].evnamebuf,
   1773 		    "Queue No Descriptor Available");
   1774 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1775 		    NULL, adapter->queues[i].evnamebuf,
   1776 		    "Queue Packets Transmitted");
   1777 #ifndef IXGBE_LEGACY_TX
   1778 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1779 		    NULL, adapter->queues[i].evnamebuf,
   1780 		    "Packets dropped in pcq");
   1781 #endif
   1782 
   1783 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1784 		    CTLFLAG_READONLY,
   1785 		    CTLTYPE_INT,
   1786 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1787 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1788 		    CTL_CREATE, CTL_EOL) != 0)
   1789 			break;
   1790 
   1791 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1792 		    CTLFLAG_READONLY,
   1793 		    CTLTYPE_INT,
   1794 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1795 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1796 		    CTL_CREATE, CTL_EOL) != 0)
   1797 			break;
   1798 
   1799 		if (i < __arraycount(stats->mpc)) {
   1800 			evcnt_attach_dynamic(&stats->mpc[i],
   1801 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1802 			    "RX Missed Packet Count");
   1803 			if (hw->mac.type == ixgbe_mac_82598EB)
   1804 				evcnt_attach_dynamic(&stats->rnbc[i],
   1805 				    EVCNT_TYPE_MISC, NULL,
   1806 				    adapter->queues[i].evnamebuf,
   1807 				    "Receive No Buffers");
   1808 		}
   1809 		if (i < __arraycount(stats->pxontxc)) {
   1810 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1811 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1812 			    "pxontxc");
   1813 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1814 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1815 			    "pxonrxc");
   1816 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1817 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1818 			    "pxofftxc");
   1819 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1820 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1821 			    "pxoffrxc");
   1822 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1823 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1824 			    "pxon2offc");
   1825 		}
   1826 		if (i < __arraycount(stats->qprc)) {
   1827 			evcnt_attach_dynamic(&stats->qprc[i],
   1828 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1829 			    "qprc");
   1830 			evcnt_attach_dynamic(&stats->qptc[i],
   1831 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1832 			    "qptc");
   1833 			evcnt_attach_dynamic(&stats->qbrc[i],
   1834 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1835 			    "qbrc");
   1836 			evcnt_attach_dynamic(&stats->qbtc[i],
   1837 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1838 			    "qbtc");
   1839 			evcnt_attach_dynamic(&stats->qprdc[i],
   1840 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1841 			    "qprdc");
   1842 		}
   1843 
   1844 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1845 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1846 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1847 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1848 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1849 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1850 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1851 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1852 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1853 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1854 #ifdef LRO
   1855 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1856 				CTLFLAG_RD, &lro->lro_queued, 0,
   1857 				"LRO Queued");
   1858 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1859 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1860 				"LRO Flushed");
   1861 #endif /* LRO */
   1862 	}
   1863 
   1864 	/* MAC stats get their own sub node */
   1865 
   1866 	snprintf(stats->namebuf,
   1867 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1868 
   1869 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1870 	    stats->namebuf, "rx csum offload - IP");
   1871 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1872 	    stats->namebuf, "rx csum offload - L4");
   1873 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1874 	    stats->namebuf, "rx csum offload - IP bad");
   1875 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1876 	    stats->namebuf, "rx csum offload - L4 bad");
   1877 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1878 	    stats->namebuf, "Interrupt conditions zero");
   1879 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1880 	    stats->namebuf, "Legacy interrupts");
   1881 
   1882 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1883 	    stats->namebuf, "CRC Errors");
   1884 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1885 	    stats->namebuf, "Illegal Byte Errors");
   1886 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1887 	    stats->namebuf, "Byte Errors");
   1888 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1889 	    stats->namebuf, "MAC Short Packets Discarded");
   1890 	if (hw->mac.type >= ixgbe_mac_X550)
   1891 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1892 		    stats->namebuf, "Bad SFD");
   1893 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1894 	    stats->namebuf, "Total Packets Missed");
   1895 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "MAC Local Faults");
   1897 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "MAC Remote Faults");
   1899 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1900 	    stats->namebuf, "Receive Length Errors");
   1901 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Link XON Transmitted");
   1903 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "Link XON Received");
   1905 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "Link XOFF Transmitted");
   1907 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "Link XOFF Received");
   1909 
   1910 	/* Packet Reception Stats */
   1911 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1912 	    stats->namebuf, "Total Octets Received");
   1913 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "Good Octets Received");
   1915 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1916 	    stats->namebuf, "Total Packets Received");
   1917 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1918 	    stats->namebuf, "Good Packets Received");
   1919 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1920 	    stats->namebuf, "Multicast Packets Received");
   1921 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1922 	    stats->namebuf, "Broadcast Packets Received");
   1923 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1924 	    stats->namebuf, "64 byte frames received ");
   1925 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1926 	    stats->namebuf, "65-127 byte frames received");
   1927 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1928 	    stats->namebuf, "128-255 byte frames received");
   1929 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1930 	    stats->namebuf, "256-511 byte frames received");
   1931 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1932 	    stats->namebuf, "512-1023 byte frames received");
   1933 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   1934 	    stats->namebuf, "1023-1522 byte frames received");
   1935 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1936 	    stats->namebuf, "Receive Undersized");
   1937 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1938 	    stats->namebuf, "Fragmented Packets Received ");
   1939 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1940 	    stats->namebuf, "Oversized Packets Received");
   1941 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "Received Jabber");
   1943 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "Management Packets Received");
   1945 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "Management Packets Dropped");
   1947 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "Checksum Errors");
   1949 
   1950 	/* Packet Transmission Stats */
   1951 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1952 	    stats->namebuf, "Good Octets Transmitted");
   1953 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1954 	    stats->namebuf, "Total Packets Transmitted");
   1955 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1956 	    stats->namebuf, "Good Packets Transmitted");
   1957 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1958 	    stats->namebuf, "Broadcast Packets Transmitted");
   1959 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1960 	    stats->namebuf, "Multicast Packets Transmitted");
   1961 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1962 	    stats->namebuf, "Management Packets Transmitted");
   1963 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1964 	    stats->namebuf, "64 byte frames transmitted ");
   1965 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1966 	    stats->namebuf, "65-127 byte frames transmitted");
   1967 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1968 	    stats->namebuf, "128-255 byte frames transmitted");
   1969 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1970 	    stats->namebuf, "256-511 byte frames transmitted");
   1971 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1972 	    stats->namebuf, "512-1023 byte frames transmitted");
   1973 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1974 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1975 } /* ixgbe_add_hw_stats */
   1976 
   1977 static void
   1978 ixgbe_clear_evcnt(struct adapter *adapter)
   1979 {
   1980 	struct tx_ring *txr = adapter->tx_rings;
   1981 	struct rx_ring *rxr = adapter->rx_rings;
   1982 	struct ixgbe_hw *hw = &adapter->hw;
   1983 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1984 
   1985 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1986 	adapter->mbuf_defrag_failed.ev_count = 0;
   1987 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1988 	adapter->einval_tx_dma_setup.ev_count = 0;
   1989 	adapter->other_tx_dma_setup.ev_count = 0;
   1990 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1991 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1992 	adapter->tso_err.ev_count = 0;
   1993 	adapter->watchdog_events.ev_count = 0;
   1994 	adapter->link_irq.ev_count = 0;
   1995 	adapter->link_sicount.ev_count = 0;
   1996 	adapter->mod_sicount.ev_count = 0;
   1997 	adapter->msf_sicount.ev_count = 0;
   1998 	adapter->phy_sicount.ev_count = 0;
   1999 
   2000 	txr = adapter->tx_rings;
   2001 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2002 		adapter->queues[i].irqs.ev_count = 0;
   2003 		adapter->queues[i].handleq.ev_count = 0;
   2004 		adapter->queues[i].req.ev_count = 0;
   2005 		txr->no_desc_avail.ev_count = 0;
   2006 		txr->total_packets.ev_count = 0;
   2007 		txr->tso_tx.ev_count = 0;
   2008 #ifndef IXGBE_LEGACY_TX
   2009 		txr->pcq_drops.ev_count = 0;
   2010 #endif
   2011 		txr->q_efbig_tx_dma_setup = 0;
   2012 		txr->q_mbuf_defrag_failed = 0;
   2013 		txr->q_efbig2_tx_dma_setup = 0;
   2014 		txr->q_einval_tx_dma_setup = 0;
   2015 		txr->q_other_tx_dma_setup = 0;
   2016 		txr->q_eagain_tx_dma_setup = 0;
   2017 		txr->q_enomem_tx_dma_setup = 0;
   2018 		txr->q_tso_err = 0;
   2019 
   2020 		if (i < __arraycount(stats->mpc)) {
   2021 			stats->mpc[i].ev_count = 0;
   2022 			if (hw->mac.type == ixgbe_mac_82598EB)
   2023 				stats->rnbc[i].ev_count = 0;
   2024 		}
   2025 		if (i < __arraycount(stats->pxontxc)) {
   2026 			stats->pxontxc[i].ev_count = 0;
   2027 			stats->pxonrxc[i].ev_count = 0;
   2028 			stats->pxofftxc[i].ev_count = 0;
   2029 			stats->pxoffrxc[i].ev_count = 0;
   2030 			stats->pxon2offc[i].ev_count = 0;
   2031 		}
   2032 		if (i < __arraycount(stats->qprc)) {
   2033 			stats->qprc[i].ev_count = 0;
   2034 			stats->qptc[i].ev_count = 0;
   2035 			stats->qbrc[i].ev_count = 0;
   2036 			stats->qbtc[i].ev_count = 0;
   2037 			stats->qprdc[i].ev_count = 0;
   2038 		}
   2039 
   2040 		rxr->rx_packets.ev_count = 0;
   2041 		rxr->rx_bytes.ev_count = 0;
   2042 		rxr->rx_copies.ev_count = 0;
   2043 		rxr->no_jmbuf.ev_count = 0;
   2044 		rxr->rx_discarded.ev_count = 0;
   2045 	}
   2046 	stats->ipcs.ev_count = 0;
   2047 	stats->l4cs.ev_count = 0;
   2048 	stats->ipcs_bad.ev_count = 0;
   2049 	stats->l4cs_bad.ev_count = 0;
   2050 	stats->intzero.ev_count = 0;
   2051 	stats->legint.ev_count = 0;
   2052 	stats->crcerrs.ev_count = 0;
   2053 	stats->illerrc.ev_count = 0;
   2054 	stats->errbc.ev_count = 0;
   2055 	stats->mspdc.ev_count = 0;
   2056 	stats->mbsdc.ev_count = 0;
   2057 	stats->mpctotal.ev_count = 0;
   2058 	stats->mlfc.ev_count = 0;
   2059 	stats->mrfc.ev_count = 0;
   2060 	stats->rlec.ev_count = 0;
   2061 	stats->lxontxc.ev_count = 0;
   2062 	stats->lxonrxc.ev_count = 0;
   2063 	stats->lxofftxc.ev_count = 0;
   2064 	stats->lxoffrxc.ev_count = 0;
   2065 
   2066 	/* Packet Reception Stats */
   2067 	stats->tor.ev_count = 0;
   2068 	stats->gorc.ev_count = 0;
   2069 	stats->tpr.ev_count = 0;
   2070 	stats->gprc.ev_count = 0;
   2071 	stats->mprc.ev_count = 0;
   2072 	stats->bprc.ev_count = 0;
   2073 	stats->prc64.ev_count = 0;
   2074 	stats->prc127.ev_count = 0;
   2075 	stats->prc255.ev_count = 0;
   2076 	stats->prc511.ev_count = 0;
   2077 	stats->prc1023.ev_count = 0;
   2078 	stats->prc1522.ev_count = 0;
   2079 	stats->ruc.ev_count = 0;
   2080 	stats->rfc.ev_count = 0;
   2081 	stats->roc.ev_count = 0;
   2082 	stats->rjc.ev_count = 0;
   2083 	stats->mngprc.ev_count = 0;
   2084 	stats->mngpdc.ev_count = 0;
   2085 	stats->xec.ev_count = 0;
   2086 
   2087 	/* Packet Transmission Stats */
   2088 	stats->gotc.ev_count = 0;
   2089 	stats->tpt.ev_count = 0;
   2090 	stats->gptc.ev_count = 0;
   2091 	stats->bptc.ev_count = 0;
   2092 	stats->mptc.ev_count = 0;
   2093 	stats->mngptc.ev_count = 0;
   2094 	stats->ptc64.ev_count = 0;
   2095 	stats->ptc127.ev_count = 0;
   2096 	stats->ptc255.ev_count = 0;
   2097 	stats->ptc511.ev_count = 0;
   2098 	stats->ptc1023.ev_count = 0;
   2099 	stats->ptc1522.ev_count = 0;
   2100 }
   2101 
   2102 /************************************************************************
   2103  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2104  *
   2105  *   Retrieves the TDH value from the hardware
   2106  ************************************************************************/
   2107 static int
   2108 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2109 {
   2110 	struct sysctlnode node = *rnode;
   2111 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2112 	uint32_t val;
   2113 
   2114 	if (!txr)
   2115 		return (0);
   2116 
   2117 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2118 	node.sysctl_data = &val;
   2119 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2120 } /* ixgbe_sysctl_tdh_handler */
   2121 
   2122 /************************************************************************
   2123  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2124  *
   2125  *   Retrieves the TDT value from the hardware
   2126  ************************************************************************/
   2127 static int
   2128 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2129 {
   2130 	struct sysctlnode node = *rnode;
   2131 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2132 	uint32_t val;
   2133 
   2134 	if (!txr)
   2135 		return (0);
   2136 
   2137 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2138 	node.sysctl_data = &val;
   2139 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2140 } /* ixgbe_sysctl_tdt_handler */
   2141 
   2142 /************************************************************************
   2143  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2144  *
   2145  *   Retrieves the RDH value from the hardware
   2146  ************************************************************************/
   2147 static int
   2148 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2149 {
   2150 	struct sysctlnode node = *rnode;
   2151 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2152 	uint32_t val;
   2153 
   2154 	if (!rxr)
   2155 		return (0);
   2156 
   2157 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2158 	node.sysctl_data = &val;
   2159 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2160 } /* ixgbe_sysctl_rdh_handler */
   2161 
   2162 /************************************************************************
   2163  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2164  *
   2165  *   Retrieves the RDT value from the hardware
   2166  ************************************************************************/
   2167 static int
   2168 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2169 {
   2170 	struct sysctlnode node = *rnode;
   2171 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2172 	uint32_t val;
   2173 
   2174 	if (!rxr)
   2175 		return (0);
   2176 
   2177 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2178 	node.sysctl_data = &val;
   2179 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2180 } /* ixgbe_sysctl_rdt_handler */
   2181 
   2182 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2183 /************************************************************************
   2184  * ixgbe_register_vlan
   2185  *
   2186  *   Run via vlan config EVENT, it enables us to use the
   2187  *   HW Filter table since we can get the vlan id. This
   2188  *   just creates the entry in the soft version of the
   2189  *   VFTA, init will repopulate the real table.
   2190  ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Each shadow_vfta word covers 32 VLAN ids: word = vtag/32, bit = vtag%32. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Push the updated soft VFTA (and other VLAN state) to the hardware. */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
   2210 
   2211 /************************************************************************
   2212  * ixgbe_unregister_vlan
   2213  *
   2214  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2215  ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Each shadow_vfta word covers 32 VLAN ids: word = vtag/32, bit = vtag%32. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
   2236 #endif
   2237 
   2238 static void
   2239 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2240 {
   2241 	struct ethercom *ec = &adapter->osdep.ec;
   2242 	struct ixgbe_hw *hw = &adapter->hw;
   2243 	struct rx_ring	*rxr;
   2244 	int             i;
   2245 	u32		ctrl;
   2246 
   2247 
   2248 	/*
   2249 	 * We get here thru init_locked, meaning
   2250 	 * a soft reset, this has already cleared
   2251 	 * the VFTA and other state, so if there
   2252 	 * have been no vlan's registered do nothing.
   2253 	 */
   2254 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2255 		return;
   2256 
   2257 	/* Setup the queues for vlans */
   2258 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2259 		for (i = 0; i < adapter->num_queues; i++) {
   2260 			rxr = &adapter->rx_rings[i];
   2261 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2262 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2263 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2264 				ctrl |= IXGBE_RXDCTL_VME;
   2265 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2266 			}
   2267 			rxr->vtag_strip = TRUE;
   2268 		}
   2269 	}
   2270 
   2271 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2272 		return;
   2273 	/*
   2274 	 * A soft reset zero's out the VFTA, so
   2275 	 * we need to repopulate it now.
   2276 	 */
   2277 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2278 		if (adapter->shadow_vfta[i] != 0)
   2279 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2280 			    adapter->shadow_vfta[i]);
   2281 
   2282 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2283 	/* Enable the Filter Table if enabled */
   2284 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2285 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2286 		ctrl |= IXGBE_VLNCTRL_VFE;
   2287 	}
   2288 	if (hw->mac.type == ixgbe_mac_82598EB)
   2289 		ctrl |= IXGBE_VLNCTRL_VME;
   2290 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2291 } /* ixgbe_setup_vlan_hw_support */
   2292 
   2293 /************************************************************************
   2294  * ixgbe_get_slot_info
   2295  *
   2296  *   Get the width and transaction speed of
   2297  *   the slot this adapter is plugged into.
   2298  ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct ixgbe_hw		*hw = &adapter->hw;
	u32                   offset;
//	struct ixgbe_mac_info	*mac = &hw->mac;
	u16			link;
	/* FALSE once we fall back and the reported speed/width may be stale */
	int                   bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	/*
	 * NOTE(review): pci_get_capability(9) returns non-zero when the
	 * capability IS found, yet this branch treats non-zero as the
	 * failure/fallback case -- verify the condition polarity.
	 */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	/* Warn when the slot clearly cannot carry the adapter's line rate */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
			(hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
			(hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
   2401 
   2402 /************************************************************************
   2403  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2404  ************************************************************************/
   2405 static inline void
   2406 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2407 {
   2408 	struct ixgbe_hw *hw = &adapter->hw;
   2409 	struct ix_queue *que = &adapter->queues[vector];
   2410 	u64             queue = (u64)(1ULL << vector);
   2411 	u32             mask;
   2412 
   2413 	mutex_enter(&que->im_mtx);
   2414 	if (que->im_nest > 0 && --que->im_nest > 0)
   2415 		goto out;
   2416 
   2417 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2418 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2419 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2420 	} else {
   2421 		mask = (queue & 0xFFFFFFFF);
   2422 		if (mask)
   2423 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2424 		mask = (queue >> 32);
   2425 		if (mask)
   2426 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2427 	}
   2428 out:
   2429 	mutex_exit(&que->im_mtx);
   2430 } /* ixgbe_enable_queue */
   2431 
   2432 /************************************************************************
   2433  * ixgbe_disable_queue
   2434  ************************************************************************/
   2435 static inline void
   2436 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2437 {
   2438 	struct ixgbe_hw *hw = &adapter->hw;
   2439 	struct ix_queue *que = &adapter->queues[vector];
   2440 	u64             queue = (u64)(1ULL << vector);
   2441 	u32             mask;
   2442 
   2443 	mutex_enter(&que->im_mtx);
   2444 	if (que->im_nest++ > 0)
   2445 		goto  out;
   2446 
   2447 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2448 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2449 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2450 	} else {
   2451 		mask = (queue & 0xFFFFFFFF);
   2452 		if (mask)
   2453 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2454 		mask = (queue >> 32);
   2455 		if (mask)
   2456 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2457 	}
   2458 out:
   2459 	mutex_exit(&que->im_mtx);
   2460 } /* ixgbe_disable_queue */
   2461 
   2462 /************************************************************************
   2463  * ixgbe_sched_handle_que - schedule deferred packet processing
   2464  ************************************************************************/
   2465 static inline void
   2466 ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
   2467 {
   2468 
   2469 	if (adapter->txrx_use_workqueue) {
   2470 		/*
   2471 		 * adapter->que_wq is bound to each CPU instead of
   2472 		 * each NIC queue to reduce workqueue kthread. As we
   2473 		 * should consider about interrupt affinity in this
   2474 		 * function, the workqueue kthread must be WQ_PERCPU.
   2475 		 * If create WQ_PERCPU workqueue kthread for each NIC
   2476 		 * queue, that number of created workqueue kthread is
   2477 		 * (number of used NIC queue) * (number of CPUs) =
   2478 		 * (number of CPUs) ^ 2 most often.
   2479 		 *
   2480 		 * The same NIC queue's interrupts are avoided by
   2481 		 * masking the queue's interrupt. And different
   2482 		 * NIC queue's interrupts use different struct work
   2483 		 * (que->wq_cookie). So, "enqueued flag" to avoid
   2484 		 * twice workqueue_enqueue() is not required .
   2485 		 */
   2486 		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
   2487 	} else {
   2488 		softint_schedule(que->que_si);
   2489 	}
   2490 }
   2491 
   2492 /************************************************************************
   2493  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2494  ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Mask this queue's interrupt until processing is deferred/done. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(que, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
        if ((txr->bytes == 0) && (rxr->bytes == 0))
                goto no_calc;

	/* Candidate ITR: the larger average bytes/packet of TX and RX. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

        /* save for next interrupt */
        que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Work left over: defer and keep masked; else unmask the queue. */
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
   2587 
   2588 /************************************************************************
   2589  * ixgbe_media_status - Media Ioctl callback
   2590  *
   2591  *   Called whenever the user queries the status of
   2592  *   the interface using ifconfig.
   2593  ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early. */
	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Map (physical layer, link speed) to an ifmedia subtype: copper */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach / active copper */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
   2743 
   2744 /************************************************************************
   2745  * ixgbe_media_change - Media Ioctl callback
   2746  *
   2747  *   Called when the user changes speed/duplex using
   2748  *   media/mediopt option with ifconfig.
   2749  ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter   *adapter = ifp->if_softc;
	struct ifmedia   *ifm = &adapter->media;
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be changed from here. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested ifmedia subtype into a link-speed mask. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the PHY can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	/* NOTE(review): setup_link()'s return value is not checked here. */
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record forced speeds in adapter->advertise, one bit per speed:
	 * bit0=100M, bit1=1G, bit2=10G, bit3=10M, bit4=2.5G, bit5=5G.
	 * IFM_AUTO leaves advertise at 0.
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
   2846 
   2847 /************************************************************************
   2848  * ixgbe_set_promisc
   2849  ************************************************************************/
   2850 static void
   2851 ixgbe_set_promisc(struct adapter *adapter)
   2852 {
   2853 	struct ifnet *ifp = adapter->ifp;
   2854 	int          mcnt = 0;
   2855 	u32          rctl;
   2856 	struct ether_multi *enm;
   2857 	struct ether_multistep step;
   2858 	struct ethercom *ec = &adapter->osdep.ec;
   2859 
   2860 	KASSERT(mutex_owned(&adapter->core_mtx));
   2861 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2862 	rctl &= (~IXGBE_FCTRL_UPE);
   2863 	if (ifp->if_flags & IFF_ALLMULTI)
   2864 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2865 	else {
   2866 		ETHER_LOCK(ec);
   2867 		ETHER_FIRST_MULTI(step, ec, enm);
   2868 		while (enm != NULL) {
   2869 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2870 				break;
   2871 			mcnt++;
   2872 			ETHER_NEXT_MULTI(step, enm);
   2873 		}
   2874 		ETHER_UNLOCK(ec);
   2875 	}
   2876 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2877 		rctl &= (~IXGBE_FCTRL_MPE);
   2878 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2879 
   2880 	if (ifp->if_flags & IFF_PROMISC) {
   2881 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2882 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2883 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2884 		rctl |= IXGBE_FCTRL_MPE;
   2885 		rctl &= ~IXGBE_FCTRL_UPE;
   2886 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2887 	}
   2888 } /* ixgbe_set_promisc */
   2889 
   2890 /************************************************************************
   2891  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2892  ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		eicr, eicr_mask;
	s32             retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Flow Director reinit, deferred to a softint */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			/*
			 * NOTE(review): when the CAS fails (reinit already
			 * in flight) we return without reaching the
			 * IXGBE_EIMS_OTHER re-enable at the bottom --
			 * confirm the softint path restores it.
			 */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed fiber (82599 only) */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
 	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
   3014 
   3015 static void
   3016 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
   3017 {
   3018 	struct adapter *adapter = que->adapter;
   3019 
   3020         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3021                 itr |= itr << 16;
   3022         else
   3023                 itr |= IXGBE_EITR_CNT_WDIS;
   3024 
   3025 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   3026 	    itr);
   3027 }
   3028 
   3029 
   3030 /************************************************************************
   3031  * ixgbe_sysctl_interrupt_rate_handler
   3032  ************************************************************************/
   3033 static int
   3034 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   3035 {
   3036 	struct sysctlnode node = *rnode;
   3037 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   3038 	struct adapter  *adapter = que->adapter;
   3039 	uint32_t reg, usec, rate;
   3040 	int error;
   3041 
   3042 	if (que == NULL)
   3043 		return 0;
   3044 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   3045 	usec = ((reg & 0x0FF8) >> 3);
   3046 	if (usec > 0)
   3047 		rate = 500000 / usec;
   3048 	else
   3049 		rate = 0;
   3050 	node.sysctl_data = &rate;
   3051 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3052 	if (error || newp == NULL)
   3053 		return error;
   3054 	reg &= ~0xfff; /* default, no limitation */
   3055 	if (rate > 0 && rate < 500000) {
   3056 		if (rate < 1000)
   3057 			rate = 1000;
   3058 		reg |= ((4000000/rate) & 0xff8);
   3059 		/*
   3060 		 * When RSC is used, ITR interval must be larger than
   3061 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   3062 		 * The minimum value is always greater than 2us on 100M
   3063 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   3064 		 */
   3065 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3066 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3067 			if ((adapter->num_queues > 1)
   3068 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3069 				return EINVAL;
   3070 		}
   3071 		ixgbe_max_interrupt_rate = rate;
   3072 	} else
   3073 		ixgbe_max_interrupt_rate = 0;
   3074 	ixgbe_eitr_write(que, reg);
   3075 
   3076 	return (0);
   3077 } /* ixgbe_sysctl_interrupt_rate_handler */
   3078 
   3079 const struct sysctlnode *
   3080 ixgbe_sysctl_instance(struct adapter *adapter)
   3081 {
   3082 	const char *dvname;
   3083 	struct sysctllog **log;
   3084 	int rc;
   3085 	const struct sysctlnode *rnode;
   3086 
   3087 	if (adapter->sysctltop != NULL)
   3088 		return adapter->sysctltop;
   3089 
   3090 	log = &adapter->sysctllog;
   3091 	dvname = device_xname(adapter->dev);
   3092 
   3093 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3094 	    0, CTLTYPE_NODE, dvname,
   3095 	    SYSCTL_DESCR("ixgbe information and settings"),
   3096 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3097 		goto err;
   3098 
   3099 	return rnode;
   3100 err:
   3101 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3102 	return NULL;
   3103 }
   3104 
   3105 /************************************************************************
   3106  * ixgbe_add_device_sysctls
   3107  ************************************************************************/
   3108 static void
   3109 ixgbe_add_device_sysctls(struct adapter *adapter)
   3110 {
   3111 	device_t               dev = adapter->dev;
   3112 	struct ixgbe_hw        *hw = &adapter->hw;
   3113 	struct sysctllog **log;
   3114 	const struct sysctlnode *rnode, *cnode;
   3115 
   3116 	log = &adapter->sysctllog;
   3117 
   3118 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3119 		aprint_error_dev(dev, "could not create sysctl root\n");
   3120 		return;
   3121 	}
   3122 
   3123 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3124 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3125 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3126 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3127 		aprint_error_dev(dev, "could not create sysctl\n");
   3128 
   3129 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3130 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3131 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3132 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3133 		aprint_error_dev(dev, "could not create sysctl\n");
   3134 
   3135 	/* Sysctls for all devices */
   3136 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3137 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3138 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3139 	    CTL_EOL) != 0)
   3140 		aprint_error_dev(dev, "could not create sysctl\n");
   3141 
   3142 	adapter->enable_aim = ixgbe_enable_aim;
   3143 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3144 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3145 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3146 		aprint_error_dev(dev, "could not create sysctl\n");
   3147 
   3148 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3149 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3150 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3151 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3152 	    CTL_EOL) != 0)
   3153 		aprint_error_dev(dev, "could not create sysctl\n");
   3154 
   3155 	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
   3156 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3157 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   3158 	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   3159 		aprint_error_dev(dev, "could not create sysctl\n");
   3160 
   3161 #ifdef IXGBE_DEBUG
   3162 	/* testing sysctls (for all devices) */
   3163 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3164 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3165 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3166 	    CTL_EOL) != 0)
   3167 		aprint_error_dev(dev, "could not create sysctl\n");
   3168 
   3169 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3170 	    CTLTYPE_STRING, "print_rss_config",
   3171 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3172 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3173 	    CTL_EOL) != 0)
   3174 		aprint_error_dev(dev, "could not create sysctl\n");
   3175 #endif
   3176 	/* for X550 series devices */
   3177 	if (hw->mac.type >= ixgbe_mac_X550)
   3178 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3179 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3180 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3181 		    CTL_EOL) != 0)
   3182 			aprint_error_dev(dev, "could not create sysctl\n");
   3183 
   3184 	/* for WoL-capable devices */
   3185 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3186 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3187 		    CTLTYPE_BOOL, "wol_enable",
   3188 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3189 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3190 		    CTL_EOL) != 0)
   3191 			aprint_error_dev(dev, "could not create sysctl\n");
   3192 
   3193 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3194 		    CTLTYPE_INT, "wufc",
   3195 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3196 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3197 		    CTL_EOL) != 0)
   3198 			aprint_error_dev(dev, "could not create sysctl\n");
   3199 	}
   3200 
   3201 	/* for X552/X557-AT devices */
   3202 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3203 		const struct sysctlnode *phy_node;
   3204 
   3205 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3206 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3207 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3208 			aprint_error_dev(dev, "could not create sysctl\n");
   3209 			return;
   3210 		}
   3211 
   3212 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3213 		    CTLTYPE_INT, "temp",
   3214 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3215 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3216 		    CTL_EOL) != 0)
   3217 			aprint_error_dev(dev, "could not create sysctl\n");
   3218 
   3219 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3220 		    CTLTYPE_INT, "overtemp_occurred",
   3221 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3222 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3223 		    CTL_CREATE, CTL_EOL) != 0)
   3224 			aprint_error_dev(dev, "could not create sysctl\n");
   3225 	}
   3226 
   3227 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3228 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3229 		    CTLTYPE_INT, "eee_state",
   3230 		    SYSCTL_DESCR("EEE Power Save State"),
   3231 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3232 		    CTL_EOL) != 0)
   3233 			aprint_error_dev(dev, "could not create sysctl\n");
   3234 	}
   3235 } /* ixgbe_add_device_sysctls */
   3236 
   3237 /************************************************************************
   3238  * ixgbe_allocate_pci_resources
   3239  ************************************************************************/
   3240 static int
   3241 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3242     const struct pci_attach_args *pa)
   3243 {
   3244 	pcireg_t	memtype;
   3245 	device_t dev = adapter->dev;
   3246 	bus_addr_t addr;
   3247 	int flags;
   3248 
   3249 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3250 	switch (memtype) {
   3251 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3252 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3253 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3254 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3255 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3256 			goto map_err;
   3257 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3258 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3259 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3260 		}
   3261 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3262 		     adapter->osdep.mem_size, flags,
   3263 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3264 map_err:
   3265 			adapter->osdep.mem_size = 0;
   3266 			aprint_error_dev(dev, "unable to map BAR0\n");
   3267 			return ENXIO;
   3268 		}
   3269 		break;
   3270 	default:
   3271 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3272 		return ENXIO;
   3273 	}
   3274 
   3275 	return (0);
   3276 } /* ixgbe_allocate_pci_resources */
   3277 
   3278 static void
   3279 ixgbe_free_softint(struct adapter *adapter)
   3280 {
   3281 	struct ix_queue *que = adapter->queues;
   3282 	struct tx_ring *txr = adapter->tx_rings;
   3283 	int i;
   3284 
   3285 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3286 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3287 			if (txr->txr_si != NULL)
   3288 				softint_disestablish(txr->txr_si);
   3289 		}
   3290 		if (que->que_si != NULL)
   3291 			softint_disestablish(que->que_si);
   3292 	}
   3293 	if (adapter->txr_wq != NULL)
   3294 		workqueue_destroy(adapter->txr_wq);
   3295 	if (adapter->txr_wq_enqueued != NULL)
   3296 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
   3297 	if (adapter->que_wq != NULL)
   3298 		workqueue_destroy(adapter->que_wq);
   3299 
   3300 	/* Drain the Link queue */
   3301 	if (adapter->link_si != NULL) {
   3302 		softint_disestablish(adapter->link_si);
   3303 		adapter->link_si = NULL;
   3304 	}
   3305 	if (adapter->mod_si != NULL) {
   3306 		softint_disestablish(adapter->mod_si);
   3307 		adapter->mod_si = NULL;
   3308 	}
   3309 	if (adapter->msf_si != NULL) {
   3310 		softint_disestablish(adapter->msf_si);
   3311 		adapter->msf_si = NULL;
   3312 	}
   3313 	if (adapter->phy_si != NULL) {
   3314 		softint_disestablish(adapter->phy_si);
   3315 		adapter->phy_si = NULL;
   3316 	}
   3317 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3318 		if (adapter->fdir_si != NULL) {
   3319 			softint_disestablish(adapter->fdir_si);
   3320 			adapter->fdir_si = NULL;
   3321 		}
   3322 	}
   3323 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3324 		if (adapter->mbx_si != NULL) {
   3325 			softint_disestablish(adapter->mbx_si);
   3326 			adapter->mbx_si = NULL;
   3327 		}
   3328 	}
   3329 } /* ixgbe_free_softint */
   3330 
   3331 /************************************************************************
   3332  * ixgbe_detach - Device removal routine
   3333  *
   3334  *   Called when the driver is being removed.
   3335  *   Stops the adapter and deallocates all the resources
   3336  *   that were allocated for driver operation.
   3337  *
   3338  *   return 0 on success, positive on failure
   3339  ************************************************************************/
   3340 static int
   3341 ixgbe_detach(device_t dev, int flags)
   3342 {
   3343 	struct adapter *adapter = device_private(dev);
   3344 	struct rx_ring *rxr = adapter->rx_rings;
   3345 	struct tx_ring *txr = adapter->tx_rings;
   3346 	struct ixgbe_hw *hw = &adapter->hw;
   3347 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3348 	u32	ctrl_ext;
   3349 
   3350 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3351 	if (adapter->osdep.attached == false)
   3352 		return 0;
   3353 
   3354 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3355 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3356 		return (EBUSY);
   3357 	}
   3358 
   3359 	/* Stop the interface. Callouts are stopped in it. */
   3360 	ixgbe_ifstop(adapter->ifp, 1);
   3361 #if NVLAN > 0
   3362 	/* Make sure VLANs are not using driver */
   3363 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3364 		;	/* nothing to do: no VLANs */
   3365 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3366 		vlan_ifdetach(adapter->ifp);
   3367 	else {
   3368 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3369 		return (EBUSY);
   3370 	}
   3371 #endif
   3372 
   3373 	pmf_device_deregister(dev);
   3374 
   3375 	ether_ifdetach(adapter->ifp);
   3376 	/* Stop the adapter */
   3377 	IXGBE_CORE_LOCK(adapter);
   3378 	ixgbe_setup_low_power_mode(adapter);
   3379 	IXGBE_CORE_UNLOCK(adapter);
   3380 
   3381 	ixgbe_free_softint(adapter);
   3382 
   3383 	/* let hardware know driver is unloading */
   3384 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3385 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3386 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3387 
   3388 	callout_halt(&adapter->timer, NULL);
   3389 
   3390 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3391 		netmap_detach(adapter->ifp);
   3392 
   3393 	ixgbe_free_pci_resources(adapter);
   3394 #if 0	/* XXX the NetBSD port is probably missing something here */
   3395 	bus_generic_detach(dev);
   3396 #endif
   3397 	if_detach(adapter->ifp);
   3398 	if_percpuq_destroy(adapter->ipq);
   3399 
   3400 	sysctl_teardown(&adapter->sysctllog);
   3401 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3402 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3403 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3404 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3405 	evcnt_detach(&adapter->other_tx_dma_setup);
   3406 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3407 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3408 	evcnt_detach(&adapter->watchdog_events);
   3409 	evcnt_detach(&adapter->tso_err);
   3410 	evcnt_detach(&adapter->link_irq);
   3411 	evcnt_detach(&adapter->link_sicount);
   3412 	evcnt_detach(&adapter->mod_sicount);
   3413 	evcnt_detach(&adapter->msf_sicount);
   3414 	evcnt_detach(&adapter->phy_sicount);
   3415 
   3416 	txr = adapter->tx_rings;
   3417 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3418 		evcnt_detach(&adapter->queues[i].irqs);
   3419 		evcnt_detach(&adapter->queues[i].handleq);
   3420 		evcnt_detach(&adapter->queues[i].req);
   3421 		evcnt_detach(&txr->no_desc_avail);
   3422 		evcnt_detach(&txr->total_packets);
   3423 		evcnt_detach(&txr->tso_tx);
   3424 #ifndef IXGBE_LEGACY_TX
   3425 		evcnt_detach(&txr->pcq_drops);
   3426 #endif
   3427 
   3428 		if (i < __arraycount(stats->mpc)) {
   3429 			evcnt_detach(&stats->mpc[i]);
   3430 			if (hw->mac.type == ixgbe_mac_82598EB)
   3431 				evcnt_detach(&stats->rnbc[i]);
   3432 		}
   3433 		if (i < __arraycount(stats->pxontxc)) {
   3434 			evcnt_detach(&stats->pxontxc[i]);
   3435 			evcnt_detach(&stats->pxonrxc[i]);
   3436 			evcnt_detach(&stats->pxofftxc[i]);
   3437 			evcnt_detach(&stats->pxoffrxc[i]);
   3438 			evcnt_detach(&stats->pxon2offc[i]);
   3439 		}
   3440 		if (i < __arraycount(stats->qprc)) {
   3441 			evcnt_detach(&stats->qprc[i]);
   3442 			evcnt_detach(&stats->qptc[i]);
   3443 			evcnt_detach(&stats->qbrc[i]);
   3444 			evcnt_detach(&stats->qbtc[i]);
   3445 			evcnt_detach(&stats->qprdc[i]);
   3446 		}
   3447 
   3448 		evcnt_detach(&rxr->rx_packets);
   3449 		evcnt_detach(&rxr->rx_bytes);
   3450 		evcnt_detach(&rxr->rx_copies);
   3451 		evcnt_detach(&rxr->no_jmbuf);
   3452 		evcnt_detach(&rxr->rx_discarded);
   3453 	}
   3454 	evcnt_detach(&stats->ipcs);
   3455 	evcnt_detach(&stats->l4cs);
   3456 	evcnt_detach(&stats->ipcs_bad);
   3457 	evcnt_detach(&stats->l4cs_bad);
   3458 	evcnt_detach(&stats->intzero);
   3459 	evcnt_detach(&stats->legint);
   3460 	evcnt_detach(&stats->crcerrs);
   3461 	evcnt_detach(&stats->illerrc);
   3462 	evcnt_detach(&stats->errbc);
   3463 	evcnt_detach(&stats->mspdc);
   3464 	if (hw->mac.type >= ixgbe_mac_X550)
   3465 		evcnt_detach(&stats->mbsdc);
   3466 	evcnt_detach(&stats->mpctotal);
   3467 	evcnt_detach(&stats->mlfc);
   3468 	evcnt_detach(&stats->mrfc);
   3469 	evcnt_detach(&stats->rlec);
   3470 	evcnt_detach(&stats->lxontxc);
   3471 	evcnt_detach(&stats->lxonrxc);
   3472 	evcnt_detach(&stats->lxofftxc);
   3473 	evcnt_detach(&stats->lxoffrxc);
   3474 
   3475 	/* Packet Reception Stats */
   3476 	evcnt_detach(&stats->tor);
   3477 	evcnt_detach(&stats->gorc);
   3478 	evcnt_detach(&stats->tpr);
   3479 	evcnt_detach(&stats->gprc);
   3480 	evcnt_detach(&stats->mprc);
   3481 	evcnt_detach(&stats->bprc);
   3482 	evcnt_detach(&stats->prc64);
   3483 	evcnt_detach(&stats->prc127);
   3484 	evcnt_detach(&stats->prc255);
   3485 	evcnt_detach(&stats->prc511);
   3486 	evcnt_detach(&stats->prc1023);
   3487 	evcnt_detach(&stats->prc1522);
   3488 	evcnt_detach(&stats->ruc);
   3489 	evcnt_detach(&stats->rfc);
   3490 	evcnt_detach(&stats->roc);
   3491 	evcnt_detach(&stats->rjc);
   3492 	evcnt_detach(&stats->mngprc);
   3493 	evcnt_detach(&stats->mngpdc);
   3494 	evcnt_detach(&stats->xec);
   3495 
   3496 	/* Packet Transmission Stats */
   3497 	evcnt_detach(&stats->gotc);
   3498 	evcnt_detach(&stats->tpt);
   3499 	evcnt_detach(&stats->gptc);
   3500 	evcnt_detach(&stats->bptc);
   3501 	evcnt_detach(&stats->mptc);
   3502 	evcnt_detach(&stats->mngptc);
   3503 	evcnt_detach(&stats->ptc64);
   3504 	evcnt_detach(&stats->ptc127);
   3505 	evcnt_detach(&stats->ptc255);
   3506 	evcnt_detach(&stats->ptc511);
   3507 	evcnt_detach(&stats->ptc1023);
   3508 	evcnt_detach(&stats->ptc1522);
   3509 
   3510 	ixgbe_free_transmit_structures(adapter);
   3511 	ixgbe_free_receive_structures(adapter);
   3512 	for (int i = 0; i < adapter->num_queues; i++) {
   3513 		struct ix_queue * que = &adapter->queues[i];
   3514 		mutex_destroy(&que->im_mtx);
   3515 	}
   3516 	free(adapter->queues, M_DEVBUF);
   3517 	free(adapter->mta, M_DEVBUF);
   3518 
   3519 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3520 
   3521 	return (0);
   3522 } /* ixgbe_detach */
   3523 
   3524 /************************************************************************
   3525  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3526  *
   3527  *   Prepare the adapter/port for LPLU and/or WoL
   3528  ************************************************************************/
   3529 static int
   3530 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3531 {
   3532 	struct ixgbe_hw *hw = &adapter->hw;
   3533 	device_t        dev = adapter->dev;
   3534 	s32             error = 0;
   3535 
   3536 	KASSERT(mutex_owned(&adapter->core_mtx));
   3537 
   3538 	/* Limit power management flow to X550EM baseT */
   3539 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3540 	    hw->phy.ops.enter_lplu) {
   3541 		/* X550EM baseT adapters need a special LPLU flow */
   3542 		hw->phy.reset_disable = true;
   3543 		ixgbe_stop(adapter);
   3544 		error = hw->phy.ops.enter_lplu(hw);
   3545 		if (error)
   3546 			device_printf(dev,
   3547 			    "Error entering LPLU: %d\n", error);
   3548 		hw->phy.reset_disable = false;
   3549 	} else {
   3550 		/* Just stop for other adapters */
   3551 		ixgbe_stop(adapter);
   3552 	}
   3553 
   3554 	if (!hw->wol_enabled) {
   3555 		ixgbe_set_phy_power(hw, FALSE);
   3556 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3557 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3558 	} else {
   3559 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3560 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3561 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3562 
   3563 		/*
   3564 		 * Clear Wake Up Status register to prevent any previous wakeup
   3565 		 * events from waking us up immediately after we suspend.
   3566 		 */
   3567 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3568 
   3569 		/*
   3570 		 * Program the Wakeup Filter Control register with user filter
   3571 		 * settings
   3572 		 */
   3573 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3574 
   3575 		/* Enable wakeups and power management in Wakeup Control */
   3576 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3577 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3578 
   3579 	}
   3580 
   3581 	return error;
   3582 } /* ixgbe_setup_low_power_mode */
   3583 
   3584 /************************************************************************
   3585  * ixgbe_shutdown - Shutdown entry point
   3586  ************************************************************************/
   3587 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3588 static int
   3589 ixgbe_shutdown(device_t dev)
   3590 {
   3591 	struct adapter *adapter = device_private(dev);
   3592 	int error = 0;
   3593 
   3594 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3595 
   3596 	IXGBE_CORE_LOCK(adapter);
   3597 	error = ixgbe_setup_low_power_mode(adapter);
   3598 	IXGBE_CORE_UNLOCK(adapter);
   3599 
   3600 	return (error);
   3601 } /* ixgbe_shutdown */
   3602 #endif
   3603 
   3604 /************************************************************************
   3605  * ixgbe_suspend
   3606  *
   3607  *   From D0 to D3
   3608  ************************************************************************/
   3609 static bool
   3610 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3611 {
   3612 	struct adapter *adapter = device_private(dev);
   3613 	int            error = 0;
   3614 
   3615 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3616 
   3617 	IXGBE_CORE_LOCK(adapter);
   3618 
   3619 	error = ixgbe_setup_low_power_mode(adapter);
   3620 
   3621 	IXGBE_CORE_UNLOCK(adapter);
   3622 
   3623 	return (error);
   3624 } /* ixgbe_suspend */
   3625 
   3626 /************************************************************************
   3627  * ixgbe_resume
   3628  *
   3629  *   From D3 to D0
   3630  ************************************************************************/
   3631 static bool
   3632 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3633 {
   3634 	struct adapter  *adapter = device_private(dev);
   3635 	struct ifnet    *ifp = adapter->ifp;
   3636 	struct ixgbe_hw *hw = &adapter->hw;
   3637 	u32             wus;
   3638 
   3639 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3640 
   3641 	IXGBE_CORE_LOCK(adapter);
   3642 
   3643 	/* Read & clear WUS register */
   3644 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3645 	if (wus)
   3646 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3647 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3648 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3649 	/* And clear WUFC until next low-power transition */
   3650 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3651 
   3652 	/*
   3653 	 * Required after D3->D0 transition;
   3654 	 * will re-advertise all previous advertised speeds
   3655 	 */
   3656 	if (ifp->if_flags & IFF_UP)
   3657 		ixgbe_init_locked(adapter);
   3658 
   3659 	IXGBE_CORE_UNLOCK(adapter);
   3660 
   3661 	return true;
   3662 } /* ixgbe_resume */
   3663 
   3664 /*
   3665  * Set the various hardware offload abilities.
   3666  *
   3667  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3668  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3669  * mbuf offload flags the driver will understand.
   3670  */
   3671 static void
   3672 ixgbe_set_if_hwassist(struct adapter *adapter)
   3673 {
   3674 	/* XXX */
   3675 }
   3676 
   3677 /************************************************************************
   3678  * ixgbe_init_locked - Init entry point
   3679  *
   3680  *   Used in two ways: It is used by the stack as an init
   3681  *   entry point in network interface structure. It is also
   3682  *   used by the driver as a hw/sw initialization routine to
   3683  *   get to a consistent state.
   3684  *
   3685  *   return 0 on success, positive on failure
   3686  ************************************************************************/
   3687 static void
   3688 ixgbe_init_locked(struct adapter *adapter)
   3689 {
   3690 	struct ifnet   *ifp = adapter->ifp;
   3691 	device_t 	dev = adapter->dev;
   3692 	struct ixgbe_hw *hw = &adapter->hw;
   3693 	struct tx_ring  *txr;
   3694 	struct rx_ring  *rxr;
   3695 	u32		txdctl, mhadd;
   3696 	u32		rxdctl, rxctrl;
   3697 	u32             ctrl_ext;
   3698 	int             err = 0;
   3699 
   3700 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3701 
   3702 	KASSERT(mutex_owned(&adapter->core_mtx));
   3703 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3704 
   3705 	hw->adapter_stopped = FALSE;
   3706 	ixgbe_stop_adapter(hw);
   3707         callout_stop(&adapter->timer);
   3708 
   3709 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3710 	adapter->max_frame_size =
   3711 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3712 
   3713 	/* Queue indices may change with IOV mode */
   3714 	ixgbe_align_all_queue_indices(adapter);
   3715 
   3716 	/* reprogram the RAR[0] in case user changed it. */
   3717 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3718 
   3719 	/* Get the latest mac address, User can use a LAA */
   3720 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3721 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3722 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3723 	hw->addr_ctrl.rar_used_count = 1;
   3724 
   3725 	/* Set hardware offload abilities from ifnet flags */
   3726 	ixgbe_set_if_hwassist(adapter);
   3727 
   3728 	/* Prepare transmit descriptors and buffers */
   3729 	if (ixgbe_setup_transmit_structures(adapter)) {
   3730 		device_printf(dev, "Could not setup transmit structures\n");
   3731 		ixgbe_stop(adapter);
   3732 		return;
   3733 	}
   3734 
   3735 	ixgbe_init_hw(hw);
   3736 	ixgbe_initialize_iov(adapter);
   3737 	ixgbe_initialize_transmit_units(adapter);
   3738 
   3739 	/* Setup Multicast table */
   3740 	ixgbe_set_multi(adapter);
   3741 
   3742 	/* Determine the correct mbuf pool, based on frame size */
   3743 	if (adapter->max_frame_size <= MCLBYTES)
   3744 		adapter->rx_mbuf_sz = MCLBYTES;
   3745 	else
   3746 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3747 
   3748 	/* Prepare receive descriptors and buffers */
   3749 	if (ixgbe_setup_receive_structures(adapter)) {
   3750 		device_printf(dev, "Could not setup receive structures\n");
   3751 		ixgbe_stop(adapter);
   3752 		return;
   3753 	}
   3754 
   3755 	/* Configure RX settings */
   3756 	ixgbe_initialize_receive_units(adapter);
   3757 
   3758 	/* Enable SDP & MSI-X interrupts based on adapter */
   3759 	ixgbe_config_gpie(adapter);
   3760 
   3761 	/* Set MTU size */
   3762 	if (ifp->if_mtu > ETHERMTU) {
   3763 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3764 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3765 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3766 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3767 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3768 	}
   3769 
   3770 	/* Now enable all the queues */
   3771 	for (int i = 0; i < adapter->num_queues; i++) {
   3772 		txr = &adapter->tx_rings[i];
   3773 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3774 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3775 		/* Set WTHRESH to 8, burst writeback */
   3776 		txdctl |= (8 << 16);
   3777 		/*
   3778 		 * When the internal queue falls below PTHRESH (32),
   3779 		 * start prefetching as long as there are at least
   3780 		 * HTHRESH (1) buffers ready. The values are taken
   3781 		 * from the Intel linux driver 3.8.21.
   3782 		 * Prefetching enables tx line rate even with 1 queue.
   3783 		 */
   3784 		txdctl |= (32 << 0) | (1 << 8);
   3785 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3786 	}
   3787 
   3788 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3789 		rxr = &adapter->rx_rings[i];
   3790 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3791 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3792 			/*
   3793 			 * PTHRESH = 21
   3794 			 * HTHRESH = 4
   3795 			 * WTHRESH = 8
   3796 			 */
   3797 			rxdctl &= ~0x3FFFFF;
   3798 			rxdctl |= 0x080420;
   3799 		}
   3800 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3801 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3802 		for (; j < 10; j++) {
   3803 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3804 			    IXGBE_RXDCTL_ENABLE)
   3805 				break;
   3806 			else
   3807 				msec_delay(1);
   3808 		}
   3809 		wmb();
   3810 
   3811 		/*
   3812 		 * In netmap mode, we must preserve the buffers made
   3813 		 * available to userspace before the if_init()
   3814 		 * (this is true by default on the TX side, because
   3815 		 * init makes all buffers available to userspace).
   3816 		 *
   3817 		 * netmap_reset() and the device specific routines
   3818 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3819 		 * buffers at the end of the NIC ring, so here we
   3820 		 * must set the RDT (tail) register to make sure
   3821 		 * they are not overwritten.
   3822 		 *
   3823 		 * In this driver the NIC ring starts at RDH = 0,
   3824 		 * RDT points to the last slot available for reception (?),
   3825 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3826 		 */
   3827 #ifdef DEV_NETMAP
   3828 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3829 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3830 			struct netmap_adapter *na = NA(adapter->ifp);
   3831 			struct netmap_kring *kring = &na->rx_rings[i];
   3832 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3833 
   3834 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3835 		} else
   3836 #endif /* DEV_NETMAP */
   3837 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3838 			    adapter->num_rx_desc - 1);
   3839 	}
   3840 
   3841 	/* Enable Receive engine */
   3842 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3843 	if (hw->mac.type == ixgbe_mac_82598EB)
   3844 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3845 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3846 	ixgbe_enable_rx_dma(hw, rxctrl);
   3847 
   3848 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3849 
   3850 	/* Set up MSI-X routing */
   3851 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3852 		ixgbe_configure_ivars(adapter);
   3853 		/* Set up auto-mask */
   3854 		if (hw->mac.type == ixgbe_mac_82598EB)
   3855 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3856 		else {
   3857 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3858 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3859 		}
   3860 	} else {  /* Simple settings for Legacy/MSI */
   3861 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3862 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3863 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3864 	}
   3865 
   3866 	ixgbe_init_fdir(adapter);
   3867 
   3868 	/*
   3869 	 * Check on any SFP devices that
   3870 	 * need to be kick-started
   3871 	 */
   3872 	if (hw->phy.type == ixgbe_phy_none) {
   3873 		err = hw->phy.ops.identify(hw);
   3874 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   3875                 	device_printf(dev,
   3876 			    "Unsupported SFP+ module type was detected.\n");
   3877 			return;
   3878         	}
   3879 	}
   3880 
   3881 	/* Set moderation on the Link interrupt */
   3882 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3883 
   3884 	/* Config/Enable Link */
   3885 	ixgbe_config_link(adapter);
   3886 
   3887 	/* Hardware Packet Buffer & Flow Control setup */
   3888 	ixgbe_config_delay_values(adapter);
   3889 
   3890 	/* Initialize the FC settings */
   3891 	ixgbe_start_hw(hw);
   3892 
   3893 	/* Set up VLAN support and filter */
   3894 	ixgbe_setup_vlan_hw_support(adapter);
   3895 
   3896 	/* Setup DMA Coalescing */
   3897 	ixgbe_config_dmac(adapter);
   3898 
   3899 	/* And now turn on interrupts */
   3900 	ixgbe_enable_intr(adapter);
   3901 
   3902 	/* Enable the use of the MBX by the VF's */
   3903 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3904 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3905 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3906 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3907 	}
   3908 
   3909 	/* Update saved flags. See ixgbe_ifflags_cb() */
   3910 	adapter->if_flags = ifp->if_flags;
   3911 
   3912 	/* Now inform the stack we're ready */
   3913 	ifp->if_flags |= IFF_RUNNING;
   3914 
   3915 	return;
   3916 } /* ixgbe_init_locked */
   3917 
   3918 /************************************************************************
   3919  * ixgbe_init
   3920  ************************************************************************/
   3921 static int
   3922 ixgbe_init(struct ifnet *ifp)
   3923 {
   3924 	struct adapter *adapter = ifp->if_softc;
   3925 
   3926 	IXGBE_CORE_LOCK(adapter);
   3927 	ixgbe_init_locked(adapter);
   3928 	IXGBE_CORE_UNLOCK(adapter);
   3929 
   3930 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3931 } /* ixgbe_init */
   3932 
   3933 /************************************************************************
   3934  * ixgbe_set_ivar
   3935  *
   3936  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3937  *     (yes this is all very magic and confusing :)
   3938  *    - entry is the register array entry
   3939  *    - vector is the MSI-X vector for this queue
   3940  *    - type is RX/TX/MISC
   3941  ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* The hardware requires the "allocation valid" bit in every field. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598 uses one flat IVAR table: TX entries (type 1) live
		 * 64 slots after the RX entries (type 0), and "other
		 * causes" (type == -1) has a fixed, dedicated index.
		 */
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		/* Four 8-bit vector fields are packed per 32-bit register. */
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (entry & 0x3)));
		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			/* IVAR_MISC holds two 8-bit fields, selected by bit 0. */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {	/* RX/TX IVARS */
			/*
			 * Two queues share each IVAR register; within a
			 * queue's 16-bit half, the RX field is the low
			 * byte and the TX field the byte above it.
			 */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */
   3985 
   3986 /************************************************************************
   3987  * ixgbe_configure_ivars
   3988  ************************************************************************/
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	u32             newitr;

	/*
	 * Derive the initial EITR interval from the configured maximum
	 * interrupt rate; a rate of 0 disables moderation entirely.
	 */
	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		adapter->dmac = 0;
		newitr = 0;
	}

        for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		struct tx_ring *txr = &adapter->tx_rings[i];
		/* First the RX queue entry */
                ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
		/* Set an Initial EITR value */
		ixgbe_eitr_write(que, newitr);
		/*
		 * To eliminate influence of the previous state.
		 * At this point, Tx/Rx interrupt handler
		 * (ixgbe_msix_que()) cannot be called, so  both
		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
		 */
		que->eitr_setting = 0;
	}

	/* For the Link interrupt */
        ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */
   4027 
   4028 /************************************************************************
   4029  * ixgbe_config_gpie
   4030  ************************************************************************/
static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             gpie;

	/* Read-modify-write: preserve any GPIE bits we do not manage here. */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		     |  IXGBE_GPIE_EIAME
		     |  IXGBE_GPIE_PBA_SUPPORT
		     |  IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/*
	 * Link detection: the SDP pin carrying module/link indications
	 * differs per MAC generation, so enable the matching GPI bits.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

} /* ixgbe_config_gpie */
   4071 
   4072 /************************************************************************
   4073  * ixgbe_config_delay_values
   4074  *
   4075  *   Requires adapter->max_frame_size to be set.
   4076  ************************************************************************/
static void
ixgbe_config_delay_values(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             rxpb, frame, size, tmp;

	frame = adapter->max_frame_size;

	/*
	 * Calculate High Water: the flow-control delay value depends on
	 * the MAC generation (X540 and newer use a different formula).
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_DV_X540(frame, frame);
		break;
	default:
		tmp = IXGBE_DV(frame, frame);
		break;
	}
	size = IXGBE_BT2KB(tmp);
	/* RXPBSIZE is in bytes; >> 10 converts to KB to match 'size'. */
	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
	/* High water = packet-buffer size minus the required headroom. */
	hw->fc.high_water[0] = rxpb - size;

	/* Now calculate Low Water */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		tmp = IXGBE_LOW_DV_X540(frame);
		break;
	default:
		tmp = IXGBE_LOW_DV(frame);
		break;
	}
	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

	hw->fc.pause_time = IXGBE_FC_PAUSE;
	hw->fc.send_xon = TRUE;
} /* ixgbe_config_delay_values */
   4118 
   4119 /************************************************************************
   4120  * ixgbe_set_multi - Multicast Update
   4121  *
   4122  *   Called whenever multicast address list is updated.
   4123  ************************************************************************/
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	/* Caller must hold the core lock; we modify shared filter state. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	/*
	 * Walk the multicast list into the mta array.  Fall back to
	 * ALLMULTI if the table would overflow or if any entry is an
	 * address range (lo != hi), which the filter cannot express.
	 */
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Program promiscuous/all-multicast bits in the filter control reg. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Only load exact filters when the list fit (no ALLMULTI overflow). */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
   4177 
   4178 /************************************************************************
   4179  * ixgbe_mc_array_itr
   4180  *
   4181  *   An iterator function needed by the multicast shared code.
   4182  *   It feeds the shared code routine the addresses in the
   4183  *   array of ixgbe_set_multi() one by one.
   4184  ************************************************************************/
   4185 static u8 *
   4186 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4187 {
   4188 	struct ixgbe_mc_addr *mta;
   4189 
   4190 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4191 	*vmdq = mta->vmdq;
   4192 
   4193 	*update_ptr = (u8*)(mta + 1);
   4194 
   4195 	return (mta->addr);
   4196 } /* ixgbe_mc_array_itr */
   4197 
   4198 /************************************************************************
   4199  * ixgbe_local_timer - Timer routine
   4200  *
   4201  *   Checks for link status, updates statistics,
   4202  *   and runs the watchdog check.
   4203  ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Take the core lock and let ixgbe_local_timer1() do the work. */
	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
   4213 
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues with pending work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;	/* number of queues declared hung */
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Update some event counters: sum the per-queue TX error
	 * counters into the adapter-wide event counters.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring  *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->im_mtx);
			/* Skip queues whose interrupt is masked (im_nest > 0). */
			if (que->im_nest == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->im_mtx);
		}
	}

out:
	/* Re-arm the timer to fire again in one second. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the whole interface. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
   4316 
   4317 /************************************************************************
   4318  * ixgbe_sfp_probe
   4319  *
   4320  *   Determine if a port had optics inserted.
   4321  ************************************************************************/
   4322 static bool
   4323 ixgbe_sfp_probe(struct adapter *adapter)
   4324 {
   4325 	struct ixgbe_hw	*hw = &adapter->hw;
   4326 	device_t	dev = adapter->dev;
   4327 	bool		result = FALSE;
   4328 
   4329 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4330 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4331 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4332 		if (ret)
   4333 			goto out;
   4334 		ret = hw->phy.ops.reset(hw);
   4335 		adapter->sfp_probe = FALSE;
   4336 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4337 			device_printf(dev,"Unsupported SFP+ module detected!");
   4338 			device_printf(dev,
   4339 			    "Reload driver with supported module.\n");
   4340                         goto out;
   4341 		} else
   4342 			device_printf(dev, "SFP+ module detected!\n");
   4343 		/* We now have supported optics */
   4344 		result = TRUE;
   4345 	}
   4346 out:
   4347 
   4348 	return (result);
   4349 } /* ixgbe_sfp_probe */
   4350 
   4351 /************************************************************************
   4352  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4353  ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32             err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	/*
	 * Crosstalk-errata workaround: confirm a module is really
	 * present (cage-full SDP pin set) before touching the optics.
	 * The pin carrying the indication differs per MAC type.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		/* Spurious interrupt: no module in the cage, nothing to do. */
		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	err = hw->mac.ops.setup_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Module accepted: let the MSF tasklet (re)negotiate the link. */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
   4397 
   4398 
   4399 /************************************************************************
   4400  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4401  ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg;
	bool            negotiate;

	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * If no speeds are being advertised, query the hardware for
	 * its link capabilities and use those for setup_link().
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
   4427 
   4428 /************************************************************************
   4429  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4430  ************************************************************************/
   4431 static void
   4432 ixgbe_handle_phy(void *context)
   4433 {
   4434 	struct adapter  *adapter = context;
   4435 	struct ixgbe_hw *hw = &adapter->hw;
   4436 	int error;
   4437 
   4438 	++adapter->phy_sicount.ev_count;
   4439 	error = hw->phy.ops.handle_lasi(hw);
   4440 	if (error == IXGBE_ERR_OVERTEMP)
   4441 		device_printf(adapter->dev,
   4442 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4443 		    " PHY will downshift to lower power state!\n");
   4444 	else if (error)
   4445 		device_printf(adapter->dev,
   4446 		    "Error handling LASI interrupt: %d\n", error);
   4447 } /* ixgbe_handle_phy */
   4448 
   4449 static void
   4450 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4451 {
   4452 	struct adapter *adapter = ifp->if_softc;
   4453 
   4454 	IXGBE_CORE_LOCK(adapter);
   4455 	ixgbe_stop(adapter);
   4456 	IXGBE_CORE_UNLOCK(adapter);
   4457 }
   4458 
   4459 /************************************************************************
   4460  * ixgbe_stop - Stop the hardware
   4461  *
   4462  *   Disables all traffic on the adapter by issuing a
   4463  *   global reset on the MAC and deallocates TX/RX buffers.
   4464  ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet    *ifp;
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the periodic timer before touching hw. */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so the following ixgbe_stop_adapter()
	 * actually runs its stop sequence (reset_hw may have set it).
	 * NOTE(review): inferred from the call ordering here — confirm
	 * against the shared-code implementation.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
   4500 
   4501 /************************************************************************
   4502  * ixgbe_update_link_status - Update OS on link state
   4503  *
   4504  * Note: Only updates the OS on the cached link state.
   4505  *       The real check of the hardware only happens with
   4506  *       a link interrupt.
   4507  ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Act only on transitions between link_up and link_active. */
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 *  Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			/* Log a human-readable link speed when verbose. */
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}
} /* ixgbe_update_link_status */
   4587 
   4588 /************************************************************************
   4589  * ixgbe_config_dmac - Configure DMA Coalescing
   4590  ************************************************************************/
   4591 static void
   4592 ixgbe_config_dmac(struct adapter *adapter)
   4593 {
   4594 	struct ixgbe_hw *hw = &adapter->hw;
   4595 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4596 
   4597 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4598 		return;
   4599 
   4600 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4601 	    dcfg->link_speed ^ adapter->link_speed) {
   4602 		dcfg->watchdog_timer = adapter->dmac;
   4603 		dcfg->fcoe_en = false;
   4604 		dcfg->link_speed = adapter->link_speed;
   4605 		dcfg->num_tcs = 1;
   4606 
   4607 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4608 		    dcfg->watchdog_timer, dcfg->link_speed);
   4609 
   4610 		hw->mac.ops.dmac_config(hw);
   4611 	}
   4612 } /* ixgbe_config_dmac */
   4613 
   4614 /************************************************************************
   4615  * ixgbe_enable_intr
   4616  ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all enable bits except the per-queue RX/TX causes. */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add the MAC-generation-specific cause bits. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
        for (int i = 0; i < adapter->num_queues; i++, que++)
                ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_enable_intr */
   4698 
   4699 /************************************************************************
   4700  * ixgbe_disable_intr
   4701  ************************************************************************/
   4702 static void
   4703 ixgbe_disable_intr(struct adapter *adapter)
   4704 {
   4705 	struct ix_queue	*que = adapter->queues;
   4706 
   4707 	/* disable interrupts other than queues */
   4708 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
   4709 
   4710 	if (adapter->msix_mem)
   4711 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4712 
   4713 	for (int i = 0; i < adapter->num_queues; i++, que++)
   4714 		ixgbe_disable_queue(adapter, que->msix);
   4715 
   4716 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4717 
   4718 } /* ixgbe_disable_intr */
   4719 
   4720 /************************************************************************
   4721  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4722  ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct 		tx_ring *txr = adapter->tx_rings;
	bool		more = false;
	u32             eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR tells us which causes fired (and clears them). */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	/* No cause bits set: shared-interrupt false alarm, not ours. */
	if (eicr == 0) {
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: hand off to the mod tasklet. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multispeed fiber: hand off to the msf tasklet. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Defer RX cleanup to softint; otherwise re-enable interrupts. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
   4808 
   4809 /************************************************************************
   4810  * ixgbe_free_pciintr_resources
   4811  ************************************************************************/
   4812 static void
   4813 ixgbe_free_pciintr_resources(struct adapter *adapter)
   4814 {
   4815 	struct ix_queue *que = adapter->queues;
   4816 	int		rid;
   4817 
   4818 	/*
   4819 	 * Release all msix queue resources:
   4820 	 */
   4821 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4822 		if (que->res != NULL) {
   4823 			pci_intr_disestablish(adapter->osdep.pc,
   4824 			    adapter->osdep.ihs[i]);
   4825 			adapter->osdep.ihs[i] = NULL;
   4826 		}
   4827 	}
   4828 
   4829 	/* Clean the Legacy or Link interrupt last */
   4830 	if (adapter->vector) /* we are doing MSIX */
   4831 		rid = adapter->vector;
   4832 	else
   4833 		rid = 0;
   4834 
   4835 	if (adapter->osdep.ihs[rid] != NULL) {
   4836 		pci_intr_disestablish(adapter->osdep.pc,
   4837 		    adapter->osdep.ihs[rid]);
   4838 		adapter->osdep.ihs[rid] = NULL;
   4839 	}
   4840 
   4841 	if (adapter->osdep.intrs != NULL) {
   4842 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4843 		    adapter->osdep.nintrs);
   4844 		adapter->osdep.intrs = NULL;
   4845 	}
   4846 
   4847 	return;
   4848 } /* ixgbe_free_pciintr_resources */
   4849 
   4850 /************************************************************************
   4851  * ixgbe_free_pci_resources
   4852  ************************************************************************/
   4853 static void
   4854 ixgbe_free_pci_resources(struct adapter *adapter)
   4855 {
   4856 
   4857 	ixgbe_free_pciintr_resources(adapter);
   4858 
   4859 	if (adapter->osdep.mem_size != 0) {
   4860 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4861 		    adapter->osdep.mem_bus_space_handle,
   4862 		    adapter->osdep.mem_size);
   4863 	}
   4864 
   4865 	return;
   4866 } /* ixgbe_free_pci_resources */
   4867 
   4868 /************************************************************************
   4869  * ixgbe_set_sysctl_value
   4870  ************************************************************************/
   4871 static void
   4872 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4873     const char *description, int *limit, int value)
   4874 {
   4875 	device_t dev =  adapter->dev;
   4876 	struct sysctllog **log;
   4877 	const struct sysctlnode *rnode, *cnode;
   4878 
   4879 	log = &adapter->sysctllog;
   4880 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4881 		aprint_error_dev(dev, "could not create sysctl root\n");
   4882 		return;
   4883 	}
   4884 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4885 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4886 	    name, SYSCTL_DESCR(description),
   4887 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4888 		aprint_error_dev(dev, "could not create sysctl\n");
   4889 	*limit = value;
   4890 } /* ixgbe_set_sysctl_value */
   4891 
   4892 /************************************************************************
   4893  * ixgbe_sysctl_flowcntl
   4894  *
   4895  *   SYSCTL wrapper around setting Flow Control
   4896  ************************************************************************/
   4897 static int
   4898 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4899 {
   4900 	struct sysctlnode node = *rnode;
   4901 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4902 	int error, fc;
   4903 
   4904 	fc = adapter->hw.fc.current_mode;
   4905 	node.sysctl_data = &fc;
   4906 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4907 	if (error != 0 || newp == NULL)
   4908 		return error;
   4909 
   4910 	/* Don't bother if it's not changed */
   4911 	if (fc == adapter->hw.fc.current_mode)
   4912 		return (0);
   4913 
   4914 	return ixgbe_set_flowcntl(adapter, fc);
   4915 } /* ixgbe_sysctl_flowcntl */
   4916 
   4917 /************************************************************************
   4918  * ixgbe_set_flowcntl - Set flow control
   4919  *
   4920  *   Flow control values:
   4921  *     0 - off
   4922  *     1 - rx pause
   4923  *     2 - tx pause
   4924  *     3 - full
   4925  ************************************************************************/
   4926 static int
   4927 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4928 {
   4929 	switch (fc) {
   4930 		case ixgbe_fc_rx_pause:
   4931 		case ixgbe_fc_tx_pause:
   4932 		case ixgbe_fc_full:
   4933 			adapter->hw.fc.requested_mode = fc;
   4934 			if (adapter->num_queues > 1)
   4935 				ixgbe_disable_rx_drop(adapter);
   4936 			break;
   4937 		case ixgbe_fc_none:
   4938 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4939 			if (adapter->num_queues > 1)
   4940 				ixgbe_enable_rx_drop(adapter);
   4941 			break;
   4942 		default:
   4943 			return (EINVAL);
   4944 	}
   4945 
   4946 #if 0 /* XXX NetBSD */
   4947 	/* Don't autoneg if forcing a value */
   4948 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4949 #endif
   4950 	ixgbe_fc_enable(&adapter->hw);
   4951 
   4952 	return (0);
   4953 } /* ixgbe_set_flowcntl */
   4954 
   4955 /************************************************************************
   4956  * ixgbe_enable_rx_drop
   4957  *
   4958  *   Enable the hardware to drop packets when the buffer is
   4959  *   full. This is useful with multiqueue, so that no single
   4960  *   queue being full stalls the entire RX engine. We only
   4961  *   enable this when Multiqueue is enabled AND Flow Control
   4962  *   is disabled.
   4963  ************************************************************************/
   4964 static void
   4965 ixgbe_enable_rx_drop(struct adapter *adapter)
   4966 {
   4967 	struct ixgbe_hw *hw = &adapter->hw;
   4968 	struct rx_ring  *rxr;
   4969 	u32             srrctl;
   4970 
   4971 	for (int i = 0; i < adapter->num_queues; i++) {
   4972 		rxr = &adapter->rx_rings[i];
   4973 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4974 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4975 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4976 	}
   4977 
   4978 	/* enable drop for each vf */
   4979 	for (int i = 0; i < adapter->num_vfs; i++) {
   4980 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4981 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4982 		    IXGBE_QDE_ENABLE));
   4983 	}
   4984 } /* ixgbe_enable_rx_drop */
   4985 
   4986 /************************************************************************
   4987  * ixgbe_disable_rx_drop
   4988  ************************************************************************/
   4989 static void
   4990 ixgbe_disable_rx_drop(struct adapter *adapter)
   4991 {
   4992 	struct ixgbe_hw *hw = &adapter->hw;
   4993 	struct rx_ring  *rxr;
   4994 	u32             srrctl;
   4995 
   4996 	for (int i = 0; i < adapter->num_queues; i++) {
   4997 		rxr = &adapter->rx_rings[i];
   4998         	srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4999         	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   5000         	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5001 	}
   5002 
   5003 	/* disable drop for each vf */
   5004 	for (int i = 0; i < adapter->num_vfs; i++) {
   5005 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5006 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   5007 	}
   5008 } /* ixgbe_disable_rx_drop */
   5009 
   5010 /************************************************************************
   5011  * ixgbe_sysctl_advertise
   5012  *
   5013  *   SYSCTL wrapper around setting advertised speed
   5014  ************************************************************************/
   5015 static int
   5016 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   5017 {
   5018 	struct sysctlnode node = *rnode;
   5019 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5020 	int            error = 0, advertise;
   5021 
   5022 	advertise = adapter->advertise;
   5023 	node.sysctl_data = &advertise;
   5024 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5025 	if (error != 0 || newp == NULL)
   5026 		return error;
   5027 
   5028 	return ixgbe_set_advertise(adapter, advertise);
   5029 } /* ixgbe_sysctl_advertise */
   5030 
   5031 /************************************************************************
   5032  * ixgbe_set_advertise - Control advertised link speed
   5033  *
   5034  *   Flags:
   5035  *     0x00 - Default (all capable link speed)
   5036  *     0x01 - advertise 100 Mb
   5037  *     0x02 - advertise 1G
   5038  *     0x04 - advertise 10G
   5039  *     0x08 - advertise 10 Mb
   5040  *     0x10 - advertise 2.5G
   5041  *     0x20 - advertise 5G
   5042  ************************************************************************/
   5043 static int
   5044 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5045 {
   5046 	device_t         dev;
   5047 	struct ixgbe_hw  *hw;
   5048 	ixgbe_link_speed speed = 0;
   5049 	ixgbe_link_speed link_caps = 0;
   5050 	s32              err = IXGBE_NOT_IMPLEMENTED;
   5051 	bool             negotiate = FALSE;
   5052 
   5053 	/* Checks to validate new value */
   5054 	if (adapter->advertise == advertise) /* no change */
   5055 		return (0);
   5056 
   5057 	dev = adapter->dev;
   5058 	hw = &adapter->hw;
   5059 
   5060 	/* No speed changes for backplane media */
   5061 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5062 		return (ENODEV);
   5063 
   5064 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5065 	    (hw->phy.multispeed_fiber))) {
   5066 		device_printf(dev,
   5067 		    "Advertised speed can only be set on copper or "
   5068 		    "multispeed fiber media types.\n");
   5069 		return (EINVAL);
   5070 	}
   5071 
   5072 	if (advertise < 0x0 || advertise > 0x2f) {
   5073 		device_printf(dev,
   5074 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5075 		return (EINVAL);
   5076 	}
   5077 
   5078 	if (hw->mac.ops.get_link_capabilities) {
   5079 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5080 		    &negotiate);
   5081 		if (err != IXGBE_SUCCESS) {
   5082 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5083 			return (ENODEV);
   5084 		}
   5085 	}
   5086 
   5087 	/* Set new value and report new advertised mode */
   5088 	if (advertise & 0x1) {
   5089 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5090 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5091 			return (EINVAL);
   5092 		}
   5093 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5094 	}
   5095 	if (advertise & 0x2) {
   5096 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5097 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5098 			return (EINVAL);
   5099 		}
   5100 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5101 	}
   5102 	if (advertise & 0x4) {
   5103 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5104 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5105 			return (EINVAL);
   5106 		}
   5107 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5108 	}
   5109 	if (advertise & 0x8) {
   5110 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5111 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5112 			return (EINVAL);
   5113 		}
   5114 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5115 	}
   5116 	if (advertise & 0x10) {
   5117 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5118 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5119 			return (EINVAL);
   5120 		}
   5121 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5122 	}
   5123 	if (advertise & 0x20) {
   5124 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5125 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5126 			return (EINVAL);
   5127 		}
   5128 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5129 	}
   5130 	if (advertise == 0)
   5131 		speed = link_caps; /* All capable link speed */
   5132 
   5133 	hw->mac.autotry_restart = TRUE;
   5134 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5135 	adapter->advertise = advertise;
   5136 
   5137 	return (0);
   5138 } /* ixgbe_set_advertise */
   5139 
   5140 /************************************************************************
   5141  * ixgbe_get_advertise - Get current advertised speed settings
   5142  *
   5143  *   Formatted for sysctl usage.
   5144  *   Flags:
   5145  *     0x01 - advertise 100 Mb
   5146  *     0x02 - advertise 1G
   5147  *     0x04 - advertise 10G
   5148  *     0x08 - advertise 10 Mb (yes, Mb)
   5149  *     0x10 - advertise 2.5G
   5150  *     0x20 - advertise 5G
   5151  ************************************************************************/
   5152 static int
   5153 ixgbe_get_advertise(struct adapter *adapter)
   5154 {
   5155 	struct ixgbe_hw  *hw = &adapter->hw;
   5156 	int              speed;
   5157 	ixgbe_link_speed link_caps = 0;
   5158 	s32              err;
   5159 	bool             negotiate = FALSE;
   5160 
   5161 	/*
   5162 	 * Advertised speed means nothing unless it's copper or
   5163 	 * multi-speed fiber
   5164 	 */
   5165 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5166 	    !(hw->phy.multispeed_fiber))
   5167 		return (0);
   5168 
   5169 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5170 	if (err != IXGBE_SUCCESS)
   5171 		return (0);
   5172 
   5173 	speed =
   5174 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5175 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5176 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5177 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5178 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5179 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5180 
   5181 	return speed;
   5182 } /* ixgbe_get_advertise */
   5183 
   5184 /************************************************************************
   5185  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5186  *
   5187  *   Control values:
   5188  *     0/1 - off / on (use default value of 1000)
   5189  *
   5190  *     Legal timer values are:
   5191  *     50,100,250,500,1000,2000,5000,10000
   5192  *
   5193  *     Turning off interrupt moderation will also turn this off.
   5194  ************************************************************************/
   5195 static int
   5196 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5197 {
   5198 	struct sysctlnode node = *rnode;
   5199 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5200 	struct ifnet   *ifp = adapter->ifp;
   5201 	int            error;
   5202 	int            newval;
   5203 
   5204 	newval = adapter->dmac;
   5205 	node.sysctl_data = &newval;
   5206 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5207 	if ((error) || (newp == NULL))
   5208 		return (error);
   5209 
   5210 	switch (newval) {
   5211 	case 0:
   5212 		/* Disabled */
   5213 		adapter->dmac = 0;
   5214 		break;
   5215 	case 1:
   5216 		/* Enable and use default */
   5217 		adapter->dmac = 1000;
   5218 		break;
   5219 	case 50:
   5220 	case 100:
   5221 	case 250:
   5222 	case 500:
   5223 	case 1000:
   5224 	case 2000:
   5225 	case 5000:
   5226 	case 10000:
   5227 		/* Legal values - allow */
   5228 		adapter->dmac = newval;
   5229 		break;
   5230 	default:
   5231 		/* Do nothing, illegal value */
   5232 		return (EINVAL);
   5233 	}
   5234 
   5235 	/* Re-initialize hardware if it's already running */
   5236 	if (ifp->if_flags & IFF_RUNNING)
   5237 		ifp->if_init(ifp);
   5238 
   5239 	return (0);
   5240 }
   5241 
   5242 #ifdef IXGBE_DEBUG
   5243 /************************************************************************
   5244  * ixgbe_sysctl_power_state
   5245  *
   5246  *   Sysctl to test power states
   5247  *   Values:
   5248  *     0      - set device to D0
   5249  *     3      - set device to D3
   5250  *     (none) - get current device power state
   5251  ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
/*
 * The body below is compiled out: it is a FreeBSD-derived draft that
 * still uses FreeBSD-only interfaces (req->newp, pci_get_powerstate,
 * DEVICE_SUSPEND/DEVICE_RESUME) and would not build on NetBSD as-is.
 * Until it is ported, this sysctl is a no-op that reports success.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t       dev =  adapter->dev;
	int            curr_ps, new_ps, error = 0;

	/* Default the writable value to the current power state. */
	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only the D0 <-> D3 transitions are meaningful here. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
   5284 #endif
   5285 
   5286 /************************************************************************
   5287  * ixgbe_sysctl_wol_enable
   5288  *
   5289  *   Sysctl to enable/disable the WoL capability,
   5290  *   if supported by the adapter.
   5291  *
   5292  *   Values:
   5293  *     0 - disabled
   5294  *     1 - enabled
   5295  ************************************************************************/
   5296 static int
   5297 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5298 {
   5299 	struct sysctlnode node = *rnode;
   5300 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5301 	struct ixgbe_hw *hw = &adapter->hw;
   5302 	bool            new_wol_enabled;
   5303 	int             error = 0;
   5304 
   5305 	new_wol_enabled = hw->wol_enabled;
   5306 	node.sysctl_data = &new_wol_enabled;
   5307 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5308 	if ((error) || (newp == NULL))
   5309 		return (error);
   5310 	if (new_wol_enabled == hw->wol_enabled)
   5311 		return (0);
   5312 
   5313 	if (new_wol_enabled && !adapter->wol_support)
   5314 		return (ENODEV);
   5315 	else
   5316 		hw->wol_enabled = new_wol_enabled;
   5317 
   5318 	return (0);
   5319 } /* ixgbe_sysctl_wol_enable */
   5320 
   5321 /************************************************************************
   5322  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5323  *
   5324  *   Sysctl to enable/disable the types of packets that the
   5325  *   adapter will wake up on upon receipt.
   5326  *   Flags:
   5327  *     0x1  - Link Status Change
   5328  *     0x2  - Magic Packet
   5329  *     0x4  - Direct Exact
   5330  *     0x8  - Directed Multicast
   5331  *     0x10 - Broadcast
   5332  *     0x20 - ARP/IPv4 Request Packet
   5333  *     0x40 - Direct IPv4 Packet
   5334  *     0x80 - Direct IPv6 Packet
   5335  *
   5336  *   Settings not listed above will cause the sysctl to return an error.
   5337  ************************************************************************/
   5338 static int
   5339 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5340 {
   5341 	struct sysctlnode node = *rnode;
   5342 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5343 	int error = 0;
   5344 	u32 new_wufc;
   5345 
   5346 	new_wufc = adapter->wufc;
   5347 	node.sysctl_data = &new_wufc;
   5348 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5349 	if ((error) || (newp == NULL))
   5350 		return (error);
   5351 	if (new_wufc == adapter->wufc)
   5352 		return (0);
   5353 
   5354 	if (new_wufc & 0xffffff00)
   5355 		return (EINVAL);
   5356 
   5357 	new_wufc &= 0xff;
   5358 	new_wufc |= (0xffffff & adapter->wufc);
   5359 	adapter->wufc = new_wufc;
   5360 
   5361 	return (0);
   5362 } /* ixgbe_sysctl_wufc */
   5363 
   5364 #ifdef IXGBE_DEBUG
   5365 /************************************************************************
   5366  * ixgbe_sysctl_print_rss_config
   5367  ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
/*
 * Compiled out: FreeBSD-derived draft relying on the FreeBSD sbuf(9)
 * API and the sysctl 'req' argument, neither of which exists on
 * NetBSD.  Until ported, this sysctl always reports success and
 * prints nothing.
 */
#ifdef notyet
	struct sysctlnode node = *rnode;
	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	struct sbuf     *buf;
	int             error = 0, reta_size;
	u32             reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family exposes 128 RETA entries (RETA + ERETA). */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			/* Entries past 32 live in the extended ERETA bank. */
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	return (0);
} /* ixgbe_sysctl_print_rss_config */
   5421 #endif /* IXGBE_DEBUG */
   5422 
   5423 /************************************************************************
   5424  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5425  *
   5426  *   For X552/X557-AT devices using an external PHY
   5427  ************************************************************************/
   5428 static int
   5429 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5430 {
   5431 	struct sysctlnode node = *rnode;
   5432 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5433 	struct ixgbe_hw *hw = &adapter->hw;
   5434 	int val;
   5435 	u16 reg;
   5436 	int		error;
   5437 
   5438 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5439 		device_printf(adapter->dev,
   5440 		    "Device has no supported external thermal sensor.\n");
   5441 		return (ENODEV);
   5442 	}
   5443 
   5444 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5445 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5446 		device_printf(adapter->dev,
   5447 		    "Error reading from PHY's current temperature register\n");
   5448 		return (EAGAIN);
   5449 	}
   5450 
   5451 	node.sysctl_data = &val;
   5452 
   5453 	/* Shift temp for output */
   5454 	val = reg >> 8;
   5455 
   5456 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5457 	if ((error) || (newp == NULL))
   5458 		return (error);
   5459 
   5460 	return (0);
   5461 } /* ixgbe_sysctl_phy_temp */
   5462 
   5463 /************************************************************************
   5464  * ixgbe_sysctl_phy_overtemp_occurred
   5465  *
   5466  *   Reports (directly from the PHY) whether the current PHY
   5467  *   temperature is over the overtemp threshold.
   5468  ************************************************************************/
   5469 static int
   5470 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5471 {
   5472 	struct sysctlnode node = *rnode;
   5473 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5474 	struct ixgbe_hw *hw = &adapter->hw;
   5475 	int val, error;
   5476 	u16 reg;
   5477 
   5478 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5479 		device_printf(adapter->dev,
   5480 		    "Device has no supported external thermal sensor.\n");
   5481 		return (ENODEV);
   5482 	}
   5483 
   5484 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5485 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5486 		device_printf(adapter->dev,
   5487 		    "Error reading from PHY's temperature status register\n");
   5488 		return (EAGAIN);
   5489 	}
   5490 
   5491 	node.sysctl_data = &val;
   5492 
   5493 	/* Get occurrence bit */
   5494 	val = !!(reg & 0x4000);
   5495 
   5496 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5497 	if ((error) || (newp == NULL))
   5498 		return (error);
   5499 
   5500 	return (0);
   5501 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5502 
   5503 /************************************************************************
   5504  * ixgbe_sysctl_eee_state
   5505  *
   5506  *   Sysctl to set EEE power saving feature
   5507  *   Values:
   5508  *     0      - disable EEE
   5509  *     1      - enable EEE
   5510  *     (none) - get current device EEE state
   5511  ************************************************************************/
   5512 static int
   5513 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5514 {
   5515 	struct sysctlnode node = *rnode;
   5516 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5517 	struct ifnet   *ifp = adapter->ifp;
   5518 	device_t       dev = adapter->dev;
   5519 	int            curr_eee, new_eee, error = 0;
   5520 	s32            retval;
   5521 
   5522 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5523 	node.sysctl_data = &new_eee;
   5524 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5525 	if ((error) || (newp == NULL))
   5526 		return (error);
   5527 
   5528 	/* Nothing to do */
   5529 	if (new_eee == curr_eee)
   5530 		return (0);
   5531 
   5532 	/* Not supported */
   5533 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5534 		return (EINVAL);
   5535 
   5536 	/* Bounds checking */
   5537 	if ((new_eee < 0) || (new_eee > 1))
   5538 		return (EINVAL);
   5539 
   5540 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5541 	if (retval) {
   5542 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5543 		return (EINVAL);
   5544 	}
   5545 
   5546 	/* Restart auto-neg */
   5547 	ifp->if_init(ifp);
   5548 
   5549 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5550 
   5551 	/* Cache new value */
   5552 	if (new_eee)
   5553 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5554 	else
   5555 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5556 
   5557 	return (error);
   5558 } /* ixgbe_sysctl_eee_state */
   5559 
   5560 /************************************************************************
   5561  * ixgbe_init_device_features
   5562  ************************************************************************/
   5563 static void
   5564 ixgbe_init_device_features(struct adapter *adapter)
   5565 {
   5566 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5567 	                  | IXGBE_FEATURE_RSS
   5568 	                  | IXGBE_FEATURE_MSI
   5569 	                  | IXGBE_FEATURE_MSIX
   5570 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5571 	                  | IXGBE_FEATURE_LEGACY_TX;
   5572 
   5573 	/* Set capabilities first... */
   5574 	switch (adapter->hw.mac.type) {
   5575 	case ixgbe_mac_82598EB:
   5576 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5577 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5578 		break;
   5579 	case ixgbe_mac_X540:
   5580 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5581 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5582 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5583 		    (adapter->hw.bus.func == 0))
   5584 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5585 		break;
   5586 	case ixgbe_mac_X550:
   5587 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5588 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5589 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5590 		break;
   5591 	case ixgbe_mac_X550EM_x:
   5592 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5593 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5594 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5595 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5596 		break;
   5597 	case ixgbe_mac_X550EM_a:
   5598 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5599 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5600 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5601 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5602 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5603 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5604 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5605 		}
   5606 		break;
   5607 	case ixgbe_mac_82599EB:
   5608 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5609 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5610 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5611 		    (adapter->hw.bus.func == 0))
   5612 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5613 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5614 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5615 		break;
   5616 	default:
   5617 		break;
   5618 	}
   5619 
   5620 	/* Enabled by default... */
   5621 	/* Fan failure detection */
   5622 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5623 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5624 	/* Netmap */
   5625 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5626 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5627 	/* EEE */
   5628 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5629 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5630 	/* Thermal Sensor */
   5631 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5632 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5633 
   5634 	/* Enabled via global sysctl... */
   5635 	/* Flow Director */
   5636 	if (ixgbe_enable_fdir) {
   5637 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5638 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5639 		else
   5640 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   5641 	}
   5642 	/* Legacy (single queue) transmit */
   5643 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5644 	    ixgbe_enable_legacy_tx)
   5645 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5646 	/*
   5647 	 * Message Signal Interrupts - Extended (MSI-X)
   5648 	 * Normal MSI is only enabled if MSI-X calls fail.
   5649 	 */
   5650 	if (!ixgbe_enable_msix)
   5651 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5652 	/* Receive-Side Scaling (RSS) */
   5653 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5654 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5655 
   5656 	/* Disable features with unmet dependencies... */
   5657 	/* No MSI-X */
   5658 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5659 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5660 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5661 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5662 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5663 	}
   5664 } /* ixgbe_init_device_features */
   5665 
   5666 /************************************************************************
   5667  * ixgbe_probe - Device identification routine
   5668  *
   5669  *   Determines if the driver should be loaded on
   5670  *   adapter based on its PCI vendor/device ID.
   5671  *
   5672  *   return BUS_PROBE_DEFAULT on success, positive on failure
   5673  ************************************************************************/
   5674 static int
   5675 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5676 {
   5677 	const struct pci_attach_args *pa = aux;
   5678 
   5679 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5680 }
   5681 
   5682 static ixgbe_vendor_info_t *
   5683 ixgbe_lookup(const struct pci_attach_args *pa)
   5684 {
   5685 	ixgbe_vendor_info_t *ent;
   5686 	pcireg_t subid;
   5687 
   5688 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5689 
   5690 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5691 		return NULL;
   5692 
   5693 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5694 
   5695 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5696 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5697 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5698 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5699 			(ent->subvendor_id == 0)) &&
   5700 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5701 			(ent->subdevice_id == 0))) {
   5702 			++ixgbe_total_ports;
   5703 			return ent;
   5704 		}
   5705 	}
   5706 	return NULL;
   5707 }
   5708 
   5709 static int
   5710 ixgbe_ifflags_cb(struct ethercom *ec)
   5711 {
   5712 	struct ifnet *ifp = &ec->ec_if;
   5713 	struct adapter *adapter = ifp->if_softc;
   5714 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5715 
   5716 	IXGBE_CORE_LOCK(adapter);
   5717 
   5718 	if (change != 0)
   5719 		adapter->if_flags = ifp->if_flags;
   5720 
   5721 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5722 		rc = ENETRESET;
   5723 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5724 		ixgbe_set_promisc(adapter);
   5725 
   5726 	/* Set up VLAN support and filter */
   5727 	ixgbe_setup_vlan_hw_support(adapter);
   5728 
   5729 	IXGBE_CORE_UNLOCK(adapter);
   5730 
   5731 	return rc;
   5732 }
   5733 
   5734 /************************************************************************
   5735  * ixgbe_ioctl - Ioctl entry point
   5736  *
   5737  *   Called when the user wants to configure the interface.
   5738  *
   5739  *   return 0 on success, positive on failure
   5740  ************************************************************************/
   5741 static int
   5742 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5743 {
   5744 	struct adapter	*adapter = ifp->if_softc;
   5745 	struct ixgbe_hw *hw = &adapter->hw;
   5746 	struct ifcapreq *ifcr = data;
   5747 	struct ifreq	*ifr = data;
   5748 	int             error = 0;
   5749 	int l4csum_en;
   5750 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5751 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5752 
   5753 	switch (command) {
   5754 	case SIOCSIFFLAGS:
   5755 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5756 		break;
   5757 	case SIOCADDMULTI:
   5758 	case SIOCDELMULTI:
   5759 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5760 		break;
   5761 	case SIOCSIFMEDIA:
   5762 	case SIOCGIFMEDIA:
   5763 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5764 		break;
   5765 	case SIOCSIFCAP:
   5766 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5767 		break;
   5768 	case SIOCSIFMTU:
   5769 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5770 		break;
   5771 #ifdef __NetBSD__
   5772 	case SIOCINITIFADDR:
   5773 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5774 		break;
   5775 	case SIOCGIFFLAGS:
   5776 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5777 		break;
   5778 	case SIOCGIFAFLAG_IN:
   5779 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5780 		break;
   5781 	case SIOCGIFADDR:
   5782 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5783 		break;
   5784 	case SIOCGIFMTU:
   5785 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5786 		break;
   5787 	case SIOCGIFCAP:
   5788 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5789 		break;
   5790 	case SIOCGETHERCAP:
   5791 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5792 		break;
   5793 	case SIOCGLIFADDR:
   5794 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5795 		break;
   5796 	case SIOCZIFDATA:
   5797 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5798 		hw->mac.ops.clear_hw_cntrs(hw);
   5799 		ixgbe_clear_evcnt(adapter);
   5800 		break;
   5801 	case SIOCAIFADDR:
   5802 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5803 		break;
   5804 #endif
   5805 	default:
   5806 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5807 		break;
   5808 	}
   5809 
   5810 	switch (command) {
   5811 	case SIOCSIFMEDIA:
   5812 	case SIOCGIFMEDIA:
   5813 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5814 	case SIOCGI2C:
   5815 	{
   5816 		struct ixgbe_i2c_req	i2c;
   5817 
   5818 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5819 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5820 		if (error != 0)
   5821 			break;
   5822 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5823 			error = EINVAL;
   5824 			break;
   5825 		}
   5826 		if (i2c.len > sizeof(i2c.data)) {
   5827 			error = EINVAL;
   5828 			break;
   5829 		}
   5830 
   5831 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5832 		    i2c.dev_addr, i2c.data);
   5833 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5834 		break;
   5835 	}
   5836 	case SIOCSIFCAP:
   5837 		/* Layer-4 Rx checksum offload has to be turned on and
   5838 		 * off as a unit.
   5839 		 */
   5840 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5841 		if (l4csum_en != l4csum && l4csum_en != 0)
   5842 			return EINVAL;
   5843 		/*FALLTHROUGH*/
   5844 	case SIOCADDMULTI:
   5845 	case SIOCDELMULTI:
   5846 	case SIOCSIFFLAGS:
   5847 	case SIOCSIFMTU:
   5848 	default:
   5849 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5850 			return error;
   5851 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5852 			;
   5853 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5854 			IXGBE_CORE_LOCK(adapter);
   5855 			if ((ifp->if_flags & IFF_RUNNING) != 0)
   5856 				ixgbe_init_locked(adapter);
   5857 			ixgbe_recalculate_max_frame(adapter);
   5858 			IXGBE_CORE_UNLOCK(adapter);
   5859 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5860 			/*
   5861 			 * Multicast list has changed; set the hardware filter
   5862 			 * accordingly.
   5863 			 */
   5864 			IXGBE_CORE_LOCK(adapter);
   5865 			ixgbe_disable_intr(adapter);
   5866 			ixgbe_set_multi(adapter);
   5867 			ixgbe_enable_intr(adapter);
   5868 			IXGBE_CORE_UNLOCK(adapter);
   5869 		}
   5870 		return 0;
   5871 	}
   5872 
   5873 	return error;
   5874 } /* ixgbe_ioctl */
   5875 
   5876 /************************************************************************
   5877  * ixgbe_check_fan_failure
   5878  ************************************************************************/
   5879 static void
   5880 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5881 {
   5882 	u32 mask;
   5883 
   5884 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5885 	    IXGBE_ESDP_SDP1;
   5886 
   5887 	if (reg & mask)
   5888 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5889 } /* ixgbe_check_fan_failure */
   5890 
/************************************************************************
 * ixgbe_handle_que
 *
 *   Deferred (softint/workqueue) service routine for one RX/TX queue
 *   pair: drains received packets, completes transmissions, restarts
 *   any queued output, then either reschedules itself (more work
 *   pending) or re-enables the queue's interrupt.
 ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more = false;

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* RX first (lock-free here), then TX under the ring lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Multiqueue TX: push anything waiting in this ring's interq. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Still work to do: run again rather than re-enabling. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* No per-queue vector (INTx/MSI): unmask everything. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
   5931 
   5932 /************************************************************************
   5933  * ixgbe_handle_que_work
   5934  ************************************************************************/
   5935 static void
   5936 ixgbe_handle_que_work(struct work *wk, void *context)
   5937 {
   5938 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   5939 
   5940 	/*
   5941 	 * "enqueued flag" is not required here.
   5942 	 * See ixgbe_msix_que().
   5943 	 */
   5944 	ixgbe_handle_que(que);
   5945 }
   5946 
   5947 /************************************************************************
   5948  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5949  ************************************************************************/
   5950 static int
   5951 ixgbe_allocate_legacy(struct adapter *adapter,
   5952     const struct pci_attach_args *pa)
   5953 {
   5954 	device_t	dev = adapter->dev;
   5955 	struct ix_queue *que = adapter->queues;
   5956 	struct tx_ring  *txr = adapter->tx_rings;
   5957 	int		counts[PCI_INTR_TYPE_SIZE];
   5958 	pci_intr_type_t intr_type, max_type;
   5959 	char            intrbuf[PCI_INTRSTR_LEN];
   5960 	const char	*intrstr = NULL;
   5961 
   5962 	/* We allocate a single interrupt resource */
   5963 	max_type = PCI_INTR_TYPE_MSI;
   5964 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5965 	counts[PCI_INTR_TYPE_MSI] =
   5966 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5967 	/* Check not feat_en but feat_cap to fallback to INTx */
   5968 	counts[PCI_INTR_TYPE_INTX] =
   5969 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5970 
   5971 alloc_retry:
   5972 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5973 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5974 		return ENXIO;
   5975 	}
   5976 	adapter->osdep.nintrs = 1;
   5977 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5978 	    intrbuf, sizeof(intrbuf));
   5979 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5980 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5981 	    device_xname(dev));
   5982 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   5983 	if (adapter->osdep.ihs[0] == NULL) {
   5984 		aprint_error_dev(dev,"unable to establish %s\n",
   5985 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5986 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5987 		adapter->osdep.intrs = NULL;
   5988 		switch (intr_type) {
   5989 		case PCI_INTR_TYPE_MSI:
   5990 			/* The next try is for INTx: Disable MSI */
   5991 			max_type = PCI_INTR_TYPE_INTX;
   5992 			counts[PCI_INTR_TYPE_INTX] = 1;
   5993 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5994 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   5995 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5996 				goto alloc_retry;
   5997 			} else
   5998 				break;
   5999 		case PCI_INTR_TYPE_INTX:
   6000 		default:
   6001 			/* See below */
   6002 			break;
   6003 		}
   6004 	}
   6005 	if (intr_type == PCI_INTR_TYPE_INTX) {
   6006 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6007 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6008 	}
   6009 	if (adapter->osdep.ihs[0] == NULL) {
   6010 		aprint_error_dev(dev,
   6011 		    "couldn't establish interrupt%s%s\n",
   6012 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   6013 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6014 		adapter->osdep.intrs = NULL;
   6015 		return ENXIO;
   6016 	}
   6017 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   6018 	/*
   6019 	 * Try allocating a fast interrupt and the associated deferred
   6020 	 * processing contexts.
   6021 	 */
   6022 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6023 		txr->txr_si =
   6024 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6025 			ixgbe_deferred_mq_start, txr);
   6026 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6027 	    ixgbe_handle_que, que);
   6028 
   6029 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   6030 		& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   6031 		aprint_error_dev(dev,
   6032 		    "could not establish software interrupts\n");
   6033 
   6034 		return ENXIO;
   6035 	}
   6036 	/* For simplicity in the handlers */
   6037 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   6038 
   6039 	return (0);
   6040 } /* ixgbe_allocate_legacy */
   6041 
   6042 /************************************************************************
   6043  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6044  ************************************************************************/
   6045 static int
   6046 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6047 {
   6048 	device_t        dev = adapter->dev;
   6049 	struct 		ix_queue *que = adapter->queues;
   6050 	struct  	tx_ring *txr = adapter->tx_rings;
   6051 	pci_chipset_tag_t pc;
   6052 	char		intrbuf[PCI_INTRSTR_LEN];
   6053 	char		intr_xname[32];
   6054 	char		wqname[MAXCOMLEN];
   6055 	const char	*intrstr = NULL;
   6056 	int 		error, vector = 0;
   6057 	int		cpu_id = 0;
   6058 	kcpuset_t	*affinity;
   6059 #ifdef RSS
   6060 	unsigned int    rss_buckets = 0;
   6061 	kcpuset_t	cpu_mask;
   6062 #endif
   6063 
   6064 	pc = adapter->osdep.pc;
   6065 #ifdef	RSS
   6066 	/*
   6067 	 * If we're doing RSS, the number of queues needs to
   6068 	 * match the number of RSS buckets that are configured.
   6069 	 *
   6070 	 * + If there's more queues than RSS buckets, we'll end
   6071 	 *   up with queues that get no traffic.
   6072 	 *
   6073 	 * + If there's more RSS buckets than queues, we'll end
   6074 	 *   up having multiple RSS buckets map to the same queue,
   6075 	 *   so there'll be some contention.
   6076 	 */
   6077 	rss_buckets = rss_getnumbuckets();
   6078 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6079 	    (adapter->num_queues != rss_buckets)) {
   6080 		device_printf(dev,
   6081 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6082 		    "; performance will be impacted.\n",
   6083 		    __func__, adapter->num_queues, rss_buckets);
   6084 	}
   6085 #endif
   6086 
   6087 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6088 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6089 	    adapter->osdep.nintrs) != 0) {
   6090 		aprint_error_dev(dev,
   6091 		    "failed to allocate MSI-X interrupt\n");
   6092 		return (ENXIO);
   6093 	}
   6094 
   6095 	kcpuset_create(&affinity, false);
   6096 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6097 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6098 		    device_xname(dev), i);
   6099 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6100 		    sizeof(intrbuf));
   6101 #ifdef IXGBE_MPSAFE
   6102 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6103 		    true);
   6104 #endif
   6105 		/* Set the handler function */
   6106 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6107 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6108 		    intr_xname);
   6109 		if (que->res == NULL) {
   6110 			aprint_error_dev(dev,
   6111 			    "Failed to register QUE handler\n");
   6112 			error = ENXIO;
   6113 			goto err_out;
   6114 		}
   6115 		que->msix = vector;
   6116 		adapter->active_queues |= (u64)(1 << que->msix);
   6117 
   6118 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6119 #ifdef	RSS
   6120 			/*
   6121 			 * The queue ID is used as the RSS layer bucket ID.
   6122 			 * We look up the queue ID -> RSS CPU ID and select
   6123 			 * that.
   6124 			 */
   6125 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6126 			CPU_SETOF(cpu_id, &cpu_mask);
   6127 #endif
   6128 		} else {
   6129 			/*
   6130 			 * Bind the MSI-X vector, and thus the
   6131 			 * rings to the corresponding CPU.
   6132 			 *
   6133 			 * This just happens to match the default RSS
   6134 			 * round-robin bucket -> queue -> CPU allocation.
   6135 			 */
   6136 			if (adapter->num_queues > 1)
   6137 				cpu_id = i;
   6138 		}
   6139 		/* Round-robin affinity */
   6140 		kcpuset_zero(affinity);
   6141 		kcpuset_set(affinity, cpu_id % ncpu);
   6142 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6143 		    NULL);
   6144 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6145 		    intrstr);
   6146 		if (error == 0) {
   6147 #if 1 /* def IXGBE_DEBUG */
   6148 #ifdef	RSS
   6149 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6150 			    cpu_id % ncpu);
   6151 #else
   6152 			aprint_normal(", bound queue %d to cpu %d", i,
   6153 			    cpu_id % ncpu);
   6154 #endif
   6155 #endif /* IXGBE_DEBUG */
   6156 		}
   6157 		aprint_normal("\n");
   6158 
   6159 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6160 			txr->txr_si = softint_establish(
   6161 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6162 				ixgbe_deferred_mq_start, txr);
   6163 			if (txr->txr_si == NULL) {
   6164 				aprint_error_dev(dev,
   6165 				    "couldn't establish software interrupt\n");
   6166 				error = ENXIO;
   6167 				goto err_out;
   6168 			}
   6169 		}
   6170 		que->que_si
   6171 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6172 			ixgbe_handle_que, que);
   6173 		if (que->que_si == NULL) {
   6174 			aprint_error_dev(dev,
   6175 			    "couldn't establish software interrupt\n");
   6176 			error = ENXIO;
   6177 			goto err_out;
   6178 		}
   6179 	}
   6180 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6181 	error = workqueue_create(&adapter->txr_wq, wqname,
   6182 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6183 	    IXGBE_WORKQUEUE_FLAGS);
   6184 	if (error) {
   6185 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6186 		goto err_out;
   6187 	}
   6188 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6189 
   6190 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6191 	error = workqueue_create(&adapter->que_wq, wqname,
   6192 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6193 	    IXGBE_WORKQUEUE_FLAGS);
   6194 	if (error) {
   6195 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6196 		goto err_out;
   6197 	}
   6198 
   6199 	/* and Link */
   6200 	cpu_id++;
   6201 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6202 	adapter->vector = vector;
   6203 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6204 	    sizeof(intrbuf));
   6205 #ifdef IXGBE_MPSAFE
   6206 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6207 	    true);
   6208 #endif
   6209 	/* Set the link handler function */
   6210 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6211 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6212 	    intr_xname);
   6213 	if (adapter->osdep.ihs[vector] == NULL) {
   6214 		adapter->res = NULL;
   6215 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6216 		error = ENXIO;
   6217 		goto err_out;
   6218 	}
   6219 	/* Round-robin affinity */
   6220 	kcpuset_zero(affinity);
   6221 	kcpuset_set(affinity, cpu_id % ncpu);
   6222 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6223 	    NULL);
   6224 
   6225 	aprint_normal_dev(dev,
   6226 	    "for link, interrupting at %s", intrstr);
   6227 	if (error == 0)
   6228 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6229 	else
   6230 		aprint_normal("\n");
   6231 
   6232 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6233 		adapter->mbx_si =
   6234 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6235 			ixgbe_handle_mbx, adapter);
   6236 		if (adapter->mbx_si == NULL) {
   6237 			aprint_error_dev(dev,
   6238 			    "could not establish software interrupts\n");
   6239 
   6240 			error = ENXIO;
   6241 			goto err_out;
   6242 		}
   6243 	}
   6244 
   6245 	kcpuset_destroy(affinity);
   6246 	aprint_normal_dev(dev,
   6247 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6248 
   6249 	return (0);
   6250 
   6251 err_out:
   6252 	kcpuset_destroy(affinity);
   6253 	ixgbe_free_softint(adapter);
   6254 	ixgbe_free_pciintr_resources(adapter);
   6255 	return (error);
   6256 } /* ixgbe_allocate_msix */
   6257 
   6258 /************************************************************************
   6259  * ixgbe_configure_interrupts
   6260  *
   6261  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6262  *   This will also depend on user settings.
   6263  ************************************************************************/
   6264 static int
   6265 ixgbe_configure_interrupts(struct adapter *adapter)
   6266 {
   6267 	device_t dev = adapter->dev;
   6268 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6269 	int want, queues, msgs;
   6270 
   6271 	/* Default to 1 queue if MSI-X setup fails */
   6272 	adapter->num_queues = 1;
   6273 
   6274 	/* Override by tuneable */
   6275 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6276 		goto msi;
   6277 
   6278 	/*
   6279 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6280 	 * interrupt slot.
   6281 	 */
   6282 	if (ncpu == 1)
   6283 		goto msi;
   6284 
   6285 	/* First try MSI-X */
   6286 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6287 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6288 	if (msgs < 2)
   6289 		goto msi;
   6290 
   6291 	adapter->msix_mem = (void *)1; /* XXX */
   6292 
   6293 	/* Figure out a reasonable auto config value */
   6294 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6295 
   6296 #ifdef	RSS
   6297 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6298 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6299 		queues = min(queues, rss_getnumbuckets());
   6300 #endif
   6301 	if (ixgbe_num_queues > queues) {
   6302 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6303 		ixgbe_num_queues = queues;
   6304 	}
   6305 
   6306 	if (ixgbe_num_queues != 0)
   6307 		queues = ixgbe_num_queues;
   6308 	else
   6309 		queues = min(queues,
   6310 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6311 
   6312 	/* reflect correct sysctl value */
   6313 	ixgbe_num_queues = queues;
   6314 
   6315 	/*
   6316 	 * Want one vector (RX/TX pair) per queue
   6317 	 * plus an additional for Link.
   6318 	 */
   6319 	want = queues + 1;
   6320 	if (msgs >= want)
   6321 		msgs = want;
   6322 	else {
   6323                	aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6324 		    "%d vectors but %d queues wanted!\n",
   6325 		    msgs, want);
   6326 		goto msi;
   6327 	}
   6328 	adapter->num_queues = queues;
   6329 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6330 	return (0);
   6331 
   6332 	/*
   6333 	 * MSI-X allocation failed or provided us with
   6334 	 * less vectors than needed. Free MSI-X resources
   6335 	 * and we'll try enabling MSI.
   6336 	 */
   6337 msi:
   6338 	/* Without MSI-X, some features are no longer supported */
   6339 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6340 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6341 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6342 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6343 
   6344        	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6345 	adapter->msix_mem = NULL; /* XXX */
   6346 	if (msgs > 1)
   6347 		msgs = 1;
   6348 	if (msgs != 0) {
   6349 		msgs = 1;
   6350 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6351 		return (0);
   6352 	}
   6353 
   6354 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6355 		aprint_error_dev(dev,
   6356 		    "Device does not support legacy interrupts.\n");
   6357 		return 1;
   6358 	}
   6359 
   6360 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6361 
   6362 	return (0);
   6363 } /* ixgbe_configure_interrupts */
   6364 
   6365 
/************************************************************************
 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
 *
 *   Done outside of interrupt context since the driver might sleep
 *
 *   Re-reads link state from the MAC, propagates it to the network
 *   stack, then unmasks the link-status-change (LSC) interrupt.
 ************************************************************************/
static void
ixgbe_handle_link(void *context)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	++adapter->link_sicount.ev_count;
	/* Last argument 0: do not wait for link to complete negotiation. */
	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
	ixgbe_update_link_status(adapter);

	/* Re-enable link interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_link */
   6387 
   6388 /************************************************************************
   6389  * ixgbe_rearm_queues
   6390  ************************************************************************/
   6391 static void
   6392 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6393 {
   6394 	u32 mask;
   6395 
   6396 	switch (adapter->hw.mac.type) {
   6397 	case ixgbe_mac_82598EB:
   6398 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6399 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6400 		break;
   6401 	case ixgbe_mac_82599EB:
   6402 	case ixgbe_mac_X540:
   6403 	case ixgbe_mac_X550:
   6404 	case ixgbe_mac_X550EM_x:
   6405 	case ixgbe_mac_X550EM_a:
   6406 		mask = (queues & 0xFFFFFFFF);
   6407 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6408 		mask = (queues >> 32);
   6409 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6410 		break;
   6411 	default:
   6412 		break;
   6413 	}
   6414 } /* ixgbe_rearm_queues */
   6415