/* ixgbe.c revision 1.140 (NetBSD) -- dev/pci/ixgbe/ixgbe.c */
      1 /* $NetBSD: ixgbe.c,v 1.140 2018/03/30 06:44:30 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "ixgbe_sriov.h"
     74 #include "vlan.h"
     75 
     76 #include <sys/cprng.h>
     77 #include <dev/mii/mii.h>
     78 #include <dev/mii/miivar.h>
     79 
     80 /************************************************************************
     81  * Driver version
     82  ************************************************************************/
     83 char ixgbe_driver_version[] = "3.2.12-k";
     84 
     85 
     86 /************************************************************************
     87  * PCI Device ID Table
     88  *
     89  *   Used by probe to select devices to load on
     90  *   Last field stores an index into ixgbe_strings
     91  *   Last entry must be all 0s
     92  *
     93  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     94  ************************************************************************/
     95 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     96 {
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    140 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    141 	/* required last entry */
    142 	{0, 0, 0, 0, 0}
    143 };
    144 
    145 /************************************************************************
    146  * Table of branding strings
    147  ************************************************************************/
    148 static const char    *ixgbe_strings[] = {
    149 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    150 };
    151 
    152 /************************************************************************
    153  * Function prototypes
    154  ************************************************************************/
    155 static int      ixgbe_probe(device_t, cfdata_t, void *);
    156 static void     ixgbe_attach(device_t, device_t, void *);
    157 static int      ixgbe_detach(device_t, int);
    158 #if 0
    159 static int      ixgbe_shutdown(device_t);
    160 #endif
    161 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    162 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    163 static int	ixgbe_ifflags_cb(struct ethercom *);
    164 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    165 static void	ixgbe_ifstop(struct ifnet *, int);
    166 static int	ixgbe_init(struct ifnet *);
    167 static void	ixgbe_init_locked(struct adapter *);
    168 static void     ixgbe_stop(void *);
    169 static void     ixgbe_init_device_features(struct adapter *);
    170 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    171 static void	ixgbe_add_media_types(struct adapter *);
    172 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    173 static int      ixgbe_media_change(struct ifnet *);
    174 static int      ixgbe_allocate_pci_resources(struct adapter *,
    175 		    const struct pci_attach_args *);
    176 static void     ixgbe_free_softint(struct adapter *);
    177 static void	ixgbe_get_slot_info(struct adapter *);
    178 static int      ixgbe_allocate_msix(struct adapter *,
    179 		    const struct pci_attach_args *);
    180 static int      ixgbe_allocate_legacy(struct adapter *,
    181 		    const struct pci_attach_args *);
    182 static int      ixgbe_configure_interrupts(struct adapter *);
    183 static void	ixgbe_free_pciintr_resources(struct adapter *);
    184 static void	ixgbe_free_pci_resources(struct adapter *);
    185 static void	ixgbe_local_timer(void *);
    186 static void	ixgbe_local_timer1(void *);
    187 static int	ixgbe_setup_interface(device_t, struct adapter *);
    188 static void	ixgbe_config_gpie(struct adapter *);
    189 static void	ixgbe_config_dmac(struct adapter *);
    190 static void	ixgbe_config_delay_values(struct adapter *);
    191 static void	ixgbe_config_link(struct adapter *);
    192 static void	ixgbe_check_wol_support(struct adapter *);
    193 static int	ixgbe_setup_low_power_mode(struct adapter *);
    194 static void	ixgbe_rearm_queues(struct adapter *, u64);
    195 
    196 static void     ixgbe_initialize_transmit_units(struct adapter *);
    197 static void     ixgbe_initialize_receive_units(struct adapter *);
    198 static void	ixgbe_enable_rx_drop(struct adapter *);
    199 static void	ixgbe_disable_rx_drop(struct adapter *);
    200 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    201 
    202 static void     ixgbe_enable_intr(struct adapter *);
    203 static void     ixgbe_disable_intr(struct adapter *);
    204 static void     ixgbe_update_stats_counters(struct adapter *);
    205 static void     ixgbe_set_promisc(struct adapter *);
    206 static void     ixgbe_set_multi(struct adapter *);
    207 static void     ixgbe_update_link_status(struct adapter *);
    208 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    209 static void	ixgbe_configure_ivars(struct adapter *);
    210 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    211 static void	ixgbe_eitr_write(struct ix_queue *, uint32_t);
    212 
    213 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    214 #if 0
    215 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    216 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    217 #endif
    218 
    219 static void	ixgbe_add_device_sysctls(struct adapter *);
    220 static void     ixgbe_add_hw_stats(struct adapter *);
    221 static void	ixgbe_clear_evcnt(struct adapter *);
    222 static int	ixgbe_set_flowcntl(struct adapter *, int);
    223 static int	ixgbe_set_advertise(struct adapter *, int);
    224 static int      ixgbe_get_advertise(struct adapter *);
    225 
    226 /* Sysctl handlers */
    227 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    228 		     const char *, int *, int);
    229 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    231 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    232 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    234 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    235 #ifdef IXGBE_DEBUG
    236 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    237 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    238 #endif
    239 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    240 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    241 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    242 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    243 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    244 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    245 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    246 
    247 /* Support for pluggable optic modules */
    248 static bool	ixgbe_sfp_probe(struct adapter *);
    249 
    250 /* Legacy (single vector) interrupt handler */
    251 static int	ixgbe_legacy_irq(void *);
    252 
    253 /* The MSI/MSI-X Interrupt handlers */
    254 static int	ixgbe_msix_que(void *);
    255 static int	ixgbe_msix_link(void *);
    256 
    257 /* Software interrupts for deferred work */
    258 static void	ixgbe_handle_que(void *);
    259 static void	ixgbe_handle_link(void *);
    260 static void	ixgbe_handle_msf(void *);
    261 static void	ixgbe_handle_mod(void *);
    262 static void	ixgbe_handle_phy(void *);
    263 
    264 /* Workqueue handler for deferred work */
    265 static void	ixgbe_handle_que_work(struct work *, void *);
    266 
    267 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    268 
    269 /************************************************************************
    270  *  NetBSD Device Interface Entry Points
    271  ************************************************************************/
    272 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    273     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    274     DVF_DETACH_SHUTDOWN);
    275 
    276 #if 0
    277 devclass_t ix_devclass;
    278 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    279 
    280 MODULE_DEPEND(ix, pci, 1, 1, 1);
    281 MODULE_DEPEND(ix, ether, 1, 1, 1);
    282 #ifdef DEV_NETMAP
    283 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    284 #endif
    285 #endif
    286 
    287 /*
    288  * TUNEABLE PARAMETERS:
    289  */
    290 
    291 /*
    292  * AIM: Adaptive Interrupt Moderation
    293  * which means that the interrupt rate
    294  * is varied over time based on the
    295  * traffic for that interrupt vector
    296  */
    297 static bool ixgbe_enable_aim = true;
    298 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    299 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    300     "Enable adaptive interrupt moderation");
    301 
    302 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    303 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    304     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
    305 
    306 /* How many packets rxeof tries to clean at a time */
    307 static int ixgbe_rx_process_limit = 256;
    308 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    309     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    310 
    311 /* How many packets txeof tries to clean at a time */
    312 static int ixgbe_tx_process_limit = 256;
    313 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    314     &ixgbe_tx_process_limit, 0,
    315     "Maximum number of sent packets to process at a time, -1 means unlimited");
    316 
    317 /* Flow control setting, default to full */
    318 static int ixgbe_flow_control = ixgbe_fc_full;
    319 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    320     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    321 
    322 /* Which pakcet processing uses workqueue or softint */
    323 static bool ixgbe_txrx_workqueue = false;
    324 
    325 /*
    326  * Smart speed setting, default to on
    327  * this only works as a compile option
    328  * right now as its during attach, set
    329  * this to 'ixgbe_smart_speed_off' to
    330  * disable.
    331  */
    332 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    333 
    334 /*
    335  * MSI-X should be the default for best performance,
    336  * but this allows it to be forced off for testing.
    337  */
    338 static int ixgbe_enable_msix = 1;
    339 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    340     "Enable MSI-X interrupts");
    341 
    342 /*
    343  * Number of Queues, can be set to 0,
    344  * it then autoconfigures based on the
    345  * number of cpus with a max of 8. This
    346  * can be overriden manually here.
    347  */
    348 static int ixgbe_num_queues = 0;
    349 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    350     "Number of queues to configure, 0 indicates autoconfigure");
    351 
    352 /*
    353  * Number of TX descriptors per ring,
    354  * setting higher than RX as this seems
    355  * the better performing choice.
    356  */
    357 static int ixgbe_txd = PERFORM_TXD;
    358 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    359     "Number of transmit descriptors per queue");
    360 
    361 /* Number of RX descriptors per ring */
    362 static int ixgbe_rxd = PERFORM_RXD;
    363 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    364     "Number of receive descriptors per queue");
    365 
    366 /*
    367  * Defining this on will allow the use
    368  * of unsupported SFP+ modules, note that
    369  * doing so you are on your own :)
    370  */
    371 static int allow_unsupported_sfp = false;
    372 #define TUNABLE_INT(__x, __y)
    373 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    374 
    375 /*
    376  * Not sure if Flow Director is fully baked,
    377  * so we'll default to turning it off.
    378  */
    379 static int ixgbe_enable_fdir = 0;
    380 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    381     "Enable Flow Director");
    382 
    383 /* Legacy Transmit (single queue) */
    384 static int ixgbe_enable_legacy_tx = 0;
    385 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    386     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    387 
    388 /* Receive-Side Scaling */
    389 static int ixgbe_enable_rss = 1;
    390 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    391     "Enable Receive-Side Scaling (RSS)");
    392 
    393 /* Keep running tab on them for sanity check */
    394 static int ixgbe_total_ports;
    395 
    396 #if 0
    397 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    398 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    399 #endif
    400 
    401 #ifdef NET_MPSAFE
    402 #define IXGBE_MPSAFE		1
    403 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    404 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    405 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    406 #else
    407 #define IXGBE_CALLOUT_FLAGS	0
    408 #define IXGBE_SOFTINFT_FLAGS	0
    409 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    410 #endif
    411 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    412 
    413 /************************************************************************
    414  * ixgbe_initialize_rss_mapping
    415  ************************************************************************/
    416 static void
    417 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    418 {
    419 	struct ixgbe_hw	*hw = &adapter->hw;
    420 	u32             reta = 0, mrqc, rss_key[10];
    421 	int             queue_id, table_size, index_mult;
    422 	int             i, j;
    423 	u32             rss_hash_config;
    424 
    425 	/* force use default RSS key. */
    426 #ifdef __NetBSD__
    427 	rss_getkey((uint8_t *) &rss_key);
    428 #else
    429 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    430 		/* Fetch the configured RSS key */
    431 		rss_getkey((uint8_t *) &rss_key);
    432 	} else {
    433 		/* set up random bits */
    434 		cprng_fast(&rss_key, sizeof(rss_key));
    435 	}
    436 #endif
    437 
    438 	/* Set multiplier for RETA setup and table size based on MAC */
    439 	index_mult = 0x1;
    440 	table_size = 128;
    441 	switch (adapter->hw.mac.type) {
    442 	case ixgbe_mac_82598EB:
    443 		index_mult = 0x11;
    444 		break;
    445 	case ixgbe_mac_X550:
    446 	case ixgbe_mac_X550EM_x:
    447 	case ixgbe_mac_X550EM_a:
    448 		table_size = 512;
    449 		break;
    450 	default:
    451 		break;
    452 	}
    453 
    454 	/* Set up the redirection table */
    455 	for (i = 0, j = 0; i < table_size; i++, j++) {
    456 		if (j == adapter->num_queues)
    457 			j = 0;
    458 
    459 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    460 			/*
    461 			 * Fetch the RSS bucket id for the given indirection
    462 			 * entry. Cap it at the number of configured buckets
    463 			 * (which is num_queues.)
    464 			 */
    465 			queue_id = rss_get_indirection_to_bucket(i);
    466 			queue_id = queue_id % adapter->num_queues;
    467 		} else
    468 			queue_id = (j * index_mult);
    469 
    470 		/*
    471 		 * The low 8 bits are for hash value (n+0);
    472 		 * The next 8 bits are for hash value (n+1), etc.
    473 		 */
    474 		reta = reta >> 8;
    475 		reta = reta | (((uint32_t) queue_id) << 24);
    476 		if ((i & 3) == 3) {
    477 			if (i < 128)
    478 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    479 			else
    480 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    481 				    reta);
    482 			reta = 0;
    483 		}
    484 	}
    485 
    486 	/* Now fill our hash function seeds */
    487 	for (i = 0; i < 10; i++)
    488 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    489 
    490 	/* Perform hash on these packet types */
    491 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    492 		rss_hash_config = rss_gethashconfig();
    493 	else {
    494 		/*
    495 		 * Disable UDP - IP fragments aren't currently being handled
    496 		 * and so we end up with a mix of 2-tuple and 4-tuple
    497 		 * traffic.
    498 		 */
    499 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    500 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    501 		                | RSS_HASHTYPE_RSS_IPV6
    502 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    503 		                | RSS_HASHTYPE_RSS_IPV6_EX
    504 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    505 	}
    506 
    507 	mrqc = IXGBE_MRQC_RSSEN;
    508 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    509 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    510 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    511 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    512 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    513 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    514 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    515 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    516 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    517 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    518 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    519 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    520 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    521 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    522 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    523 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    524 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    525 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    526 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    527 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    528 } /* ixgbe_initialize_rss_mapping */
    529 
    530 /************************************************************************
    531  * ixgbe_initialize_receive_units - Setup receive registers and features.
    532  ************************************************************************/
    533 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    534 
    535 static void
    536 ixgbe_initialize_receive_units(struct adapter *adapter)
    537 {
    538 	struct	rx_ring	*rxr = adapter->rx_rings;
    539 	struct ixgbe_hw	*hw = &adapter->hw;
    540 	struct ifnet    *ifp = adapter->ifp;
    541 	int             i, j;
    542 	u32		bufsz, fctrl, srrctl, rxcsum;
    543 	u32		hlreg;
    544 
    545 	/*
    546 	 * Make sure receives are disabled while
    547 	 * setting up the descriptor ring
    548 	 */
    549 	ixgbe_disable_rx(hw);
    550 
    551 	/* Enable broadcasts */
    552 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    553 	fctrl |= IXGBE_FCTRL_BAM;
    554 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    555 		fctrl |= IXGBE_FCTRL_DPF;
    556 		fctrl |= IXGBE_FCTRL_PMCF;
    557 	}
    558 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    559 
    560 	/* Set for Jumbo Frames? */
    561 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    562 	if (ifp->if_mtu > ETHERMTU)
    563 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    564 	else
    565 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    566 
    567 #ifdef DEV_NETMAP
    568 	/* CRC stripping is conditional in Netmap */
    569 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    570 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    571 	    !ix_crcstrip)
    572 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    573 	else
    574 #endif /* DEV_NETMAP */
    575 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    576 
    577 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    578 
    579 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    580 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    581 
    582 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    583 		u64 rdba = rxr->rxdma.dma_paddr;
    584 		u32 tqsmreg, reg;
    585 		int regnum = i / 4;	/* 1 register per 4 queues */
    586 		int regshift = i % 4;	/* 4 bits per 1 queue */
    587 		j = rxr->me;
    588 
    589 		/* Setup the Base and Length of the Rx Descriptor Ring */
    590 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    591 		    (rdba & 0x00000000ffffffffULL));
    592 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    593 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    594 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    595 
    596 		/* Set up the SRRCTL register */
    597 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    598 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    599 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    600 		srrctl |= bufsz;
    601 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    602 
    603 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    604 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    605 		reg &= ~(0x000000ff << (regshift * 8));
    606 		reg |= i << (regshift * 8);
    607 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    608 
    609 		/*
    610 		 * Set RQSMR (Receive Queue Statistic Mapping) register.
    611 		 * Register location for queue 0...7 are different between
    612 		 * 82598 and newer.
    613 		 */
    614 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    615 			tqsmreg = IXGBE_TQSMR(regnum);
    616 		else
    617 			tqsmreg = IXGBE_TQSM(regnum);
    618 		reg = IXGBE_READ_REG(hw, tqsmreg);
    619 		reg &= ~(0x000000ff << (regshift * 8));
    620 		reg |= i << (regshift * 8);
    621 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
    622 
    623 		/*
    624 		 * Set DROP_EN iff we have no flow control and >1 queue.
    625 		 * Note that srrctl was cleared shortly before during reset,
    626 		 * so we do not need to clear the bit, but do it just in case
    627 		 * this code is moved elsewhere.
    628 		 */
    629 		if (adapter->num_queues > 1 &&
    630 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    631 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    632 		} else {
    633 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    634 		}
    635 
    636 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    637 
    638 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    639 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    640 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    641 
    642 		/* Set the driver rx tail address */
    643 		rxr->tail =  IXGBE_RDT(rxr->me);
    644 	}
    645 
    646 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    647 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    648 		            | IXGBE_PSRTYPE_UDPHDR
    649 		            | IXGBE_PSRTYPE_IPV4HDR
    650 		            | IXGBE_PSRTYPE_IPV6HDR;
    651 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    652 	}
    653 
    654 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    655 
    656 	ixgbe_initialize_rss_mapping(adapter);
    657 
    658 	if (adapter->num_queues > 1) {
    659 		/* RSS and RX IPP Checksum are mutually exclusive */
    660 		rxcsum |= IXGBE_RXCSUM_PCSD;
    661 	}
    662 
    663 	if (ifp->if_capenable & IFCAP_RXCSUM)
    664 		rxcsum |= IXGBE_RXCSUM_PCSD;
    665 
    666 	/* This is useful for calculating UDP/IP fragment checksums */
    667 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    668 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    669 
    670 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    671 
    672 	return;
    673 } /* ixgbe_initialize_receive_units */
    674 
    675 /************************************************************************
    676  * ixgbe_initialize_transmit_units - Enable transmit units.
    677  ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		int j = txr->me;

		/* Program the DMA base address, low then high 32 bits. */
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		/* Ring length register takes the size in bytes. */
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address; the TX path writes it on every kick */
		txr->tail = IXGBE_TDT(j);

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			/* 82599 and newer moved this register. */
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		/* Enable the global TX DMA engine (not present on 82598). */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter once MTQC is programmed. */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
    747 
    748 /************************************************************************
    749  * ixgbe_attach - Device initialization routine
    750  *
    751  *   Called when the driver is being loaded.
    752  *   Identifies the type of hardware, allocates all resources
    753  *   and initializes the hardware.
    754  *
 *   Does not return a value (NetBSD autoconf attach); failures are
 *   reported via aprint_error_dev() and acquired resources are released.
    756  ************************************************************************/
    757 static void
    758 ixgbe_attach(device_t parent, device_t dev, void *aux)
    759 {
    760 	struct adapter  *adapter;
    761 	struct ixgbe_hw *hw;
    762 	int             error = -1;
    763 	u32		ctrl_ext;
    764 	u16		high, low, nvmreg;
    765 	pcireg_t	id, subid;
    766 	ixgbe_vendor_info_t *ent;
    767 	struct pci_attach_args *pa = aux;
    768 	const char *str;
    769 	char buf[256];
    770 
    771 	INIT_DEBUGOUT("ixgbe_attach: begin");
    772 
    773 	/* Allocate, clear, and link in our adapter structure */
    774 	adapter = device_private(dev);
    775 	adapter->hw.back = adapter;
    776 	adapter->dev = dev;
    777 	hw = &adapter->hw;
    778 	adapter->osdep.pc = pa->pa_pc;
    779 	adapter->osdep.tag = pa->pa_tag;
    780 	if (pci_dma64_available(pa))
    781 		adapter->osdep.dmat = pa->pa_dmat64;
    782 	else
    783 		adapter->osdep.dmat = pa->pa_dmat;
    784 	adapter->osdep.attached = false;
    785 
    786 	ent = ixgbe_lookup(pa);
    787 
    788 	KASSERT(ent != NULL);
    789 
    790 	aprint_normal(": %s, Version - %s\n",
    791 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    792 
    793 	/* Core Lock Init*/
    794 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    795 
    796 	/* Set up the timer callout */
    797 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    798 
    799 	/* Determine hardware revision */
    800 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    801 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    802 
    803 	hw->vendor_id = PCI_VENDOR(id);
    804 	hw->device_id = PCI_PRODUCT(id);
    805 	hw->revision_id =
    806 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    807 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    808 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    809 
    810 	/*
    811 	 * Make sure BUSMASTER is set
    812 	 */
    813 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    814 
    815 	/* Do base PCI setup - map BAR0 */
    816 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    817 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    818 		error = ENXIO;
    819 		goto err_out;
    820 	}
    821 
    822 	/* let hardware know driver is loaded */
    823 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    824 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    825 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    826 
    827 	/*
    828 	 * Initialize the shared code
    829 	 */
    830 	if (ixgbe_init_shared_code(hw)) {
    831 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    832 		error = ENXIO;
    833 		goto err_out;
    834 	}
    835 
    836 	switch (hw->mac.type) {
    837 	case ixgbe_mac_82598EB:
    838 		str = "82598EB";
    839 		break;
    840 	case ixgbe_mac_82599EB:
    841 		str = "82599EB";
    842 		break;
    843 	case ixgbe_mac_X540:
    844 		str = "X540";
    845 		break;
    846 	case ixgbe_mac_X550:
    847 		str = "X550";
    848 		break;
    849 	case ixgbe_mac_X550EM_x:
    850 		str = "X550EM";
    851 		break;
    852 	case ixgbe_mac_X550EM_a:
    853 		str = "X550EM A";
    854 		break;
    855 	default:
    856 		str = "Unknown";
    857 		break;
    858 	}
    859 	aprint_normal_dev(dev, "device %s\n", str);
    860 
    861 	if (hw->mbx.ops.init_params)
    862 		hw->mbx.ops.init_params(hw);
    863 
    864 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    865 
    866 	/* Pick up the 82599 settings */
    867 	if (hw->mac.type != ixgbe_mac_82598EB) {
    868 		hw->phy.smart_speed = ixgbe_smart_speed;
    869 		adapter->num_segs = IXGBE_82599_SCATTER;
    870 	} else
    871 		adapter->num_segs = IXGBE_82598_SCATTER;
    872 
    873 	hw->mac.ops.set_lan_id(hw);
    874 	ixgbe_init_device_features(adapter);
    875 
    876 	if (ixgbe_configure_interrupts(adapter)) {
    877 		error = ENXIO;
    878 		goto err_out;
    879 	}
    880 
    881 	/* Allocate multicast array memory. */
    882 	adapter->mta = malloc(sizeof(*adapter->mta) *
    883 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    884 	if (adapter->mta == NULL) {
    885 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    886 		error = ENOMEM;
    887 		goto err_out;
    888 	}
    889 
    890 	/* Enable WoL (if supported) */
    891 	ixgbe_check_wol_support(adapter);
    892 
    893 	/* Verify adapter fan is still functional (if applicable) */
    894 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    895 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    896 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    897 	}
    898 
    899 	/* Ensure SW/FW semaphore is free */
    900 	ixgbe_init_swfw_semaphore(hw);
    901 
    902 	/* Enable EEE power saving */
    903 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    904 		hw->mac.ops.setup_eee(hw, TRUE);
    905 
    906 	/* Set an initial default flow control value */
    907 	hw->fc.requested_mode = ixgbe_flow_control;
    908 
    909 	/* Sysctls for limiting the amount of work done in the taskqueues */
    910 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    911 	    "max number of rx packets to process",
    912 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    913 
    914 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    915 	    "max number of tx packets to process",
    916 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    917 
    918 	/* Do descriptor calc and sanity checks */
    919 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    920 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    921 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    922 		adapter->num_tx_desc = DEFAULT_TXD;
    923 	} else
    924 		adapter->num_tx_desc = ixgbe_txd;
    925 
    926 	/*
    927 	 * With many RX rings it is easy to exceed the
    928 	 * system mbuf allocation. Tuning nmbclusters
    929 	 * can alleviate this.
    930 	 */
    931 	if (nmbclusters > 0) {
    932 		int s;
    933 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    934 		if (s > nmbclusters) {
    935 			aprint_error_dev(dev, "RX Descriptors exceed "
    936 			    "system mbuf max, using default instead!\n");
    937 			ixgbe_rxd = DEFAULT_RXD;
    938 		}
    939 	}
    940 
    941 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    942 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    943 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    944 		adapter->num_rx_desc = DEFAULT_RXD;
    945 	} else
    946 		adapter->num_rx_desc = ixgbe_rxd;
    947 
    948 	/* Allocate our TX/RX Queues */
    949 	if (ixgbe_allocate_queues(adapter)) {
    950 		error = ENOMEM;
    951 		goto err_out;
    952 	}
    953 
    954 	hw->phy.reset_if_overtemp = TRUE;
    955 	error = ixgbe_reset_hw(hw);
    956 	hw->phy.reset_if_overtemp = FALSE;
    957 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    958 		/*
    959 		 * No optics in this port, set up
    960 		 * so the timer routine will probe
    961 		 * for later insertion.
    962 		 */
    963 		adapter->sfp_probe = TRUE;
    964 		error = IXGBE_SUCCESS;
    965 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    966 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    967 		error = EIO;
    968 		goto err_late;
    969 	} else if (error) {
    970 		aprint_error_dev(dev, "Hardware initialization failed\n");
    971 		error = EIO;
    972 		goto err_late;
    973 	}
    974 
    975 	/* Make sure we have a good EEPROM before we read from it */
    976 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    977 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    978 		error = EIO;
    979 		goto err_late;
    980 	}
    981 
    982 	aprint_normal("%s:", device_xname(dev));
    983 	/* NVM Image Version */
    984 	switch (hw->mac.type) {
    985 	case ixgbe_mac_X540:
    986 	case ixgbe_mac_X550EM_a:
    987 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    988 		if (nvmreg == 0xffff)
    989 			break;
    990 		high = (nvmreg >> 12) & 0x0f;
    991 		low = (nvmreg >> 4) & 0xff;
    992 		id = nvmreg & 0x0f;
    993 		aprint_normal(" NVM Image Version %u.", high);
    994 		if (hw->mac.type == ixgbe_mac_X540)
    995 			str = "%x";
    996 		else
    997 			str = "%02x";
    998 		aprint_normal(str, low);
    999 		aprint_normal(" ID 0x%x,", id);
   1000 		break;
   1001 	case ixgbe_mac_X550EM_x:
   1002 	case ixgbe_mac_X550:
   1003 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
   1004 		if (nvmreg == 0xffff)
   1005 			break;
   1006 		high = (nvmreg >> 12) & 0x0f;
   1007 		low = nvmreg & 0xff;
   1008 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
   1009 		break;
   1010 	default:
   1011 		break;
   1012 	}
   1013 
   1014 	/* PHY firmware revision */
   1015 	switch (hw->mac.type) {
   1016 	case ixgbe_mac_X540:
   1017 	case ixgbe_mac_X550:
   1018 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1019 		if (nvmreg == 0xffff)
   1020 			break;
   1021 		high = (nvmreg >> 12) & 0x0f;
   1022 		low = (nvmreg >> 4) & 0xff;
   1023 		id = nvmreg & 0x000f;
   1024 		aprint_normal(" PHY FW Revision %u.", high);
   1025 		if (hw->mac.type == ixgbe_mac_X540)
   1026 			str = "%x";
   1027 		else
   1028 			str = "%02x";
   1029 		aprint_normal(str, low);
   1030 		aprint_normal(" ID 0x%x,", id);
   1031 		break;
   1032 	default:
   1033 		break;
   1034 	}
   1035 
   1036 	/* NVM Map version & OEM NVM Image version */
   1037 	switch (hw->mac.type) {
   1038 	case ixgbe_mac_X550:
   1039 	case ixgbe_mac_X550EM_x:
   1040 	case ixgbe_mac_X550EM_a:
   1041 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1042 		if (nvmreg != 0xffff) {
   1043 			high = (nvmreg >> 12) & 0x0f;
   1044 			low = nvmreg & 0x00ff;
   1045 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1046 		}
   1047 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1048 		if (nvmreg != 0xffff) {
   1049 			high = (nvmreg >> 12) & 0x0f;
   1050 			low = nvmreg & 0x00ff;
   1051 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1052 			    low);
   1053 		}
   1054 		break;
   1055 	default:
   1056 		break;
   1057 	}
   1058 
   1059 	/* Print the ETrackID */
   1060 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1061 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1062 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1063 
   1064 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1065 		error = ixgbe_allocate_msix(adapter, pa);
   1066 		if (error) {
   1067 			/* Free allocated queue structures first */
   1068 			ixgbe_free_transmit_structures(adapter);
   1069 			ixgbe_free_receive_structures(adapter);
   1070 			free(adapter->queues, M_DEVBUF);
   1071 
   1072 			/* Fallback to legacy interrupt */
   1073 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1074 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1075 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1076 			adapter->num_queues = 1;
   1077 
   1078 			/* Allocate our TX/RX Queues again */
   1079 			if (ixgbe_allocate_queues(adapter)) {
   1080 				error = ENOMEM;
   1081 				goto err_out;
   1082 			}
   1083 		}
   1084 	}
   1085 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1086 		error = ixgbe_allocate_legacy(adapter, pa);
   1087 	if (error)
   1088 		goto err_late;
   1089 
   1090 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1091 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1092 	    ixgbe_handle_link, adapter);
   1093 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1094 	    ixgbe_handle_mod, adapter);
   1095 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1096 	    ixgbe_handle_msf, adapter);
   1097 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1098 	    ixgbe_handle_phy, adapter);
   1099 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1100 		adapter->fdir_si =
   1101 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1102 			ixgbe_reinit_fdir, adapter);
   1103 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1104 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1105 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1106 		&& (adapter->fdir_si == NULL))) {
   1107 		aprint_error_dev(dev,
   1108 		    "could not establish software interrupts ()\n");
   1109 		goto err_out;
   1110 	}
   1111 
   1112 	error = ixgbe_start_hw(hw);
   1113 	switch (error) {
   1114 	case IXGBE_ERR_EEPROM_VERSION:
   1115 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1116 		    "LOM.  Please be aware there may be issues associated "
   1117 		    "with your hardware.\nIf you are experiencing problems "
   1118 		    "please contact your Intel or hardware representative "
   1119 		    "who provided you with this hardware.\n");
   1120 		break;
   1121 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1122 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1123 		error = EIO;
   1124 		goto err_late;
   1125 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1126 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1127 		/* falls thru */
   1128 	default:
   1129 		break;
   1130 	}
   1131 
   1132 	/* Setup OS specific network interface */
   1133 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1134 		goto err_late;
   1135 
   1136 	/*
   1137 	 *  Print PHY ID only for copper PHY. On device which has SFP(+) cage
   1138 	 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
   1139 	 */
   1140 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1141 		uint16_t id1, id2;
   1142 		int oui, model, rev;
   1143 		const char *descr;
   1144 
   1145 		id1 = hw->phy.id >> 16;
   1146 		id2 = hw->phy.id & 0xffff;
   1147 		oui = MII_OUI(id1, id2);
   1148 		model = MII_MODEL(id2);
   1149 		rev = MII_REV(id2);
   1150 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1151 			aprint_normal_dev(dev,
   1152 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1153 			    descr, oui, model, rev);
   1154 		else
   1155 			aprint_normal_dev(dev,
   1156 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1157 			    oui, model, rev);
   1158 	}
   1159 
   1160 	/* Enable the optics for 82599 SFP+ fiber */
   1161 	ixgbe_enable_tx_laser(hw);
   1162 
   1163 	/* Enable power to the phy. */
   1164 	ixgbe_set_phy_power(hw, TRUE);
   1165 
   1166 	/* Initialize statistics */
   1167 	ixgbe_update_stats_counters(adapter);
   1168 
   1169 	/* Check PCIE slot type/speed/width */
   1170 	ixgbe_get_slot_info(adapter);
   1171 
   1172 	/*
   1173 	 * Do time init and sysctl init here, but
   1174 	 * only on the first port of a bypass adapter.
   1175 	 */
   1176 	ixgbe_bypass_init(adapter);
   1177 
   1178 	/* Set an initial dmac value */
   1179 	adapter->dmac = 0;
   1180 	/* Set initial advertised speeds (if applicable) */
   1181 	adapter->advertise = ixgbe_get_advertise(adapter);
   1182 
   1183 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1184 		ixgbe_define_iov_schemas(dev, &error);
   1185 
   1186 	/* Add sysctls */
   1187 	ixgbe_add_device_sysctls(adapter);
   1188 	ixgbe_add_hw_stats(adapter);
   1189 
   1190 	/* For Netmap */
   1191 	adapter->init_locked = ixgbe_init_locked;
   1192 	adapter->stop_locked = ixgbe_stop;
   1193 
   1194 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1195 		ixgbe_netmap_attach(adapter);
   1196 
   1197 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1198 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1199 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1200 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1201 
   1202 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1203 		pmf_class_network_register(dev, adapter->ifp);
   1204 	else
   1205 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1206 
   1207 	INIT_DEBUGOUT("ixgbe_attach: end");
   1208 	adapter->osdep.attached = true;
   1209 
   1210 	return;
   1211 
   1212 err_late:
   1213 	ixgbe_free_transmit_structures(adapter);
   1214 	ixgbe_free_receive_structures(adapter);
   1215 	free(adapter->queues, M_DEVBUF);
   1216 err_out:
   1217 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1218 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1219 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1220 	ixgbe_free_softint(adapter);
   1221 	ixgbe_free_pci_resources(adapter);
   1222 	if (adapter->mta != NULL)
   1223 		free(adapter->mta, M_DEVBUF);
   1224 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1225 
   1226 	return;
   1227 } /* ixgbe_attach */
   1228 
   1229 /************************************************************************
   1230  * ixgbe_check_wol_support
   1231  *
   1232  *   Checks whether the adapter's ports are capable of
   1233  *   Wake On LAN by reading the adapter's NVM.
   1234  *
   1235  *   Sets each port's hw->wol_enabled value depending
   1236  *   on the value read here.
   1237  ************************************************************************/
   1238 static void
   1239 ixgbe_check_wol_support(struct adapter *adapter)
   1240 {
   1241 	struct ixgbe_hw *hw = &adapter->hw;
   1242 	u16             dev_caps = 0;
   1243 
   1244 	/* Find out WoL support for port */
   1245 	adapter->wol_support = hw->wol_enabled = 0;
   1246 	ixgbe_get_device_caps(hw, &dev_caps);
   1247 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1248 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1249 	     hw->bus.func == 0))
   1250 		adapter->wol_support = hw->wol_enabled = 1;
   1251 
   1252 	/* Save initial wake up filter configuration */
   1253 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1254 
   1255 	return;
   1256 } /* ixgbe_check_wol_support */
   1257 
   1258 /************************************************************************
   1259  * ixgbe_setup_interface
   1260  *
   1261  *   Setup networking device structure and register an interface.
   1262  ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* The ifnet lives inside the ethercom; no separate allocation. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_stop = ixgbe_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/* Multiqueue transmit unless the legacy-TX feature is enabled. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixgbe_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixgbe_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Order matters: if_initialize, attach, then if_register. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	if_register(ifp);
	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_RXCSUM
			     |  IFCAP_TXCSUM
			     |  IFCAP_TSOv4
			     |  IFCAP_TSOv6
			     |  IFCAP_LRO;
	/* ifnet capabilities are off by default (ethercap ones are not). */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
	    		    |  ETHERCAP_VLAN_HWCSUM
	    		    |  ETHERCAP_JUMBO_MTU
	    		    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
   1365 
   1366 /************************************************************************
   1367  * ixgbe_add_media_types
   1368  ************************************************************************/
   1369 static void
   1370 ixgbe_add_media_types(struct adapter *adapter)
   1371 {
   1372 	struct ixgbe_hw *hw = &adapter->hw;
   1373 	device_t        dev = adapter->dev;
   1374 	u64             layer;
   1375 
   1376 	layer = adapter->phy_layer;
   1377 
   1378 #define	ADD(mm, dd)							\
   1379 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1380 
   1381 	ADD(IFM_NONE, 0);
   1382 
   1383 	/* Media types with matching NetBSD media defines */
   1384 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1385 		ADD(IFM_10G_T | IFM_FDX, 0);
   1386 	}
   1387 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1388 		ADD(IFM_1000_T | IFM_FDX, 0);
   1389 	}
   1390 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1391 		ADD(IFM_100_TX | IFM_FDX, 0);
   1392 	}
   1393 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1394 		ADD(IFM_10_T | IFM_FDX, 0);
   1395 	}
   1396 
   1397 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1398 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1399 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1400 	}
   1401 
   1402 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1403 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1404 		if (hw->phy.multispeed_fiber) {
   1405 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1406 		}
   1407 	}
   1408 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1409 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1410 		if (hw->phy.multispeed_fiber) {
   1411 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1412 		}
   1413 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1414 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1415 	}
   1416 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1417 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1418 	}
   1419 
   1420 #ifdef IFM_ETH_XTYPE
   1421 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1422 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1423 	}
   1424 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1425 		ADD(AIFM_10G_KX4 | IFM_FDX, 0);
   1426 	}
   1427 #else
   1428 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1429 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1430 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1431 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1432 	}
   1433 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1434 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1435 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1436 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1437 	}
   1438 #endif
   1439 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1440 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1441 	}
   1442 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1443 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1444 	}
   1445 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1446 		ADD(IFM_2500_T | IFM_FDX, 0);
   1447 	}
   1448 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1449 		ADD(IFM_5000_T | IFM_FDX, 0);
   1450 	}
   1451 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1452 		device_printf(dev, "Media supported: 1000baseBX\n");
   1453 	/* XXX no ifmedia_set? */
   1454 
   1455 	ADD(IFM_AUTO, 0);
   1456 
   1457 #undef ADD
   1458 } /* ixgbe_add_media_types */
   1459 
   1460 /************************************************************************
   1461  * ixgbe_is_sfp
   1462  ************************************************************************/
   1463 static inline bool
   1464 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1465 {
   1466 	switch (hw->mac.type) {
   1467 	case ixgbe_mac_82598EB:
   1468 		if (hw->phy.type == ixgbe_phy_nl)
   1469 			return TRUE;
   1470 		return FALSE;
   1471 	case ixgbe_mac_82599EB:
   1472 		switch (hw->mac.ops.get_media_type(hw)) {
   1473 		case ixgbe_media_type_fiber:
   1474 		case ixgbe_media_type_fiber_qsfp:
   1475 			return TRUE;
   1476 		default:
   1477 			return FALSE;
   1478 		}
   1479 	case ixgbe_mac_X550EM_x:
   1480 	case ixgbe_mac_X550EM_a:
   1481 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1482 			return TRUE;
   1483 		return FALSE;
   1484 	default:
   1485 		return FALSE;
   1486 	}
   1487 } /* ixgbe_is_sfp */
   1488 
   1489 /************************************************************************
   1490  * ixgbe_config_link
   1491  ************************************************************************/
   1492 static void
   1493 ixgbe_config_link(struct adapter *adapter)
   1494 {
   1495 	struct ixgbe_hw *hw = &adapter->hw;
   1496 	u32             autoneg, err = 0;
   1497 	bool            sfp, negotiate = false;
   1498 
   1499 	sfp = ixgbe_is_sfp(hw);
   1500 
   1501 	if (sfp) {
   1502 		if (hw->phy.multispeed_fiber) {
   1503 			hw->mac.ops.setup_sfp(hw);
   1504 			ixgbe_enable_tx_laser(hw);
   1505 			kpreempt_disable();
   1506 			softint_schedule(adapter->msf_si);
   1507 			kpreempt_enable();
   1508 		} else {
   1509 			kpreempt_disable();
   1510 			softint_schedule(adapter->mod_si);
   1511 			kpreempt_enable();
   1512 		}
   1513 	} else {
   1514 		if (hw->mac.ops.check_link)
   1515 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1516 			    &adapter->link_up, FALSE);
   1517 		if (err)
   1518 			goto out;
   1519 		autoneg = hw->phy.autoneg_advertised;
   1520 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   1521                 	err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1522 			    &negotiate);
   1523 		if (err)
   1524 			goto out;
   1525 		if (hw->mac.ops.setup_link)
   1526                 	err = hw->mac.ops.setup_link(hw, autoneg,
   1527 			    adapter->link_up);
   1528 	}
   1529 out:
   1530 
   1531 	return;
   1532 } /* ixgbe_config_link */
   1533 
   1534 /************************************************************************
   1535  * ixgbe_update_stats_counters - Update board statistics counters.
   1536  ************************************************************************/
   1537 static void
   1538 ixgbe_update_stats_counters(struct adapter *adapter)
   1539 {
   1540 	struct ifnet          *ifp = adapter->ifp;
   1541 	struct ixgbe_hw       *hw = &adapter->hw;
   1542 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1543 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1544 	u64                   total_missed_rx = 0;
   1545 	uint64_t              crcerrs, rlec;
   1546 
   1547 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1548 	stats->crcerrs.ev_count += crcerrs;
   1549 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1550 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1551 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1552 	if (hw->mac.type == ixgbe_mac_X550)
   1553 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1554 
   1555 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1556 		int j = i % adapter->num_queues;
   1557 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1558 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1559 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1560 	}
   1561 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1562 		uint32_t mp;
   1563 		int j = i % adapter->num_queues;
   1564 
   1565 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1566 		/* global total per queue */
   1567 		stats->mpc[j].ev_count += mp;
   1568 		/* running comprehensive total for stats display */
   1569 		total_missed_rx += mp;
   1570 
   1571 		if (hw->mac.type == ixgbe_mac_82598EB)
   1572 			stats->rnbc[j].ev_count
   1573 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1574 
   1575 	}
   1576 	stats->mpctotal.ev_count += total_missed_rx;
   1577 
   1578 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1579 	if ((adapter->link_active == TRUE)
   1580 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1581 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1582 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1583 	}
   1584 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1585 	stats->rlec.ev_count += rlec;
   1586 
   1587 	/* Hardware workaround, gprc counts missed packets */
   1588 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1589 
   1590 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1591 	stats->lxontxc.ev_count += lxon;
   1592 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1593 	stats->lxofftxc.ev_count += lxoff;
   1594 	total = lxon + lxoff;
   1595 
   1596 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1597 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1598 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1599 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1600 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1601 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1602 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1603 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1604 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1605 	} else {
   1606 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1607 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1608 		/* 82598 only has a counter in the high register */
   1609 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1610 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1611 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1612 	}
   1613 
   1614 	/*
   1615 	 * Workaround: mprc hardware is incorrectly counting
   1616 	 * broadcasts, so for now we subtract those.
   1617 	 */
   1618 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1619 	stats->bprc.ev_count += bprc;
   1620 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1621 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1622 
   1623 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1624 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1625 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1626 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1627 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1628 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1629 
   1630 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1631 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1632 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1633 
   1634 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1635 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1636 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1637 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1638 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1639 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1640 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1641 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1642 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1643 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1644 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1645 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1646 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1647 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1648 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1649 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1650 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1651 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   1652 	/* Only read FCOE on 82599 */
   1653 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1654 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1655 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1656 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1657 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1658 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1659 	}
   1660 
   1661 	/* Fill out the OS statistics structure */
   1662 	/*
   1663 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1664 	 * adapter->stats counters. It's required to make ifconfig -z
   1665 	 * (SOICZIFDATA) work.
   1666 	 */
   1667 	ifp->if_collisions = 0;
   1668 
   1669 	/* Rx Errors */
   1670 	ifp->if_iqdrops += total_missed_rx;
   1671 	ifp->if_ierrors += crcerrs + rlec;
   1672 } /* ixgbe_update_stats_counters */
   1673 
   1674 /************************************************************************
   1675  * ixgbe_add_hw_stats
   1676  *
   1677  *   Add sysctl variables, one per statistic, to the system.
   1678  ************************************************************************/
   1679 static void
   1680 ixgbe_add_hw_stats(struct adapter *adapter)
   1681 {
   1682 	device_t dev = adapter->dev;
   1683 	const struct sysctlnode *rnode, *cnode;
   1684 	struct sysctllog **log = &adapter->sysctllog;
   1685 	struct tx_ring *txr = adapter->tx_rings;
   1686 	struct rx_ring *rxr = adapter->rx_rings;
   1687 	struct ixgbe_hw *hw = &adapter->hw;
   1688 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1689 	const char *xname = device_xname(dev);
   1690 
   1691 	/* Driver Statistics */
   1692 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1693 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1694 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1695 	    NULL, xname, "m_defrag() failed");
   1696 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1697 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1698 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1699 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1700 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1701 	    NULL, xname, "Driver tx dma hard fail other");
   1702 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1703 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1704 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1705 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1706 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1707 	    NULL, xname, "Watchdog timeouts");
   1708 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1709 	    NULL, xname, "TSO errors");
   1710 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1711 	    NULL, xname, "Link MSI-X IRQ Handled");
   1712 	evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
   1713 	    NULL, xname, "Link softint");
   1714 	evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
   1715 	    NULL, xname, "module softint");
   1716 	evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
   1717 	    NULL, xname, "multimode softint");
   1718 	evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
   1719 	    NULL, xname, "external PHY softint");
   1720 
   1721 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1722 #ifdef LRO
   1723 		struct lro_ctrl *lro = &rxr->lro;
   1724 #endif /* LRO */
   1725 
   1726 		snprintf(adapter->queues[i].evnamebuf,
   1727 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1728 		    xname, i);
   1729 		snprintf(adapter->queues[i].namebuf,
   1730 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1731 
   1732 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1733 			aprint_error_dev(dev, "could not create sysctl root\n");
   1734 			break;
   1735 		}
   1736 
   1737 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1738 		    0, CTLTYPE_NODE,
   1739 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1740 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1741 			break;
   1742 
   1743 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1744 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1745 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1746 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1747 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1748 			break;
   1749 
   1750 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1751 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1752 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1753 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1754 		    0, CTL_CREATE, CTL_EOL) != 0)
   1755 			break;
   1756 
   1757 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1758 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1759 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1760 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1761 		    0, CTL_CREATE, CTL_EOL) != 0)
   1762 			break;
   1763 
   1764 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1765 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1766 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   1767 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1768 		    "Handled queue in softint");
   1769 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   1770 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   1771 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1772 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1773 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1774 		    NULL, adapter->queues[i].evnamebuf,
   1775 		    "Queue No Descriptor Available");
   1776 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1777 		    NULL, adapter->queues[i].evnamebuf,
   1778 		    "Queue Packets Transmitted");
   1779 #ifndef IXGBE_LEGACY_TX
   1780 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1781 		    NULL, adapter->queues[i].evnamebuf,
   1782 		    "Packets dropped in pcq");
   1783 #endif
   1784 
   1785 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1786 		    CTLFLAG_READONLY,
   1787 		    CTLTYPE_INT,
   1788 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1789 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1790 		    CTL_CREATE, CTL_EOL) != 0)
   1791 			break;
   1792 
   1793 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1794 		    CTLFLAG_READONLY,
   1795 		    CTLTYPE_INT,
   1796 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1797 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1798 		    CTL_CREATE, CTL_EOL) != 0)
   1799 			break;
   1800 
   1801 		if (i < __arraycount(stats->mpc)) {
   1802 			evcnt_attach_dynamic(&stats->mpc[i],
   1803 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1804 			    "RX Missed Packet Count");
   1805 			if (hw->mac.type == ixgbe_mac_82598EB)
   1806 				evcnt_attach_dynamic(&stats->rnbc[i],
   1807 				    EVCNT_TYPE_MISC, NULL,
   1808 				    adapter->queues[i].evnamebuf,
   1809 				    "Receive No Buffers");
   1810 		}
   1811 		if (i < __arraycount(stats->pxontxc)) {
   1812 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1813 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1814 			    "pxontxc");
   1815 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1816 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1817 			    "pxonrxc");
   1818 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1819 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1820 			    "pxofftxc");
   1821 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1822 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1823 			    "pxoffrxc");
   1824 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1825 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1826 			    "pxon2offc");
   1827 		}
   1828 		if (i < __arraycount(stats->qprc)) {
   1829 			evcnt_attach_dynamic(&stats->qprc[i],
   1830 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1831 			    "qprc");
   1832 			evcnt_attach_dynamic(&stats->qptc[i],
   1833 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1834 			    "qptc");
   1835 			evcnt_attach_dynamic(&stats->qbrc[i],
   1836 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1837 			    "qbrc");
   1838 			evcnt_attach_dynamic(&stats->qbtc[i],
   1839 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1840 			    "qbtc");
   1841 			evcnt_attach_dynamic(&stats->qprdc[i],
   1842 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1843 			    "qprdc");
   1844 		}
   1845 
   1846 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1847 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1848 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1849 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1850 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1851 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1852 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1853 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1854 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1855 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1856 #ifdef LRO
   1857 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1858 				CTLFLAG_RD, &lro->lro_queued, 0,
   1859 				"LRO Queued");
   1860 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1861 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1862 				"LRO Flushed");
   1863 #endif /* LRO */
   1864 	}
   1865 
   1866 	/* MAC stats get their own sub node */
   1867 
   1868 	snprintf(stats->namebuf,
   1869 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1870 
   1871 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1872 	    stats->namebuf, "rx csum offload - IP");
   1873 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1874 	    stats->namebuf, "rx csum offload - L4");
   1875 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1876 	    stats->namebuf, "rx csum offload - IP bad");
   1877 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1878 	    stats->namebuf, "rx csum offload - L4 bad");
   1879 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1880 	    stats->namebuf, "Interrupt conditions zero");
   1881 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1882 	    stats->namebuf, "Legacy interrupts");
   1883 
   1884 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1885 	    stats->namebuf, "CRC Errors");
   1886 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1887 	    stats->namebuf, "Illegal Byte Errors");
   1888 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1889 	    stats->namebuf, "Byte Errors");
   1890 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1891 	    stats->namebuf, "MAC Short Packets Discarded");
   1892 	if (hw->mac.type >= ixgbe_mac_X550)
   1893 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1894 		    stats->namebuf, "Bad SFD");
   1895 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "Total Packets Missed");
   1897 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "MAC Local Faults");
   1899 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1900 	    stats->namebuf, "MAC Remote Faults");
   1901 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Receive Length Errors");
   1903 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "Link XON Transmitted");
   1905 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "Link XON Received");
   1907 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "Link XOFF Transmitted");
   1909 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1910 	    stats->namebuf, "Link XOFF Received");
   1911 
   1912 	/* Packet Reception Stats */
   1913 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "Total Octets Received");
   1915 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1916 	    stats->namebuf, "Good Octets Received");
   1917 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1918 	    stats->namebuf, "Total Packets Received");
   1919 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1920 	    stats->namebuf, "Good Packets Received");
   1921 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1922 	    stats->namebuf, "Multicast Packets Received");
   1923 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1924 	    stats->namebuf, "Broadcast Packets Received");
   1925 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1926 	    stats->namebuf, "64 byte frames received ");
   1927 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1928 	    stats->namebuf, "65-127 byte frames received");
   1929 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1930 	    stats->namebuf, "128-255 byte frames received");
   1931 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1932 	    stats->namebuf, "256-511 byte frames received");
   1933 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1934 	    stats->namebuf, "512-1023 byte frames received");
   1935 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   1936 	    stats->namebuf, "1023-1522 byte frames received");
   1937 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1938 	    stats->namebuf, "Receive Undersized");
   1939 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1940 	    stats->namebuf, "Fragmented Packets Received ");
   1941 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "Oversized Packets Received");
   1943 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "Received Jabber");
   1945 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "Management Packets Received");
   1947 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "Management Packets Dropped");
   1949 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1950 	    stats->namebuf, "Checksum Errors");
   1951 
   1952 	/* Packet Transmission Stats */
   1953 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1954 	    stats->namebuf, "Good Octets Transmitted");
   1955 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1956 	    stats->namebuf, "Total Packets Transmitted");
   1957 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1958 	    stats->namebuf, "Good Packets Transmitted");
   1959 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1960 	    stats->namebuf, "Broadcast Packets Transmitted");
   1961 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1962 	    stats->namebuf, "Multicast Packets Transmitted");
   1963 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1964 	    stats->namebuf, "Management Packets Transmitted");
   1965 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1966 	    stats->namebuf, "64 byte frames transmitted ");
   1967 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1968 	    stats->namebuf, "65-127 byte frames transmitted");
   1969 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1970 	    stats->namebuf, "128-255 byte frames transmitted");
   1971 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1972 	    stats->namebuf, "256-511 byte frames transmitted");
   1973 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1974 	    stats->namebuf, "512-1023 byte frames transmitted");
   1975 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1976 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1977 } /* ixgbe_add_hw_stats */
   1978 
   1979 static void
   1980 ixgbe_clear_evcnt(struct adapter *adapter)
   1981 {
   1982 	struct tx_ring *txr = adapter->tx_rings;
   1983 	struct rx_ring *rxr = adapter->rx_rings;
   1984 	struct ixgbe_hw *hw = &adapter->hw;
   1985 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1986 
   1987 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1988 	adapter->mbuf_defrag_failed.ev_count = 0;
   1989 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1990 	adapter->einval_tx_dma_setup.ev_count = 0;
   1991 	adapter->other_tx_dma_setup.ev_count = 0;
   1992 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1993 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1994 	adapter->tso_err.ev_count = 0;
   1995 	adapter->watchdog_events.ev_count = 0;
   1996 	adapter->link_irq.ev_count = 0;
   1997 	adapter->link_sicount.ev_count = 0;
   1998 	adapter->mod_sicount.ev_count = 0;
   1999 	adapter->msf_sicount.ev_count = 0;
   2000 	adapter->phy_sicount.ev_count = 0;
   2001 
   2002 	txr = adapter->tx_rings;
   2003 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2004 		adapter->queues[i].irqs.ev_count = 0;
   2005 		adapter->queues[i].handleq.ev_count = 0;
   2006 		adapter->queues[i].req.ev_count = 0;
   2007 		txr->no_desc_avail.ev_count = 0;
   2008 		txr->total_packets.ev_count = 0;
   2009 		txr->tso_tx.ev_count = 0;
   2010 #ifndef IXGBE_LEGACY_TX
   2011 		txr->pcq_drops.ev_count = 0;
   2012 #endif
   2013 		txr->q_efbig_tx_dma_setup = 0;
   2014 		txr->q_mbuf_defrag_failed = 0;
   2015 		txr->q_efbig2_tx_dma_setup = 0;
   2016 		txr->q_einval_tx_dma_setup = 0;
   2017 		txr->q_other_tx_dma_setup = 0;
   2018 		txr->q_eagain_tx_dma_setup = 0;
   2019 		txr->q_enomem_tx_dma_setup = 0;
   2020 		txr->q_tso_err = 0;
   2021 
   2022 		if (i < __arraycount(stats->mpc)) {
   2023 			stats->mpc[i].ev_count = 0;
   2024 			if (hw->mac.type == ixgbe_mac_82598EB)
   2025 				stats->rnbc[i].ev_count = 0;
   2026 		}
   2027 		if (i < __arraycount(stats->pxontxc)) {
   2028 			stats->pxontxc[i].ev_count = 0;
   2029 			stats->pxonrxc[i].ev_count = 0;
   2030 			stats->pxofftxc[i].ev_count = 0;
   2031 			stats->pxoffrxc[i].ev_count = 0;
   2032 			stats->pxon2offc[i].ev_count = 0;
   2033 		}
   2034 		if (i < __arraycount(stats->qprc)) {
   2035 			stats->qprc[i].ev_count = 0;
   2036 			stats->qptc[i].ev_count = 0;
   2037 			stats->qbrc[i].ev_count = 0;
   2038 			stats->qbtc[i].ev_count = 0;
   2039 			stats->qprdc[i].ev_count = 0;
   2040 		}
   2041 
   2042 		rxr->rx_packets.ev_count = 0;
   2043 		rxr->rx_bytes.ev_count = 0;
   2044 		rxr->rx_copies.ev_count = 0;
   2045 		rxr->no_jmbuf.ev_count = 0;
   2046 		rxr->rx_discarded.ev_count = 0;
   2047 	}
   2048 	stats->ipcs.ev_count = 0;
   2049 	stats->l4cs.ev_count = 0;
   2050 	stats->ipcs_bad.ev_count = 0;
   2051 	stats->l4cs_bad.ev_count = 0;
   2052 	stats->intzero.ev_count = 0;
   2053 	stats->legint.ev_count = 0;
   2054 	stats->crcerrs.ev_count = 0;
   2055 	stats->illerrc.ev_count = 0;
   2056 	stats->errbc.ev_count = 0;
   2057 	stats->mspdc.ev_count = 0;
   2058 	stats->mbsdc.ev_count = 0;
   2059 	stats->mpctotal.ev_count = 0;
   2060 	stats->mlfc.ev_count = 0;
   2061 	stats->mrfc.ev_count = 0;
   2062 	stats->rlec.ev_count = 0;
   2063 	stats->lxontxc.ev_count = 0;
   2064 	stats->lxonrxc.ev_count = 0;
   2065 	stats->lxofftxc.ev_count = 0;
   2066 	stats->lxoffrxc.ev_count = 0;
   2067 
   2068 	/* Packet Reception Stats */
   2069 	stats->tor.ev_count = 0;
   2070 	stats->gorc.ev_count = 0;
   2071 	stats->tpr.ev_count = 0;
   2072 	stats->gprc.ev_count = 0;
   2073 	stats->mprc.ev_count = 0;
   2074 	stats->bprc.ev_count = 0;
   2075 	stats->prc64.ev_count = 0;
   2076 	stats->prc127.ev_count = 0;
   2077 	stats->prc255.ev_count = 0;
   2078 	stats->prc511.ev_count = 0;
   2079 	stats->prc1023.ev_count = 0;
   2080 	stats->prc1522.ev_count = 0;
   2081 	stats->ruc.ev_count = 0;
   2082 	stats->rfc.ev_count = 0;
   2083 	stats->roc.ev_count = 0;
   2084 	stats->rjc.ev_count = 0;
   2085 	stats->mngprc.ev_count = 0;
   2086 	stats->mngpdc.ev_count = 0;
   2087 	stats->xec.ev_count = 0;
   2088 
   2089 	/* Packet Transmission Stats */
   2090 	stats->gotc.ev_count = 0;
   2091 	stats->tpt.ev_count = 0;
   2092 	stats->gptc.ev_count = 0;
   2093 	stats->bptc.ev_count = 0;
   2094 	stats->mptc.ev_count = 0;
   2095 	stats->mngptc.ev_count = 0;
   2096 	stats->ptc64.ev_count = 0;
   2097 	stats->ptc127.ev_count = 0;
   2098 	stats->ptc255.ev_count = 0;
   2099 	stats->ptc511.ev_count = 0;
   2100 	stats->ptc1023.ev_count = 0;
   2101 	stats->ptc1522.ev_count = 0;
   2102 }
   2103 
   2104 /************************************************************************
   2105  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2106  *
   2107  *   Retrieves the TDH value from the hardware
   2108  ************************************************************************/
   2109 static int
   2110 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2111 {
   2112 	struct sysctlnode node = *rnode;
   2113 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2114 	uint32_t val;
   2115 
   2116 	if (!txr)
   2117 		return (0);
   2118 
   2119 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2120 	node.sysctl_data = &val;
   2121 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2122 } /* ixgbe_sysctl_tdh_handler */
   2123 
   2124 /************************************************************************
   2125  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2126  *
   2127  *   Retrieves the TDT value from the hardware
   2128  ************************************************************************/
   2129 static int
   2130 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2131 {
   2132 	struct sysctlnode node = *rnode;
   2133 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2134 	uint32_t val;
   2135 
   2136 	if (!txr)
   2137 		return (0);
   2138 
   2139 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2140 	node.sysctl_data = &val;
   2141 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2142 } /* ixgbe_sysctl_tdt_handler */
   2143 
   2144 /************************************************************************
   2145  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2146  *
   2147  *   Retrieves the RDH value from the hardware
   2148  ************************************************************************/
   2149 static int
   2150 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2151 {
   2152 	struct sysctlnode node = *rnode;
   2153 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2154 	uint32_t val;
   2155 
   2156 	if (!rxr)
   2157 		return (0);
   2158 
   2159 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2160 	node.sysctl_data = &val;
   2161 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2162 } /* ixgbe_sysctl_rdh_handler */
   2163 
   2164 /************************************************************************
   2165  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2166  *
   2167  *   Retrieves the RDT value from the hardware
   2168  ************************************************************************/
   2169 static int
   2170 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2171 {
   2172 	struct sysctlnode node = *rnode;
   2173 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2174 	uint32_t val;
   2175 
   2176 	if (!rxr)
   2177 		return (0);
   2178 
   2179 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2180 	node.sysctl_data = &val;
   2181 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2182 } /* ixgbe_sysctl_rdt_handler */
   2183 
   2184 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2185 /************************************************************************
   2186  * ixgbe_register_vlan
   2187  *
   2188  *   Run via vlan config EVENT, it enables us to use the
   2189  *   HW Filter table since we can get the vlan id. This
   2190  *   just creates the entry in the soft version of the
   2191  *   VFTA, init will repopulate the real table.
   2192  ************************************************************************/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/*
	 * The VFTA is an array of 32-bit words: vtag >> 5 selects the
	 * word and vtag & 0x1F selects the bit within it.  We update
	 * the soft copy; the hardware table is rewritten from it below.
	 */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_register_vlan */
   2212 
   2213 /************************************************************************
   2214  * ixgbe_unregister_vlan
   2215  *
   2216  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2217  ************************************************************************/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Clear the tag's bit in the soft VFTA (word = vtag>>5, bit = vtag&0x1F) */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixgbe_setup_vlan_hw_support(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_unregister_vlan */
   2238 #endif
   2239 
   2240 static void
   2241 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2242 {
   2243 	struct ethercom *ec = &adapter->osdep.ec;
   2244 	struct ixgbe_hw *hw = &adapter->hw;
   2245 	struct rx_ring	*rxr;
   2246 	int             i;
   2247 	u32		ctrl;
   2248 
   2249 
   2250 	/*
   2251 	 * We get here thru init_locked, meaning
   2252 	 * a soft reset, this has already cleared
   2253 	 * the VFTA and other state, so if there
   2254 	 * have been no vlan's registered do nothing.
   2255 	 */
   2256 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2257 		return;
   2258 
   2259 	/* Setup the queues for vlans */
   2260 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2261 		for (i = 0; i < adapter->num_queues; i++) {
   2262 			rxr = &adapter->rx_rings[i];
   2263 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2264 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2265 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2266 				ctrl |= IXGBE_RXDCTL_VME;
   2267 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2268 			}
   2269 			rxr->vtag_strip = TRUE;
   2270 		}
   2271 	}
   2272 
   2273 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2274 		return;
   2275 	/*
   2276 	 * A soft reset zero's out the VFTA, so
   2277 	 * we need to repopulate it now.
   2278 	 */
   2279 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2280 		if (adapter->shadow_vfta[i] != 0)
   2281 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2282 			    adapter->shadow_vfta[i]);
   2283 
   2284 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2285 	/* Enable the Filter Table if enabled */
   2286 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2287 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2288 		ctrl |= IXGBE_VLNCTRL_VFE;
   2289 	}
   2290 	if (hw->mac.type == ixgbe_mac_82598EB)
   2291 		ctrl |= IXGBE_VLNCTRL_VME;
   2292 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2293 } /* ixgbe_setup_vlan_hw_support */
   2294 
   2295 /************************************************************************
   2296  * ixgbe_get_slot_info
   2297  *
   2298  *   Get the width and transaction speed of
   2299  *   the slot this adapter is plugged into.
   2300  ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct ixgbe_hw		*hw = &adapter->hw;
	u32                   offset;
//	struct ixgbe_mac_info	*mac = &hw->mac;
	u16			link;
	int                   bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		/* Must read the slot info from the parent bridge instead. */
		goto get_parent_info;
	default:
		break;
	}

	/* Common path: let shared code fill in hw->bus from our own device. */
	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Integrated MACs: nothing meaningful to report. */
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	/* Decode link status into hw->bus.speed / hw->bus.width. */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	/* Warn if the slot cannot feed the adapter at full rate. */
	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
			(hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
			(hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card\n     is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
   2403 
   2404 /************************************************************************
   2405  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2406  ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64             queue = (u64)(1ULL << vector);
	u32             mask;

	/*
	 * Enables are reference counted against
	 * ixgbe_disable_queue_internal(): only unmask the hardware
	 * interrupt once the nesting count drops to zero.
	 */
	mutex_enter(&que->dc_mtx);
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: a single 32-bit EIMS covers all queue vectors. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		/* Later MACs: 64-bit mask split across EIMS_EX(0)/(1). */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */
   2433 
   2434 /************************************************************************
   2435  * ixgbe_disable_queue_internal
   2436  ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64             queue = (u64)(1ULL << vector);
	u32             mask;

	mutex_enter(&que->dc_mtx);

	/*
	 * Already masked: if nesting is allowed just bump the refcount
	 * (matched by decrements in ixgbe_enable_queue); either way,
	 * skip the redundant hardware write.
	 */
	if (que->disabled_count > 0) {
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598: a single 32-bit EIMC covers all queue vectors. */
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		/* Later MACs: 64-bit mask split across EIMC_EX(0)/(1). */
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */
   2468 
   2469 /************************************************************************
   2470  * ixgbe_disable_queue
   2471  ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	/* Nestable variant: repeated disables increment the refcount. */
	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
   2478 
   2479 /************************************************************************
   2480  * ixgbe_sched_handle_que - schedule deferred packet processing
   2481  ************************************************************************/
static inline void
ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
{

	if (adapter->txrx_use_workqueue) {
		/*
		 * adapter->que_wq is bound to each CPU instead of each
		 * NIC queue to reduce the number of workqueue kthreads.
		 * Since this function must honor interrupt affinity, the
		 * workqueue kthread must be WQ_PERCPU.  Creating a
		 * WQ_PERCPU workqueue for each NIC queue would yield
		 * (number of used NIC queues) * (number of CPUs) =
		 * (number of CPUs)^2 kthreads in the common case.
		 *
		 * Double-enqueue of the same NIC queue is avoided by
		 * masking that queue's interrupt, and distinct NIC
		 * queues use distinct struct work (que->wq_cookie), so
		 * no explicit "enqueued" flag is needed to guard
		 * against calling workqueue_enqueue() twice.
		 */
		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
	} else {
		/* Legacy path: per-queue softint. */
		softint_schedule(que->que_si);
	}
}
   2508 
   2509 /************************************************************************
   2510  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2511  ************************************************************************/
   2512 static int
   2513 ixgbe_msix_que(void *arg)
   2514 {
   2515 	struct ix_queue	*que = arg;
   2516 	struct adapter  *adapter = que->adapter;
   2517 	struct ifnet    *ifp = adapter->ifp;
   2518 	struct tx_ring	*txr = que->txr;
   2519 	struct rx_ring	*rxr = que->rxr;
   2520 	bool		more;
   2521 	u32		newitr = 0;
   2522 
   2523 	/* Protect against spurious interrupts */
   2524 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2525 		return 0;
   2526 
   2527 	ixgbe_disable_queue(adapter, que->msix);
   2528 	++que->irqs.ev_count;
   2529 
   2530 #ifdef __NetBSD__
   2531 	/* Don't run ixgbe_rxeof in interrupt context */
   2532 	more = true;
   2533 #else
   2534 	more = ixgbe_rxeof(que);
   2535 #endif
   2536 
   2537 	IXGBE_TX_LOCK(txr);
   2538 	ixgbe_txeof(txr);
   2539 	IXGBE_TX_UNLOCK(txr);
   2540 
   2541 	/* Do AIM now? */
   2542 
   2543 	if (adapter->enable_aim == false)
   2544 		goto no_calc;
   2545 	/*
   2546 	 * Do Adaptive Interrupt Moderation:
   2547 	 *  - Write out last calculated setting
   2548 	 *  - Calculate based on average size over
   2549 	 *    the last interval.
   2550 	 */
   2551 	if (que->eitr_setting)
   2552 		ixgbe_eitr_write(que, que->eitr_setting);
   2553 
   2554 	que->eitr_setting = 0;
   2555 
   2556 	/* Idle, do nothing */
   2557         if ((txr->bytes == 0) && (rxr->bytes == 0))
   2558                 goto no_calc;
   2559 
   2560 	if ((txr->bytes) && (txr->packets))
   2561 		newitr = txr->bytes/txr->packets;
   2562 	if ((rxr->bytes) && (rxr->packets))
   2563 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2564 	newitr += 24; /* account for hardware frame, crc */
   2565 
   2566 	/* set an upper boundary */
   2567 	newitr = min(newitr, 3000);
   2568 
   2569 	/* Be nice to the mid range */
   2570 	if ((newitr > 300) && (newitr < 1200))
   2571 		newitr = (newitr / 3);
   2572 	else
   2573 		newitr = (newitr / 2);
   2574 
   2575 	/*
   2576 	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
   2577 	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
   2578 	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
   2579 	 * on 1G and higher.
   2580 	 */
   2581 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2582 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2583 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
   2584 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
   2585 	}
   2586 
   2587         /* save for next interrupt */
   2588         que->eitr_setting = newitr;
   2589 
   2590 	/* Reset state */
   2591 	txr->bytes = 0;
   2592 	txr->packets = 0;
   2593 	rxr->bytes = 0;
   2594 	rxr->packets = 0;
   2595 
   2596 no_calc:
   2597 	if (more)
   2598 		ixgbe_sched_handle_que(adapter, que);
   2599 	else
   2600 		ixgbe_enable_queue(adapter, que->msix);
   2601 
   2602 	return 1;
   2603 } /* ixgbe_msix_que */
   2604 
   2605 /************************************************************************
   2606  * ixgbe_media_status - Media Ioctl callback
   2607  *
   2608  *   Called whenever the user queries the status of
   2609  *   the interface using ifconfig.
   2610  ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link_active/link_speed before reporting. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Map (physical layer, link speed) onto an ifmedia subtype. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
#ifndef IFM_ETH_XTYPE
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
#else
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
#endif
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
   2760 
   2761 /************************************************************************
   2762  * ixgbe_media_change - Media Ioctl callback
   2763  *
   2764  *   Called when the user changes speed/duplex using
   2765  *   media/mediopt option with ifconfig.
   2766  ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter   *adapter = ifp->if_softc;
	struct ifmedia   *ifm = &adapter->media;
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be reconfigured from here. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Advertise everything the hardware can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
#ifndef IFM_ETH_XTYPE
	case IFM_10G_SR: /* KR, too */
	case IFM_10G_CX4: /* KX4 */
#else
	case IFM_10G_KR:
	case IFM_10G_KX4:
#endif
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the new advertised speeds as a bitmask for the
	 * "advertise_speed" sysctl: bit 0 = 100M, 1 = 1G, 2 = 10G,
	 * 3 = 10M, 4 = 2.5G, 5 = 5G.  Zero means autonegotiate.
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
   2865 
   2866 /************************************************************************
   2867  * ixgbe_set_promisc
   2868  ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int          mcnt = 0;
	u32          rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from "no unicast promiscuous". */
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count multicast memberships, capped at the HW limit. */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}
	/* Filter table suffices: drop multicast promiscuous. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* PROMISC implies both unicast and multicast promiscuous. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
   2908 
   2909 /************************************************************************
   2910  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2911  ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		eicr, eicr_mask;
	s32             retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the softint handles it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multi-speed fiber: renegotiate link speed. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
 	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
   3033 
   3034 static void
   3035 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
   3036 {
   3037 	struct adapter *adapter = que->adapter;
   3038 
   3039         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
   3040                 itr |= itr << 16;
   3041         else
   3042                 itr |= IXGBE_EITR_CNT_WDIS;
   3043 
   3044 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   3045 	    itr);
   3046 }
   3047 
   3048 
   3049 /************************************************************************
   3050  * ixgbe_sysctl_interrupt_rate_handler
   3051  ************************************************************************/
   3052 static int
   3053 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   3054 {
   3055 	struct sysctlnode node = *rnode;
   3056 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   3057 	struct adapter  *adapter = que->adapter;
   3058 	uint32_t reg, usec, rate;
   3059 	int error;
   3060 
   3061 	if (que == NULL)
   3062 		return 0;
   3063 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   3064 	usec = ((reg & 0x0FF8) >> 3);
   3065 	if (usec > 0)
   3066 		rate = 500000 / usec;
   3067 	else
   3068 		rate = 0;
   3069 	node.sysctl_data = &rate;
   3070 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3071 	if (error || newp == NULL)
   3072 		return error;
   3073 	reg &= ~0xfff; /* default, no limitation */
   3074 	if (rate > 0 && rate < 500000) {
   3075 		if (rate < 1000)
   3076 			rate = 1000;
   3077 		reg |= ((4000000/rate) & 0xff8);
   3078 		/*
   3079 		 * When RSC is used, ITR interval must be larger than
   3080 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   3081 		 * The minimum value is always greater than 2us on 100M
   3082 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   3083 		 */
   3084 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3085 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3086 			if ((adapter->num_queues > 1)
   3087 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3088 				return EINVAL;
   3089 		}
   3090 		ixgbe_max_interrupt_rate = rate;
   3091 	} else
   3092 		ixgbe_max_interrupt_rate = 0;
   3093 	ixgbe_eitr_write(que, reg);
   3094 
   3095 	return (0);
   3096 } /* ixgbe_sysctl_interrupt_rate_handler */
   3097 
   3098 const struct sysctlnode *
   3099 ixgbe_sysctl_instance(struct adapter *adapter)
   3100 {
   3101 	const char *dvname;
   3102 	struct sysctllog **log;
   3103 	int rc;
   3104 	const struct sysctlnode *rnode;
   3105 
   3106 	if (adapter->sysctltop != NULL)
   3107 		return adapter->sysctltop;
   3108 
   3109 	log = &adapter->sysctllog;
   3110 	dvname = device_xname(adapter->dev);
   3111 
   3112 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3113 	    0, CTLTYPE_NODE, dvname,
   3114 	    SYSCTL_DESCR("ixgbe information and settings"),
   3115 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3116 		goto err;
   3117 
   3118 	return rnode;
   3119 err:
   3120 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3121 	return NULL;
   3122 }
   3123 
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Create the per-device sysctl nodes under the instance root
 *   obtained from ixgbe_sysctl_instance().  A failure to create any
 *   individual node is logged but not fatal: the device simply runs
 *   without that knob.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t               dev = adapter->dev;
	struct ixgbe_hw        *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* Everything below hangs off the per-instance root node. */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	/* Read-only ring/queue geometry. */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed per-device AIM setting from the global default. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed from the global default; per-device override via sysctl. */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* External-PHY knobs live under their own "phy" subtree. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
   3255 
   3256 /************************************************************************
   3257  * ixgbe_allocate_pci_resources
   3258  ************************************************************************/
   3259 static int
   3260 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3261     const struct pci_attach_args *pa)
   3262 {
   3263 	pcireg_t	memtype;
   3264 	device_t dev = adapter->dev;
   3265 	bus_addr_t addr;
   3266 	int flags;
   3267 
   3268 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3269 	switch (memtype) {
   3270 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3271 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3272 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3273 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   3274 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3275 			goto map_err;
   3276 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3277 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3278 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3279 		}
   3280 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3281 		     adapter->osdep.mem_size, flags,
   3282 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3283 map_err:
   3284 			adapter->osdep.mem_size = 0;
   3285 			aprint_error_dev(dev, "unable to map BAR0\n");
   3286 			return ENXIO;
   3287 		}
   3288 		break;
   3289 	default:
   3290 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3291 		return ENXIO;
   3292 	}
   3293 
   3294 	return (0);
   3295 } /* ixgbe_allocate_pci_resources */
   3296 
   3297 static void
   3298 ixgbe_free_softint(struct adapter *adapter)
   3299 {
   3300 	struct ix_queue *que = adapter->queues;
   3301 	struct tx_ring *txr = adapter->tx_rings;
   3302 	int i;
   3303 
   3304 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3305 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3306 			if (txr->txr_si != NULL)
   3307 				softint_disestablish(txr->txr_si);
   3308 		}
   3309 		if (que->que_si != NULL)
   3310 			softint_disestablish(que->que_si);
   3311 	}
   3312 	if (adapter->txr_wq != NULL)
   3313 		workqueue_destroy(adapter->txr_wq);
   3314 	if (adapter->txr_wq_enqueued != NULL)
   3315 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
   3316 	if (adapter->que_wq != NULL)
   3317 		workqueue_destroy(adapter->que_wq);
   3318 
   3319 	/* Drain the Link queue */
   3320 	if (adapter->link_si != NULL) {
   3321 		softint_disestablish(adapter->link_si);
   3322 		adapter->link_si = NULL;
   3323 	}
   3324 	if (adapter->mod_si != NULL) {
   3325 		softint_disestablish(adapter->mod_si);
   3326 		adapter->mod_si = NULL;
   3327 	}
   3328 	if (adapter->msf_si != NULL) {
   3329 		softint_disestablish(adapter->msf_si);
   3330 		adapter->msf_si = NULL;
   3331 	}
   3332 	if (adapter->phy_si != NULL) {
   3333 		softint_disestablish(adapter->phy_si);
   3334 		adapter->phy_si = NULL;
   3335 	}
   3336 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3337 		if (adapter->fdir_si != NULL) {
   3338 			softint_disestablish(adapter->fdir_si);
   3339 			adapter->fdir_si = NULL;
   3340 		}
   3341 	}
   3342 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3343 		if (adapter->mbx_si != NULL) {
   3344 			softint_disestablish(adapter->mbx_si);
   3345 			adapter->mbx_si = NULL;
   3346 		}
   3347 	}
   3348 } /* ixgbe_free_softint */
   3349 
/************************************************************************
 * ixgbe_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* Nothing to tear down if attach bailed out early. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while SR-IOV VFs are still active. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Stop the interface. Callouts are stopped in it. */
	ixgbe_ifstop(adapter->ifp, 1);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0	/* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/* Tear down sysctls and all event counters (mirror of attach). */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/* Per-queue counters; some stats arrays are shorter than
	 * num_queues, hence the __arraycount() bounds checks. */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			evcnt_detach(&stats->pxon2offc[i]);
		}
		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Release descriptor rings, queue state and locks. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
   3542 
/************************************************************************
 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
 *
 *   Prepare the adapter/port for LPLU and/or WoL
 *
 *   Stops the adapter; on X550EM baseT parts also enters Low Power
 *   Link Up via the PHY ops.  Then either powers the PHY down (WoL
 *   disabled) or programs the wakeup filter/control registers (WoL
 *   enabled).  Caller must hold the core lock.
 *
 *   Returns 0 or the error from the PHY enter_lplu op.
 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	s32             error = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY link up across the stop for LPLU entry. */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wakeup state. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
   3602 
/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 *
 *   Compiled out on NetBSD (see the XXX note below): the equivalent
 *   low-power transition is performed by the pmf(9) suspend handler.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
   3622 
   3623 /************************************************************************
   3624  * ixgbe_suspend
   3625  *
   3626  *   From D0 to D3
   3627  ************************************************************************/
   3628 static bool
   3629 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3630 {
   3631 	struct adapter *adapter = device_private(dev);
   3632 	int            error = 0;
   3633 
   3634 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3635 
   3636 	IXGBE_CORE_LOCK(adapter);
   3637 
   3638 	error = ixgbe_setup_low_power_mode(adapter);
   3639 
   3640 	IXGBE_CORE_UNLOCK(adapter);
   3641 
   3642 	return (error);
   3643 } /* ixgbe_suspend */
   3644 
   3645 /************************************************************************
   3646  * ixgbe_resume
   3647  *
   3648  *   From D3 to D0
   3649  ************************************************************************/
   3650 static bool
   3651 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3652 {
   3653 	struct adapter  *adapter = device_private(dev);
   3654 	struct ifnet    *ifp = adapter->ifp;
   3655 	struct ixgbe_hw *hw = &adapter->hw;
   3656 	u32             wus;
   3657 
   3658 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3659 
   3660 	IXGBE_CORE_LOCK(adapter);
   3661 
   3662 	/* Read & clear WUS register */
   3663 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3664 	if (wus)
   3665 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3666 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3667 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3668 	/* And clear WUFC until next low-power transition */
   3669 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3670 
   3671 	/*
   3672 	 * Required after D3->D0 transition;
   3673 	 * will re-advertise all previous advertised speeds
   3674 	 */
   3675 	if (ifp->if_flags & IFF_UP)
   3676 		ixgbe_init_locked(adapter);
   3677 
   3678 	IXGBE_CORE_UNLOCK(adapter);
   3679 
   3680 	return true;
   3681 } /* ixgbe_resume */
   3682 
/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
	/*
	 * NOTE(review): intentionally empty in this port -- presumably
	 * NetBSD tracks offload capabilities through the ifnet capability
	 * flags rather than a FreeBSD-style if_hwassist field; confirm
	 * before implementing.
	 */
}
   3695 
/************************************************************************
 * ixgbe_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init
 *   entry point in network interface structure. It is also
 *   used by the driver as a hw/sw initialization routine to
 *   get to a consistent state.
 *
 *   Caller must hold the core lock.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
	u32             ctrl_ext;
	int             err = 0;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and the watchdog before reconfiguring. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_iov(adapter);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/*
		 * NOTE(review): "j" is shared across all queues, so the
		 * enable-poll budget is 10 x 1ms in total for the whole
		 * loop, not per queue -- presumably intentional (bounds
		 * the overall delay); confirm against upstream.
		 */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                	device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
        	}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */
   3936 
/************************************************************************
 * ixgbe_init
 *
 *   if_init entry point: serialized wrapper that takes the core lock
 *   around ixgbe_init_locked().  Always returns 0.
 ************************************************************************/
static int
ixgbe_init(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
} /* ixgbe_init */
   3951 
   3952 /************************************************************************
   3953  * ixgbe_set_ivar
   3954  *
   3955  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3956  *     (yes this is all very magic and confusing :)
   3957  *    - entry is the register array entry
   3958  *    - vector is the MSI-X vector for this queue
   3959  *    - type is RX/TX/MISC
   3960  ************************************************************************/
   3961 static void
   3962 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3963 {
   3964 	struct ixgbe_hw *hw = &adapter->hw;
   3965 	u32 ivar, index;
   3966 
   3967 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3968 
   3969 	switch (hw->mac.type) {
   3970 	case ixgbe_mac_82598EB:
   3971 		if (type == -1)
   3972 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3973 		else
   3974 			entry += (type * 64);
   3975 		index = (entry >> 2) & 0x1F;
   3976 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3977 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3978 		ivar |= (vector << (8 * (entry & 0x3)));
   3979 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3980 		break;
   3981 	case ixgbe_mac_82599EB:
   3982 	case ixgbe_mac_X540:
   3983 	case ixgbe_mac_X550:
   3984 	case ixgbe_mac_X550EM_x:
   3985 	case ixgbe_mac_X550EM_a:
   3986 		if (type == -1) { /* MISC IVAR */
   3987 			index = (entry & 1) * 8;
   3988 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3989 			ivar &= ~(0xFF << index);
   3990 			ivar |= (vector << index);
   3991 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3992 		} else {	/* RX/TX IVARS */
   3993 			index = (16 * (entry & 1)) + (8 * type);
   3994 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3995 			ivar &= ~(0xFF << index);
   3996 			ivar |= (vector << index);
   3997 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   3998 		}
   3999 		break;
   4000 	default:
   4001 		break;
   4002 	}
   4003 } /* ixgbe_set_ivar */
   4004 
   4005 /************************************************************************
   4006  * ixgbe_configure_ivars
   4007  ************************************************************************/
   4008 static void
   4009 ixgbe_configure_ivars(struct adapter *adapter)
   4010 {
   4011 	struct ix_queue *que = adapter->queues;
   4012 	u32             newitr;
   4013 
   4014 	if (ixgbe_max_interrupt_rate > 0)
   4015 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   4016 	else {
   4017 		/*
   4018 		 * Disable DMA coalescing if interrupt moderation is
   4019 		 * disabled.
   4020 		 */
   4021 		adapter->dmac = 0;
   4022 		newitr = 0;
   4023 	}
   4024 
   4025         for (int i = 0; i < adapter->num_queues; i++, que++) {
   4026 		struct rx_ring *rxr = &adapter->rx_rings[i];
   4027 		struct tx_ring *txr = &adapter->tx_rings[i];
   4028 		/* First the RX queue entry */
   4029                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   4030 		/* ... and the TX */
   4031 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   4032 		/* Set an Initial EITR value */
   4033 		ixgbe_eitr_write(que, newitr);
   4034 		/*
   4035 		 * To eliminate influence of the previous state.
   4036 		 * At this point, Tx/Rx interrupt handler
   4037 		 * (ixgbe_msix_que()) cannot be called, so  both
   4038 		 * IXGBE_TX_LOCK and IXGBE_RX_LOCK are not required.
   4039 		 */
   4040 		que->eitr_setting = 0;
   4041 	}
   4042 
   4043 	/* For the Link interrupt */
   4044         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   4045 } /* ixgbe_configure_ivars */
   4046 
   4047 /************************************************************************
   4048  * ixgbe_config_gpie
   4049  ************************************************************************/
   4050 static void
   4051 ixgbe_config_gpie(struct adapter *adapter)
   4052 {
   4053 	struct ixgbe_hw *hw = &adapter->hw;
   4054 	u32             gpie;
   4055 
   4056 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   4057 
   4058 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   4059 		/* Enable Enhanced MSI-X mode */
   4060 		gpie |= IXGBE_GPIE_MSIX_MODE
   4061 		     |  IXGBE_GPIE_EIAME
   4062 		     |  IXGBE_GPIE_PBA_SUPPORT
   4063 		     |  IXGBE_GPIE_OCD;
   4064 	}
   4065 
   4066 	/* Fan Failure Interrupt */
   4067 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4068 		gpie |= IXGBE_SDP1_GPIEN;
   4069 
   4070 	/* Thermal Sensor Interrupt */
   4071 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   4072 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4073 
   4074 	/* Link detection */
   4075 	switch (hw->mac.type) {
   4076 	case ixgbe_mac_82599EB:
   4077 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   4078 		break;
   4079 	case ixgbe_mac_X550EM_x:
   4080 	case ixgbe_mac_X550EM_a:
   4081 		gpie |= IXGBE_SDP0_GPIEN_X540;
   4082 		break;
   4083 	default:
   4084 		break;
   4085 	}
   4086 
   4087 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   4088 
   4089 } /* ixgbe_config_gpie */
   4090 
   4091 /************************************************************************
   4092  * ixgbe_config_delay_values
   4093  *
   4094  *   Requires adapter->max_frame_size to be set.
   4095  ************************************************************************/
   4096 static void
   4097 ixgbe_config_delay_values(struct adapter *adapter)
   4098 {
   4099 	struct ixgbe_hw *hw = &adapter->hw;
   4100 	u32             rxpb, frame, size, tmp;
   4101 
   4102 	frame = adapter->max_frame_size;
   4103 
   4104 	/* Calculate High Water */
   4105 	switch (hw->mac.type) {
   4106 	case ixgbe_mac_X540:
   4107 	case ixgbe_mac_X550:
   4108 	case ixgbe_mac_X550EM_x:
   4109 	case ixgbe_mac_X550EM_a:
   4110 		tmp = IXGBE_DV_X540(frame, frame);
   4111 		break;
   4112 	default:
   4113 		tmp = IXGBE_DV(frame, frame);
   4114 		break;
   4115 	}
   4116 	size = IXGBE_BT2KB(tmp);
   4117 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4118 	hw->fc.high_water[0] = rxpb - size;
   4119 
   4120 	/* Now calculate Low Water */
   4121 	switch (hw->mac.type) {
   4122 	case ixgbe_mac_X540:
   4123 	case ixgbe_mac_X550:
   4124 	case ixgbe_mac_X550EM_x:
   4125 	case ixgbe_mac_X550EM_a:
   4126 		tmp = IXGBE_LOW_DV_X540(frame);
   4127 		break;
   4128 	default:
   4129 		tmp = IXGBE_LOW_DV(frame);
   4130 		break;
   4131 	}
   4132 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4133 
   4134 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4135 	hw->fc.send_xon = TRUE;
   4136 } /* ixgbe_config_delay_values */
   4137 
   4138 /************************************************************************
   4139  * ixgbe_set_multi - Multicast Update
   4140  *
   4141  *   Called whenever multicast address list is updated.
   4142  ************************************************************************/
/*
 * Rebuild the hardware multicast filter from the stack's current
 * multicast list.  Called with the core lock held whenever the list
 * changes.  Falls back to ALLMULTI when the list overflows or contains
 * an address range, and mirrors PROMISC/ALLMULTI into FCTRL.
 */
static void
ixgbe_set_multi(struct adapter *adapter)
{
	struct ixgbe_mc_addr	*mta;
	struct ifnet		*ifp = adapter->ifp;
	u8			*update_ptr;
	int			mcnt = 0;
	u32			fctrl;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	/* Start from an empty table on every update. */
	mta = adapter->mta;
	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);

	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Switch to ALLMULTI if the table is full or an address
		 * range (addrlo != addrhi) is requested — ranges cannot
		 * be expressed with exact-match filters.
		 */
		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo,
		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
		/* All entries go to this adapter's VMDq pool. */
		mta[mcnt].vmdq = adapter->pool;
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* Mirror PROMISC/ALLMULTI into the filter control register. */
	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/*
	 * Program the exact-match filters only if the whole list fit.
	 * NOTE(review): when the list contains exactly
	 * MAX_NUM_MULTICAST_ADDRESSES entries the loop ends with
	 * mcnt == MAX and neither ALLMULTI nor the filter update runs —
	 * verify this edge case against upstream.
	 */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	}

} /* ixgbe_set_multi */
   4196 
   4197 /************************************************************************
   4198  * ixgbe_mc_array_itr
   4199  *
   4200  *   An iterator function needed by the multicast shared code.
   4201  *   It feeds the shared code routine the addresses in the
   4202  *   array of ixgbe_set_multi() one by one.
   4203  ************************************************************************/
   4204 static u8 *
   4205 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4206 {
   4207 	struct ixgbe_mc_addr *mta;
   4208 
   4209 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4210 	*vmdq = mta->vmdq;
   4211 
   4212 	*update_ptr = (u8*)(mta + 1);
   4213 
   4214 	return (mta->addr);
   4215 } /* ixgbe_mc_array_itr */
   4216 
   4217 /************************************************************************
   4218  * ixgbe_local_timer - Timer routine
   4219  *
   4220  *   Checks for link status, updates statistics,
   4221  *   and runs the watchdog check.
   4222  ************************************************************************/
/* Callout entry point: take the core lock and run the timer body. */
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_local_timer1(sc);
	IXGBE_CORE_UNLOCK(sc);
}
   4232 
/*
 * Once-per-second timer body (core lock held): refreshes link state
 * and statistics, aggregates per-queue Tx error counters, detects hung
 * Tx queues, and either rearms queues with pending work or resets the
 * adapter when every queue is hung.  Reschedules itself via callout.
 */
static void
ixgbe_local_timer1(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues with work */
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;	/* number of hung queues */
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Sum each kind of per-queue Tx setup error into its
	 * adapter-wide event counter (v0..v7, one per counter).
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring  *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			/* Skip queues whose interrupt is held disabled. */
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}

out:
	/* Rearm the one-second callout. */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: count the event and reinitialize the adapter. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
   4335 
   4336 /************************************************************************
   4337  * ixgbe_sfp_probe
   4338  *
   4339  *   Determine if a port had optics inserted.
   4340  ************************************************************************/
   4341 static bool
   4342 ixgbe_sfp_probe(struct adapter *adapter)
   4343 {
   4344 	struct ixgbe_hw	*hw = &adapter->hw;
   4345 	device_t	dev = adapter->dev;
   4346 	bool		result = FALSE;
   4347 
   4348 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4349 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4350 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4351 		if (ret)
   4352 			goto out;
   4353 		ret = hw->phy.ops.reset(hw);
   4354 		adapter->sfp_probe = FALSE;
   4355 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4356 			device_printf(dev,"Unsupported SFP+ module detected!");
   4357 			device_printf(dev,
   4358 			    "Reload driver with supported module.\n");
   4359                         goto out;
   4360 		} else
   4361 			device_printf(dev, "SFP+ module detected!\n");
   4362 		/* We now have supported optics */
   4363 		result = TRUE;
   4364 	}
   4365 out:
   4366 
   4367 	return (result);
   4368 } /* ixgbe_sfp_probe */
   4369 
   4370 /************************************************************************
   4371  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4372  ************************************************************************/
/*
 * Softint handler for SFP module-insertion interrupts: identifies the
 * new module, configures the MAC for it, then hands off to the MSF
 * (multispeed fiber) handler to set up the link.
 */
static void
ixgbe_handle_mod(void *context)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32             err, cage_full = 0;

	++adapter->mod_sicount.ev_count;
	/*
	 * On parts needing the crosstalk fix, a spurious module
	 * interrupt can fire with an empty cage; read the cage-present
	 * SDP pin and bail out if nothing is actually inserted.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	err = hw->mac.ops.setup_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Module accepted: let the MSF softint bring the link up. */
	softint_schedule(adapter->msf_si);
} /* ixgbe_handle_mod */
   4416 
   4417 
   4418 /************************************************************************
   4419  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4420  ************************************************************************/
/*
 * Softint handler for MSF (multispeed fiber) interrupts: re-reads the
 * supported PHY layer, (re)negotiates and sets up the link, and
 * rebuilds the ifmedia list to match the new module's capabilities.
 */
static void
ixgbe_handle_msf(void *context)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg;
	bool            negotiate;

	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	/* If nothing is advertised, ask the MAC what it can do. */
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
   4446 
   4447 /************************************************************************
   4448  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4449  ************************************************************************/
   4450 static void
   4451 ixgbe_handle_phy(void *context)
   4452 {
   4453 	struct adapter  *adapter = context;
   4454 	struct ixgbe_hw *hw = &adapter->hw;
   4455 	int error;
   4456 
   4457 	++adapter->phy_sicount.ev_count;
   4458 	error = hw->phy.ops.handle_lasi(hw);
   4459 	if (error == IXGBE_ERR_OVERTEMP)
   4460 		device_printf(adapter->dev,
   4461 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4462 		    " PHY will downshift to lower power state!\n");
   4463 	else if (error)
   4464 		device_printf(adapter->dev,
   4465 		    "Error handling LASI interrupt: %d\n", error);
   4466 } /* ixgbe_handle_phy */
   4467 
   4468 static void
   4469 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4470 {
   4471 	struct adapter *adapter = ifp->if_softc;
   4472 
   4473 	IXGBE_CORE_LOCK(adapter);
   4474 	ixgbe_stop(adapter);
   4475 	IXGBE_CORE_UNLOCK(adapter);
   4476 }
   4477 
   4478 /************************************************************************
   4479  * ixgbe_stop - Stop the hardware
   4480  *
   4481  *   Disables all traffic on the adapter by issuing a
   4482  *   global reset on the MAC and deallocates TX/RX buffers.
   4483  ************************************************************************/
/*
 * Stop the hardware (core lock held): mask interrupts, stop the timer,
 * reset the MAC, and mark the interface down.  TX/RX buffers remain
 * allocated; they are reused on the next ixgbe_init_locked().
 */
static void
ixgbe_stop(void *arg)
{
	struct ifnet    *ifp;
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so that ixgbe_stop_adapter() below
	 * actually performs the stop sequence (it is a no-op when the
	 * flag is already set).
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	/* 82599 needs the link forced down before entering D3. */
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
   4519 
   4520 /************************************************************************
   4521  * ixgbe_update_link_status - Update OS on link state
   4522  *
   4523  * Note: Only updates the OS on the cached link state.
   4524  *       The real check of the hardware only happens with
   4525  *       a link interrupt.
   4526  ************************************************************************/
/*
 * Propagate the cached link state (adapter->link_up, set by the link
 * interrupt path) to the network stack, print transitions, and apply
 * link-dependent configuration (flow control, DMA coalescing).
 * Core lock must be held; no hardware link query is done here.
 */
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on a down -> up transition. */
		if (adapter->link_active == FALSE) {
			/*
			 * To eliminate influence of the previous state
			 * in the same way as ixgbe_init_locked().
			 */
			struct ix_queue	*que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
				/*
				 *  Discard count for both MAC Local Fault and
				 * Remote Fault because those registers are
				 * valid only when the link speed is up and
				 * 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			/* Log the negotiated speed when booting verbosely. */
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition. */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}
} /* ixgbe_update_link_status */
   4606 
   4607 /************************************************************************
   4608  * ixgbe_config_dmac - Configure DMA Coalescing
   4609  ************************************************************************/
   4610 static void
   4611 ixgbe_config_dmac(struct adapter *adapter)
   4612 {
   4613 	struct ixgbe_hw *hw = &adapter->hw;
   4614 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4615 
   4616 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4617 		return;
   4618 
   4619 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4620 	    dcfg->link_speed ^ adapter->link_speed) {
   4621 		dcfg->watchdog_timer = adapter->dmac;
   4622 		dcfg->fcoe_en = false;
   4623 		dcfg->link_speed = adapter->link_speed;
   4624 		dcfg->num_tcs = 1;
   4625 
   4626 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4627 		    dcfg->watchdog_timer, dcfg->link_speed);
   4628 
   4629 		hw->mac.ops.dmac_config(hw);
   4630 	}
   4631 } /* ixgbe_config_dmac */
   4632 
   4633 /************************************************************************
   4634  * ixgbe_enable_intr
   4635  ************************************************************************/
/*
 * Unmask interrupts: build the EIMS mask of misc causes for this MAC
 * generation and enabled features, configure MSI-X auto-clear, then
 * enable every queue vector individually.
 */
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start with everything but the queue causes (enabled below). */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
        for (int i = 0; i < adapter->num_queues; i++, que++)
                ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_enable_intr */
   4717 
   4718 /************************************************************************
   4719  * ixgbe_disable_intr_internal
   4720  ************************************************************************/
   4721 static void
   4722 ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
   4723 {
   4724 	struct ix_queue	*que = adapter->queues;
   4725 
   4726 	/* disable interrupts other than queues */
   4727 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
   4728 
   4729 	if (adapter->msix_mem)
   4730 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4731 
   4732 	for (int i = 0; i < adapter->num_queues; i++, que++)
   4733 		ixgbe_disable_queue_internal(adapter, que->msix, nestok);
   4734 
   4735 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4736 
   4737 } /* ixgbe_do_disable_intr_internal */
   4738 
   4739 /************************************************************************
   4740  * ixgbe_disable_intr
   4741  ************************************************************************/
   4742 static void
   4743 ixgbe_disable_intr(struct adapter *adapter)
   4744 {
   4745 
   4746 	ixgbe_disable_intr_internal(adapter, true);
   4747 } /* ixgbe_disable_intr */
   4748 
   4749 /************************************************************************
   4750  * ixgbe_ensure_disabled_intr
   4751  ************************************************************************/
   4752 void
   4753 ixgbe_ensure_disabled_intr(struct adapter *adapter)
   4754 {
   4755 
   4756 	ixgbe_disable_intr_internal(adapter, false);
   4757 } /* ixgbe_ensure_disabled_intr */
   4758 
   4759 /************************************************************************
   4760  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4761  ************************************************************************/
/*
 * Legacy (INTx/MSI) interrupt service routine: handles queue 0 Tx/Rx,
 * fan failure, link changes, SFP module/MSF events, and external PHY
 * interrupts, deferring heavier work to softints.  Returns 1 when the
 * interrupt was ours, 0 otherwise.
 */
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct 		tx_ring *txr = adapter->tx_rings;
	bool		more = false;
	u32             eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR also clears the pending causes. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		/* Not our interrupt (shared line); re-enable and pass. */
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		softint_schedule(adapter->link_si);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: defer to the mod softint. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* Multispeed-fiber event (82599 only): defer to msf. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		softint_schedule(adapter->phy_si);

	/* Deferred Rx work pending: schedule the queue softint instead
	 * of re-enabling, so the handler re-enables when done. */
	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */
   4847 
   4848 /************************************************************************
   4849  * ixgbe_free_pciintr_resources
   4850  ************************************************************************/
   4851 static void
   4852 ixgbe_free_pciintr_resources(struct adapter *adapter)
   4853 {
   4854 	struct ix_queue *que = adapter->queues;
   4855 	int		rid;
   4856 
   4857 	/*
   4858 	 * Release all msix queue resources:
   4859 	 */
   4860 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4861 		if (que->res != NULL) {
   4862 			pci_intr_disestablish(adapter->osdep.pc,
   4863 			    adapter->osdep.ihs[i]);
   4864 			adapter->osdep.ihs[i] = NULL;
   4865 		}
   4866 	}
   4867 
   4868 	/* Clean the Legacy or Link interrupt last */
   4869 	if (adapter->vector) /* we are doing MSIX */
   4870 		rid = adapter->vector;
   4871 	else
   4872 		rid = 0;
   4873 
   4874 	if (adapter->osdep.ihs[rid] != NULL) {
   4875 		pci_intr_disestablish(adapter->osdep.pc,
   4876 		    adapter->osdep.ihs[rid]);
   4877 		adapter->osdep.ihs[rid] = NULL;
   4878 	}
   4879 
   4880 	if (adapter->osdep.intrs != NULL) {
   4881 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4882 		    adapter->osdep.nintrs);
   4883 		adapter->osdep.intrs = NULL;
   4884 	}
   4885 
   4886 	return;
   4887 } /* ixgbe_free_pciintr_resources */
   4888 
   4889 /************************************************************************
   4890  * ixgbe_free_pci_resources
   4891  ************************************************************************/
   4892 static void
   4893 ixgbe_free_pci_resources(struct adapter *adapter)
   4894 {
   4895 
   4896 	ixgbe_free_pciintr_resources(adapter);
   4897 
   4898 	if (adapter->osdep.mem_size != 0) {
   4899 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4900 		    adapter->osdep.mem_bus_space_handle,
   4901 		    adapter->osdep.mem_size);
   4902 	}
   4903 
   4904 	return;
   4905 } /* ixgbe_free_pci_resources */
   4906 
   4907 /************************************************************************
   4908  * ixgbe_set_sysctl_value
   4909  ************************************************************************/
   4910 static void
   4911 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4912     const char *description, int *limit, int value)
   4913 {
   4914 	device_t dev =  adapter->dev;
   4915 	struct sysctllog **log;
   4916 	const struct sysctlnode *rnode, *cnode;
   4917 
   4918 	log = &adapter->sysctllog;
   4919 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4920 		aprint_error_dev(dev, "could not create sysctl root\n");
   4921 		return;
   4922 	}
   4923 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4924 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4925 	    name, SYSCTL_DESCR(description),
   4926 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4927 		aprint_error_dev(dev, "could not create sysctl\n");
   4928 	*limit = value;
   4929 } /* ixgbe_set_sysctl_value */
   4930 
   4931 /************************************************************************
   4932  * ixgbe_sysctl_flowcntl
   4933  *
   4934  *   SYSCTL wrapper around setting Flow Control
   4935  ************************************************************************/
   4936 static int
   4937 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4938 {
   4939 	struct sysctlnode node = *rnode;
   4940 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4941 	int error, fc;
   4942 
   4943 	fc = adapter->hw.fc.current_mode;
   4944 	node.sysctl_data = &fc;
   4945 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4946 	if (error != 0 || newp == NULL)
   4947 		return error;
   4948 
   4949 	/* Don't bother if it's not changed */
   4950 	if (fc == adapter->hw.fc.current_mode)
   4951 		return (0);
   4952 
   4953 	return ixgbe_set_flowcntl(adapter, fc);
   4954 } /* ixgbe_sysctl_flowcntl */
   4955 
   4956 /************************************************************************
   4957  * ixgbe_set_flowcntl - Set flow control
   4958  *
   4959  *   Flow control values:
   4960  *     0 - off
   4961  *     1 - rx pause
   4962  *     2 - tx pause
   4963  *     3 - full
   4964  ************************************************************************/
   4965 static int
   4966 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4967 {
   4968 	switch (fc) {
   4969 		case ixgbe_fc_rx_pause:
   4970 		case ixgbe_fc_tx_pause:
   4971 		case ixgbe_fc_full:
   4972 			adapter->hw.fc.requested_mode = fc;
   4973 			if (adapter->num_queues > 1)
   4974 				ixgbe_disable_rx_drop(adapter);
   4975 			break;
   4976 		case ixgbe_fc_none:
   4977 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4978 			if (adapter->num_queues > 1)
   4979 				ixgbe_enable_rx_drop(adapter);
   4980 			break;
   4981 		default:
   4982 			return (EINVAL);
   4983 	}
   4984 
   4985 #if 0 /* XXX NetBSD */
   4986 	/* Don't autoneg if forcing a value */
   4987 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4988 #endif
   4989 	ixgbe_fc_enable(&adapter->hw);
   4990 
   4991 	return (0);
   4992 } /* ixgbe_set_flowcntl */
   4993 
   4994 /************************************************************************
   4995  * ixgbe_enable_rx_drop
   4996  *
   4997  *   Enable the hardware to drop packets when the buffer is
   4998  *   full. This is useful with multiqueue, so that no single
   4999  *   queue being full stalls the entire RX engine. We only
   5000  *   enable this when Multiqueue is enabled AND Flow Control
   5001  *   is disabled.
   5002  ************************************************************************/
   5003 static void
   5004 ixgbe_enable_rx_drop(struct adapter *adapter)
   5005 {
   5006 	struct ixgbe_hw *hw = &adapter->hw;
   5007 	struct rx_ring  *rxr;
   5008 	u32             srrctl;
   5009 
   5010 	for (int i = 0; i < adapter->num_queues; i++) {
   5011 		rxr = &adapter->rx_rings[i];
   5012 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5013 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   5014 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5015 	}
   5016 
   5017 	/* enable drop for each vf */
   5018 	for (int i = 0; i < adapter->num_vfs; i++) {
   5019 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5020 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   5021 		    IXGBE_QDE_ENABLE));
   5022 	}
   5023 } /* ixgbe_enable_rx_drop */
   5024 
   5025 /************************************************************************
   5026  * ixgbe_disable_rx_drop
   5027  ************************************************************************/
   5028 static void
   5029 ixgbe_disable_rx_drop(struct adapter *adapter)
   5030 {
   5031 	struct ixgbe_hw *hw = &adapter->hw;
   5032 	struct rx_ring  *rxr;
   5033 	u32             srrctl;
   5034 
   5035 	for (int i = 0; i < adapter->num_queues; i++) {
   5036 		rxr = &adapter->rx_rings[i];
   5037         	srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   5038         	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   5039         	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   5040 	}
   5041 
   5042 	/* disable drop for each vf */
   5043 	for (int i = 0; i < adapter->num_vfs; i++) {
   5044 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   5045 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   5046 	}
   5047 } /* ixgbe_disable_rx_drop */
   5048 
   5049 /************************************************************************
   5050  * ixgbe_sysctl_advertise
   5051  *
   5052  *   SYSCTL wrapper around setting advertised speed
   5053  ************************************************************************/
   5054 static int
   5055 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   5056 {
   5057 	struct sysctlnode node = *rnode;
   5058 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5059 	int            error = 0, advertise;
   5060 
   5061 	advertise = adapter->advertise;
   5062 	node.sysctl_data = &advertise;
   5063 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5064 	if (error != 0 || newp == NULL)
   5065 		return error;
   5066 
   5067 	return ixgbe_set_advertise(adapter, advertise);
   5068 } /* ixgbe_sysctl_advertise */
   5069 
   5070 /************************************************************************
   5071  * ixgbe_set_advertise - Control advertised link speed
   5072  *
   5073  *   Flags:
   5074  *     0x00 - Default (all capable link speed)
   5075  *     0x01 - advertise 100 Mb
   5076  *     0x02 - advertise 1G
   5077  *     0x04 - advertise 10G
   5078  *     0x08 - advertise 10 Mb
   5079  *     0x10 - advertise 2.5G
   5080  *     0x20 - advertise 5G
   5081  ************************************************************************/
   5082 static int
   5083 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   5084 {
   5085 	device_t         dev;
   5086 	struct ixgbe_hw  *hw;
   5087 	ixgbe_link_speed speed = 0;
   5088 	ixgbe_link_speed link_caps = 0;
   5089 	s32              err = IXGBE_NOT_IMPLEMENTED;
   5090 	bool             negotiate = FALSE;
   5091 
   5092 	/* Checks to validate new value */
   5093 	if (adapter->advertise == advertise) /* no change */
   5094 		return (0);
   5095 
   5096 	dev = adapter->dev;
   5097 	hw = &adapter->hw;
   5098 
   5099 	/* No speed changes for backplane media */
   5100 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   5101 		return (ENODEV);
   5102 
   5103 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   5104 	    (hw->phy.multispeed_fiber))) {
   5105 		device_printf(dev,
   5106 		    "Advertised speed can only be set on copper or "
   5107 		    "multispeed fiber media types.\n");
   5108 		return (EINVAL);
   5109 	}
   5110 
   5111 	if (advertise < 0x0 || advertise > 0x2f) {
   5112 		device_printf(dev,
   5113 		    "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
   5114 		return (EINVAL);
   5115 	}
   5116 
   5117 	if (hw->mac.ops.get_link_capabilities) {
   5118 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   5119 		    &negotiate);
   5120 		if (err != IXGBE_SUCCESS) {
   5121 			device_printf(dev, "Unable to determine supported advertise speeds\n");
   5122 			return (ENODEV);
   5123 		}
   5124 	}
   5125 
   5126 	/* Set new value and report new advertised mode */
   5127 	if (advertise & 0x1) {
   5128 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   5129 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   5130 			return (EINVAL);
   5131 		}
   5132 		speed |= IXGBE_LINK_SPEED_100_FULL;
   5133 	}
   5134 	if (advertise & 0x2) {
   5135 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   5136 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   5137 			return (EINVAL);
   5138 		}
   5139 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   5140 	}
   5141 	if (advertise & 0x4) {
   5142 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   5143 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   5144 			return (EINVAL);
   5145 		}
   5146 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   5147 	}
   5148 	if (advertise & 0x8) {
   5149 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   5150 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   5151 			return (EINVAL);
   5152 		}
   5153 		speed |= IXGBE_LINK_SPEED_10_FULL;
   5154 	}
   5155 	if (advertise & 0x10) {
   5156 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5157 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5158 			return (EINVAL);
   5159 		}
   5160 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5161 	}
   5162 	if (advertise & 0x20) {
   5163 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5164 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5165 			return (EINVAL);
   5166 		}
   5167 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5168 	}
   5169 	if (advertise == 0)
   5170 		speed = link_caps; /* All capable link speed */
   5171 
   5172 	hw->mac.autotry_restart = TRUE;
   5173 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5174 	adapter->advertise = advertise;
   5175 
   5176 	return (0);
   5177 } /* ixgbe_set_advertise */
   5178 
   5179 /************************************************************************
   5180  * ixgbe_get_advertise - Get current advertised speed settings
   5181  *
   5182  *   Formatted for sysctl usage.
   5183  *   Flags:
   5184  *     0x01 - advertise 100 Mb
   5185  *     0x02 - advertise 1G
   5186  *     0x04 - advertise 10G
   5187  *     0x08 - advertise 10 Mb (yes, Mb)
   5188  *     0x10 - advertise 2.5G
   5189  *     0x20 - advertise 5G
   5190  ************************************************************************/
   5191 static int
   5192 ixgbe_get_advertise(struct adapter *adapter)
   5193 {
   5194 	struct ixgbe_hw  *hw = &adapter->hw;
   5195 	int              speed;
   5196 	ixgbe_link_speed link_caps = 0;
   5197 	s32              err;
   5198 	bool             negotiate = FALSE;
   5199 
   5200 	/*
   5201 	 * Advertised speed means nothing unless it's copper or
   5202 	 * multi-speed fiber
   5203 	 */
   5204 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5205 	    !(hw->phy.multispeed_fiber))
   5206 		return (0);
   5207 
   5208 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5209 	if (err != IXGBE_SUCCESS)
   5210 		return (0);
   5211 
   5212 	speed =
   5213 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5214 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5215 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5216 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5217 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5218 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5219 
   5220 	return speed;
   5221 } /* ixgbe_get_advertise */
   5222 
   5223 /************************************************************************
   5224  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5225  *
   5226  *   Control values:
   5227  *     0/1 - off / on (use default value of 1000)
   5228  *
   5229  *     Legal timer values are:
   5230  *     50,100,250,500,1000,2000,5000,10000
   5231  *
   5232  *     Turning off interrupt moderation will also turn this off.
   5233  ************************************************************************/
   5234 static int
   5235 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5236 {
   5237 	struct sysctlnode node = *rnode;
   5238 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5239 	struct ifnet   *ifp = adapter->ifp;
   5240 	int            error;
   5241 	int            newval;
   5242 
   5243 	newval = adapter->dmac;
   5244 	node.sysctl_data = &newval;
   5245 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5246 	if ((error) || (newp == NULL))
   5247 		return (error);
   5248 
   5249 	switch (newval) {
   5250 	case 0:
   5251 		/* Disabled */
   5252 		adapter->dmac = 0;
   5253 		break;
   5254 	case 1:
   5255 		/* Enable and use default */
   5256 		adapter->dmac = 1000;
   5257 		break;
   5258 	case 50:
   5259 	case 100:
   5260 	case 250:
   5261 	case 500:
   5262 	case 1000:
   5263 	case 2000:
   5264 	case 5000:
   5265 	case 10000:
   5266 		/* Legal values - allow */
   5267 		adapter->dmac = newval;
   5268 		break;
   5269 	default:
   5270 		/* Do nothing, illegal value */
   5271 		return (EINVAL);
   5272 	}
   5273 
   5274 	/* Re-initialize hardware if it's already running */
   5275 	if (ifp->if_flags & IFF_RUNNING)
   5276 		ifp->if_init(ifp);
   5277 
   5278 	return (0);
   5279 }
   5280 
   5281 #ifdef IXGBE_DEBUG
   5282 /************************************************************************
   5283  * ixgbe_sysctl_power_state
   5284  *
   5285  *   Sysctl to test power states
   5286  *   Values:
   5287  *     0      - set device to D0
   5288  *     3      - set device to D3
   5289  *     (none) - get current device power state
   5290  ************************************************************************/
static int
ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): this branch is FreeBSD-derived and never compiled
	 * ("notyet"): it still references req->newp, pci_get_powerstate()
	 * and DEVICE_SUSPEND/DEVICE_RESUME, none of which exist in this
	 * NetBSD port.  It needs porting before the #ifdef can be removed.
	 */
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	device_t       dev =  adapter->dev;
	int            curr_ps, new_ps, error = 0;

	curr_ps = new_ps = pci_get_powerstate(dev);

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (req->newp == NULL))
		return (error);

	if (new_ps == curr_ps)
		return (0);

	/* Only D0 <-> D3 transitions are meaningful here. */
	if (new_ps == 3 && curr_ps == 0)
		error = DEVICE_SUSPEND(dev);
	else if (new_ps == 0 && curr_ps == 3)
		error = DEVICE_RESUME(dev);
	else
		return (EINVAL);

	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));

	return (error);
#else
	/* Not implemented on NetBSD; report success without doing anything. */
	return 0;
#endif
} /* ixgbe_sysctl_power_state */
   5323 #endif
   5324 
   5325 /************************************************************************
   5326  * ixgbe_sysctl_wol_enable
   5327  *
   5328  *   Sysctl to enable/disable the WoL capability,
   5329  *   if supported by the adapter.
   5330  *
   5331  *   Values:
   5332  *     0 - disabled
   5333  *     1 - enabled
   5334  ************************************************************************/
static int
ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	bool            new_wol_enabled;
	int             error = 0;

	/*
	 * NOTE(review): sysctl_data is pointed at a bool here; that is only
	 * safe if the node was created with a matching bool-sized type --
	 * verify against the node creation in the sysctl setup code.
	 */
	new_wol_enabled = hw->wol_enabled;
	node.sysctl_data = &new_wol_enabled;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	/* No change requested. */
	if (new_wol_enabled == hw->wol_enabled)
		return (0);

	/* WoL may only be enabled on adapters that support it. */
	if (new_wol_enabled && !adapter->wol_support)
		return (ENODEV);
	else
		hw->wol_enabled = new_wol_enabled;

	return (0);
} /* ixgbe_sysctl_wol_enable */
   5359 
   5360 /************************************************************************
   5361  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5362  *
   5363  *   Sysctl to enable/disable the types of packets that the
   5364  *   adapter will wake up on upon receipt.
   5365  *   Flags:
   5366  *     0x1  - Link Status Change
   5367  *     0x2  - Magic Packet
   5368  *     0x4  - Direct Exact
   5369  *     0x8  - Directed Multicast
   5370  *     0x10 - Broadcast
   5371  *     0x20 - ARP/IPv4 Request Packet
   5372  *     0x40 - Direct IPv4 Packet
   5373  *     0x80 - Direct IPv6 Packet
   5374  *
   5375  *   Settings not listed above will cause the sysctl to return an error.
   5376  ************************************************************************/
static int
ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0;
	u32 new_wufc;

	/* Export the current filter flags; userland may write new ones. */
	new_wufc = adapter->wufc;
	node.sysctl_data = &new_wufc;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if ((error) || (newp == NULL))
		return (error);
	if (new_wufc == adapter->wufc)
		return (0);

	/* Only the eight filter bits documented above are accepted. */
	if (new_wufc & 0xffffff00)
		return (EINVAL);

	/*
	 * NOTE(review): the OR below folds the *old* low 24 bits back into
	 * the new value, so a filter bit that is already set can never be
	 * cleared through this sysctl.  This matches the upstream FreeBSD
	 * driver but looks suspicious -- confirm intent.
	 */
	new_wufc &= 0xff;
	new_wufc |= (0xffffff & adapter->wufc);
	adapter->wufc = new_wufc;

	return (0);
} /* ixgbe_sysctl_wufc */
   5402 
   5403 #ifdef IXGBE_DEBUG
   5404 /************************************************************************
   5405  * ixgbe_sysctl_print_rss_config
   5406  ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
{
#ifdef notyet
	/*
	 * NOTE(review): FreeBSD-derived debug code, disabled under
	 * "notyet": it relies on sbuf(9) and the FreeBSD sysctl `req',
	 * neither of which exists in this NetBSD port.
	 */
	struct sysctlnode node = *rnode;
	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	struct sbuf     *buf;
	int             error = 0, reta_size;
	u32             reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		/* The first 32 entries live in RETA, the rest in ERETA. */
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
#endif
	/* Not implemented on NetBSD; report success without output. */
	return (0);
} /* ixgbe_sysctl_print_rss_config */
   5460 #endif /* IXGBE_DEBUG */
   5461 
   5462 /************************************************************************
   5463  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5464  *
   5465  *   For X552/X557-AT devices using an external PHY
   5466  ************************************************************************/
   5467 static int
   5468 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5469 {
   5470 	struct sysctlnode node = *rnode;
   5471 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5472 	struct ixgbe_hw *hw = &adapter->hw;
   5473 	int val;
   5474 	u16 reg;
   5475 	int		error;
   5476 
   5477 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5478 		device_printf(adapter->dev,
   5479 		    "Device has no supported external thermal sensor.\n");
   5480 		return (ENODEV);
   5481 	}
   5482 
   5483 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5484 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5485 		device_printf(adapter->dev,
   5486 		    "Error reading from PHY's current temperature register\n");
   5487 		return (EAGAIN);
   5488 	}
   5489 
   5490 	node.sysctl_data = &val;
   5491 
   5492 	/* Shift temp for output */
   5493 	val = reg >> 8;
   5494 
   5495 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5496 	if ((error) || (newp == NULL))
   5497 		return (error);
   5498 
   5499 	return (0);
   5500 } /* ixgbe_sysctl_phy_temp */
   5501 
   5502 /************************************************************************
   5503  * ixgbe_sysctl_phy_overtemp_occurred
   5504  *
   5505  *   Reports (directly from the PHY) whether the current PHY
   5506  *   temperature is over the overtemp threshold.
   5507  ************************************************************************/
   5508 static int
   5509 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5510 {
   5511 	struct sysctlnode node = *rnode;
   5512 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5513 	struct ixgbe_hw *hw = &adapter->hw;
   5514 	int val, error;
   5515 	u16 reg;
   5516 
   5517 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5518 		device_printf(adapter->dev,
   5519 		    "Device has no supported external thermal sensor.\n");
   5520 		return (ENODEV);
   5521 	}
   5522 
   5523 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5524 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5525 		device_printf(adapter->dev,
   5526 		    "Error reading from PHY's temperature status register\n");
   5527 		return (EAGAIN);
   5528 	}
   5529 
   5530 	node.sysctl_data = &val;
   5531 
   5532 	/* Get occurrence bit */
   5533 	val = !!(reg & 0x4000);
   5534 
   5535 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5536 	if ((error) || (newp == NULL))
   5537 		return (error);
   5538 
   5539 	return (0);
   5540 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5541 
   5542 /************************************************************************
   5543  * ixgbe_sysctl_eee_state
   5544  *
   5545  *   Sysctl to set EEE power saving feature
   5546  *   Values:
   5547  *     0      - disable EEE
   5548  *     1      - enable EEE
   5549  *     (none) - get current device EEE state
   5550  ************************************************************************/
   5551 static int
   5552 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5553 {
   5554 	struct sysctlnode node = *rnode;
   5555 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5556 	struct ifnet   *ifp = adapter->ifp;
   5557 	device_t       dev = adapter->dev;
   5558 	int            curr_eee, new_eee, error = 0;
   5559 	s32            retval;
   5560 
   5561 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5562 	node.sysctl_data = &new_eee;
   5563 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5564 	if ((error) || (newp == NULL))
   5565 		return (error);
   5566 
   5567 	/* Nothing to do */
   5568 	if (new_eee == curr_eee)
   5569 		return (0);
   5570 
   5571 	/* Not supported */
   5572 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5573 		return (EINVAL);
   5574 
   5575 	/* Bounds checking */
   5576 	if ((new_eee < 0) || (new_eee > 1))
   5577 		return (EINVAL);
   5578 
   5579 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5580 	if (retval) {
   5581 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5582 		return (EINVAL);
   5583 	}
   5584 
   5585 	/* Restart auto-neg */
   5586 	ifp->if_init(ifp);
   5587 
   5588 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5589 
   5590 	/* Cache new value */
   5591 	if (new_eee)
   5592 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5593 	else
   5594 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5595 
   5596 	return (error);
   5597 } /* ixgbe_sysctl_eee_state */
   5598 
   5599 /************************************************************************
   5600  * ixgbe_init_device_features
   5601  ************************************************************************/
   5602 static void
   5603 ixgbe_init_device_features(struct adapter *adapter)
   5604 {
   5605 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5606 	                  | IXGBE_FEATURE_RSS
   5607 	                  | IXGBE_FEATURE_MSI
   5608 	                  | IXGBE_FEATURE_MSIX
   5609 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5610 	                  | IXGBE_FEATURE_LEGACY_TX;
   5611 
   5612 	/* Set capabilities first... */
   5613 	switch (adapter->hw.mac.type) {
   5614 	case ixgbe_mac_82598EB:
   5615 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5616 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5617 		break;
   5618 	case ixgbe_mac_X540:
   5619 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5620 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5621 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5622 		    (adapter->hw.bus.func == 0))
   5623 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5624 		break;
   5625 	case ixgbe_mac_X550:
   5626 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5627 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5628 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5629 		break;
   5630 	case ixgbe_mac_X550EM_x:
   5631 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5632 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5633 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5634 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5635 		break;
   5636 	case ixgbe_mac_X550EM_a:
   5637 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5638 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5639 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5640 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5641 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5642 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5643 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5644 		}
   5645 		break;
   5646 	case ixgbe_mac_82599EB:
   5647 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5648 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5649 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5650 		    (adapter->hw.bus.func == 0))
   5651 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5652 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5653 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5654 		break;
   5655 	default:
   5656 		break;
   5657 	}
   5658 
   5659 	/* Enabled by default... */
   5660 	/* Fan failure detection */
   5661 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5662 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5663 	/* Netmap */
   5664 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5665 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5666 	/* EEE */
   5667 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5668 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5669 	/* Thermal Sensor */
   5670 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5671 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5672 
   5673 	/* Enabled via global sysctl... */
   5674 	/* Flow Director */
   5675 	if (ixgbe_enable_fdir) {
   5676 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5677 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5678 		else
   5679 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
   5680 	}
   5681 	/* Legacy (single queue) transmit */
   5682 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5683 	    ixgbe_enable_legacy_tx)
   5684 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5685 	/*
   5686 	 * Message Signal Interrupts - Extended (MSI-X)
   5687 	 * Normal MSI is only enabled if MSI-X calls fail.
   5688 	 */
   5689 	if (!ixgbe_enable_msix)
   5690 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5691 	/* Receive-Side Scaling (RSS) */
   5692 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5693 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5694 
   5695 	/* Disable features with unmet dependencies... */
   5696 	/* No MSI-X */
   5697 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5698 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5699 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5700 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5701 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5702 	}
   5703 } /* ixgbe_init_device_features */
   5704 
   5705 /************************************************************************
   5706  * ixgbe_probe - Device identification routine
   5707  *
   5708  *   Determines if the driver should be loaded on
   5709  *   adapter based on its PCI vendor/device ID.
   5710  *
   5711  *   return BUS_PROBE_DEFAULT on success, positive on failure
   5712  ************************************************************************/
   5713 static int
   5714 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5715 {
   5716 	const struct pci_attach_args *pa = aux;
   5717 
   5718 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5719 }
   5720 
   5721 static ixgbe_vendor_info_t *
   5722 ixgbe_lookup(const struct pci_attach_args *pa)
   5723 {
   5724 	ixgbe_vendor_info_t *ent;
   5725 	pcireg_t subid;
   5726 
   5727 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5728 
   5729 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5730 		return NULL;
   5731 
   5732 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5733 
   5734 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5735 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5736 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5737 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5738 			(ent->subvendor_id == 0)) &&
   5739 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5740 			(ent->subdevice_id == 0))) {
   5741 			++ixgbe_total_ports;
   5742 			return ent;
   5743 		}
   5744 	}
   5745 	return NULL;
   5746 }
   5747 
   5748 static int
   5749 ixgbe_ifflags_cb(struct ethercom *ec)
   5750 {
   5751 	struct ifnet *ifp = &ec->ec_if;
   5752 	struct adapter *adapter = ifp->if_softc;
   5753 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5754 
   5755 	IXGBE_CORE_LOCK(adapter);
   5756 
   5757 	if (change != 0)
   5758 		adapter->if_flags = ifp->if_flags;
   5759 
   5760 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5761 		rc = ENETRESET;
   5762 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5763 		ixgbe_set_promisc(adapter);
   5764 
   5765 	/* Set up VLAN support and filter */
   5766 	ixgbe_setup_vlan_hw_support(adapter);
   5767 
   5768 	IXGBE_CORE_UNLOCK(adapter);
   5769 
   5770 	return rc;
   5771 }
   5772 
   5773 /************************************************************************
   5774  * ixgbe_ioctl - Ioctl entry point
   5775  *
   5776  *   Called when the user wants to configure the interface.
   5777  *
   5778  *   return 0 on success, positive on failure
   5779  ************************************************************************/
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	struct ifreq	*ifr = data;
	int             error = 0;
	int l4csum_en;
	/* Layer-4 Rx checksum capabilities that must be toggled as a group. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;

	/*
	 * First switch: debug tracing only, with one exception:
	 * SIOCZIFDATA additionally clears the hardware and software
	 * counters before falling into the generic handling below.
	 */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
#ifdef __NetBSD__
	case SIOCINITIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
		break;
	case SIOCGIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
		break;
	case SIOCGIFAFLAG_IN:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
		break;
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
		break;
	case SIOCGIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
		break;
	case SIOCGIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
		break;
	case SIOCGETHERCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
		break;
	case SIOCGLIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Reset both the MAC statistics registers and our evcnts. */
		hw->mac.ops.clear_hw_cntrs(hw);
		ixgbe_clear_evcnt(adapter);
		break;
	case SIOCAIFADDR:
		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
		break;
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: the actual work. */
	switch (command) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media changes are handled entirely by the ifmedia layer. */
		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
	case SIOCGI2C:
	{
		struct ixgbe_i2c_req	i2c;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP+ EEPROM (0xA0) and diagnostics (0xA2) pages. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		/*
		 * NOTE(review): only a single byte is read here even though
		 * i2c.len is validated above -- confirm whether a per-byte
		 * loop over i2c.len (as in other ixgbe ports) was intended.
		 */
		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
		    i2c.dev_addr, i2c.data);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		/* Let the generic ethernet layer run first. */
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: hardware needs reprogramming, if it's up. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			/* Capability/MTU change: full reinitialization. */
			IXGBE_CORE_LOCK(adapter);
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}

	return error;
} /* ixgbe_ioctl */
   5914 
   5915 /************************************************************************
   5916  * ixgbe_check_fan_failure
   5917  ************************************************************************/
   5918 static void
   5919 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5920 {
   5921 	u32 mask;
   5922 
   5923 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5924 	    IXGBE_ESDP_SDP1;
   5925 
   5926 	if (reg & mask)
   5927 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5928 } /* ixgbe_check_fan_failure */
   5929 
   5930 /************************************************************************
   5931  * ixgbe_handle_que
   5932  ************************************************************************/
static void
ixgbe_handle_que(void *context)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more = false;	/* true if rx/tx work remains */

	que->handleq.ev_count++;

	if (ifp->if_flags & IFF_RUNNING) {
		/* Reap completed receives, then transmits under TX lock. */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		more |= ixgbe_txeof(txr);
		/* Drain the multiqueue interq if the mq transmit path is on. */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
				ixgbe_mq_start_locked(ifp, txr);
		/* Only for queue 0 */
		/* NetBSD still needs this for CBQ */
		if ((&adapter->queues[0] == que)
		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
			ixgbe_legacy_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	if (more) {
		/* Work remains: reschedule ourselves rather than re-enable. */
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
	} else if (que->res != NULL) {
		/* Re-enable this interrupt */
		ixgbe_enable_queue(adapter, que->msix);
	} else
		/* No per-queue vector (legacy/MSI): unmask everything. */
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_handle_que */
   5970 
   5971 /************************************************************************
   5972  * ixgbe_handle_que_work
   5973  ************************************************************************/
static void
ixgbe_handle_que_work(struct work *wk, void *context)
{
	/* Recover the owning queue from the work struct embedded in it. */
	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);

	/*
	 * "enqueued flag" is not required here.
	 * See ixgbe_msix_que().
	 */
	ixgbe_handle_que(que);
}
   5985 
   5986 /************************************************************************
   5987  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5988  ************************************************************************/
   5989 static int
   5990 ixgbe_allocate_legacy(struct adapter *adapter,
   5991     const struct pci_attach_args *pa)
   5992 {
   5993 	device_t	dev = adapter->dev;
   5994 	struct ix_queue *que = adapter->queues;
   5995 	struct tx_ring  *txr = adapter->tx_rings;
   5996 	int		counts[PCI_INTR_TYPE_SIZE];
   5997 	pci_intr_type_t intr_type, max_type;
   5998 	char            intrbuf[PCI_INTRSTR_LEN];
   5999 	const char	*intrstr = NULL;
   6000 
   6001 	/* We allocate a single interrupt resource */
   6002 	max_type = PCI_INTR_TYPE_MSI;
   6003 	counts[PCI_INTR_TYPE_MSIX] = 0;
   6004 	counts[PCI_INTR_TYPE_MSI] =
   6005 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   6006 	/* Check not feat_en but feat_cap to fallback to INTx */
   6007 	counts[PCI_INTR_TYPE_INTX] =
   6008 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   6009 
   6010 alloc_retry:
   6011 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   6012 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   6013 		return ENXIO;
   6014 	}
   6015 	adapter->osdep.nintrs = 1;
   6016 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   6017 	    intrbuf, sizeof(intrbuf));
   6018 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   6019 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   6020 	    device_xname(dev));
   6021 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   6022 	if (adapter->osdep.ihs[0] == NULL) {
   6023 		aprint_error_dev(dev,"unable to establish %s\n",
   6024 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   6025 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6026 		adapter->osdep.intrs = NULL;
   6027 		switch (intr_type) {
   6028 		case PCI_INTR_TYPE_MSI:
   6029 			/* The next try is for INTx: Disable MSI */
   6030 			max_type = PCI_INTR_TYPE_INTX;
   6031 			counts[PCI_INTR_TYPE_INTX] = 1;
   6032 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6033 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   6034 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6035 				goto alloc_retry;
   6036 			} else
   6037 				break;
   6038 		case PCI_INTR_TYPE_INTX:
   6039 		default:
   6040 			/* See below */
   6041 			break;
   6042 		}
   6043 	}
   6044 	if (intr_type == PCI_INTR_TYPE_INTX) {
   6045 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   6046 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6047 	}
   6048 	if (adapter->osdep.ihs[0] == NULL) {
   6049 		aprint_error_dev(dev,
   6050 		    "couldn't establish interrupt%s%s\n",
   6051 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   6052 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   6053 		adapter->osdep.intrs = NULL;
   6054 		return ENXIO;
   6055 	}
   6056 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   6057 	/*
   6058 	 * Try allocating a fast interrupt and the associated deferred
   6059 	 * processing contexts.
   6060 	 */
   6061 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   6062 		txr->txr_si =
   6063 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6064 			ixgbe_deferred_mq_start, txr);
   6065 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6066 	    ixgbe_handle_que, que);
   6067 
   6068 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   6069 		& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   6070 		aprint_error_dev(dev,
   6071 		    "could not establish software interrupts\n");
   6072 
   6073 		return ENXIO;
   6074 	}
   6075 	/* For simplicity in the handlers */
   6076 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   6077 
   6078 	return (0);
   6079 } /* ixgbe_allocate_legacy */
   6080 
   6081 /************************************************************************
   6082  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   6083  ************************************************************************/
   6084 static int
   6085 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   6086 {
   6087 	device_t        dev = adapter->dev;
   6088 	struct 		ix_queue *que = adapter->queues;
   6089 	struct  	tx_ring *txr = adapter->tx_rings;
   6090 	pci_chipset_tag_t pc;
   6091 	char		intrbuf[PCI_INTRSTR_LEN];
   6092 	char		intr_xname[32];
   6093 	char		wqname[MAXCOMLEN];
   6094 	const char	*intrstr = NULL;
   6095 	int 		error, vector = 0;
   6096 	int		cpu_id = 0;
   6097 	kcpuset_t	*affinity;
   6098 #ifdef RSS
   6099 	unsigned int    rss_buckets = 0;
   6100 	kcpuset_t	cpu_mask;
   6101 #endif
   6102 
   6103 	pc = adapter->osdep.pc;
   6104 #ifdef	RSS
   6105 	/*
   6106 	 * If we're doing RSS, the number of queues needs to
   6107 	 * match the number of RSS buckets that are configured.
   6108 	 *
   6109 	 * + If there's more queues than RSS buckets, we'll end
   6110 	 *   up with queues that get no traffic.
   6111 	 *
   6112 	 * + If there's more RSS buckets than queues, we'll end
   6113 	 *   up having multiple RSS buckets map to the same queue,
   6114 	 *   so there'll be some contention.
   6115 	 */
   6116 	rss_buckets = rss_getnumbuckets();
   6117 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   6118 	    (adapter->num_queues != rss_buckets)) {
   6119 		device_printf(dev,
   6120 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   6121 		    "; performance will be impacted.\n",
   6122 		    __func__, adapter->num_queues, rss_buckets);
   6123 	}
   6124 #endif
   6125 
   6126 	adapter->osdep.nintrs = adapter->num_queues + 1;
   6127 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   6128 	    adapter->osdep.nintrs) != 0) {
   6129 		aprint_error_dev(dev,
   6130 		    "failed to allocate MSI-X interrupt\n");
   6131 		return (ENXIO);
   6132 	}
   6133 
   6134 	kcpuset_create(&affinity, false);
   6135 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   6136 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   6137 		    device_xname(dev), i);
   6138 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   6139 		    sizeof(intrbuf));
   6140 #ifdef IXGBE_MPSAFE
   6141 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   6142 		    true);
   6143 #endif
   6144 		/* Set the handler function */
   6145 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   6146 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   6147 		    intr_xname);
   6148 		if (que->res == NULL) {
   6149 			aprint_error_dev(dev,
   6150 			    "Failed to register QUE handler\n");
   6151 			error = ENXIO;
   6152 			goto err_out;
   6153 		}
   6154 		que->msix = vector;
   6155 		adapter->active_queues |= (u64)(1 << que->msix);
   6156 
   6157 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   6158 #ifdef	RSS
   6159 			/*
   6160 			 * The queue ID is used as the RSS layer bucket ID.
   6161 			 * We look up the queue ID -> RSS CPU ID and select
   6162 			 * that.
   6163 			 */
   6164 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   6165 			CPU_SETOF(cpu_id, &cpu_mask);
   6166 #endif
   6167 		} else {
   6168 			/*
   6169 			 * Bind the MSI-X vector, and thus the
   6170 			 * rings to the corresponding CPU.
   6171 			 *
   6172 			 * This just happens to match the default RSS
   6173 			 * round-robin bucket -> queue -> CPU allocation.
   6174 			 */
   6175 			if (adapter->num_queues > 1)
   6176 				cpu_id = i;
   6177 		}
   6178 		/* Round-robin affinity */
   6179 		kcpuset_zero(affinity);
   6180 		kcpuset_set(affinity, cpu_id % ncpu);
   6181 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6182 		    NULL);
   6183 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6184 		    intrstr);
   6185 		if (error == 0) {
   6186 #if 1 /* def IXGBE_DEBUG */
   6187 #ifdef	RSS
   6188 			aprintf_normal(", bound RSS bucket %d to CPU %d", i,
   6189 			    cpu_id % ncpu);
   6190 #else
   6191 			aprint_normal(", bound queue %d to cpu %d", i,
   6192 			    cpu_id % ncpu);
   6193 #endif
   6194 #endif /* IXGBE_DEBUG */
   6195 		}
   6196 		aprint_normal("\n");
   6197 
   6198 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6199 			txr->txr_si = softint_establish(
   6200 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6201 				ixgbe_deferred_mq_start, txr);
   6202 			if (txr->txr_si == NULL) {
   6203 				aprint_error_dev(dev,
   6204 				    "couldn't establish software interrupt\n");
   6205 				error = ENXIO;
   6206 				goto err_out;
   6207 			}
   6208 		}
   6209 		que->que_si
   6210 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6211 			ixgbe_handle_que, que);
   6212 		if (que->que_si == NULL) {
   6213 			aprint_error_dev(dev,
   6214 			    "couldn't establish software interrupt\n");
   6215 			error = ENXIO;
   6216 			goto err_out;
   6217 		}
   6218 	}
   6219 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   6220 	error = workqueue_create(&adapter->txr_wq, wqname,
   6221 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6222 	    IXGBE_WORKQUEUE_FLAGS);
   6223 	if (error) {
   6224 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   6225 		goto err_out;
   6226 	}
   6227 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   6228 
   6229 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   6230 	error = workqueue_create(&adapter->que_wq, wqname,
   6231 	    ixgbe_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   6232 	    IXGBE_WORKQUEUE_FLAGS);
   6233 	if (error) {
   6234 		aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
   6235 		goto err_out;
   6236 	}
   6237 
   6238 	/* and Link */
   6239 	cpu_id++;
   6240 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6241 	adapter->vector = vector;
   6242 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6243 	    sizeof(intrbuf));
   6244 #ifdef IXGBE_MPSAFE
   6245 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6246 	    true);
   6247 #endif
   6248 	/* Set the link handler function */
   6249 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6250 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6251 	    intr_xname);
   6252 	if (adapter->osdep.ihs[vector] == NULL) {
   6253 		adapter->res = NULL;
   6254 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6255 		error = ENXIO;
   6256 		goto err_out;
   6257 	}
   6258 	/* Round-robin affinity */
   6259 	kcpuset_zero(affinity);
   6260 	kcpuset_set(affinity, cpu_id % ncpu);
   6261 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6262 	    NULL);
   6263 
   6264 	aprint_normal_dev(dev,
   6265 	    "for link, interrupting at %s", intrstr);
   6266 	if (error == 0)
   6267 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6268 	else
   6269 		aprint_normal("\n");
   6270 
   6271 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6272 		adapter->mbx_si =
   6273 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6274 			ixgbe_handle_mbx, adapter);
   6275 		if (adapter->mbx_si == NULL) {
   6276 			aprint_error_dev(dev,
   6277 			    "could not establish software interrupts\n");
   6278 
   6279 			error = ENXIO;
   6280 			goto err_out;
   6281 		}
   6282 	}
   6283 
   6284 	kcpuset_destroy(affinity);
   6285 	aprint_normal_dev(dev,
   6286 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6287 
   6288 	return (0);
   6289 
   6290 err_out:
   6291 	kcpuset_destroy(affinity);
   6292 	ixgbe_free_softint(adapter);
   6293 	ixgbe_free_pciintr_resources(adapter);
   6294 	return (error);
   6295 } /* ixgbe_allocate_msix */
   6296 
   6297 /************************************************************************
   6298  * ixgbe_configure_interrupts
   6299  *
   6300  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6301  *   This will also depend on user settings.
   6302  ************************************************************************/
   6303 static int
   6304 ixgbe_configure_interrupts(struct adapter *adapter)
   6305 {
   6306 	device_t dev = adapter->dev;
   6307 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6308 	int want, queues, msgs;
   6309 
   6310 	/* Default to 1 queue if MSI-X setup fails */
   6311 	adapter->num_queues = 1;
   6312 
   6313 	/* Override by tuneable */
   6314 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6315 		goto msi;
   6316 
   6317 	/*
   6318 	 *  NetBSD only: Use single vector MSI when number of CPU is 1 to save
   6319 	 * interrupt slot.
   6320 	 */
   6321 	if (ncpu == 1)
   6322 		goto msi;
   6323 
   6324 	/* First try MSI-X */
   6325 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6326 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6327 	if (msgs < 2)
   6328 		goto msi;
   6329 
   6330 	adapter->msix_mem = (void *)1; /* XXX */
   6331 
   6332 	/* Figure out a reasonable auto config value */
   6333 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6334 
   6335 #ifdef	RSS
   6336 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6337 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6338 		queues = min(queues, rss_getnumbuckets());
   6339 #endif
   6340 	if (ixgbe_num_queues > queues) {
   6341 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
   6342 		ixgbe_num_queues = queues;
   6343 	}
   6344 
   6345 	if (ixgbe_num_queues != 0)
   6346 		queues = ixgbe_num_queues;
   6347 	else
   6348 		queues = min(queues,
   6349 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6350 
   6351 	/* reflect correct sysctl value */
   6352 	ixgbe_num_queues = queues;
   6353 
   6354 	/*
   6355 	 * Want one vector (RX/TX pair) per queue
   6356 	 * plus an additional for Link.
   6357 	 */
   6358 	want = queues + 1;
   6359 	if (msgs >= want)
   6360 		msgs = want;
   6361 	else {
   6362                	aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6363 		    "%d vectors but %d queues wanted!\n",
   6364 		    msgs, want);
   6365 		goto msi;
   6366 	}
   6367 	adapter->num_queues = queues;
   6368 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6369 	return (0);
   6370 
   6371 	/*
   6372 	 * MSI-X allocation failed or provided us with
   6373 	 * less vectors than needed. Free MSI-X resources
   6374 	 * and we'll try enabling MSI.
   6375 	 */
   6376 msi:
   6377 	/* Without MSI-X, some features are no longer supported */
   6378 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6379 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6380 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6381 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6382 
   6383        	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6384 	adapter->msix_mem = NULL; /* XXX */
   6385 	if (msgs > 1)
   6386 		msgs = 1;
   6387 	if (msgs != 0) {
   6388 		msgs = 1;
   6389 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6390 		return (0);
   6391 	}
   6392 
   6393 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6394 		aprint_error_dev(dev,
   6395 		    "Device does not support legacy interrupts.\n");
   6396 		return 1;
   6397 	}
   6398 
   6399 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6400 
   6401 	return (0);
   6402 } /* ixgbe_configure_interrupts */
   6403 
   6404 
   6405 /************************************************************************
   6406  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6407  *
   6408  *   Done outside of interrupt context since the driver might sleep
   6409  ************************************************************************/
static void
ixgbe_handle_link(void *context)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Core lock held across the check/update pair to keep them atomic. */
	IXGBE_CORE_LOCK(adapter);
	++adapter->link_sicount.ev_count;
	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
	ixgbe_update_link_status(adapter);

	/* Re-enable link interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_link */
   6426 
   6427 /************************************************************************
   6428  * ixgbe_rearm_queues
   6429  ************************************************************************/
   6430 static void
   6431 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6432 {
   6433 	u32 mask;
   6434 
   6435 	switch (adapter->hw.mac.type) {
   6436 	case ixgbe_mac_82598EB:
   6437 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6438 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6439 		break;
   6440 	case ixgbe_mac_82599EB:
   6441 	case ixgbe_mac_X540:
   6442 	case ixgbe_mac_X550:
   6443 	case ixgbe_mac_X550EM_x:
   6444 	case ixgbe_mac_X550EM_a:
   6445 		mask = (queues & 0xFFFFFFFF);
   6446 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6447 		mask = (queues >> 32);
   6448 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6449 		break;
   6450 	default:
   6451 		break;
   6452 	}
   6453 } /* ixgbe_rearm_queues */
   6454