      1 /* $NetBSD: ixgbe.c,v 1.123 2018/02/16 10:11:21 msaitoh Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
      88  *   Used by probe to select the devices to attach to.
      89  *   The last field stores an index into ixgbe_strings.
      90  *   The last entry must be all 0s.
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void      ixgbe_free_softint(struct adapter *);
    176 static void	ixgbe_get_slot_info(struct adapter *);
    177 static int      ixgbe_allocate_msix(struct adapter *,
    178 		    const struct pci_attach_args *);
    179 static int      ixgbe_allocate_legacy(struct adapter *,
    180 		    const struct pci_attach_args *);
    181 static int      ixgbe_configure_interrupts(struct adapter *);
    182 static void	ixgbe_free_pciintr_resources(struct adapter *);
    183 static void	ixgbe_free_pci_resources(struct adapter *);
    184 static void	ixgbe_local_timer(void *);
    185 static void	ixgbe_local_timer1(void *);
    186 static int	ixgbe_setup_interface(device_t, struct adapter *);
    187 static void	ixgbe_config_gpie(struct adapter *);
    188 static void	ixgbe_config_dmac(struct adapter *);
    189 static void	ixgbe_config_delay_values(struct adapter *);
    190 static void	ixgbe_config_link(struct adapter *);
    191 static void	ixgbe_check_wol_support(struct adapter *);
    192 static int	ixgbe_setup_low_power_mode(struct adapter *);
    193 static void	ixgbe_rearm_queues(struct adapter *, u64);
    194 
    195 static void     ixgbe_initialize_transmit_units(struct adapter *);
    196 static void     ixgbe_initialize_receive_units(struct adapter *);
    197 static void	ixgbe_enable_rx_drop(struct adapter *);
    198 static void	ixgbe_disable_rx_drop(struct adapter *);
    199 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    200 
    201 static void     ixgbe_enable_intr(struct adapter *);
    202 static void     ixgbe_disable_intr(struct adapter *);
    203 static void     ixgbe_update_stats_counters(struct adapter *);
    204 static void     ixgbe_set_promisc(struct adapter *);
    205 static void     ixgbe_set_multi(struct adapter *);
    206 static void     ixgbe_update_link_status(struct adapter *);
    207 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    208 static void	ixgbe_configure_ivars(struct adapter *);
    209 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    210 
    211 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    212 #if 0
    213 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    214 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    215 #endif
    216 
    217 static void	ixgbe_add_device_sysctls(struct adapter *);
    218 static void     ixgbe_add_hw_stats(struct adapter *);
    219 static void	ixgbe_clear_evcnt(struct adapter *);
    220 static int	ixgbe_set_flowcntl(struct adapter *, int);
    221 static int	ixgbe_set_advertise(struct adapter *, int);
    222 static int      ixgbe_get_advertise(struct adapter *);
    223 
    224 /* Sysctl handlers */
    225 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    226 		     const char *, int *, int);
    227 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    228 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    229 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    230 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    231 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    232 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    233 #ifdef IXGBE_DEBUG
    234 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    235 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    236 #endif
    237 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    238 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    240 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    241 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    242 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    243 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    244 
    245 /* Support for pluggable optic modules */
    246 static bool	ixgbe_sfp_probe(struct adapter *);
    247 
    248 /* Legacy (single vector) interrupt handler */
    249 static int	ixgbe_legacy_irq(void *);
    250 
    251 /* The MSI/MSI-X Interrupt handlers */
    252 static int	ixgbe_msix_que(void *);
    253 static int	ixgbe_msix_link(void *);
    254 
    255 /* Software interrupts for deferred work */
    256 static void	ixgbe_handle_que(void *);
    257 static void	ixgbe_handle_link(void *);
    258 static void	ixgbe_handle_msf(void *);
    259 static void	ixgbe_handle_mod(void *);
    260 static void	ixgbe_handle_phy(void *);
    261 
    262 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    263 
    264 /************************************************************************
    265  *  NetBSD Device Interface Entry Points
    266  ************************************************************************/
    267 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    268     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    269     DVF_DETACH_SHUTDOWN);
    270 
    271 #if 0
    272 devclass_t ix_devclass;
    273 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    274 
    275 MODULE_DEPEND(ix, pci, 1, 1, 1);
    276 MODULE_DEPEND(ix, ether, 1, 1, 1);
    277 #ifdef DEV_NETMAP
    278 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    279 #endif
    280 #endif
    281 
    282 /*
    283  * TUNEABLE PARAMETERS:
    284  */
    285 
     286 /*
     287  * AIM: Adaptive Interrupt Moderation.
     288  * When enabled, the interrupt rate is
     289  * varied over time based on the
     290  * traffic seen on that interrupt vector.
     291  */
    292 static bool ixgbe_enable_aim = true;
    293 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
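         /*
          * On NetBSD the FreeBSD-style SYSCTL_INT() declarations below are
          * compiled out by the empty macro above; several of these defaults are
          * instead exposed as per-device sysctls (see ixgbe_add_device_sysctls()
          * and ixgbe_set_sysctl_value()).
          */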
    294 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    295     "Enable adaptive interrupt moderation");
    296 
    297 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    298 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    299     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
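         /*
          * A sketch of the arithmetic above, assuming IXGBE_LOW_LATENCY is 128
          * as defined in ixgbe.h: the default limit works out to
          * 4000000 / 128 = 31250 interrupts per second for each vector.
          */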
    300 
    301 /* How many packets rxeof tries to clean at a time */
    302 static int ixgbe_rx_process_limit = 256;
    303 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    304     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    305 
    306 /* How many packets txeof tries to clean at a time */
    307 static int ixgbe_tx_process_limit = 256;
    308 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    309     &ixgbe_tx_process_limit, 0,
    310     "Maximum number of sent packets to process at a time, -1 means unlimited");
    311 
    312 /* Flow control setting, default to full */
    313 static int ixgbe_flow_control = ixgbe_fc_full;
    314 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    315     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    316 
     317 /*
     318  * Smart speed setting, default to on.
     319  * This only works as a compile-time option
     320  * right now because it is applied during
     321  * attach; set this to 'ixgbe_smart_speed_off'
     322  * to disable.
     323  */
    324 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    325 
    326 /*
    327  * MSI-X should be the default for best performance,
    328  * but this allows it to be forced off for testing.
    329  */
    330 static int ixgbe_enable_msix = 1;
    331 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    332     "Enable MSI-X interrupts");
    333 
     334 /*
     335  * Number of queues. If set to 0, it
     336  * autoconfigures based on the number
     337  * of CPUs, with a maximum of 8. It
     338  * can be overridden manually here.
     339  */
    340 static int ixgbe_num_queues = 0;
    341 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    342     "Number of queues to configure, 0 indicates autoconfigure");
    343 
     344 /*
     345  * Number of TX descriptors per ring,
     346  * set higher than RX as this seems to be
     347  * the better-performing choice.
     348  */
    349 static int ixgbe_txd = PERFORM_TXD;
    350 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    351     "Number of transmit descriptors per queue");
    352 
    353 /* Number of RX descriptors per ring */
    354 static int ixgbe_rxd = PERFORM_RXD;
    355 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    356     "Number of receive descriptors per queue");
    357 
     358 /*
     359  * Enabling this allows the use of
     360  * unsupported SFP+ modules; note that
     361  * if you do so, you are on your own :)
     362  */
    363 static int allow_unsupported_sfp = false;
    364 #define TUNABLE_INT(__x, __y)
    365 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    366 
    367 /*
    368  * Not sure if Flow Director is fully baked,
    369  * so we'll default to turning it off.
    370  */
    371 static int ixgbe_enable_fdir = 0;
    372 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    373     "Enable Flow Director");
    374 
    375 /* Legacy Transmit (single queue) */
    376 static int ixgbe_enable_legacy_tx = 0;
    377 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    378     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    379 
    380 /* Receive-Side Scaling */
    381 static int ixgbe_enable_rss = 1;
    382 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    383     "Enable Receive-Side Scaling (RSS)");
    384 
     385 /* Keep a running count of ports for the sanity check */
    386 static int ixgbe_total_ports;
    387 
    388 #if 0
    389 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    390 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    391 #endif
    392 
    393 #ifdef NET_MPSAFE
    394 #define IXGBE_MPSAFE		1
    395 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    396 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    397 #else
    398 #define IXGBE_CALLOUT_FLAGS	0
    399 #define IXGBE_SOFTINFT_FLAGS	0
    400 #endif
    401 
    402 /************************************************************************
    403  * ixgbe_initialize_rss_mapping
    404  ************************************************************************/
    405 static void
    406 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    407 {
    408 	struct ixgbe_hw	*hw = &adapter->hw;
    409 	u32             reta = 0, mrqc, rss_key[10];
    410 	int             queue_id, table_size, index_mult;
    411 	int             i, j;
    412 	u32             rss_hash_config;
    413 
     414 	/* Force use of the default RSS key. */
    415 #ifdef __NetBSD__
    416 	rss_getkey((uint8_t *) &rss_key);
    417 #else
    418 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    419 		/* Fetch the configured RSS key */
    420 		rss_getkey((uint8_t *) &rss_key);
    421 	} else {
    422 		/* set up random bits */
    423 		cprng_fast(&rss_key, sizeof(rss_key));
    424 	}
    425 #endif
    426 
    427 	/* Set multiplier for RETA setup and table size based on MAC */
    428 	index_mult = 0x1;
    429 	table_size = 128;
    430 	switch (adapter->hw.mac.type) {
    431 	case ixgbe_mac_82598EB:
    432 		index_mult = 0x11;
    433 		break;
    434 	case ixgbe_mac_X550:
    435 	case ixgbe_mac_X550EM_x:
    436 	case ixgbe_mac_X550EM_a:
    437 		table_size = 512;
    438 		break;
    439 	default:
    440 		break;
    441 	}
    442 
    443 	/* Set up the redirection table */
    444 	for (i = 0, j = 0; i < table_size; i++, j++) {
    445 		if (j == adapter->num_queues)
    446 			j = 0;
    447 
    448 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    449 			/*
    450 			 * Fetch the RSS bucket id for the given indirection
    451 			 * entry. Cap it at the number of configured buckets
    452 			 * (which is num_queues.)
    453 			 */
    454 			queue_id = rss_get_indirection_to_bucket(i);
    455 			queue_id = queue_id % adapter->num_queues;
    456 		} else
    457 			queue_id = (j * index_mult);
    458 
    459 		/*
    460 		 * The low 8 bits are for hash value (n+0);
    461 		 * The next 8 bits are for hash value (n+1), etc.
    462 		 */
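         		/*
         		 * Worked example (a sketch assuming 4 queues and the
         		 * non-RSS identity mapping, index_mult == 1): after
         		 * i = 0..3 the accumulated word is 0x03020100, i.e.
         		 * queue 0 in bits 7:0 up to queue 3 in bits 31:24,
         		 * which is then written to RETA(0) below.
         		 */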
    463 		reta = reta >> 8;
    464 		reta = reta | (((uint32_t) queue_id) << 24);
    465 		if ((i & 3) == 3) {
    466 			if (i < 128)
    467 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    468 			else
    469 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    470 				    reta);
    471 			reta = 0;
    472 		}
    473 	}
    474 
    475 	/* Now fill our hash function seeds */
    476 	for (i = 0; i < 10; i++)
    477 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    478 
    479 	/* Perform hash on these packet types */
    480 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    481 		rss_hash_config = rss_gethashconfig();
    482 	else {
    483 		/*
    484 		 * Disable UDP - IP fragments aren't currently being handled
    485 		 * and so we end up with a mix of 2-tuple and 4-tuple
    486 		 * traffic.
    487 		 */
    488 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    489 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    490 		                | RSS_HASHTYPE_RSS_IPV6
    491 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    492 		                | RSS_HASHTYPE_RSS_IPV6_EX
    493 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    494 	}
    495 
    496 	mrqc = IXGBE_MRQC_RSSEN;
    497 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    498 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    499 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    500 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    501 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    502 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    503 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    504 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    505 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    506 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    507 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    508 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    509 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    510 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    511 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    512 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    513 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    514 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    515 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    516 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    517 } /* ixgbe_initialize_rss_mapping */
    518 
    519 /************************************************************************
    520  * ixgbe_initialize_receive_units - Setup receive registers and features.
    521  ************************************************************************/
    522 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
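         /*
          * A sketch of this rounding, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10
          * (1 KB units): an rx_mbuf_sz of 2048 becomes (2048 + 1023) >> 10 == 2,
          * i.e. a 2 KB packet buffer encoded into SRRCTL.BSIZEPKT (see the bufsz
          * computation below).
          */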
    523 
    524 static void
    525 ixgbe_initialize_receive_units(struct adapter *adapter)
    526 {
    527 	struct	rx_ring	*rxr = adapter->rx_rings;
    528 	struct ixgbe_hw	*hw = &adapter->hw;
    529 	struct ifnet    *ifp = adapter->ifp;
    530 	int             i, j;
    531 	u32		bufsz, fctrl, srrctl, rxcsum;
    532 	u32		hlreg;
    533 
    534 	/*
    535 	 * Make sure receives are disabled while
    536 	 * setting up the descriptor ring
    537 	 */
    538 	ixgbe_disable_rx(hw);
    539 
    540 	/* Enable broadcasts */
    541 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    542 	fctrl |= IXGBE_FCTRL_BAM;
    543 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    544 		fctrl |= IXGBE_FCTRL_DPF;
    545 		fctrl |= IXGBE_FCTRL_PMCF;
    546 	}
    547 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    548 
    549 	/* Set for Jumbo Frames? */
    550 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    551 	if (ifp->if_mtu > ETHERMTU)
    552 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    553 	else
    554 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    555 
    556 #ifdef DEV_NETMAP
    557 	/* CRC stripping is conditional in Netmap */
    558 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    559 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    560 	    !ix_crcstrip)
    561 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    562 	else
    563 #endif /* DEV_NETMAP */
    564 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    565 
    566 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    567 
    568 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    569 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    570 
    571 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    572 		u64 rdba = rxr->rxdma.dma_paddr;
    573 		u32 tqsmreg, reg;
    574 		int regnum = i / 4;	/* 1 register per 4 queues */
    575 		int regshift = i % 4;	/* 4 bits per 1 queue */
    576 		j = rxr->me;
    577 
    578 		/* Setup the Base and Length of the Rx Descriptor Ring */
    579 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    580 		    (rdba & 0x00000000ffffffffULL));
    581 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    582 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    583 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    584 
    585 		/* Set up the SRRCTL register */
    586 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    587 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    588 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    589 		srrctl |= bufsz;
    590 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    591 
    592 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    593 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    594 		reg &= ~(0x000000ff << (regshift * 8));
    595 		reg |= i << (regshift * 8);
    596 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    597 
     598 		/*
     599 		 * Set TQSMR (Transmit Queue Statistic Mapping) register.
     600 		 * The register location for queues 0..7 differs between
     601 		 * 82598 (TQSMR) and newer MACs (TQSM).
     602 		 */
    603 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    604 			tqsmreg = IXGBE_TQSMR(regnum);
    605 		else
    606 			tqsmreg = IXGBE_TQSM(regnum);
    607 		reg = IXGBE_READ_REG(hw, tqsmreg);
    608 		reg &= ~(0x000000ff << (regshift * 8));
    609 		reg |= i << (regshift * 8);
    610 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
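         		/*
         		 * Example (a sketch): for i == 5, regnum is 1 and
         		 * regshift is 1, so that queue's statistics index lands
         		 * in bits 15:8 of RQSMR(1) above and of TQSM(1)
         		 * (TQSMR(1) on 82598) here.
         		 */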
    611 
    612 		/*
    613 		 * Set DROP_EN iff we have no flow control and >1 queue.
    614 		 * Note that srrctl was cleared shortly before during reset,
    615 		 * so we do not need to clear the bit, but do it just in case
    616 		 * this code is moved elsewhere.
    617 		 */
    618 		if (adapter->num_queues > 1 &&
    619 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    620 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    621 		} else {
    622 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    623 		}
    624 
    625 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    626 
    627 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    628 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    629 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    630 
    631 		/* Set the driver rx tail address */
    632 		rxr->tail =  IXGBE_RDT(rxr->me);
    633 	}
    634 
    635 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    636 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    637 		            | IXGBE_PSRTYPE_UDPHDR
    638 		            | IXGBE_PSRTYPE_IPV4HDR
    639 		            | IXGBE_PSRTYPE_IPV6HDR;
    640 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    641 	}
    642 
    643 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    644 
    645 	ixgbe_initialize_rss_mapping(adapter);
    646 
    647 	if (adapter->num_queues > 1) {
    648 		/* RSS and RX IPP Checksum are mutually exclusive */
    649 		rxcsum |= IXGBE_RXCSUM_PCSD;
    650 	}
    651 
    652 	if (ifp->if_capenable & IFCAP_RXCSUM)
    653 		rxcsum |= IXGBE_RXCSUM_PCSD;
    654 
    655 	/* This is useful for calculating UDP/IP fragment checksums */
    656 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    657 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    658 
    659 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    660 
    661 	return;
    662 } /* ixgbe_initialize_receive_units */
    663 
    664 /************************************************************************
    665  * ixgbe_initialize_transmit_units - Enable transmit units.
    666  ************************************************************************/
    667 static void
    668 ixgbe_initialize_transmit_units(struct adapter *adapter)
    669 {
    670 	struct tx_ring  *txr = adapter->tx_rings;
    671 	struct ixgbe_hw	*hw = &adapter->hw;
    672 
    673 	/* Setup the Base and Length of the Tx Descriptor Ring */
    674 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    675 		u64 tdba = txr->txdma.dma_paddr;
    676 		u32 txctrl = 0;
    677 		int j = txr->me;
    678 
    679 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    680 		    (tdba & 0x00000000ffffffffULL));
    681 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    682 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    683 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    684 
    685 		/* Setup the HW Tx Head and Tail descriptor pointers */
    686 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    687 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    688 
    689 		/* Cache the tail address */
    690 		txr->tail = IXGBE_TDT(j);
    691 
    692 		/* Disable Head Writeback */
    693 		/*
    694 		 * Note: for X550 series devices, these registers are actually
     695 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    696 		 * fields remain the same.
    697 		 */
    698 		switch (hw->mac.type) {
    699 		case ixgbe_mac_82598EB:
    700 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    701 			break;
    702 		default:
    703 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    704 			break;
    705 		}
    706 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    707 		switch (hw->mac.type) {
    708 		case ixgbe_mac_82598EB:
    709 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    710 			break;
    711 		default:
    712 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    713 			break;
    714 		}
    715 
    716 	}
    717 
    718 	if (hw->mac.type != ixgbe_mac_82598EB) {
    719 		u32 dmatxctl, rttdcs;
    720 
    721 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    722 		dmatxctl |= IXGBE_DMATXCTL_TE;
    723 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    724 		/* Disable arbiter to set MTQC */
    725 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    726 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    727 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    728 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    729 		    ixgbe_get_mtqc(adapter->iov_mode));
    730 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    731 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    732 	}
    733 
    734 	return;
    735 } /* ixgbe_initialize_transmit_units */
    736 
    737 /************************************************************************
    738  * ixgbe_attach - Device initialization routine
    739  *
    740  *   Called when the driver is being loaded.
    741  *   Identifies the type of hardware, allocates all resources
    742  *   and initializes the hardware.
    743  *
     744  *   On failure, all acquired resources are released and attach aborts.
    745  ************************************************************************/
    746 static void
    747 ixgbe_attach(device_t parent, device_t dev, void *aux)
    748 {
    749 	struct adapter  *adapter;
    750 	struct ixgbe_hw *hw;
    751 	int             error = -1;
    752 	u32		ctrl_ext;
    753 	u16		high, low, nvmreg;
    754 	pcireg_t	id, subid;
    755 	ixgbe_vendor_info_t *ent;
    756 	struct pci_attach_args *pa = aux;
    757 	const char *str;
    758 	char buf[256];
    759 
    760 	INIT_DEBUGOUT("ixgbe_attach: begin");
    761 
    762 	/* Allocate, clear, and link in our adapter structure */
    763 	adapter = device_private(dev);
    764 	adapter->hw.back = adapter;
    765 	adapter->dev = dev;
    766 	hw = &adapter->hw;
    767 	adapter->osdep.pc = pa->pa_pc;
    768 	adapter->osdep.tag = pa->pa_tag;
    769 	if (pci_dma64_available(pa))
    770 		adapter->osdep.dmat = pa->pa_dmat64;
    771 	else
    772 		adapter->osdep.dmat = pa->pa_dmat;
    773 	adapter->osdep.attached = false;
    774 
    775 	ent = ixgbe_lookup(pa);
    776 
    777 	KASSERT(ent != NULL);
    778 
    779 	aprint_normal(": %s, Version - %s\n",
    780 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    781 
    782 	/* Core Lock Init*/
     783 	/* Core Lock Init */
    784 
    785 	/* Set up the timer callout */
    786 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    787 
    788 	/* Determine hardware revision */
    789 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    790 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    791 
    792 	hw->vendor_id = PCI_VENDOR(id);
    793 	hw->device_id = PCI_PRODUCT(id);
    794 	hw->revision_id =
    795 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    796 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    797 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    798 
    799 	/*
    800 	 * Make sure BUSMASTER is set
    801 	 */
    802 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    803 
    804 	/* Do base PCI setup - map BAR0 */
    805 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    806 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    807 		error = ENXIO;
    808 		goto err_out;
    809 	}
    810 
    811 	/* let hardware know driver is loaded */
    812 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    813 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    814 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    815 
    816 	/*
    817 	 * Initialize the shared code
    818 	 */
    819 	if (ixgbe_init_shared_code(hw)) {
    820 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    821 		error = ENXIO;
    822 		goto err_out;
    823 	}
    824 
    825 	switch (hw->mac.type) {
    826 	case ixgbe_mac_82598EB:
    827 		str = "82598EB";
    828 		break;
    829 	case ixgbe_mac_82599EB:
    830 		str = "82599EB";
    831 		break;
    832 	case ixgbe_mac_X540:
    833 		str = "X540";
    834 		break;
    835 	case ixgbe_mac_X550:
    836 		str = "X550";
    837 		break;
    838 	case ixgbe_mac_X550EM_x:
    839 		str = "X550EM";
    840 		break;
    841 	case ixgbe_mac_X550EM_a:
    842 		str = "X550EM A";
    843 		break;
    844 	default:
    845 		str = "Unknown";
    846 		break;
    847 	}
    848 	aprint_normal_dev(dev, "device %s\n", str);
    849 
    850 	if (hw->mbx.ops.init_params)
    851 		hw->mbx.ops.init_params(hw);
    852 
    853 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    854 
    855 	/* Pick up the 82599 settings */
    856 	if (hw->mac.type != ixgbe_mac_82598EB) {
    857 		hw->phy.smart_speed = ixgbe_smart_speed;
    858 		adapter->num_segs = IXGBE_82599_SCATTER;
    859 	} else
    860 		adapter->num_segs = IXGBE_82598_SCATTER;
    861 
    862 	hw->mac.ops.set_lan_id(hw);
    863 	ixgbe_init_device_features(adapter);
    864 
    865 	if (ixgbe_configure_interrupts(adapter)) {
    866 		error = ENXIO;
    867 		goto err_out;
    868 	}
    869 
    870 	/* Allocate multicast array memory. */
    871 	adapter->mta = malloc(sizeof(*adapter->mta) *
    872 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    873 	if (adapter->mta == NULL) {
    874 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    875 		error = ENOMEM;
    876 		goto err_out;
    877 	}
    878 
    879 	/* Enable WoL (if supported) */
    880 	ixgbe_check_wol_support(adapter);
    881 
    882 	/* Verify adapter fan is still functional (if applicable) */
    883 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    884 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    885 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    886 	}
    887 
    888 	/* Ensure SW/FW semaphore is free */
    889 	ixgbe_init_swfw_semaphore(hw);
    890 
    891 	/* Enable EEE power saving */
    892 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    893 		hw->mac.ops.setup_eee(hw, TRUE);
    894 
    895 	/* Set an initial default flow control value */
    896 	hw->fc.requested_mode = ixgbe_flow_control;
    897 
    898 	/* Sysctls for limiting the amount of work done in the taskqueues */
    899 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    900 	    "max number of rx packets to process",
    901 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    902 
    903 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    904 	    "max number of tx packets to process",
    905 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    906 
    907 	/* Do descriptor calc and sanity checks */
    908 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    909 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    910 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    911 		adapter->num_tx_desc = DEFAULT_TXD;
    912 	} else
    913 		adapter->num_tx_desc = ixgbe_txd;
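         	/*
         	 * A sketch of the alignment rule above, assuming DBA_ALIGN is 128
         	 * and a 16-byte union ixgbe_adv_tx_desc: the descriptor count must
         	 * be a multiple of 8 and fall within [MIN_TXD, MAX_TXD].
         	 */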
    914 
    915 	/*
    916 	 * With many RX rings it is easy to exceed the
    917 	 * system mbuf allocation. Tuning nmbclusters
    918 	 * can alleviate this.
    919 	 */
    920 	if (nmbclusters > 0) {
    921 		int s;
    922 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    923 		if (s > nmbclusters) {
    924 			aprint_error_dev(dev, "RX Descriptors exceed "
    925 			    "system mbuf max, using default instead!\n");
    926 			ixgbe_rxd = DEFAULT_RXD;
    927 		}
    928 	}
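         	/*
         	 * A sketch of the check above: with, say, 2048 RX descriptors per
         	 * queue, 8 queues and 2 ports counted in ixgbe_total_ports, s is
         	 * 2048 * 8 * 2 = 32768 clusters, which must not exceed nmbclusters.
         	 */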
    929 
    930 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    931 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    932 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    933 		adapter->num_rx_desc = DEFAULT_RXD;
    934 	} else
    935 		adapter->num_rx_desc = ixgbe_rxd;
    936 
    937 	/* Allocate our TX/RX Queues */
    938 	if (ixgbe_allocate_queues(adapter)) {
    939 		error = ENOMEM;
    940 		goto err_out;
    941 	}
    942 
    943 	hw->phy.reset_if_overtemp = TRUE;
    944 	error = ixgbe_reset_hw(hw);
    945 	hw->phy.reset_if_overtemp = FALSE;
    946 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    947 		/*
    948 		 * No optics in this port, set up
    949 		 * so the timer routine will probe
    950 		 * for later insertion.
    951 		 */
    952 		adapter->sfp_probe = TRUE;
    953 		error = IXGBE_SUCCESS;
    954 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    955 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    956 		error = EIO;
    957 		goto err_late;
    958 	} else if (error) {
    959 		aprint_error_dev(dev, "Hardware initialization failed\n");
    960 		error = EIO;
    961 		goto err_late;
    962 	}
    963 
    964 	/* Make sure we have a good EEPROM before we read from it */
    965 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    966 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    967 		error = EIO;
    968 		goto err_late;
    969 	}
    970 
    971 	aprint_normal("%s:", device_xname(dev));
    972 	/* NVM Image Version */
    973 	switch (hw->mac.type) {
    974 	case ixgbe_mac_X540:
    975 	case ixgbe_mac_X550EM_a:
    976 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    977 		if (nvmreg == 0xffff)
    978 			break;
    979 		high = (nvmreg >> 12) & 0x0f;
    980 		low = (nvmreg >> 4) & 0xff;
    981 		id = nvmreg & 0x0f;
    982 		aprint_normal(" NVM Image Version %u.", high);
    983 		if (hw->mac.type == ixgbe_mac_X540)
    984 			str = "%x";
    985 		else
    986 			str = "%02x";
    987 		aprint_normal(str, low);
    988 		aprint_normal(" ID 0x%x,", id);
    989 		break;
    990 	case ixgbe_mac_X550EM_x:
    991 	case ixgbe_mac_X550:
    992 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    993 		if (nvmreg == 0xffff)
    994 			break;
    995 		high = (nvmreg >> 12) & 0x0f;
    996 		low = nvmreg & 0xff;
    997 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    998 		break;
    999 	default:
   1000 		break;
   1001 	}
   1002 
   1003 	/* PHY firmware revision */
   1004 	switch (hw->mac.type) {
   1005 	case ixgbe_mac_X540:
   1006 	case ixgbe_mac_X550:
   1007 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1008 		if (nvmreg == 0xffff)
   1009 			break;
   1010 		high = (nvmreg >> 12) & 0x0f;
   1011 		low = (nvmreg >> 4) & 0xff;
   1012 		id = nvmreg & 0x000f;
   1013 		aprint_normal(" PHY FW Revision %u.", high);
   1014 		if (hw->mac.type == ixgbe_mac_X540)
   1015 			str = "%x";
   1016 		else
   1017 			str = "%02x";
   1018 		aprint_normal(str, low);
   1019 		aprint_normal(" ID 0x%x,", id);
   1020 		break;
   1021 	default:
   1022 		break;
   1023 	}
   1024 
   1025 	/* NVM Map version & OEM NVM Image version */
   1026 	switch (hw->mac.type) {
   1027 	case ixgbe_mac_X550:
   1028 	case ixgbe_mac_X550EM_x:
   1029 	case ixgbe_mac_X550EM_a:
   1030 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1031 		if (nvmreg != 0xffff) {
   1032 			high = (nvmreg >> 12) & 0x0f;
   1033 			low = nvmreg & 0x00ff;
   1034 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1035 		}
   1036 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1037 		if (nvmreg != 0xffff) {
   1038 			high = (nvmreg >> 12) & 0x0f;
   1039 			low = nvmreg & 0x00ff;
   1040 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1041 			    low);
   1042 		}
   1043 		break;
   1044 	default:
   1045 		break;
   1046 	}
   1047 
   1048 	/* Print the ETrackID */
   1049 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1050 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1051 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1052 
   1053 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1054 		error = ixgbe_allocate_msix(adapter, pa);
   1055 		if (error) {
   1056 			/* Free allocated queue structures first */
   1057 			ixgbe_free_transmit_structures(adapter);
   1058 			ixgbe_free_receive_structures(adapter);
   1059 			free(adapter->queues, M_DEVBUF);
   1060 
   1061 			/* Fallback to legacy interrupt */
   1062 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1063 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1064 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1065 			adapter->num_queues = 1;
   1066 
   1067 			/* Allocate our TX/RX Queues again */
   1068 			if (ixgbe_allocate_queues(adapter)) {
   1069 				error = ENOMEM;
   1070 				goto err_out;
   1071 			}
   1072 		}
   1073 	}
   1074 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1075 		error = ixgbe_allocate_legacy(adapter, pa);
   1076 	if (error)
   1077 		goto err_late;
   1078 
    1079 	/* Softints for Link, SFP, Multispeed Fiber and Flow Director */
   1080 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1081 	    ixgbe_handle_link, adapter);
   1082 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1083 	    ixgbe_handle_mod, adapter);
   1084 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1085 	    ixgbe_handle_msf, adapter);
   1086 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1087 	    ixgbe_handle_phy, adapter);
   1088 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1089 		adapter->fdir_si =
   1090 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1091 			ixgbe_reinit_fdir, adapter);
   1092 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1093 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1094 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1095 		&& (adapter->fdir_si == NULL))) {
   1096 		aprint_error_dev(dev,
    1097 		    "could not establish software interrupts\n");
   1098 		goto err_out;
   1099 	}
   1100 
   1101 	error = ixgbe_start_hw(hw);
   1102 	switch (error) {
   1103 	case IXGBE_ERR_EEPROM_VERSION:
   1104 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1105 		    "LOM.  Please be aware there may be issues associated "
   1106 		    "with your hardware.\nIf you are experiencing problems "
   1107 		    "please contact your Intel or hardware representative "
   1108 		    "who provided you with this hardware.\n");
   1109 		break;
   1110 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1111 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1112 		error = EIO;
   1113 		goto err_late;
   1114 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1115 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1116 		/* falls thru */
   1117 	default:
   1118 		break;
   1119 	}
   1120 
   1121 	/* Setup OS specific network interface */
   1122 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1123 		goto err_late;
   1124 
    1125 	/*
    1126 	 * Print the PHY ID only for copper PHYs. On devices with an SFP(+)
    1127 	 * cage and a module inserted, phy.id is an SFF-8024 ID, not an MII PHY ID.
    1128 	 */
   1129 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1130 		uint16_t id1, id2;
   1131 		int oui, model, rev;
   1132 		const char *descr;
   1133 
   1134 		id1 = hw->phy.id >> 16;
   1135 		id2 = hw->phy.id & 0xffff;
   1136 		oui = MII_OUI(id1, id2);
   1137 		model = MII_MODEL(id2);
   1138 		rev = MII_REV(id2);
   1139 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1140 			aprint_normal_dev(dev,
   1141 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1142 			    descr, oui, model, rev);
   1143 		else
   1144 			aprint_normal_dev(dev,
   1145 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1146 			    oui, model, rev);
   1147 	}
   1148 
   1149 	/* Enable the optics for 82599 SFP+ fiber */
   1150 	ixgbe_enable_tx_laser(hw);
   1151 
   1152 	/* Enable power to the phy. */
   1153 	ixgbe_set_phy_power(hw, TRUE);
   1154 
   1155 	/* Initialize statistics */
   1156 	ixgbe_update_stats_counters(adapter);
   1157 
   1158 	/* Check PCIE slot type/speed/width */
   1159 	ixgbe_get_slot_info(adapter);
   1160 
   1161 	/*
   1162 	 * Do time init and sysctl init here, but
   1163 	 * only on the first port of a bypass adapter.
   1164 	 */
   1165 	ixgbe_bypass_init(adapter);
   1166 
   1167 	/* Set an initial dmac value */
   1168 	adapter->dmac = 0;
   1169 	/* Set initial advertised speeds (if applicable) */
   1170 	adapter->advertise = ixgbe_get_advertise(adapter);
   1171 
   1172 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1173 		ixgbe_define_iov_schemas(dev, &error);
   1174 
   1175 	/* Add sysctls */
   1176 	ixgbe_add_device_sysctls(adapter);
   1177 	ixgbe_add_hw_stats(adapter);
   1178 
   1179 	/* For Netmap */
   1180 	adapter->init_locked = ixgbe_init_locked;
   1181 	adapter->stop_locked = ixgbe_stop;
   1182 
   1183 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1184 		ixgbe_netmap_attach(adapter);
   1185 
   1186 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1187 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1188 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1189 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1190 
   1191 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1192 		pmf_class_network_register(dev, adapter->ifp);
   1193 	else
   1194 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1195 
   1196 	INIT_DEBUGOUT("ixgbe_attach: end");
   1197 	adapter->osdep.attached = true;
   1198 
   1199 	return;
   1200 
   1201 err_late:
   1202 	ixgbe_free_transmit_structures(adapter);
   1203 	ixgbe_free_receive_structures(adapter);
   1204 	free(adapter->queues, M_DEVBUF);
   1205 err_out:
   1206 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1207 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1208 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1209 	ixgbe_free_softint(adapter);
   1210 	ixgbe_free_pci_resources(adapter);
   1211 	if (adapter->mta != NULL)
   1212 		free(adapter->mta, M_DEVBUF);
   1213 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1214 
   1215 	return;
   1216 } /* ixgbe_attach */
   1217 
   1218 /************************************************************************
   1219  * ixgbe_check_wol_support
   1220  *
   1221  *   Checks whether the adapter's ports are capable of
   1222  *   Wake On LAN by reading the adapter's NVM.
   1223  *
   1224  *   Sets each port's hw->wol_enabled value depending
   1225  *   on the value read here.
   1226  ************************************************************************/
   1227 static void
   1228 ixgbe_check_wol_support(struct adapter *adapter)
   1229 {
   1230 	struct ixgbe_hw *hw = &adapter->hw;
   1231 	u16             dev_caps = 0;
   1232 
   1233 	/* Find out WoL support for port */
   1234 	adapter->wol_support = hw->wol_enabled = 0;
   1235 	ixgbe_get_device_caps(hw, &dev_caps);
   1236 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1237 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1238 	     hw->bus.func == 0))
   1239 		adapter->wol_support = hw->wol_enabled = 1;
   1240 
   1241 	/* Save initial wake up filter configuration */
   1242 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1243 
   1244 	return;
   1245 } /* ixgbe_check_wol_support */
   1246 
   1247 /************************************************************************
   1248  * ixgbe_setup_interface
   1249  *
   1250  *   Setup networking device structure and register an interface.
   1251  ************************************************************************/
   1252 static int
   1253 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1254 {
   1255 	struct ethercom *ec = &adapter->osdep.ec;
   1256 	struct ifnet   *ifp;
   1257 	int rv;
   1258 
   1259 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1260 
   1261 	ifp = adapter->ifp = &ec->ec_if;
   1262 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1263 	ifp->if_baudrate = IF_Gbps(10);
   1264 	ifp->if_init = ixgbe_init;
   1265 	ifp->if_stop = ixgbe_ifstop;
   1266 	ifp->if_softc = adapter;
   1267 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1268 #ifdef IXGBE_MPSAFE
   1269 	ifp->if_extflags = IFEF_MPSAFE;
   1270 #endif
   1271 	ifp->if_ioctl = ixgbe_ioctl;
   1272 #if __FreeBSD_version >= 1100045
   1273 	/* TSO parameters */
   1274 	ifp->if_hw_tsomax = 65518;
   1275 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1276 	ifp->if_hw_tsomaxsegsize = 2048;
   1277 #endif
   1278 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1279 #if 0
   1280 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1281 #endif
   1282 	} else {
   1283 		ifp->if_transmit = ixgbe_mq_start;
   1284 #if 0
   1285 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1286 #endif
   1287 	}
   1288 	ifp->if_start = ixgbe_legacy_start;
   1289 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1290 	IFQ_SET_READY(&ifp->if_snd);
   1291 
   1292 	rv = if_initialize(ifp);
   1293 	if (rv != 0) {
   1294 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1295 		return rv;
   1296 	}
   1297 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1298 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1299 	/*
    1300 	 * We use a per-TX-queue softint, so if_deferred_start_init()
    1301 	 * isn't used.
   1302 	 */
   1303 	if_register(ifp);
   1304 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1305 
   1306 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
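         	/*
         	 * Example: with the default 1500-byte MTU this works out to
         	 * 1500 + 14 + 4 = 1518 bytes; a 9000-byte jumbo MTU gives 9018.
         	 */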
   1307 
   1308 	/*
   1309 	 * Tell the upper layer(s) we support long frames.
   1310 	 */
   1311 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1312 
   1313 	/* Set capability flags */
   1314 	ifp->if_capabilities |= IFCAP_RXCSUM
   1315 			     |  IFCAP_TXCSUM
   1316 			     |  IFCAP_TSOv4
   1317 			     |  IFCAP_TSOv6
   1318 			     |  IFCAP_LRO;
   1319 	ifp->if_capenable = 0;
   1320 
   1321 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1322 	    		    |  ETHERCAP_VLAN_HWCSUM
   1323 	    		    |  ETHERCAP_JUMBO_MTU
   1324 	    		    |  ETHERCAP_VLAN_MTU;
   1325 
   1326 	/* Enable the above capabilities by default */
   1327 	ec->ec_capenable = ec->ec_capabilities;
   1328 
    1329 	/*
    1330 	 * Don't turn this on by default. If vlans are
    1331 	 * created on another pseudo device (e.g. lagg),
    1332 	 * vlan events are not passed through, breaking
    1333 	 * operation, but with HW FILTER off it works. If
    1334 	 * you use vlans directly on the ixgbe driver you
    1335 	 * can enable this to get full hardware tag filtering.
    1336 	 */
   1337 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1338 
   1339 	/*
   1340 	 * Specify the media types supported by this adapter and register
   1341 	 * callbacks to update media and link information
   1342 	 */
   1343 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1344 	    ixgbe_media_status);
   1345 
   1346 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1347 	ixgbe_add_media_types(adapter);
   1348 
   1349 	/* Set autoselect media by default */
   1350 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1351 
   1352 	return (0);
   1353 } /* ixgbe_setup_interface */
   1354 
   1355 /************************************************************************
   1356  * ixgbe_add_media_types
   1357  ************************************************************************/
   1358 static void
   1359 ixgbe_add_media_types(struct adapter *adapter)
   1360 {
   1361 	struct ixgbe_hw *hw = &adapter->hw;
   1362 	device_t        dev = adapter->dev;
   1363 	u64             layer;
   1364 
   1365 	layer = adapter->phy_layer;
   1366 
   1367 #define	ADD(mm, dd)							\
   1368 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1369 
   1370 	/* Media types with matching NetBSD media defines */
   1371 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1372 		ADD(IFM_10G_T | IFM_FDX, 0);
   1373 	}
   1374 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1375 		ADD(IFM_1000_T | IFM_FDX, 0);
   1376 	}
   1377 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1378 		ADD(IFM_100_TX | IFM_FDX, 0);
   1379 	}
   1380 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1381 		ADD(IFM_10_T | IFM_FDX, 0);
   1382 	}
   1383 
   1384 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1385 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1386 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1387 	}
   1388 
   1389 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1390 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1391 		if (hw->phy.multispeed_fiber) {
   1392 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1393 		}
   1394 	}
   1395 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1396 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1397 		if (hw->phy.multispeed_fiber) {
   1398 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1399 		}
   1400 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1401 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1402 	}
   1403 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1404 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1405 	}
   1406 
   1407 #ifdef IFM_ETH_XTYPE
   1408 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1409 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1410 	}
   1411 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1412 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1413 	}
   1414 #else
   1415 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1416 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1417 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1418 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1419 	}
   1420 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1421 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1422 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1423 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1424 	}
   1425 #endif
   1426 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1427 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1428 	}
   1429 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1430 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1431 	}
   1432 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1433 		ADD(IFM_2500_T | IFM_FDX, 0);
   1434 	}
   1435 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1436 		ADD(IFM_5000_T | IFM_FDX, 0);
   1437 	}
   1438 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1439 		device_printf(dev, "Media supported: 1000baseBX\n");
   1440 	/* XXX no ifmedia_set? */
   1441 
   1442 	ADD(IFM_AUTO, 0);
   1443 
   1444 #undef ADD
   1445 } /* ixgbe_add_media_types */
   1446 
   1447 /************************************************************************
   1448  * ixgbe_is_sfp
   1449  ************************************************************************/
   1450 static inline bool
   1451 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1452 {
   1453 	switch (hw->mac.type) {
   1454 	case ixgbe_mac_82598EB:
   1455 		if (hw->phy.type == ixgbe_phy_nl)
   1456 			return TRUE;
   1457 		return FALSE;
   1458 	case ixgbe_mac_82599EB:
   1459 		switch (hw->mac.ops.get_media_type(hw)) {
   1460 		case ixgbe_media_type_fiber:
   1461 		case ixgbe_media_type_fiber_qsfp:
   1462 			return TRUE;
   1463 		default:
   1464 			return FALSE;
   1465 		}
   1466 	case ixgbe_mac_X550EM_x:
   1467 	case ixgbe_mac_X550EM_a:
   1468 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1469 			return TRUE;
   1470 		return FALSE;
   1471 	default:
   1472 		return FALSE;
   1473 	}
   1474 } /* ixgbe_is_sfp */
   1475 
   1476 /************************************************************************
   1477  * ixgbe_config_link
   1478  ************************************************************************/
   1479 static void
   1480 ixgbe_config_link(struct adapter *adapter)
   1481 {
   1482 	struct ixgbe_hw *hw = &adapter->hw;
   1483 	u32             autoneg, err = 0;
   1484 	bool            sfp, negotiate = false;
   1485 
   1486 	sfp = ixgbe_is_sfp(hw);
   1487 
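         	/*
         	 * For SFP(+) ports the actual module / multispeed-fiber
         	 * handling is deferred to the mod/msf softints scheduled
         	 * below instead of being done synchronously here.
         	 */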
   1488 	if (sfp) {
   1489 		if (hw->phy.multispeed_fiber) {
   1490 			hw->mac.ops.setup_sfp(hw);
   1491 			ixgbe_enable_tx_laser(hw);
   1492 			kpreempt_disable();
   1493 			softint_schedule(adapter->msf_si);
   1494 			kpreempt_enable();
   1495 		} else {
   1496 			kpreempt_disable();
   1497 			softint_schedule(adapter->mod_si);
   1498 			kpreempt_enable();
   1499 		}
   1500 	} else {
   1501 		if (hw->mac.ops.check_link)
   1502 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1503 			    &adapter->link_up, FALSE);
   1504 		if (err)
   1505 			goto out;
   1506 		autoneg = hw->phy.autoneg_advertised;
   1507 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1508 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1509 			    &negotiate);
   1510 		if (err)
   1511 			goto out;
   1512 		if (hw->mac.ops.setup_link)
    1513 			err = hw->mac.ops.setup_link(hw, autoneg,
   1514 			    adapter->link_up);
   1515 	}
   1516 out:
   1517 
   1518 	return;
   1519 } /* ixgbe_config_link */
   1520 
   1521 /************************************************************************
   1522  * ixgbe_update_stats_counters - Update board statistics counters.
   1523  ************************************************************************/
   1524 static void
   1525 ixgbe_update_stats_counters(struct adapter *adapter)
   1526 {
   1527 	struct ifnet          *ifp = adapter->ifp;
   1528 	struct ixgbe_hw       *hw = &adapter->hw;
   1529 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1530 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1531 	u64                   total_missed_rx = 0;
   1532 	uint64_t              crcerrs, rlec;
   1533 
   1534 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1535 	stats->crcerrs.ev_count += crcerrs;
   1536 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1537 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1538 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1539 	if (hw->mac.type == ixgbe_mac_X550)
   1540 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1541 
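         	/*
         	 * The hardware keeps more per-queue register counters than
         	 * queues the driver may have configured; the modulo below
         	 * folds the extra registers into the configured queues'
         	 * event counters.
         	 */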
   1542 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1543 		int j = i % adapter->num_queues;
   1544 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1545 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1546 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1547 	}
   1548 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1549 		uint32_t mp;
   1550 		int j = i % adapter->num_queues;
   1551 
   1552 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1553 		/* global total per queue */
   1554 		stats->mpc[j].ev_count += mp;
   1555 		/* running comprehensive total for stats display */
   1556 		total_missed_rx += mp;
   1557 
   1558 		if (hw->mac.type == ixgbe_mac_82598EB)
   1559 			stats->rnbc[j].ev_count
   1560 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1561 
   1562 	}
   1563 	stats->mpctotal.ev_count += total_missed_rx;
   1564 
   1565 	/* Document says M[LR]FC are valid when link is up and 10Gbps */
   1566 	if ((adapter->link_active == TRUE)
   1567 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1568 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1569 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1570 	}
   1571 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1572 	stats->rlec.ev_count += rlec;
   1573 
   1574 	/* Hardware workaround, gprc counts missed packets */
   1575 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1576 
   1577 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1578 	stats->lxontxc.ev_count += lxon;
   1579 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1580 	stats->lxofftxc.ev_count += lxoff;
   1581 	total = lxon + lxoff;
   1582 
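         	/*
         	 * Note: "total" (XON + XOFF frames sent) is subtracted from
         	 * the good octet/packet TX counters below on the assumption
         	 * that each pause frame is a minimum-sized (ETHER_MIN_LEN)
         	 * frame counted by the MAC but carrying no user data.
         	 */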
   1583 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1584 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1585 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1586 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1587 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1588 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1589 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1590 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1591 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1592 	} else {
   1593 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1594 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1595 		/* 82598 only has a counter in the high register */
   1596 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1597 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1598 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1599 	}
   1600 
   1601 	/*
   1602 	 * Workaround: mprc hardware is incorrectly counting
   1603 	 * broadcasts, so for now we subtract those.
   1604 	 */
   1605 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1606 	stats->bprc.ev_count += bprc;
   1607 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1608 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1609 
   1610 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1611 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1612 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1613 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1614 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1615 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1616 
   1617 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1618 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1619 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1620 
   1621 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1622 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1623 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1624 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1625 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1626 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1627 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1628 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1629 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1630 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1631 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1632 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1633 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1634 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1635 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1636 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1637 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1638 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    1639 	/* Only read FCOE stats on 82599 and newer MACs */
   1640 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1641 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1642 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1643 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1644 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1645 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1646 	}
   1647 
   1648 	/* Fill out the OS statistics structure */
   1649 	/*
   1650 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1651 	 * adapter->stats counters. It's required to make ifconfig -z
   1652 	 * (SOICZIFDATA) work.
    1653 	 * (SIOCZIFDATA) work.
   1654 	ifp->if_collisions = 0;
   1655 
   1656 	/* Rx Errors */
   1657 	ifp->if_iqdrops += total_missed_rx;
   1658 	ifp->if_ierrors += crcerrs + rlec;
   1659 } /* ixgbe_update_stats_counters */
   1660 
   1661 /************************************************************************
   1662  * ixgbe_add_hw_stats
   1663  *
   1664  *   Add sysctl variables, one per statistic, to the system.
   1665  ************************************************************************/
   1666 static void
   1667 ixgbe_add_hw_stats(struct adapter *adapter)
   1668 {
   1669 	device_t dev = adapter->dev;
   1670 	const struct sysctlnode *rnode, *cnode;
   1671 	struct sysctllog **log = &adapter->sysctllog;
   1672 	struct tx_ring *txr = adapter->tx_rings;
   1673 	struct rx_ring *rxr = adapter->rx_rings;
   1674 	struct ixgbe_hw *hw = &adapter->hw;
   1675 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1676 	const char *xname = device_xname(dev);
   1677 
   1678 	/* Driver Statistics */
   1679 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1680 	    NULL, xname, "Handled queue in softint");
   1681 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1682 	    NULL, xname, "Requeued in softint");
   1683 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1684 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1685 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1686 	    NULL, xname, "m_defrag() failed");
   1687 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1688 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1689 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1690 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1691 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1692 	    NULL, xname, "Driver tx dma hard fail other");
   1693 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1694 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1695 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1696 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1697 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1698 	    NULL, xname, "Watchdog timeouts");
   1699 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1700 	    NULL, xname, "TSO errors");
   1701 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1702 	    NULL, xname, "Link MSI-X IRQ Handled");
   1703 
   1704 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1705 		snprintf(adapter->queues[i].evnamebuf,
   1706 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1707 		    xname, i);
   1708 		snprintf(adapter->queues[i].namebuf,
   1709 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1710 
   1711 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1712 			aprint_error_dev(dev, "could not create sysctl root\n");
   1713 			break;
   1714 		}
   1715 
   1716 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1717 		    0, CTLTYPE_NODE,
   1718 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1719 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1720 			break;
   1721 
   1722 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1723 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1724 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1725 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1726 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1727 			break;
   1728 
   1729 #if 0 /* XXX msaitoh */
   1730 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1731 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1732 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1733 			NULL, 0, &(adapter->queues[i].irqs),
   1734 		    0, CTL_CREATE, CTL_EOL) != 0)
   1735 			break;
   1736 #endif
   1737 
   1738 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1739 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1740 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1741 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1742 		    0, CTL_CREATE, CTL_EOL) != 0)
   1743 			break;
   1744 
   1745 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1746 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1747 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1748 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1749 		    0, CTL_CREATE, CTL_EOL) != 0)
   1750 			break;
   1751 
   1752 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1753 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1754 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1755 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1756 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1757 		    NULL, adapter->queues[i].evnamebuf,
   1758 		    "Queue No Descriptor Available");
   1759 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1760 		    NULL, adapter->queues[i].evnamebuf,
   1761 		    "Queue Packets Transmitted");
   1762 #ifndef IXGBE_LEGACY_TX
   1763 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1764 		    NULL, adapter->queues[i].evnamebuf,
   1765 		    "Packets dropped in pcq");
   1766 #endif
   1767 
   1768 #ifdef LRO
   1769 		struct lro_ctrl *lro = &rxr->lro;
   1770 #endif /* LRO */
   1771 
   1772 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1773 		    CTLFLAG_READONLY,
   1774 		    CTLTYPE_INT,
   1775 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1776 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1777 		    CTL_CREATE, CTL_EOL) != 0)
   1778 			break;
   1779 
   1780 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1781 		    CTLFLAG_READONLY,
   1782 		    CTLTYPE_INT,
   1783 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1784 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1785 		    CTL_CREATE, CTL_EOL) != 0)
   1786 			break;
   1787 
   1788 		if (i < __arraycount(stats->mpc)) {
   1789 			evcnt_attach_dynamic(&stats->mpc[i],
   1790 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1791 			    "RX Missed Packet Count");
   1792 			if (hw->mac.type == ixgbe_mac_82598EB)
   1793 				evcnt_attach_dynamic(&stats->rnbc[i],
   1794 				    EVCNT_TYPE_MISC, NULL,
   1795 				    adapter->queues[i].evnamebuf,
   1796 				    "Receive No Buffers");
   1797 		}
   1798 		if (i < __arraycount(stats->pxontxc)) {
   1799 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1800 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1801 			    "pxontxc");
   1802 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1803 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1804 			    "pxonrxc");
   1805 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1806 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1807 			    "pxofftxc");
   1808 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1809 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1810 			    "pxoffrxc");
   1811 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1812 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1813 			    "pxon2offc");
   1814 		}
   1815 		if (i < __arraycount(stats->qprc)) {
   1816 			evcnt_attach_dynamic(&stats->qprc[i],
   1817 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1818 			    "qprc");
   1819 			evcnt_attach_dynamic(&stats->qptc[i],
   1820 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1821 			    "qptc");
   1822 			evcnt_attach_dynamic(&stats->qbrc[i],
   1823 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1824 			    "qbrc");
   1825 			evcnt_attach_dynamic(&stats->qbtc[i],
   1826 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1827 			    "qbtc");
   1828 			evcnt_attach_dynamic(&stats->qprdc[i],
   1829 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1830 			    "qprdc");
   1831 		}
   1832 
   1833 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1834 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1835 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1836 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1837 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1838 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1839 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1840 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1841 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1842 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1843 #ifdef LRO
   1844 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1845 				CTLFLAG_RD, &lro->lro_queued, 0,
   1846 				"LRO Queued");
   1847 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1848 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1849 				"LRO Flushed");
   1850 #endif /* LRO */
   1851 	}
   1852 
   1853 	/* MAC stats get their own sub node */
   1854 
   1855 	snprintf(stats->namebuf,
   1856 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1857 
   1858 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1859 	    stats->namebuf, "rx csum offload - IP");
   1860 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1861 	    stats->namebuf, "rx csum offload - L4");
   1862 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1863 	    stats->namebuf, "rx csum offload - IP bad");
   1864 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1865 	    stats->namebuf, "rx csum offload - L4 bad");
   1866 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1867 	    stats->namebuf, "Interrupt conditions zero");
   1868 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1869 	    stats->namebuf, "Legacy interrupts");
   1870 
   1871 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1872 	    stats->namebuf, "CRC Errors");
   1873 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1874 	    stats->namebuf, "Illegal Byte Errors");
   1875 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1876 	    stats->namebuf, "Byte Errors");
   1877 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1878 	    stats->namebuf, "MAC Short Packets Discarded");
   1879 	if (hw->mac.type >= ixgbe_mac_X550)
   1880 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1881 		    stats->namebuf, "Bad SFD");
   1882 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1883 	    stats->namebuf, "Total Packets Missed");
   1884 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1885 	    stats->namebuf, "MAC Local Faults");
   1886 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1887 	    stats->namebuf, "MAC Remote Faults");
   1888 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1889 	    stats->namebuf, "Receive Length Errors");
   1890 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1891 	    stats->namebuf, "Link XON Transmitted");
   1892 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1893 	    stats->namebuf, "Link XON Received");
   1894 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1895 	    stats->namebuf, "Link XOFF Transmitted");
   1896 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1897 	    stats->namebuf, "Link XOFF Received");
   1898 
   1899 	/* Packet Reception Stats */
   1900 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1901 	    stats->namebuf, "Total Octets Received");
   1902 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1903 	    stats->namebuf, "Good Octets Received");
   1904 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1905 	    stats->namebuf, "Total Packets Received");
   1906 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1907 	    stats->namebuf, "Good Packets Received");
   1908 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1909 	    stats->namebuf, "Multicast Packets Received");
   1910 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1911 	    stats->namebuf, "Broadcast Packets Received");
   1912 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
    1913 	    stats->namebuf, "64 byte frames received");
   1914 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1915 	    stats->namebuf, "65-127 byte frames received");
   1916 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1917 	    stats->namebuf, "128-255 byte frames received");
   1918 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1919 	    stats->namebuf, "256-511 byte frames received");
   1920 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1921 	    stats->namebuf, "512-1023 byte frames received");
   1922 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
    1923 	    stats->namebuf, "1024-1522 byte frames received");
   1924 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1925 	    stats->namebuf, "Receive Undersized");
   1926 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
    1927 	    stats->namebuf, "Fragmented Packets Received");
   1928 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1929 	    stats->namebuf, "Oversized Packets Received");
   1930 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1931 	    stats->namebuf, "Received Jabber");
   1932 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1933 	    stats->namebuf, "Management Packets Received");
   1934 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1935 	    stats->namebuf, "Management Packets Dropped");
   1936 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1937 	    stats->namebuf, "Checksum Errors");
   1938 
   1939 	/* Packet Transmission Stats */
   1940 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1941 	    stats->namebuf, "Good Octets Transmitted");
   1942 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1943 	    stats->namebuf, "Total Packets Transmitted");
   1944 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1945 	    stats->namebuf, "Good Packets Transmitted");
   1946 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1947 	    stats->namebuf, "Broadcast Packets Transmitted");
   1948 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1949 	    stats->namebuf, "Multicast Packets Transmitted");
   1950 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1951 	    stats->namebuf, "Management Packets Transmitted");
   1952 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
    1953 	    stats->namebuf, "64 byte frames transmitted");
   1954 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1955 	    stats->namebuf, "65-127 byte frames transmitted");
   1956 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1957 	    stats->namebuf, "128-255 byte frames transmitted");
   1958 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1959 	    stats->namebuf, "256-511 byte frames transmitted");
   1960 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1961 	    stats->namebuf, "512-1023 byte frames transmitted");
   1962 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1963 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1964 } /* ixgbe_add_hw_stats */
   1965 
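         /************************************************************************
          * ixgbe_clear_evcnt
          *
          *   Reset the driver and MAC statistics event counters to zero,
          *   e.g. when the interface statistics are cleared.
          ************************************************************************/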
   1966 static void
   1967 ixgbe_clear_evcnt(struct adapter *adapter)
   1968 {
   1969 	struct tx_ring *txr = adapter->tx_rings;
   1970 	struct rx_ring *rxr = adapter->rx_rings;
   1971 	struct ixgbe_hw *hw = &adapter->hw;
   1972 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1973 
   1974 	adapter->handleq.ev_count = 0;
   1975 	adapter->req.ev_count = 0;
   1976 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1977 	adapter->mbuf_defrag_failed.ev_count = 0;
   1978 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1979 	adapter->einval_tx_dma_setup.ev_count = 0;
   1980 	adapter->other_tx_dma_setup.ev_count = 0;
   1981 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1982 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1983 	adapter->watchdog_events.ev_count = 0;
   1984 	adapter->tso_err.ev_count = 0;
   1985 	adapter->link_irq.ev_count = 0;
   1986 
   1987 	txr = adapter->tx_rings;
   1988 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1989 		adapter->queues[i].irqs.ev_count = 0;
   1990 		txr->no_desc_avail.ev_count = 0;
   1991 		txr->total_packets.ev_count = 0;
   1992 		txr->tso_tx.ev_count = 0;
   1993 #ifndef IXGBE_LEGACY_TX
   1994 		txr->pcq_drops.ev_count = 0;
   1995 #endif
   1996 
   1997 		if (i < __arraycount(stats->mpc)) {
   1998 			stats->mpc[i].ev_count = 0;
   1999 			if (hw->mac.type == ixgbe_mac_82598EB)
   2000 				stats->rnbc[i].ev_count = 0;
   2001 		}
   2002 		if (i < __arraycount(stats->pxontxc)) {
   2003 			stats->pxontxc[i].ev_count = 0;
   2004 			stats->pxonrxc[i].ev_count = 0;
   2005 			stats->pxofftxc[i].ev_count = 0;
   2006 			stats->pxoffrxc[i].ev_count = 0;
   2007 			stats->pxon2offc[i].ev_count = 0;
   2008 		}
   2009 		if (i < __arraycount(stats->qprc)) {
   2010 			stats->qprc[i].ev_count = 0;
   2011 			stats->qptc[i].ev_count = 0;
   2012 			stats->qbrc[i].ev_count = 0;
   2013 			stats->qbtc[i].ev_count = 0;
   2014 			stats->qprdc[i].ev_count = 0;
   2015 		}
   2016 
   2017 		rxr->rx_packets.ev_count = 0;
   2018 		rxr->rx_bytes.ev_count = 0;
   2019 		rxr->rx_copies.ev_count = 0;
   2020 		rxr->no_jmbuf.ev_count = 0;
   2021 		rxr->rx_discarded.ev_count = 0;
   2022 	}
   2023 	stats->ipcs.ev_count = 0;
   2024 	stats->l4cs.ev_count = 0;
   2025 	stats->ipcs_bad.ev_count = 0;
   2026 	stats->l4cs_bad.ev_count = 0;
   2027 	stats->intzero.ev_count = 0;
   2028 	stats->legint.ev_count = 0;
   2029 	stats->crcerrs.ev_count = 0;
   2030 	stats->illerrc.ev_count = 0;
   2031 	stats->errbc.ev_count = 0;
   2032 	stats->mspdc.ev_count = 0;
   2033 	stats->mbsdc.ev_count = 0;
   2034 	stats->mpctotal.ev_count = 0;
   2035 	stats->mlfc.ev_count = 0;
   2036 	stats->mrfc.ev_count = 0;
   2037 	stats->rlec.ev_count = 0;
   2038 	stats->lxontxc.ev_count = 0;
   2039 	stats->lxonrxc.ev_count = 0;
   2040 	stats->lxofftxc.ev_count = 0;
   2041 	stats->lxoffrxc.ev_count = 0;
   2042 
   2043 	/* Packet Reception Stats */
   2044 	stats->tor.ev_count = 0;
   2045 	stats->gorc.ev_count = 0;
   2046 	stats->tpr.ev_count = 0;
   2047 	stats->gprc.ev_count = 0;
   2048 	stats->mprc.ev_count = 0;
   2049 	stats->bprc.ev_count = 0;
   2050 	stats->prc64.ev_count = 0;
   2051 	stats->prc127.ev_count = 0;
   2052 	stats->prc255.ev_count = 0;
   2053 	stats->prc511.ev_count = 0;
   2054 	stats->prc1023.ev_count = 0;
   2055 	stats->prc1522.ev_count = 0;
   2056 	stats->ruc.ev_count = 0;
   2057 	stats->rfc.ev_count = 0;
   2058 	stats->roc.ev_count = 0;
   2059 	stats->rjc.ev_count = 0;
   2060 	stats->mngprc.ev_count = 0;
   2061 	stats->mngpdc.ev_count = 0;
   2062 	stats->xec.ev_count = 0;
   2063 
   2064 	/* Packet Transmission Stats */
   2065 	stats->gotc.ev_count = 0;
   2066 	stats->tpt.ev_count = 0;
   2067 	stats->gptc.ev_count = 0;
   2068 	stats->bptc.ev_count = 0;
   2069 	stats->mptc.ev_count = 0;
   2070 	stats->mngptc.ev_count = 0;
   2071 	stats->ptc64.ev_count = 0;
   2072 	stats->ptc127.ev_count = 0;
   2073 	stats->ptc255.ev_count = 0;
   2074 	stats->ptc511.ev_count = 0;
   2075 	stats->ptc1023.ev_count = 0;
   2076 	stats->ptc1522.ev_count = 0;
   2077 }
   2078 
   2079 /************************************************************************
   2080  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2081  *
   2082  *   Retrieves the TDH value from the hardware
   2083  ************************************************************************/
   2084 static int
   2085 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2086 {
   2087 	struct sysctlnode node = *rnode;
   2088 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2089 	uint32_t val;
   2090 
   2091 	if (!txr)
   2092 		return (0);
   2093 
   2094 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2095 	node.sysctl_data = &val;
   2096 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2097 } /* ixgbe_sysctl_tdh_handler */
   2098 
   2099 /************************************************************************
   2100  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2101  *
   2102  *   Retrieves the TDT value from the hardware
   2103  ************************************************************************/
   2104 static int
   2105 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2106 {
   2107 	struct sysctlnode node = *rnode;
   2108 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2109 	uint32_t val;
   2110 
   2111 	if (!txr)
   2112 		return (0);
   2113 
   2114 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2115 	node.sysctl_data = &val;
   2116 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2117 } /* ixgbe_sysctl_tdt_handler */
   2118 
   2119 /************************************************************************
   2120  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2121  *
   2122  *   Retrieves the RDH value from the hardware
   2123  ************************************************************************/
   2124 static int
   2125 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2126 {
   2127 	struct sysctlnode node = *rnode;
   2128 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2129 	uint32_t val;
   2130 
   2131 	if (!rxr)
   2132 		return (0);
   2133 
   2134 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2135 	node.sysctl_data = &val;
   2136 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2137 } /* ixgbe_sysctl_rdh_handler */
   2138 
   2139 /************************************************************************
   2140  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2141  *
   2142  *   Retrieves the RDT value from the hardware
   2143  ************************************************************************/
   2144 static int
   2145 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2146 {
   2147 	struct sysctlnode node = *rnode;
   2148 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2149 	uint32_t val;
   2150 
   2151 	if (!rxr)
   2152 		return (0);
   2153 
   2154 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2155 	node.sysctl_data = &val;
   2156 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2157 } /* ixgbe_sysctl_rdt_handler */
   2158 
   2159 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2160 /************************************************************************
   2161  * ixgbe_register_vlan
   2162  *
    2163  *   Run via vlan config EVENT; it enables us to use the
    2164  *   HW Filter table since we can get the vlan id. This
    2165  *   just creates the entry in the soft version of the
    2166  *   VFTA; init will repopulate the real table.
   2167  ************************************************************************/
   2168 static void
   2169 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2170 {
   2171 	struct adapter	*adapter = ifp->if_softc;
   2172 	u16		index, bit;
   2173 
   2174 	if (ifp->if_softc != arg)   /* Not our event */
   2175 		return;
   2176 
   2177 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2178 		return;
   2179 
   2180 	IXGBE_CORE_LOCK(adapter);
   2181 	index = (vtag >> 5) & 0x7F;
   2182 	bit = vtag & 0x1F;
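         	/* e.g. vtag 100: index = (100 >> 5) & 0x7F = 3, bit = 100 & 0x1F = 4 */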
   2183 	adapter->shadow_vfta[index] |= (1 << bit);
   2184 	ixgbe_setup_vlan_hw_support(adapter);
   2185 	IXGBE_CORE_UNLOCK(adapter);
   2186 } /* ixgbe_register_vlan */
   2187 
   2188 /************************************************************************
   2189  * ixgbe_unregister_vlan
   2190  *
   2191  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2192  ************************************************************************/
   2193 static void
   2194 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2195 {
   2196 	struct adapter	*adapter = ifp->if_softc;
   2197 	u16		index, bit;
   2198 
   2199 	if (ifp->if_softc != arg)
   2200 		return;
   2201 
   2202 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2203 		return;
   2204 
   2205 	IXGBE_CORE_LOCK(adapter);
   2206 	index = (vtag >> 5) & 0x7F;
   2207 	bit = vtag & 0x1F;
   2208 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2209 	/* Re-init to load the changes */
   2210 	ixgbe_setup_vlan_hw_support(adapter);
   2211 	IXGBE_CORE_UNLOCK(adapter);
   2212 } /* ixgbe_unregister_vlan */
   2213 #endif
   2214 
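         /************************************************************************
          * ixgbe_setup_vlan_hw_support
          *
          *   Program VLAN tag stripping and, when enabled, the hardware
          *   VLAN filter table from the driver's soft state.
          ************************************************************************/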
   2215 static void
   2216 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2217 {
   2218 	struct ethercom *ec = &adapter->osdep.ec;
   2219 	struct ixgbe_hw *hw = &adapter->hw;
   2220 	struct rx_ring	*rxr;
   2221 	int             i;
   2222 	u32		ctrl;
   2223 
   2224 
   2225 	/*
    2226 	 * We get here thru init_locked, meaning
    2227 	 * a soft reset; this has already cleared
    2228 	 * the VFTA and other state, so if no
    2229 	 * vlans have been registered, do nothing.
   2230 	 */
   2231 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2232 		return;
   2233 
   2234 	/* Setup the queues for vlans */
   2235 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2236 		for (i = 0; i < adapter->num_queues; i++) {
   2237 			rxr = &adapter->rx_rings[i];
   2238 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2239 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2240 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2241 				ctrl |= IXGBE_RXDCTL_VME;
   2242 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2243 			}
   2244 			rxr->vtag_strip = TRUE;
   2245 		}
   2246 	}
   2247 
   2248 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2249 		return;
   2250 	/*
    2251 	 * A soft reset zeroes out the VFTA, so
   2252 	 * we need to repopulate it now.
   2253 	 */
   2254 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2255 		if (adapter->shadow_vfta[i] != 0)
   2256 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2257 			    adapter->shadow_vfta[i]);
   2258 
   2259 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
    2260 	/* Enable the VLAN Filter Table if the capability is enabled */
   2261 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2262 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2263 		ctrl |= IXGBE_VLNCTRL_VFE;
   2264 	}
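         	/*
         	 * On 82598 tag stripping (VME) is global in VLNCTRL rather
         	 * than per-queue in RXDCTL as on newer MACs (see above).
         	 */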
   2265 	if (hw->mac.type == ixgbe_mac_82598EB)
   2266 		ctrl |= IXGBE_VLNCTRL_VME;
   2267 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2268 } /* ixgbe_setup_vlan_hw_support */
   2269 
   2270 /************************************************************************
   2271  * ixgbe_get_slot_info
   2272  *
   2273  *   Get the width and transaction speed of
   2274  *   the slot this adapter is plugged into.
   2275  ************************************************************************/
   2276 static void
   2277 ixgbe_get_slot_info(struct adapter *adapter)
   2278 {
   2279 	device_t		dev = adapter->dev;
   2280 	struct ixgbe_hw		*hw = &adapter->hw;
   2281 	u32                   offset;
   2282 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2283 	u16			link;
   2284 	int                   bus_info_valid = TRUE;
   2285 
   2286 	/* Some devices are behind an internal bridge */
   2287 	switch (hw->device_id) {
   2288 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2289 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2290 		goto get_parent_info;
   2291 	default:
   2292 		break;
   2293 	}
   2294 
   2295 	ixgbe_get_bus_info(hw);
   2296 
   2297 	/*
    2298 	 * Some devices don't use PCI-E; for those there is no point
    2299 	 * in displaying "Unknown" for bus speed and width.
   2300 	 */
   2301 	switch (hw->mac.type) {
   2302 	case ixgbe_mac_X550EM_x:
   2303 	case ixgbe_mac_X550EM_a:
   2304 		return;
   2305 	default:
   2306 		goto display;
   2307 	}
   2308 
   2309 get_parent_info:
   2310 	/*
   2311 	 * For the Quad port adapter we need to parse back
   2312 	 * up the PCI tree to find the speed of the expansion
   2313 	 * slot into which this adapter is plugged. A bit more work.
   2314 	 */
   2315 	dev = device_parent(device_parent(dev));
   2316 #if 0
   2317 #ifdef IXGBE_DEBUG
   2318 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2319 	    pci_get_slot(dev), pci_get_function(dev));
   2320 #endif
   2321 	dev = device_parent(device_parent(dev));
   2322 #ifdef IXGBE_DEBUG
   2323 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2324 	    pci_get_slot(dev), pci_get_function(dev));
   2325 #endif
   2326 #endif
   2327 	/* Now get the PCI Express Capabilities offset */
   2328 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2329 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2330 		/*
   2331 		 * Hmm...can't get PCI-Express capabilities.
   2332 		 * Falling back to default method.
   2333 		 */
   2334 		bus_info_valid = FALSE;
   2335 		ixgbe_get_bus_info(hw);
   2336 		goto display;
   2337 	}
   2338 	/* ...and read the Link Status Register */
   2339 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2340 	    offset + PCIE_LCSR) >> 16;
   2341 	ixgbe_set_pci_config_data_generic(hw, link);
   2342 
   2343 display:
   2344 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2345 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2346 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2347 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2348 	     "Unknown"),
   2349 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2350 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2351 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2352 	     "Unknown"));
   2353 
   2354 	if (bus_info_valid) {
   2355 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2356 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2357 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2358 			device_printf(dev, "PCI-Express bandwidth available"
   2359 			    " for this card\n     is not sufficient for"
   2360 			    " optimal performance.\n");
   2361 			device_printf(dev, "For optimal performance a x8 "
   2362 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2363 		}
   2364 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2365 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2366 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2367 			device_printf(dev, "PCI-Express bandwidth available"
   2368 			    " for this card\n     is not sufficient for"
   2369 			    " optimal performance.\n");
   2370 			device_printf(dev, "For optimal performance a x8 "
   2371 			    "PCIE Gen3 slot is required.\n");
   2372 		}
   2373 	} else
    2374 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
   2375 
   2376 	return;
   2377 } /* ixgbe_get_slot_info */
   2378 
   2379 /************************************************************************
   2380  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2381  ************************************************************************/
   2382 static inline void
   2383 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2384 {
   2385 	struct ixgbe_hw *hw = &adapter->hw;
   2386 	u64             queue = (u64)(1ULL << vector);
   2387 	u32             mask;
   2388 
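         	/*
         	 * 82598 has a single 32-bit EIMS register; newer MACs split
         	 * the (up to 64) queue vector bits across EIMS_EX(0) and
         	 * EIMS_EX(1).
         	 */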
   2389 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2390 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2391 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2392 	} else {
   2393 		mask = (queue & 0xFFFFFFFF);
   2394 		if (mask)
   2395 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2396 		mask = (queue >> 32);
   2397 		if (mask)
   2398 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2399 	}
   2400 } /* ixgbe_enable_queue */
   2401 
   2402 /************************************************************************
   2403  * ixgbe_disable_queue
   2404  ************************************************************************/
   2405 static inline void
   2406 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2407 {
   2408 	struct ixgbe_hw *hw = &adapter->hw;
   2409 	u64             queue = (u64)(1ULL << vector);
   2410 	u32             mask;
   2411 
   2412 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2413 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2414 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2415 	} else {
   2416 		mask = (queue & 0xFFFFFFFF);
   2417 		if (mask)
   2418 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2419 		mask = (queue >> 32);
   2420 		if (mask)
   2421 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2422 	}
   2423 } /* ixgbe_disable_queue */
   2424 
   2425 /************************************************************************
   2426  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2427  ************************************************************************/
   2428 static int
   2429 ixgbe_msix_que(void *arg)
   2430 {
   2431 	struct ix_queue	*que = arg;
   2432 	struct adapter  *adapter = que->adapter;
   2433 	struct ifnet    *ifp = adapter->ifp;
   2434 	struct tx_ring	*txr = que->txr;
   2435 	struct rx_ring	*rxr = que->rxr;
   2436 	bool		more;
   2437 	u32		newitr = 0;
   2438 
   2439 	/* Protect against spurious interrupts */
   2440 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2441 		return 0;
   2442 
   2443 	ixgbe_disable_queue(adapter, que->msix);
   2444 	++que->irqs.ev_count;
   2445 
   2446 #ifdef __NetBSD__
   2447 	/* Don't run ixgbe_rxeof in interrupt context */
   2448 	more = true;
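         	/* Forcing "more" makes the que softint scheduled at the end of this handler do the RX work. */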
   2449 #else
   2450 	more = ixgbe_rxeof(que);
   2451 #endif
   2452 
   2453 	IXGBE_TX_LOCK(txr);
   2454 	ixgbe_txeof(txr);
   2455 	IXGBE_TX_UNLOCK(txr);
   2456 
   2457 	/* Do AIM now? */
   2458 
   2459 	if (adapter->enable_aim == false)
   2460 		goto no_calc;
   2461 	/*
   2462 	 * Do Adaptive Interrupt Moderation:
   2463 	 *  - Write out last calculated setting
   2464 	 *  - Calculate based on average size over
   2465 	 *    the last interval.
   2466 	 */
   2467 	if (que->eitr_setting)
   2468 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2469 		    que->eitr_setting);
   2470 
   2471 	que->eitr_setting = 0;
   2472 
   2473 	/* Idle, do nothing */
    2474 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2475 		goto no_calc;
   2476 
   2477 	if ((txr->bytes) && (txr->packets))
   2478 		newitr = txr->bytes/txr->packets;
   2479 	if ((rxr->bytes) && (rxr->packets))
   2480 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2481 	newitr += 24; /* account for hardware frame, crc */
   2482 
   2483 	/* set an upper boundary */
   2484 	newitr = min(newitr, 3000);
   2485 
   2486 	/* Be nice to the mid range */
   2487 	if ((newitr > 300) && (newitr < 1200))
   2488 		newitr = (newitr / 3);
   2489 	else
   2490 		newitr = (newitr / 2);
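         	/*
         	 * Rough sketch of the effect (not from the datasheet): an
         	 * average frame of ~1500 bytes gives newitr ~1524 -> 762
         	 * after the scaling above, while ~64 byte frames give
         	 * ~88 -> 44, so bulk traffic ends up with a larger EITR
         	 * value (typically fewer interrupts) than small-packet
         	 * traffic.
         	 */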
   2491 
    2492 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2493 		newitr |= newitr << 16;
    2494 	else
    2495 		newitr |= IXGBE_EITR_CNT_WDIS;
    2496 
    2497 	/* save for next interrupt */
    2498 	que->eitr_setting = newitr;
   2499 
   2500 	/* Reset state */
   2501 	txr->bytes = 0;
   2502 	txr->packets = 0;
   2503 	rxr->bytes = 0;
   2504 	rxr->packets = 0;
   2505 
   2506 no_calc:
   2507 	if (more)
   2508 		softint_schedule(que->que_si);
   2509 	else
   2510 		ixgbe_enable_queue(adapter, que->msix);
   2511 
   2512 	return 1;
   2513 } /* ixgbe_msix_que */
   2514 
   2515 /************************************************************************
   2516  * ixgbe_media_status - Media Ioctl callback
   2517  *
   2518  *   Called whenever the user queries the status of
   2519  *   the interface using ifconfig.
   2520  ************************************************************************/
   2521 static void
   2522 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2523 {
   2524 	struct adapter *adapter = ifp->if_softc;
   2525 	struct ixgbe_hw *hw = &adapter->hw;
   2526 	int layer;
   2527 
   2528 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2529 	IXGBE_CORE_LOCK(adapter);
   2530 	ixgbe_update_link_status(adapter);
   2531 
   2532 	ifmr->ifm_status = IFM_AVALID;
   2533 	ifmr->ifm_active = IFM_ETHER;
   2534 
   2535 	if (!adapter->link_active) {
   2536 		ifmr->ifm_active |= IFM_NONE;
   2537 		IXGBE_CORE_UNLOCK(adapter);
   2538 		return;
   2539 	}
   2540 
   2541 	ifmr->ifm_status |= IFM_ACTIVE;
   2542 	layer = adapter->phy_layer;
   2543 
   2544 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2545 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2546 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2547 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2548 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2549 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2550 		switch (adapter->link_speed) {
   2551 		case IXGBE_LINK_SPEED_10GB_FULL:
   2552 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2553 			break;
   2554 		case IXGBE_LINK_SPEED_5GB_FULL:
   2555 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2556 			break;
   2557 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2558 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2559 			break;
   2560 		case IXGBE_LINK_SPEED_1GB_FULL:
   2561 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2562 			break;
   2563 		case IXGBE_LINK_SPEED_100_FULL:
   2564 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2565 			break;
   2566 		case IXGBE_LINK_SPEED_10_FULL:
   2567 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2568 			break;
   2569 		}
   2570 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2571 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2572 		switch (adapter->link_speed) {
   2573 		case IXGBE_LINK_SPEED_10GB_FULL:
   2574 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2575 			break;
   2576 		}
   2577 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2578 		switch (adapter->link_speed) {
   2579 		case IXGBE_LINK_SPEED_10GB_FULL:
   2580 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2581 			break;
   2582 		case IXGBE_LINK_SPEED_1GB_FULL:
   2583 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2584 			break;
   2585 		}
   2586 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2587 		switch (adapter->link_speed) {
   2588 		case IXGBE_LINK_SPEED_10GB_FULL:
   2589 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2590 			break;
   2591 		case IXGBE_LINK_SPEED_1GB_FULL:
   2592 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2593 			break;
   2594 		}
   2595 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2596 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2597 		switch (adapter->link_speed) {
   2598 		case IXGBE_LINK_SPEED_10GB_FULL:
   2599 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2600 			break;
   2601 		case IXGBE_LINK_SPEED_1GB_FULL:
   2602 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2603 			break;
   2604 		}
   2605 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2606 		switch (adapter->link_speed) {
   2607 		case IXGBE_LINK_SPEED_10GB_FULL:
   2608 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2609 			break;
   2610 		}
   2611 	/*
   2612 	 * XXX: These need to use the proper media types once
   2613 	 * they're added.
   2614 	 */
   2615 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2616 		switch (adapter->link_speed) {
   2617 		case IXGBE_LINK_SPEED_10GB_FULL:
   2618 #ifndef IFM_ETH_XTYPE
   2619 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2620 #else
   2621 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2622 #endif
   2623 			break;
   2624 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2625 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2626 			break;
   2627 		case IXGBE_LINK_SPEED_1GB_FULL:
   2628 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2629 			break;
   2630 		}
   2631 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2632 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2633 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2634 		switch (adapter->link_speed) {
   2635 		case IXGBE_LINK_SPEED_10GB_FULL:
   2636 #ifndef IFM_ETH_XTYPE
   2637 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2638 #else
   2639 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2640 #endif
   2641 			break;
   2642 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2643 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2644 			break;
   2645 		case IXGBE_LINK_SPEED_1GB_FULL:
   2646 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2647 			break;
   2648 		}
   2649 
   2650 	/* If nothing is recognized... */
   2651 #if 0
   2652 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2653 		ifmr->ifm_active |= IFM_UNKNOWN;
   2654 #endif
   2655 
   2656 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2657 
   2658 	/* Display current flow control setting used on link */
   2659 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2660 	    hw->fc.current_mode == ixgbe_fc_full)
   2661 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2662 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2663 	    hw->fc.current_mode == ixgbe_fc_full)
   2664 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2665 
   2666 	IXGBE_CORE_UNLOCK(adapter);
   2667 
   2668 	return;
   2669 } /* ixgbe_media_status */
   2670 
   2671 /************************************************************************
   2672  * ixgbe_media_change - Media Ioctl callback
   2673  *
   2674  *   Called when the user changes speed/duplex using
    2675  *   media/mediaopt option with ifconfig.
   2676  ************************************************************************/
   2677 static int
   2678 ixgbe_media_change(struct ifnet *ifp)
   2679 {
   2680 	struct adapter   *adapter = ifp->if_softc;
   2681 	struct ifmedia   *ifm = &adapter->media;
   2682 	struct ixgbe_hw  *hw = &adapter->hw;
   2683 	ixgbe_link_speed speed = 0;
   2684 	ixgbe_link_speed link_caps = 0;
   2685 	bool negotiate = false;
   2686 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2687 
   2688 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2689 
   2690 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2691 		return (EINVAL);
   2692 
   2693 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2694 		return (ENODEV);
   2695 
   2696 	/*
   2697 	 * We don't actually need to check against the supported
   2698 	 * media types of the adapter; ifmedia will take care of
   2699 	 * that for us.
   2700 	 */
   2701 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2702 	case IFM_AUTO:
   2703 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2704 		    &negotiate);
   2705 		if (err != IXGBE_SUCCESS) {
   2706 			device_printf(adapter->dev, "Unable to determine "
   2707 			    "supported advertise speeds\n");
   2708 			return (ENODEV);
   2709 		}
   2710 		speed |= link_caps;
   2711 		break;
   2712 	case IFM_10G_T:
   2713 	case IFM_10G_LRM:
   2714 	case IFM_10G_LR:
   2715 	case IFM_10G_TWINAX:
   2716 #ifndef IFM_ETH_XTYPE
   2717 	case IFM_10G_SR: /* KR, too */
   2718 	case IFM_10G_CX4: /* KX4 */
   2719 #else
   2720 	case IFM_10G_KR:
   2721 	case IFM_10G_KX4:
   2722 #endif
   2723 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2724 		break;
   2725 	case IFM_5000_T:
   2726 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2727 		break;
   2728 	case IFM_2500_T:
   2729 	case IFM_2500_KX:
   2730 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2731 		break;
   2732 	case IFM_1000_T:
   2733 	case IFM_1000_LX:
   2734 	case IFM_1000_SX:
   2735 	case IFM_1000_KX:
   2736 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2737 		break;
   2738 	case IFM_100_TX:
   2739 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2740 		break;
   2741 	case IFM_10_T:
   2742 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2743 		break;
   2744 	default:
   2745 		goto invalid;
   2746 	}
   2747 
   2748 	hw->mac.autotry_restart = TRUE;
   2749 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2750 	adapter->advertise = 0;
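         	/*
         	 * Record the requested speed(s) using the same bit encoding
         	 * the driver uses elsewhere for advertised speeds:
         	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M,
         	 * 0x10 = 2.5G, 0x20 = 5G.
         	 */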
   2751 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2752 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2753 			adapter->advertise |= 1 << 2;
   2754 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2755 			adapter->advertise |= 1 << 1;
   2756 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2757 			adapter->advertise |= 1 << 0;
   2758 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2759 			adapter->advertise |= 1 << 3;
   2760 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2761 			adapter->advertise |= 1 << 4;
   2762 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2763 			adapter->advertise |= 1 << 5;
   2764 	}
   2765 
   2766 	return (0);
   2767 
   2768 invalid:
   2769 	device_printf(adapter->dev, "Invalid media type!\n");
   2770 
   2771 	return (EINVAL);
   2772 } /* ixgbe_media_change */
   2773 
   2774 /************************************************************************
   2775  * ixgbe_set_promisc
   2776  ************************************************************************/
   2777 static void
   2778 ixgbe_set_promisc(struct adapter *adapter)
   2779 {
   2780 	struct ifnet *ifp = adapter->ifp;
   2781 	int          mcnt = 0;
   2782 	u32          rctl;
   2783 	struct ether_multi *enm;
   2784 	struct ether_multistep step;
   2785 	struct ethercom *ec = &adapter->osdep.ec;
   2786 
   2787 	KASSERT(mutex_owned(&adapter->core_mtx));
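	/*
	 * FCTRL_UPE is unicast promiscuous mode and FCTRL_MPE is multicast
	 * promiscuous mode.  Start from the current FCTRL value with UPE
	 * cleared and set only the bits the interface flags call for below.
	 */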
   2788 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2789 	rctl &= (~IXGBE_FCTRL_UPE);
   2790 	if (ifp->if_flags & IFF_ALLMULTI)
   2791 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2792 	else {
   2793 		ETHER_LOCK(ec);
   2794 		ETHER_FIRST_MULTI(step, ec, enm);
   2795 		while (enm != NULL) {
   2796 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2797 				break;
   2798 			mcnt++;
   2799 			ETHER_NEXT_MULTI(step, enm);
   2800 		}
   2801 		ETHER_UNLOCK(ec);
   2802 	}
   2803 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2804 		rctl &= (~IXGBE_FCTRL_MPE);
   2805 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2806 
   2807 	if (ifp->if_flags & IFF_PROMISC) {
   2808 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2809 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2810 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2811 		rctl |= IXGBE_FCTRL_MPE;
   2812 		rctl &= ~IXGBE_FCTRL_UPE;
   2813 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2814 	}
   2815 } /* ixgbe_set_promisc */
   2816 
   2817 /************************************************************************
   2818  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2819  ************************************************************************/
   2820 static int
   2821 ixgbe_msix_link(void *arg)
   2822 {
   2823 	struct adapter	*adapter = arg;
   2824 	struct ixgbe_hw *hw = &adapter->hw;
   2825 	u32		eicr, eicr_mask;
   2826 	s32             retval;
   2827 
   2828 	++adapter->link_irq.ev_count;
   2829 
   2830 	/* Pause other interrupts */
   2831 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2832 
   2833 	/* First get the cause */
   2834 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2835 	/* Be sure the queue bits are not cleared */
   2836 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2837 	/* Clear interrupt with write */
   2838 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2839 
   2840 	/* Link status change */
   2841 	if (eicr & IXGBE_EICR_LSC) {
   2842 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2843 		softint_schedule(adapter->link_si);
   2844 	}
   2845 
   2846 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2847 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2848 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2849 			/* This is probably overkill :) */
    2850 			if (atomic_cas_uint(&adapter->fdir_reinit, 0, 1) != 0)
    2851 				return 1;
   2852 			/* Disable the interrupt */
   2853 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2854 			softint_schedule(adapter->fdir_si);
   2855 		}
   2856 
   2857 		if (eicr & IXGBE_EICR_ECC) {
   2858 			device_printf(adapter->dev,
   2859 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2860 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2861 		}
   2862 
   2863 		/* Check for over temp condition */
   2864 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2865 			switch (adapter->hw.mac.type) {
   2866 			case ixgbe_mac_X550EM_a:
   2867 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2868 					break;
   2869 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2870 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2871 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2872 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2873 				retval = hw->phy.ops.check_overtemp(hw);
   2874 				if (retval != IXGBE_ERR_OVERTEMP)
   2875 					break;
   2876 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2877 				device_printf(adapter->dev, "System shutdown required!\n");
   2878 				break;
   2879 			default:
   2880 				if (!(eicr & IXGBE_EICR_TS))
   2881 					break;
   2882 				retval = hw->phy.ops.check_overtemp(hw);
   2883 				if (retval != IXGBE_ERR_OVERTEMP)
   2884 					break;
   2885 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2886 				device_printf(adapter->dev, "System shutdown required!\n");
   2887 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2888 				break;
   2889 			}
   2890 		}
   2891 
   2892 		/* Check for VF message */
   2893 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2894 		    (eicr & IXGBE_EICR_MAILBOX))
   2895 			softint_schedule(adapter->mbx_si);
   2896 	}
   2897 
   2898 	if (ixgbe_is_sfp(hw)) {
   2899 		/* Pluggable optics-related interrupt */
   2900 		if (hw->mac.type >= ixgbe_mac_X540)
   2901 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2902 		else
   2903 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2904 
   2905 		if (eicr & eicr_mask) {
   2906 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2907 			softint_schedule(adapter->mod_si);
   2908 		}
   2909 
   2910 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2911 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2912 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2913 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2914 			softint_schedule(adapter->msf_si);
   2915 		}
   2916 	}
   2917 
   2918 	/* Check for fan failure */
   2919 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2920 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2921 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2922 	}
   2923 
   2924 	/* External PHY interrupt */
   2925 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2926 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2927 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2928 		softint_schedule(adapter->phy_si);
    2929 	}
   2930 
   2931 	/* Re-enable other interrupts */
   2932 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2933 	return 1;
   2934 } /* ixgbe_msix_link */
   2935 
   2936 /************************************************************************
   2937  * ixgbe_sysctl_interrupt_rate_handler
   2938  ************************************************************************/
   2939 static int
   2940 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2941 {
   2942 	struct sysctlnode node = *rnode;
   2943 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2944 	uint32_t reg, usec, rate;
   2945 	int error;
   2946 
   2947 	if (que == NULL)
   2948 		return 0;
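	/*
	 * EITR keeps the moderation interval in bits 3..11 (mask 0x0FF8);
	 * the driver treats that field as a count of 2us units, so an
	 * interval of N gives roughly 500000 / N interrupts per second
	 * (e.g. N == 62 is ~124us between interrupts, about 8064 irq/s).
	 */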
   2949 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
   2950 	usec = ((reg & 0x0FF8) >> 3);
   2951 	if (usec > 0)
   2952 		rate = 500000 / usec;
   2953 	else
   2954 		rate = 0;
   2955 	node.sysctl_data = &rate;
   2956 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2957 	if (error || newp == NULL)
   2958 		return error;
   2959 	reg &= ~0xfff; /* default, no limitation */
   2960 	ixgbe_max_interrupt_rate = 0;
   2961 	if (rate > 0 && rate < 500000) {
   2962 		if (rate < 1000)
   2963 			rate = 1000;
   2964 		ixgbe_max_interrupt_rate = rate;
   2965 		reg |= ((4000000/rate) & 0xff8);
   2966 	}
   2967 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   2968 
   2969 	return (0);
   2970 } /* ixgbe_sysctl_interrupt_rate_handler */
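
/*
 * A minimal sketch (never compiled) of the EITR conversions performed by
 * the handler above, written out as helpers.  The helper names are
 * invented for illustration; the driver open-codes this arithmetic.
 */
#if 0
static inline u32
ixgbe_eitr_to_rate(u32 eitr)
{
	u32 interval = (eitr & 0x0FF8) >> 3;	/* 2us units */

	return (interval > 0) ? (500000 / interval) : 0;
}

static inline u32
ixgbe_rate_to_eitr(u32 rate)
{

	if (rate == 0 || rate >= 500000)
		return 0;		/* no moderation */
	if (rate < 1000)
		rate = 1000;		/* clamp as the handler does */
	return (4000000 / rate) & 0x0FF8;
}
#endif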
   2971 
   2972 const struct sysctlnode *
   2973 ixgbe_sysctl_instance(struct adapter *adapter)
   2974 {
   2975 	const char *dvname;
   2976 	struct sysctllog **log;
   2977 	int rc;
   2978 	const struct sysctlnode *rnode;
   2979 
   2980 	if (adapter->sysctltop != NULL)
   2981 		return adapter->sysctltop;
   2982 
   2983 	log = &adapter->sysctllog;
   2984 	dvname = device_xname(adapter->dev);
   2985 
   2986 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2987 	    0, CTLTYPE_NODE, dvname,
   2988 	    SYSCTL_DESCR("ixgbe information and settings"),
   2989 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2990 		goto err;
   2991 
   2992 	return rnode;
   2993 err:
   2994 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2995 	return NULL;
   2996 }
   2997 
   2998 /************************************************************************
   2999  * ixgbe_add_device_sysctls
   3000  ************************************************************************/
   3001 static void
   3002 ixgbe_add_device_sysctls(struct adapter *adapter)
   3003 {
   3004 	device_t               dev = adapter->dev;
   3005 	struct ixgbe_hw        *hw = &adapter->hw;
   3006 	struct sysctllog **log;
   3007 	const struct sysctlnode *rnode, *cnode;
   3008 
   3009 	log = &adapter->sysctllog;
   3010 
   3011 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3012 		aprint_error_dev(dev, "could not create sysctl root\n");
   3013 		return;
   3014 	}
   3015 
   3016 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3017 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3018 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3019 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3020 		aprint_error_dev(dev, "could not create sysctl\n");
   3021 
   3022 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3023 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3024 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3025 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3026 		aprint_error_dev(dev, "could not create sysctl\n");
   3027 
   3028 	/* Sysctls for all devices */
   3029 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3030 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3031 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3032 	    CTL_EOL) != 0)
   3033 		aprint_error_dev(dev, "could not create sysctl\n");
   3034 
   3035 	adapter->enable_aim = ixgbe_enable_aim;
   3036 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3037 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3038 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3039 		aprint_error_dev(dev, "could not create sysctl\n");
   3040 
   3041 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3042 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3043 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3044 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3045 	    CTL_EOL) != 0)
   3046 		aprint_error_dev(dev, "could not create sysctl\n");
   3047 
   3048 #ifdef IXGBE_DEBUG
   3049 	/* testing sysctls (for all devices) */
   3050 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3051 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3052 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3053 	    CTL_EOL) != 0)
   3054 		aprint_error_dev(dev, "could not create sysctl\n");
   3055 
   3056 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3057 	    CTLTYPE_STRING, "print_rss_config",
   3058 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3059 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3060 	    CTL_EOL) != 0)
   3061 		aprint_error_dev(dev, "could not create sysctl\n");
   3062 #endif
   3063 	/* for X550 series devices */
   3064 	if (hw->mac.type >= ixgbe_mac_X550)
   3065 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3066 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3067 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3068 		    CTL_EOL) != 0)
   3069 			aprint_error_dev(dev, "could not create sysctl\n");
   3070 
   3071 	/* for WoL-capable devices */
   3072 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3073 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3074 		    CTLTYPE_BOOL, "wol_enable",
   3075 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3076 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3077 		    CTL_EOL) != 0)
   3078 			aprint_error_dev(dev, "could not create sysctl\n");
   3079 
   3080 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3081 		    CTLTYPE_INT, "wufc",
   3082 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3083 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3084 		    CTL_EOL) != 0)
   3085 			aprint_error_dev(dev, "could not create sysctl\n");
   3086 	}
   3087 
   3088 	/* for X552/X557-AT devices */
   3089 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3090 		const struct sysctlnode *phy_node;
   3091 
   3092 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3093 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3094 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3095 			aprint_error_dev(dev, "could not create sysctl\n");
   3096 			return;
   3097 		}
   3098 
   3099 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3100 		    CTLTYPE_INT, "temp",
   3101 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3102 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3103 		    CTL_EOL) != 0)
   3104 			aprint_error_dev(dev, "could not create sysctl\n");
   3105 
   3106 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3107 		    CTLTYPE_INT, "overtemp_occurred",
   3108 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3109 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3110 		    CTL_CREATE, CTL_EOL) != 0)
   3111 			aprint_error_dev(dev, "could not create sysctl\n");
   3112 	}
   3113 
   3114 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3115 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3116 		    CTLTYPE_INT, "eee_state",
   3117 		    SYSCTL_DESCR("EEE Power Save State"),
   3118 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3119 		    CTL_EOL) != 0)
   3120 			aprint_error_dev(dev, "could not create sysctl\n");
   3121 	}
   3122 } /* ixgbe_add_device_sysctls */
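
/*
 * The nodes above land under a per-device node below "hw", named after
 * the device instance (see ixgbe_sysctl_instance() above), so with an
 * instance called ixg0 (illustrative name only) they can be read and
 * tuned with sysctl(8), e.g.:
 *
 *	sysctl hw.ixg0.num_queues
 *	sysctl -w hw.ixg0.enable_aim=1
 *
 * Which nodes exist depends on the MAC/PHY features detected above.
 */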
   3123 
   3124 /************************************************************************
   3125  * ixgbe_allocate_pci_resources
   3126  ************************************************************************/
   3127 static int
   3128 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3129     const struct pci_attach_args *pa)
   3130 {
   3131 	pcireg_t	memtype;
   3132 	device_t dev = adapter->dev;
   3133 	bus_addr_t addr;
   3134 	int flags;
   3135 
   3136 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3137 	switch (memtype) {
   3138 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3139 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3140 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3141 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
    3142 		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3143 			goto map_err;
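		/*
		 * The BAR contains device registers, so the mapping must not
		 * be prefetchable: register reads can have side effects and
		 * writes must not be merged or reordered.
		 */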
   3144 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3145 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3146 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3147 		}
   3148 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3149 		     adapter->osdep.mem_size, flags,
   3150 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3151 map_err:
   3152 			adapter->osdep.mem_size = 0;
   3153 			aprint_error_dev(dev, "unable to map BAR0\n");
   3154 			return ENXIO;
   3155 		}
   3156 		break;
   3157 	default:
   3158 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3159 		return ENXIO;
   3160 	}
   3161 
   3162 	return (0);
   3163 } /* ixgbe_allocate_pci_resources */
   3164 
   3165 static void
   3166 ixgbe_free_softint(struct adapter *adapter)
   3167 {
   3168 	struct ix_queue *que = adapter->queues;
   3169 	struct tx_ring *txr = adapter->tx_rings;
   3170 	int i;
   3171 
   3172 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3173 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3174 			if (txr->txr_si != NULL)
   3175 				softint_disestablish(txr->txr_si);
   3176 		}
   3177 		if (que->que_si != NULL)
   3178 			softint_disestablish(que->que_si);
   3179 	}
   3180 
   3181 	/* Drain the Link queue */
   3182 	if (adapter->link_si != NULL) {
   3183 		softint_disestablish(adapter->link_si);
   3184 		adapter->link_si = NULL;
   3185 	}
   3186 	if (adapter->mod_si != NULL) {
   3187 		softint_disestablish(adapter->mod_si);
   3188 		adapter->mod_si = NULL;
   3189 	}
   3190 	if (adapter->msf_si != NULL) {
   3191 		softint_disestablish(adapter->msf_si);
   3192 		adapter->msf_si = NULL;
   3193 	}
   3194 	if (adapter->phy_si != NULL) {
   3195 		softint_disestablish(adapter->phy_si);
   3196 		adapter->phy_si = NULL;
   3197 	}
   3198 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3199 		if (adapter->fdir_si != NULL) {
   3200 			softint_disestablish(adapter->fdir_si);
   3201 			adapter->fdir_si = NULL;
   3202 		}
   3203 	}
   3204 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3205 		if (adapter->mbx_si != NULL) {
   3206 			softint_disestablish(adapter->mbx_si);
   3207 			adapter->mbx_si = NULL;
   3208 		}
   3209 	}
   3210 } /* ixgbe_free_softint */
   3211 
   3212 /************************************************************************
   3213  * ixgbe_detach - Device removal routine
   3214  *
   3215  *   Called when the driver is being removed.
   3216  *   Stops the adapter and deallocates all the resources
   3217  *   that were allocated for driver operation.
   3218  *
   3219  *   return 0 on success, positive on failure
   3220  ************************************************************************/
   3221 static int
   3222 ixgbe_detach(device_t dev, int flags)
   3223 {
   3224 	struct adapter *adapter = device_private(dev);
   3225 	struct rx_ring *rxr = adapter->rx_rings;
   3226 	struct tx_ring *txr = adapter->tx_rings;
   3227 	struct ixgbe_hw *hw = &adapter->hw;
   3228 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3229 	u32	ctrl_ext;
   3230 
   3231 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3232 	if (adapter->osdep.attached == false)
   3233 		return 0;
   3234 
   3235 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3236 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3237 		return (EBUSY);
   3238 	}
   3239 
   3240 	/* Stop the interface. Callouts are stopped in it. */
   3241 	ixgbe_ifstop(adapter->ifp, 1);
   3242 #if NVLAN > 0
   3243 	/* Make sure VLANs are not using driver */
   3244 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3245 		;	/* nothing to do: no VLANs */
   3246 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3247 		vlan_ifdetach(adapter->ifp);
   3248 	else {
   3249 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3250 		return (EBUSY);
   3251 	}
   3252 #endif
   3253 
   3254 	pmf_device_deregister(dev);
   3255 
   3256 	ether_ifdetach(adapter->ifp);
   3257 	/* Stop the adapter */
   3258 	IXGBE_CORE_LOCK(adapter);
   3259 	ixgbe_setup_low_power_mode(adapter);
   3260 	IXGBE_CORE_UNLOCK(adapter);
   3261 
   3262 	ixgbe_free_softint(adapter);
   3263 
   3264 	/* let hardware know driver is unloading */
   3265 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3266 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3267 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3268 
   3269 	callout_halt(&adapter->timer, NULL);
   3270 
   3271 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3272 		netmap_detach(adapter->ifp);
   3273 
   3274 	ixgbe_free_pci_resources(adapter);
   3275 #if 0	/* XXX the NetBSD port is probably missing something here */
   3276 	bus_generic_detach(dev);
   3277 #endif
   3278 	if_detach(adapter->ifp);
   3279 	if_percpuq_destroy(adapter->ipq);
   3280 
   3281 	sysctl_teardown(&adapter->sysctllog);
   3282 	evcnt_detach(&adapter->handleq);
   3283 	evcnt_detach(&adapter->req);
   3284 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3285 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3286 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3287 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3288 	evcnt_detach(&adapter->other_tx_dma_setup);
   3289 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3290 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3291 	evcnt_detach(&adapter->watchdog_events);
   3292 	evcnt_detach(&adapter->tso_err);
   3293 	evcnt_detach(&adapter->link_irq);
   3294 
   3295 	txr = adapter->tx_rings;
   3296 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3297 		evcnt_detach(&adapter->queues[i].irqs);
   3298 		evcnt_detach(&txr->no_desc_avail);
   3299 		evcnt_detach(&txr->total_packets);
   3300 		evcnt_detach(&txr->tso_tx);
   3301 #ifndef IXGBE_LEGACY_TX
   3302 		evcnt_detach(&txr->pcq_drops);
   3303 #endif
   3304 
   3305 		if (i < __arraycount(stats->mpc)) {
   3306 			evcnt_detach(&stats->mpc[i]);
   3307 			if (hw->mac.type == ixgbe_mac_82598EB)
   3308 				evcnt_detach(&stats->rnbc[i]);
   3309 		}
   3310 		if (i < __arraycount(stats->pxontxc)) {
   3311 			evcnt_detach(&stats->pxontxc[i]);
   3312 			evcnt_detach(&stats->pxonrxc[i]);
   3313 			evcnt_detach(&stats->pxofftxc[i]);
   3314 			evcnt_detach(&stats->pxoffrxc[i]);
   3315 			evcnt_detach(&stats->pxon2offc[i]);
   3316 		}
   3317 		if (i < __arraycount(stats->qprc)) {
   3318 			evcnt_detach(&stats->qprc[i]);
   3319 			evcnt_detach(&stats->qptc[i]);
   3320 			evcnt_detach(&stats->qbrc[i]);
   3321 			evcnt_detach(&stats->qbtc[i]);
   3322 			evcnt_detach(&stats->qprdc[i]);
   3323 		}
   3324 
   3325 		evcnt_detach(&rxr->rx_packets);
   3326 		evcnt_detach(&rxr->rx_bytes);
   3327 		evcnt_detach(&rxr->rx_copies);
   3328 		evcnt_detach(&rxr->no_jmbuf);
   3329 		evcnt_detach(&rxr->rx_discarded);
   3330 	}
   3331 	evcnt_detach(&stats->ipcs);
   3332 	evcnt_detach(&stats->l4cs);
   3333 	evcnt_detach(&stats->ipcs_bad);
   3334 	evcnt_detach(&stats->l4cs_bad);
   3335 	evcnt_detach(&stats->intzero);
   3336 	evcnt_detach(&stats->legint);
   3337 	evcnt_detach(&stats->crcerrs);
   3338 	evcnt_detach(&stats->illerrc);
   3339 	evcnt_detach(&stats->errbc);
   3340 	evcnt_detach(&stats->mspdc);
   3341 	if (hw->mac.type >= ixgbe_mac_X550)
   3342 		evcnt_detach(&stats->mbsdc);
   3343 	evcnt_detach(&stats->mpctotal);
   3344 	evcnt_detach(&stats->mlfc);
   3345 	evcnt_detach(&stats->mrfc);
   3346 	evcnt_detach(&stats->rlec);
   3347 	evcnt_detach(&stats->lxontxc);
   3348 	evcnt_detach(&stats->lxonrxc);
   3349 	evcnt_detach(&stats->lxofftxc);
   3350 	evcnt_detach(&stats->lxoffrxc);
   3351 
   3352 	/* Packet Reception Stats */
   3353 	evcnt_detach(&stats->tor);
   3354 	evcnt_detach(&stats->gorc);
   3355 	evcnt_detach(&stats->tpr);
   3356 	evcnt_detach(&stats->gprc);
   3357 	evcnt_detach(&stats->mprc);
   3358 	evcnt_detach(&stats->bprc);
   3359 	evcnt_detach(&stats->prc64);
   3360 	evcnt_detach(&stats->prc127);
   3361 	evcnt_detach(&stats->prc255);
   3362 	evcnt_detach(&stats->prc511);
   3363 	evcnt_detach(&stats->prc1023);
   3364 	evcnt_detach(&stats->prc1522);
   3365 	evcnt_detach(&stats->ruc);
   3366 	evcnt_detach(&stats->rfc);
   3367 	evcnt_detach(&stats->roc);
   3368 	evcnt_detach(&stats->rjc);
   3369 	evcnt_detach(&stats->mngprc);
   3370 	evcnt_detach(&stats->mngpdc);
   3371 	evcnt_detach(&stats->xec);
   3372 
   3373 	/* Packet Transmission Stats */
   3374 	evcnt_detach(&stats->gotc);
   3375 	evcnt_detach(&stats->tpt);
   3376 	evcnt_detach(&stats->gptc);
   3377 	evcnt_detach(&stats->bptc);
   3378 	evcnt_detach(&stats->mptc);
   3379 	evcnt_detach(&stats->mngptc);
   3380 	evcnt_detach(&stats->ptc64);
   3381 	evcnt_detach(&stats->ptc127);
   3382 	evcnt_detach(&stats->ptc255);
   3383 	evcnt_detach(&stats->ptc511);
   3384 	evcnt_detach(&stats->ptc1023);
   3385 	evcnt_detach(&stats->ptc1522);
   3386 
   3387 	ixgbe_free_transmit_structures(adapter);
   3388 	ixgbe_free_receive_structures(adapter);
   3389 	free(adapter->queues, M_DEVBUF);
   3390 	free(adapter->mta, M_DEVBUF);
   3391 
   3392 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3393 
   3394 	return (0);
   3395 } /* ixgbe_detach */
   3396 
   3397 /************************************************************************
   3398  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3399  *
   3400  *   Prepare the adapter/port for LPLU and/or WoL
   3401  ************************************************************************/
   3402 static int
   3403 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3404 {
   3405 	struct ixgbe_hw *hw = &adapter->hw;
   3406 	device_t        dev = adapter->dev;
   3407 	s32             error = 0;
   3408 
   3409 	KASSERT(mutex_owned(&adapter->core_mtx));
   3410 
   3411 	/* Limit power management flow to X550EM baseT */
   3412 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3413 	    hw->phy.ops.enter_lplu) {
   3414 		/* X550EM baseT adapters need a special LPLU flow */
   3415 		hw->phy.reset_disable = true;
   3416 		ixgbe_stop(adapter);
   3417 		error = hw->phy.ops.enter_lplu(hw);
   3418 		if (error)
   3419 			device_printf(dev,
   3420 			    "Error entering LPLU: %d\n", error);
   3421 		hw->phy.reset_disable = false;
   3422 	} else {
   3423 		/* Just stop for other adapters */
   3424 		ixgbe_stop(adapter);
   3425 	}
   3426 
   3427 	if (!hw->wol_enabled) {
   3428 		ixgbe_set_phy_power(hw, FALSE);
   3429 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3430 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3431 	} else {
   3432 		/* Turn off support for APM wakeup. (Using ACPI instead) */
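		/* Bit 1 of GRC is the APM enable bit (IXGBE_GRC_APME in the
		 * shared code). */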
   3433 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3434 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3435 
   3436 		/*
   3437 		 * Clear Wake Up Status register to prevent any previous wakeup
   3438 		 * events from waking us up immediately after we suspend.
   3439 		 */
   3440 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3441 
   3442 		/*
   3443 		 * Program the Wakeup Filter Control register with user filter
   3444 		 * settings
   3445 		 */
   3446 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3447 
   3448 		/* Enable wakeups and power management in Wakeup Control */
   3449 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3450 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3451 
   3452 	}
   3453 
   3454 	return error;
   3455 } /* ixgbe_setup_low_power_mode */
   3456 
   3457 /************************************************************************
   3458  * ixgbe_shutdown - Shutdown entry point
   3459  ************************************************************************/
   3460 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3461 static int
   3462 ixgbe_shutdown(device_t dev)
   3463 {
   3464 	struct adapter *adapter = device_private(dev);
   3465 	int error = 0;
   3466 
   3467 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3468 
   3469 	IXGBE_CORE_LOCK(adapter);
   3470 	error = ixgbe_setup_low_power_mode(adapter);
   3471 	IXGBE_CORE_UNLOCK(adapter);
   3472 
   3473 	return (error);
   3474 } /* ixgbe_shutdown */
   3475 #endif
   3476 
   3477 /************************************************************************
   3478  * ixgbe_suspend
   3479  *
   3480  *   From D0 to D3
   3481  ************************************************************************/
   3482 static bool
   3483 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3484 {
   3485 	struct adapter *adapter = device_private(dev);
   3486 	int            error = 0;
   3487 
   3488 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3489 
   3490 	IXGBE_CORE_LOCK(adapter);
   3491 
   3492 	error = ixgbe_setup_low_power_mode(adapter);
   3493 
   3494 	IXGBE_CORE_UNLOCK(adapter);
   3495 
    3496 	return (error == 0);	/* pmf(9) expects true on success */
   3497 } /* ixgbe_suspend */
   3498 
   3499 /************************************************************************
   3500  * ixgbe_resume
   3501  *
   3502  *   From D3 to D0
   3503  ************************************************************************/
   3504 static bool
   3505 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3506 {
   3507 	struct adapter  *adapter = device_private(dev);
   3508 	struct ifnet    *ifp = adapter->ifp;
   3509 	struct ixgbe_hw *hw = &adapter->hw;
   3510 	u32             wus;
   3511 
   3512 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3513 
   3514 	IXGBE_CORE_LOCK(adapter);
   3515 
   3516 	/* Read & clear WUS register */
   3517 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3518 	if (wus)
    3519 		device_printf(dev, "Woken up by (WUS): %#010x\n", wus);
   3521 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3522 	/* And clear WUFC until next low-power transition */
   3523 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3524 
   3525 	/*
   3526 	 * Required after D3->D0 transition;
   3527 	 * will re-advertise all previous advertised speeds
   3528 	 */
   3529 	if (ifp->if_flags & IFF_UP)
   3530 		ixgbe_init_locked(adapter);
   3531 
   3532 	IXGBE_CORE_UNLOCK(adapter);
   3533 
   3534 	return true;
   3535 } /* ixgbe_resume */
   3536 
   3537 /*
   3538  * Set the various hardware offload abilities.
   3539  *
   3540  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3541  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3542  * mbuf offload flags the driver will understand.
   3543  */
   3544 static void
   3545 ixgbe_set_if_hwassist(struct adapter *adapter)
   3546 {
   3547 	/* XXX */
   3548 }
   3549 
   3550 /************************************************************************
   3551  * ixgbe_init_locked - Init entry point
   3552  *
   3553  *   Used in two ways: It is used by the stack as an init
    3554  *   entry point in the network interface structure. It is also
   3555  *   used by the driver as a hw/sw initialization routine to
   3556  *   get to a consistent state.
   3557  *
    3558  *   Returns nothing; failures are reported with device_printf().
   3559  ************************************************************************/
   3560 static void
   3561 ixgbe_init_locked(struct adapter *adapter)
   3562 {
   3563 	struct ifnet   *ifp = adapter->ifp;
   3564 	device_t 	dev = adapter->dev;
   3565 	struct ixgbe_hw *hw = &adapter->hw;
   3566 	struct tx_ring  *txr;
   3567 	struct rx_ring  *rxr;
   3568 	u32		txdctl, mhadd;
   3569 	u32		rxdctl, rxctrl;
   3570 	u32             ctrl_ext;
   3571 	int             err = 0;
   3572 
   3573 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3574 
   3575 	KASSERT(mutex_owned(&adapter->core_mtx));
   3576 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3577 
   3578 	hw->adapter_stopped = FALSE;
   3579 	ixgbe_stop_adapter(hw);
    3580 	callout_stop(&adapter->timer);
   3581 
   3582 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3583 	adapter->max_frame_size =
   3584 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3585 
   3586 	/* Queue indices may change with IOV mode */
   3587 	ixgbe_align_all_queue_indices(adapter);
   3588 
   3589 	/* reprogram the RAR[0] in case user changed it. */
   3590 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3591 
   3592 	/* Get the latest mac address, User can use a LAA */
   3593 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3594 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3595 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3596 	hw->addr_ctrl.rar_used_count = 1;
   3597 
   3598 	/* Set hardware offload abilities from ifnet flags */
   3599 	ixgbe_set_if_hwassist(adapter);
   3600 
   3601 	/* Prepare transmit descriptors and buffers */
   3602 	if (ixgbe_setup_transmit_structures(adapter)) {
   3603 		device_printf(dev, "Could not setup transmit structures\n");
   3604 		ixgbe_stop(adapter);
   3605 		return;
   3606 	}
   3607 
   3608 	ixgbe_init_hw(hw);
   3609 	ixgbe_initialize_iov(adapter);
   3610 	ixgbe_initialize_transmit_units(adapter);
   3611 
   3612 	/* Setup Multicast table */
   3613 	ixgbe_set_multi(adapter);
   3614 
   3615 	/* Determine the correct mbuf pool, based on frame size */
   3616 	if (adapter->max_frame_size <= MCLBYTES)
   3617 		adapter->rx_mbuf_sz = MCLBYTES;
   3618 	else
   3619 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3620 
   3621 	/* Prepare receive descriptors and buffers */
   3622 	if (ixgbe_setup_receive_structures(adapter)) {
   3623 		device_printf(dev, "Could not setup receive structures\n");
   3624 		ixgbe_stop(adapter);
   3625 		return;
   3626 	}
   3627 
   3628 	/* Configure RX settings */
   3629 	ixgbe_initialize_receive_units(adapter);
   3630 
   3631 	/* Enable SDP & MSI-X interrupts based on adapter */
   3632 	ixgbe_config_gpie(adapter);
   3633 
   3634 	/* Set MTU size */
   3635 	if (ifp->if_mtu > ETHERMTU) {
   3636 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3637 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3638 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3639 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3640 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3641 	}
   3642 
   3643 	/* Now enable all the queues */
   3644 	for (int i = 0; i < adapter->num_queues; i++) {
   3645 		txr = &adapter->tx_rings[i];
   3646 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3647 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3648 		/* Set WTHRESH to 8, burst writeback */
   3649 		txdctl |= (8 << 16);
   3650 		/*
   3651 		 * When the internal queue falls below PTHRESH (32),
   3652 		 * start prefetching as long as there are at least
   3653 		 * HTHRESH (1) buffers ready. The values are taken
   3654 		 * from the Intel linux driver 3.8.21.
   3655 		 * Prefetching enables tx line rate even with 1 queue.
   3656 		 */
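		/*
		 * TXDCTL field layout: PTHRESH is bits 6:0, HTHRESH is
		 * bits 14:8 and WTHRESH is bits 22:16.
		 */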
   3657 		txdctl |= (32 << 0) | (1 << 8);
   3658 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3659 	}
   3660 
   3661 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3662 		rxr = &adapter->rx_rings[i];
   3663 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3664 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3665 			/*
    3666 			 * PTHRESH = 32
   3667 			 * HTHRESH = 4
   3668 			 * WTHRESH = 8
   3669 			 */
   3670 			rxdctl &= ~0x3FFFFF;
   3671 			rxdctl |= 0x080420;
   3672 		}
   3673 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3674 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3675 		for (; j < 10; j++) {
   3676 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3677 			    IXGBE_RXDCTL_ENABLE)
   3678 				break;
   3679 			else
   3680 				msec_delay(1);
   3681 		}
   3682 		wmb();
   3683 
   3684 		/*
   3685 		 * In netmap mode, we must preserve the buffers made
   3686 		 * available to userspace before the if_init()
   3687 		 * (this is true by default on the TX side, because
   3688 		 * init makes all buffers available to userspace).
   3689 		 *
   3690 		 * netmap_reset() and the device specific routines
   3691 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3692 		 * buffers at the end of the NIC ring, so here we
   3693 		 * must set the RDT (tail) register to make sure
   3694 		 * they are not overwritten.
   3695 		 *
   3696 		 * In this driver the NIC ring starts at RDH = 0,
   3697 		 * RDT points to the last slot available for reception (?),
   3698 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3699 		 */
   3700 #ifdef DEV_NETMAP
   3701 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3702 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3703 			struct netmap_adapter *na = NA(adapter->ifp);
   3704 			struct netmap_kring *kring = &na->rx_rings[i];
   3705 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3706 
   3707 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3708 		} else
   3709 #endif /* DEV_NETMAP */
   3710 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3711 			    adapter->num_rx_desc - 1);
   3712 	}
   3713 
   3714 	/* Enable Receive engine */
   3715 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3716 	if (hw->mac.type == ixgbe_mac_82598EB)
   3717 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3718 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3719 	ixgbe_enable_rx_dma(hw, rxctrl);
   3720 
   3721 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3722 
   3723 	/* Set up MSI-X routing */
   3724 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3725 		ixgbe_configure_ivars(adapter);
   3726 		/* Set up auto-mask */
   3727 		if (hw->mac.type == ixgbe_mac_82598EB)
   3728 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3729 		else {
   3730 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3731 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3732 		}
   3733 	} else {  /* Simple settings for Legacy/MSI */
   3734 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3735 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3736 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3737 	}
   3738 
   3739 	ixgbe_init_fdir(adapter);
   3740 
   3741 	/*
   3742 	 * Check on any SFP devices that
   3743 	 * need to be kick-started
   3744 	 */
   3745 	if (hw->phy.type == ixgbe_phy_none) {
   3746 		err = hw->phy.ops.identify(hw);
   3747 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3748 			device_printf(dev,
    3749 			    "Unsupported SFP+ module type was detected.\n");
    3750 			return;
    3751 		}
   3752 	}
   3753 
   3754 	/* Set moderation on the Link interrupt */
   3755 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3756 
   3757 	/* Config/Enable Link */
   3758 	ixgbe_config_link(adapter);
   3759 
   3760 	/* Hardware Packet Buffer & Flow Control setup */
   3761 	ixgbe_config_delay_values(adapter);
   3762 
   3763 	/* Initialize the FC settings */
   3764 	ixgbe_start_hw(hw);
   3765 
   3766 	/* Set up VLAN support and filter */
   3767 	ixgbe_setup_vlan_hw_support(adapter);
   3768 
   3769 	/* Setup DMA Coalescing */
   3770 	ixgbe_config_dmac(adapter);
   3771 
   3772 	/* And now turn on interrupts */
   3773 	ixgbe_enable_intr(adapter);
   3774 
   3775 	/* Enable the use of the MBX by the VF's */
   3776 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3777 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3778 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3779 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3780 	}
   3781 
   3782 	/* Update saved flags. See ixgbe_ifflags_cb() */
   3783 	adapter->if_flags = ifp->if_flags;
   3784 
   3785 	/* Now inform the stack we're ready */
   3786 	ifp->if_flags |= IFF_RUNNING;
   3787 
   3788 	return;
   3789 } /* ixgbe_init_locked */
   3790 
   3791 /************************************************************************
   3792  * ixgbe_init
   3793  ************************************************************************/
   3794 static int
   3795 ixgbe_init(struct ifnet *ifp)
   3796 {
   3797 	struct adapter *adapter = ifp->if_softc;
   3798 
   3799 	IXGBE_CORE_LOCK(adapter);
   3800 	ixgbe_init_locked(adapter);
   3801 	IXGBE_CORE_UNLOCK(adapter);
   3802 
   3803 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3804 } /* ixgbe_init */
   3805 
   3806 /************************************************************************
   3807  * ixgbe_set_ivar
   3808  *
   3809  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3810  *     (yes this is all very magic and confusing :)
   3811  *    - entry is the register array entry
   3812  *    - vector is the MSI-X vector for this queue
   3813  *    - type is RX/TX/MISC
   3814  ************************************************************************/
   3815 static void
   3816 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3817 {
   3818 	struct ixgbe_hw *hw = &adapter->hw;
   3819 	u32 ivar, index;
   3820 
   3821 	vector |= IXGBE_IVAR_ALLOC_VAL;
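	/*
	 * Each 32-bit IVAR register holds four 8-bit entries; the low bits
	 * of an entry carry the MSI-X vector number and IXGBE_IVAR_ALLOC_VAL
	 * (bit 7) marks the entry valid.  The per-MAC cases below differ
	 * only in how (entry, type) maps to a register index and byte lane.
	 */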
   3822 
   3823 	switch (hw->mac.type) {
   3824 
   3825 	case ixgbe_mac_82598EB:
   3826 		if (type == -1)
   3827 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3828 		else
   3829 			entry += (type * 64);
   3830 		index = (entry >> 2) & 0x1F;
   3831 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3832 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3833 		ivar |= (vector << (8 * (entry & 0x3)));
   3834 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3835 		break;
   3836 
   3837 	case ixgbe_mac_82599EB:
   3838 	case ixgbe_mac_X540:
   3839 	case ixgbe_mac_X550:
   3840 	case ixgbe_mac_X550EM_x:
   3841 	case ixgbe_mac_X550EM_a:
   3842 		if (type == -1) { /* MISC IVAR */
   3843 			index = (entry & 1) * 8;
   3844 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3845 			ivar &= ~(0xFF << index);
   3846 			ivar |= (vector << index);
   3847 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3848 		} else {	/* RX/TX IVARS */
   3849 			index = (16 * (entry & 1)) + (8 * type);
   3850 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3851 			ivar &= ~(0xFF << index);
   3852 			ivar |= (vector << index);
   3853 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    3854 		}
    3855 		break;
    3856 	default:
   3857 		break;
   3858 	}
   3859 } /* ixgbe_set_ivar */
   3860 
   3861 /************************************************************************
   3862  * ixgbe_configure_ivars
   3863  ************************************************************************/
   3864 static void
   3865 ixgbe_configure_ivars(struct adapter *adapter)
   3866 {
   3867 	struct ix_queue *que = adapter->queues;
   3868 	u32             newitr;
   3869 
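	/*
	 * Same EITR encoding as ixgbe_sysctl_interrupt_rate_handler(): the
	 * moderation interval occupies bits 3..11 in 2us units, so
	 * 4000000 / rate yields the field value already shifted into place.
	 */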
   3870 	if (ixgbe_max_interrupt_rate > 0)
   3871 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3872 	else {
   3873 		/*
   3874 		 * Disable DMA coalescing if interrupt moderation is
   3875 		 * disabled.
   3876 		 */
   3877 		adapter->dmac = 0;
   3878 		newitr = 0;
   3879 	}
   3880 
    3881 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3882 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3883 		struct tx_ring *txr = &adapter->tx_rings[i];
   3884 		/* First the RX queue entry */
    3885 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3886 		/* ... and the TX */
   3887 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3888 		/* Set an Initial EITR value */
   3889 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
   3890 	}
   3891 
   3892 	/* For the Link interrupt */
    3893 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3894 } /* ixgbe_configure_ivars */
   3895 
   3896 /************************************************************************
   3897  * ixgbe_config_gpie
   3898  ************************************************************************/
   3899 static void
   3900 ixgbe_config_gpie(struct adapter *adapter)
   3901 {
   3902 	struct ixgbe_hw *hw = &adapter->hw;
   3903 	u32             gpie;
   3904 
   3905 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3906 
   3907 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3908 		/* Enable Enhanced MSI-X mode */
   3909 		gpie |= IXGBE_GPIE_MSIX_MODE
   3910 		     |  IXGBE_GPIE_EIAME
   3911 		     |  IXGBE_GPIE_PBA_SUPPORT
   3912 		     |  IXGBE_GPIE_OCD;
   3913 	}
   3914 
   3915 	/* Fan Failure Interrupt */
   3916 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3917 		gpie |= IXGBE_SDP1_GPIEN;
   3918 
   3919 	/* Thermal Sensor Interrupt */
   3920 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3921 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3922 
   3923 	/* Link detection */
   3924 	switch (hw->mac.type) {
   3925 	case ixgbe_mac_82599EB:
   3926 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3927 		break;
   3928 	case ixgbe_mac_X550EM_x:
   3929 	case ixgbe_mac_X550EM_a:
   3930 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3931 		break;
   3932 	default:
   3933 		break;
   3934 	}
   3935 
   3936 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3937 
   3938 	return;
   3939 } /* ixgbe_config_gpie */
   3940 
   3941 /************************************************************************
   3942  * ixgbe_config_delay_values
   3943  *
   3944  *   Requires adapter->max_frame_size to be set.
   3945  ************************************************************************/
   3946 static void
   3947 ixgbe_config_delay_values(struct adapter *adapter)
   3948 {
   3949 	struct ixgbe_hw *hw = &adapter->hw;
   3950 	u32             rxpb, frame, size, tmp;
   3951 
   3952 	frame = adapter->max_frame_size;
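	/*
	 * The IXGBE_DV()/IXGBE_LOW_DV() macros compute the required flow
	 * control delay in bit times for a given frame size and
	 * IXGBE_BT2KB() converts that to KB of packet buffer.  The high
	 * water mark is the Rx packet buffer size minus that headroom; the
	 * resulting high/low water marks are used later by
	 * ixgbe_fc_enable() when flow control is negotiated.
	 */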
   3953 
   3954 	/* Calculate High Water */
   3955 	switch (hw->mac.type) {
   3956 	case ixgbe_mac_X540:
   3957 	case ixgbe_mac_X550:
   3958 	case ixgbe_mac_X550EM_x:
   3959 	case ixgbe_mac_X550EM_a:
   3960 		tmp = IXGBE_DV_X540(frame, frame);
   3961 		break;
   3962 	default:
   3963 		tmp = IXGBE_DV(frame, frame);
   3964 		break;
   3965 	}
   3966 	size = IXGBE_BT2KB(tmp);
   3967 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   3968 	hw->fc.high_water[0] = rxpb - size;
   3969 
   3970 	/* Now calculate Low Water */
   3971 	switch (hw->mac.type) {
   3972 	case ixgbe_mac_X540:
   3973 	case ixgbe_mac_X550:
   3974 	case ixgbe_mac_X550EM_x:
   3975 	case ixgbe_mac_X550EM_a:
   3976 		tmp = IXGBE_LOW_DV_X540(frame);
   3977 		break;
   3978 	default:
   3979 		tmp = IXGBE_LOW_DV(frame);
   3980 		break;
   3981 	}
   3982 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   3983 
   3984 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   3985 	hw->fc.send_xon = TRUE;
   3986 } /* ixgbe_config_delay_values */
   3987 
   3988 /************************************************************************
   3989  * ixgbe_set_multi - Multicast Update
   3990  *
   3991  *   Called whenever multicast address list is updated.
   3992  ************************************************************************/
   3993 static void
   3994 ixgbe_set_multi(struct adapter *adapter)
   3995 {
   3996 	struct ixgbe_mc_addr	*mta;
   3997 	struct ifnet		*ifp = adapter->ifp;
   3998 	u8			*update_ptr;
   3999 	int			mcnt = 0;
   4000 	u32			fctrl;
   4001 	struct ethercom		*ec = &adapter->osdep.ec;
   4002 	struct ether_multi	*enm;
   4003 	struct ether_multistep	step;
   4004 
   4005 	KASSERT(mutex_owned(&adapter->core_mtx));
   4006 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4007 
   4008 	mta = adapter->mta;
   4009 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4010 
   4011 	ifp->if_flags &= ~IFF_ALLMULTI;
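	/*
	 * Walk the multicast list into the mta[] array.  A range entry
	 * (enm_addrlo != enm_addrhi) cannot be expressed in the hardware
	 * filter table, so in that case, or when there are more addresses
	 * than filter slots, fall back to accepting all multicast traffic.
	 */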
   4012 	ETHER_LOCK(ec);
   4013 	ETHER_FIRST_MULTI(step, ec, enm);
   4014 	while (enm != NULL) {
   4015 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4016 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4017 			ETHER_ADDR_LEN) != 0)) {
   4018 			ifp->if_flags |= IFF_ALLMULTI;
   4019 			break;
   4020 		}
   4021 		bcopy(enm->enm_addrlo,
   4022 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4023 		mta[mcnt].vmdq = adapter->pool;
   4024 		mcnt++;
   4025 		ETHER_NEXT_MULTI(step, enm);
   4026 	}
   4027 	ETHER_UNLOCK(ec);
   4028 
   4029 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4030 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4031 	if (ifp->if_flags & IFF_PROMISC)
   4032 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4033 	else if (ifp->if_flags & IFF_ALLMULTI) {
   4034 		fctrl |= IXGBE_FCTRL_MPE;
   4035 	}
   4036 
   4037 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4038 
   4039 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4040 		update_ptr = (u8 *)mta;
   4041 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4042 		    ixgbe_mc_array_itr, TRUE);
   4043 	}
   4044 
   4045 	return;
   4046 } /* ixgbe_set_multi */
   4047 
   4048 /************************************************************************
   4049  * ixgbe_mc_array_itr
   4050  *
   4051  *   An iterator function needed by the multicast shared code.
    4052  *   It feeds the shared code routine the addresses in the
    4053  *   array built by ixgbe_set_multi(), one at a time.
   4054  ************************************************************************/
   4055 static u8 *
   4056 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4057 {
   4058 	struct ixgbe_mc_addr *mta;
   4059 
   4060 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4061 	*vmdq = mta->vmdq;
   4062 
   4063 	*update_ptr = (u8*)(mta + 1);
   4064 
   4065 	return (mta->addr);
   4066 } /* ixgbe_mc_array_itr */
   4067 
   4068 /************************************************************************
   4069  * ixgbe_local_timer - Timer routine
   4070  *
   4071  *   Checks for link status, updates statistics,
   4072  *   and runs the watchdog check.
   4073  ************************************************************************/
   4074 static void
   4075 ixgbe_local_timer(void *arg)
   4076 {
   4077 	struct adapter *adapter = arg;
   4078 
   4079 	IXGBE_CORE_LOCK(adapter);
   4080 	ixgbe_local_timer1(adapter);
   4081 	IXGBE_CORE_UNLOCK(adapter);
   4082 }
   4083 
   4084 static void
   4085 ixgbe_local_timer1(void *arg)
   4086 {
   4087 	struct adapter	*adapter = arg;
   4088 	device_t	dev = adapter->dev;
   4089 	struct ix_queue *que = adapter->queues;
   4090 	u64		queues = 0;
   4091 	int		hung = 0;
   4092 
   4093 	KASSERT(mutex_owned(&adapter->core_mtx));
   4094 
   4095 	/* Check for pluggable optics */
   4096 	if (adapter->sfp_probe)
   4097 		if (!ixgbe_sfp_probe(adapter))
   4098 			goto out; /* Nothing to do */
   4099 
   4100 	ixgbe_update_link_status(adapter);
   4101 	ixgbe_update_stats_counters(adapter);
   4102 
   4103 	/*
    4104 	 * Check the status of the TX queues:
   4105 	 *      - mark hung queues so we don't schedule on them
   4106 	 *      - watchdog only if all queues show hung
   4107 	 */
   4108 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4109 		/* Keep track of queues with work for soft irq */
   4110 		if (que->txr->busy)
   4111 			queues |= ((u64)1 << que->me);
   4112 		/*
   4113 		 * Each time txeof runs without cleaning, but there
   4114 		 * are uncleaned descriptors it increments busy. If
   4115 		 * we get to the MAX we declare it hung.
   4116 		 */
   4117 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4118 			++hung;
   4119 			/* Mark the queue as inactive */
   4120 			adapter->active_queues &= ~((u64)1 << que->me);
   4121 			continue;
   4122 		} else {
   4123 			/* Check if we've come back from hung */
   4124 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4125 				adapter->active_queues |= ((u64)1 << que->me);
   4126 		}
   4127 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4128 			device_printf(dev,
    4129 			    "Warning: queue %d appears to be hung!\n", i);
   4130 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4131 			++hung;
   4132 		}
   4133 	}
   4134 
    4135 	/* Only truly watchdog if all queues show hung */
   4136 	if (hung == adapter->num_queues)
   4137 		goto watchdog;
   4138 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4139 		ixgbe_rearm_queues(adapter, queues);
   4140 	}
   4141 
   4142 out:
   4143 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4144 	return;
   4145 
   4146 watchdog:
   4147 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4148 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4149 	adapter->watchdog_events.ev_count++;
   4150 	ixgbe_init_locked(adapter);
   4151 } /* ixgbe_local_timer */
   4152 
   4153 /************************************************************************
   4154  * ixgbe_sfp_probe
   4155  *
   4156  *   Determine if a port had optics inserted.
   4157  ************************************************************************/
   4158 static bool
   4159 ixgbe_sfp_probe(struct adapter *adapter)
   4160 {
   4161 	struct ixgbe_hw	*hw = &adapter->hw;
   4162 	device_t	dev = adapter->dev;
   4163 	bool		result = FALSE;
   4164 
   4165 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4166 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4167 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4168 		if (ret)
   4169 			goto out;
   4170 		ret = hw->phy.ops.reset(hw);
   4171 		adapter->sfp_probe = FALSE;
   4172 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4173 			device_printf(dev,
    4174 			    "Unsupported SFP+ module detected!\n");
    4175 			device_printf(dev,
    4176 			    "Reload driver with supported module.\n");
    4177 			goto out;
   4177 		} else
   4178 			device_printf(dev, "SFP+ module detected!\n");
   4179 		/* We now have supported optics */
   4180 		result = TRUE;
   4181 	}
   4182 out:
   4183 
   4184 	return (result);
   4185 } /* ixgbe_sfp_probe */
   4186 
   4187 /************************************************************************
   4188  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4189  ************************************************************************/
   4190 static void
   4191 ixgbe_handle_mod(void *context)
   4192 {
   4193 	struct adapter  *adapter = context;
   4194 	struct ixgbe_hw *hw = &adapter->hw;
   4195 	device_t	dev = adapter->dev;
   4196 	u32             err, cage_full = 0;
   4197 
   4198 	if (adapter->hw.need_crosstalk_fix) {
   4199 		switch (hw->mac.type) {
   4200 		case ixgbe_mac_82599EB:
   4201 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4202 			    IXGBE_ESDP_SDP2;
   4203 			break;
   4204 		case ixgbe_mac_X550EM_x:
   4205 		case ixgbe_mac_X550EM_a:
   4206 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4207 			    IXGBE_ESDP_SDP0;
   4208 			break;
   4209 		default:
   4210 			break;
   4211 		}
   4212 
   4213 		if (!cage_full)
   4214 			return;
   4215 	}
   4216 
   4217 	err = hw->phy.ops.identify_sfp(hw);
   4218 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4219 		device_printf(dev,
   4220 		    "Unsupported SFP+ module type was detected.\n");
   4221 		return;
   4222 	}
   4223 
   4224 	err = hw->mac.ops.setup_sfp(hw);
   4225 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4226 		device_printf(dev,
   4227 		    "Setup failure - unsupported SFP+ module type.\n");
   4228 		return;
   4229 	}
   4230 	softint_schedule(adapter->msf_si);
   4231 } /* ixgbe_handle_mod */
   4232 
   4233 
   4234 /************************************************************************
   4235  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4236  ************************************************************************/
   4237 static void
   4238 ixgbe_handle_msf(void *context)
   4239 {
   4240 	struct adapter  *adapter = context;
   4241 	struct ixgbe_hw *hw = &adapter->hw;
   4242 	u32             autoneg;
   4243 	bool            negotiate;
   4244 
   4245 	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
   4246 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4247 
   4248 	autoneg = hw->phy.autoneg_advertised;
   4249 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4250 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4251 	else
   4252 		negotiate = 0;
   4253 	if (hw->mac.ops.setup_link)
   4254 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4255 
   4256 	/* Adjust media types shown in ifconfig */
   4257 	ifmedia_removeall(&adapter->media);
   4258 	ixgbe_add_media_types(adapter);
   4259 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4260 } /* ixgbe_handle_msf */
   4261 
   4262 /************************************************************************
   4263  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4264  ************************************************************************/
   4265 static void
   4266 ixgbe_handle_phy(void *context)
   4267 {
   4268 	struct adapter  *adapter = context;
   4269 	struct ixgbe_hw *hw = &adapter->hw;
   4270 	int error;
   4271 
   4272 	error = hw->phy.ops.handle_lasi(hw);
   4273 	if (error == IXGBE_ERR_OVERTEMP)
   4274 		device_printf(adapter->dev,
   4275 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
    4276 		    "PHY will downshift to lower power state!\n");
   4277 	else if (error)
   4278 		device_printf(adapter->dev,
   4279 		    "Error handling LASI interrupt: %d\n", error);
   4280 } /* ixgbe_handle_phy */
   4281 
   4282 static void
   4283 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4284 {
   4285 	struct adapter *adapter = ifp->if_softc;
   4286 
   4287 	IXGBE_CORE_LOCK(adapter);
   4288 	ixgbe_stop(adapter);
   4289 	IXGBE_CORE_UNLOCK(adapter);
   4290 }
   4291 
   4292 /************************************************************************
   4293  * ixgbe_stop - Stop the hardware
   4294  *
   4295  *   Disables all traffic on the adapter by issuing a
   4296  *   global reset on the MAC and deallocates TX/RX buffers.
   4297  ************************************************************************/
   4298 static void
   4299 ixgbe_stop(void *arg)
   4300 {
   4301 	struct ifnet    *ifp;
   4302 	struct adapter  *adapter = arg;
   4303 	struct ixgbe_hw *hw = &adapter->hw;
   4304 
   4305 	ifp = adapter->ifp;
   4306 
   4307 	KASSERT(mutex_owned(&adapter->core_mtx));
   4308 
   4309 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4310 	ixgbe_disable_intr(adapter);
   4311 	callout_stop(&adapter->timer);
   4312 
   4313 	/* Let the stack know...*/
   4314 	ifp->if_flags &= ~IFF_RUNNING;
   4315 
   4316 	ixgbe_reset_hw(hw);
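	/*
	 * Note: ixgbe_reset_hw() sets hw->adapter_stopped; it is cleared
	 * here before the explicit ixgbe_stop_adapter() call, which
	 * disables the RX/TX units and masks interrupts.
	 */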
   4317 	hw->adapter_stopped = FALSE;
   4318 	ixgbe_stop_adapter(hw);
   4319 	if (hw->mac.type == ixgbe_mac_82599EB)
   4320 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4321 	/* Turn off the laser - noop with no optics */
   4322 	ixgbe_disable_tx_laser(hw);
   4323 
   4324 	/* Update the stack */
   4325 	adapter->link_up = FALSE;
   4326 	ixgbe_update_link_status(adapter);
   4327 
   4328 	/* reprogram the RAR[0] in case user changed it. */
   4329 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4330 
   4331 	return;
   4332 } /* ixgbe_stop */
   4333 
   4334 /************************************************************************
   4335  * ixgbe_update_link_status - Update OS on link state
   4336  *
   4337  * Note: Only updates the OS on the cached link state.
   4338  *       The real check of the hardware only happens with
   4339  *       a link interrupt.
   4340  ************************************************************************/
   4341 static void
   4342 ixgbe_update_link_status(struct adapter *adapter)
   4343 {
   4344 	struct ifnet	*ifp = adapter->ifp;
   4345 	device_t        dev = adapter->dev;
   4346 	struct ixgbe_hw *hw = &adapter->hw;
   4347 
   4348 	if (adapter->link_up) {
   4349 		if (adapter->link_active == FALSE) {
   4350 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4351 				/*
   4352 				 *  Discard count for both MAC Local Fault and
   4353 				 * Remote Fault because those registers are
   4354 				 * valid only when the link speed is up and
   4355 				 * 10Gbps.
   4356 				 */
   4357 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4358 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4359 			}
   4360 
   4361 			if (bootverbose) {
   4362 				const char *bpsmsg;
   4363 
   4364 				switch (adapter->link_speed) {
   4365 				case IXGBE_LINK_SPEED_10GB_FULL:
   4366 					bpsmsg = "10 Gbps";
   4367 					break;
   4368 				case IXGBE_LINK_SPEED_5GB_FULL:
   4369 					bpsmsg = "5 Gbps";
   4370 					break;
   4371 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4372 					bpsmsg = "2.5 Gbps";
   4373 					break;
   4374 				case IXGBE_LINK_SPEED_1GB_FULL:
   4375 					bpsmsg = "1 Gbps";
   4376 					break;
   4377 				case IXGBE_LINK_SPEED_100_FULL:
   4378 					bpsmsg = "100 Mbps";
   4379 					break;
   4380 				case IXGBE_LINK_SPEED_10_FULL:
   4381 					bpsmsg = "10 Mbps";
   4382 					break;
   4383 				default:
   4384 					bpsmsg = "unknown speed";
   4385 					break;
   4386 				}
    4387 				device_printf(dev, "Link is up %s %s\n",
   4388 				    bpsmsg, "Full Duplex");
   4389 			}
   4390 			adapter->link_active = TRUE;
   4391 			/* Update any Flow Control changes */
   4392 			ixgbe_fc_enable(&adapter->hw);
   4393 			/* Update DMA coalescing config */
   4394 			ixgbe_config_dmac(adapter);
   4395 			if_link_state_change(ifp, LINK_STATE_UP);
   4396 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4397 				ixgbe_ping_all_vfs(adapter);
   4398 		}
   4399 	} else { /* Link down */
   4400 		if (adapter->link_active == TRUE) {
   4401 			if (bootverbose)
   4402 				device_printf(dev, "Link is Down\n");
   4403 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4404 			adapter->link_active = FALSE;
   4405 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4406 				ixgbe_ping_all_vfs(adapter);
   4407 		}
   4408 	}
   4409 
   4410 	return;
   4411 } /* ixgbe_update_link_status */
   4412 
   4413 /************************************************************************
   4414  * ixgbe_config_dmac - Configure DMA Coalescing
   4415  ************************************************************************/
   4416 static void
   4417 ixgbe_config_dmac(struct adapter *adapter)
   4418 {
   4419 	struct ixgbe_hw *hw = &adapter->hw;
   4420 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4421 
   4422 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4423 		return;
   4424 
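	/*
	 * XOR is used as an inequality test here: only reprogram the DMA
	 * coalescing registers when the watchdog timer or the link speed
	 * has actually changed.
	 */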
   4425 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4426 	    dcfg->link_speed ^ adapter->link_speed) {
   4427 		dcfg->watchdog_timer = adapter->dmac;
   4428 		dcfg->fcoe_en = false;
   4429 		dcfg->link_speed = adapter->link_speed;
   4430 		dcfg->num_tcs = 1;
   4431 
   4432 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4433 		    dcfg->watchdog_timer, dcfg->link_speed);
   4434 
   4435 		hw->mac.ops.dmac_config(hw);
   4436 	}
   4437 } /* ixgbe_config_dmac */
   4438 
   4439 /************************************************************************
   4440  * ixgbe_enable_intr
   4441  ************************************************************************/
   4442 static void
   4443 ixgbe_enable_intr(struct adapter *adapter)
   4444 {
   4445 	struct ixgbe_hw	*hw = &adapter->hw;
   4446 	struct ix_queue	*que = adapter->queues;
   4447 	u32		mask, fwsm;
   4448 
   4449 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4450 
   4451 	switch (adapter->hw.mac.type) {
   4452 	case ixgbe_mac_82599EB:
   4453 		mask |= IXGBE_EIMS_ECC;
   4454 		/* Temperature sensor on some adapters */
   4455 		mask |= IXGBE_EIMS_GPI_SDP0;
   4456 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4457 		mask |= IXGBE_EIMS_GPI_SDP1;
   4458 		mask |= IXGBE_EIMS_GPI_SDP2;
   4459 		break;
   4460 	case ixgbe_mac_X540:
   4461 		/* Detect if Thermal Sensor is enabled */
   4462 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4463 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4464 			mask |= IXGBE_EIMS_TS;
   4465 		mask |= IXGBE_EIMS_ECC;
   4466 		break;
   4467 	case ixgbe_mac_X550:
   4468 		/* MAC thermal sensor is automatically enabled */
   4469 		mask |= IXGBE_EIMS_TS;
   4470 		mask |= IXGBE_EIMS_ECC;
   4471 		break;
   4472 	case ixgbe_mac_X550EM_x:
   4473 	case ixgbe_mac_X550EM_a:
   4474 		/* Some devices use SDP0 for important information */
   4475 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4476 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4477 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4478 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4479 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4480 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4481 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4482 		mask |= IXGBE_EIMS_ECC;
   4483 		break;
   4484 	default:
   4485 		break;
   4486 	}
   4487 
   4488 	/* Enable Fan Failure detection */
   4489 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4490 		mask |= IXGBE_EIMS_GPI_SDP1;
   4491 	/* Enable SR-IOV */
   4492 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4493 		mask |= IXGBE_EIMS_MAILBOX;
   4494 	/* Enable Flow Director */
   4495 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4496 		mask |= IXGBE_EIMS_FLOW_DIR;
   4497 
   4498 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4499 
   4500 	/* With MSI-X we use auto clear */
   4501 	if (adapter->msix_mem) {
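		/*
		 * Causes set in EIAC are cleared automatically when their
		 * MSI-X vector fires, so the queue handlers need not write
		 * EICR; link and mailbox causes are kept manual below.
		 */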
   4502 		mask = IXGBE_EIMS_ENABLE_MASK;
   4503 		/* Don't autoclear Link */
   4504 		mask &= ~IXGBE_EIMS_OTHER;
   4505 		mask &= ~IXGBE_EIMS_LSC;
   4506 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4507 			mask &= ~IXGBE_EIMS_MAILBOX;
   4508 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4509 	}
   4510 
   4511 	/*
   4512 	 * Now enable all queues, this is done separately to
   4513 	 * allow for handling the extended (beyond 32) MSI-X
   4514 	 * vectors that can be used by 82599
   4515 	 */
    4516 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4517 		ixgbe_enable_queue(adapter, que->msix);
   4518 
   4519 	IXGBE_WRITE_FLUSH(hw);
   4520 
   4521 	return;
   4522 } /* ixgbe_enable_intr */
   4523 
   4524 /************************************************************************
   4525  * ixgbe_disable_intr
   4526  ************************************************************************/
   4527 static void
   4528 ixgbe_disable_intr(struct adapter *adapter)
   4529 {
   4530 	if (adapter->msix_mem)
   4531 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
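	/*
	 * 82598 has a single 32-bit EIMC register; later MACs add the
	 * EIMC_EX pair to cover the extended per-queue interrupt causes.
	 */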
   4532 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   4533 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   4534 	} else {
   4535 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   4536 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   4537 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   4538 	}
   4539 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4540 
   4541 	return;
   4542 } /* ixgbe_disable_intr */
   4543 
   4544 /************************************************************************
   4545  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4546  ************************************************************************/
   4547 static int
   4548 ixgbe_legacy_irq(void *arg)
   4549 {
   4550 	struct ix_queue *que = arg;
   4551 	struct adapter	*adapter = que->adapter;
   4552 	struct ixgbe_hw	*hw = &adapter->hw;
   4553 	struct ifnet    *ifp = adapter->ifp;
    4554 	struct tx_ring  *txr = adapter->tx_rings;
   4555 	bool		more = false;
   4556 	u32             eicr, eicr_mask;
   4557 
   4558 	/* Silicon errata #26 on 82598 */
   4559 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
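	/*
	 * All interrupt causes are masked here and selectively re-enabled
	 * at the end of the handler (or from the deferred softint).
	 */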
   4560 
   4561 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4562 
   4563 	adapter->stats.pf.legint.ev_count++;
   4564 	++que->irqs.ev_count;
   4565 	if (eicr == 0) {
   4566 		adapter->stats.pf.intzero.ev_count++;
   4567 		if ((ifp->if_flags & IFF_UP) != 0)
   4568 			ixgbe_enable_intr(adapter);
   4569 		return 0;
   4570 	}
   4571 
   4572 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4573 #ifdef __NetBSD__
   4574 		/* Don't run ixgbe_rxeof in interrupt context */
   4575 		more = true;
   4576 #else
   4577 		more = ixgbe_rxeof(que);
   4578 #endif
   4579 
   4580 		IXGBE_TX_LOCK(txr);
   4581 		ixgbe_txeof(txr);
   4582 #ifdef notyet
   4583 		if (!ixgbe_ring_empty(ifp, txr->br))
   4584 			ixgbe_start_locked(ifp, txr);
   4585 #endif
   4586 		IXGBE_TX_UNLOCK(txr);
   4587 	}
   4588 
   4589 	/* Check for fan failure */
   4590 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4591 		ixgbe_check_fan_failure(adapter, eicr, true);
   4592 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4593 	}
   4594 
   4595 	/* Link status change */
   4596 	if (eicr & IXGBE_EICR_LSC)
   4597 		softint_schedule(adapter->link_si);
   4598 
   4599 	if (ixgbe_is_sfp(hw)) {
   4600 		/* Pluggable optics-related interrupt */
   4601 		if (hw->mac.type >= ixgbe_mac_X540)
   4602 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4603 		else
   4604 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4605 
   4606 		if (eicr & eicr_mask) {
   4607 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4608 			softint_schedule(adapter->mod_si);
   4609 		}
   4610 
   4611 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4612 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4613 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4614 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4615 			softint_schedule(adapter->msf_si);
   4616 		}
   4617 	}
   4618 
   4619 	/* External PHY interrupt */
   4620 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4621 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4622 		softint_schedule(adapter->phy_si);
   4623 
   4624 	if (more)
   4625 		softint_schedule(que->que_si);
   4626 	else
   4627 		ixgbe_enable_intr(adapter);
   4628 
   4629 	return 1;
   4630 } /* ixgbe_legacy_irq */
   4631 
   4632 /************************************************************************
   4633  * ixgbe_free_pciintr_resources
   4634  ************************************************************************/
   4635 static void
   4636 ixgbe_free_pciintr_resources(struct adapter *adapter)
   4637 {
   4638 	struct ix_queue *que = adapter->queues;
   4639 	int		rid;
   4640 
   4641 	/*
   4642 	 * Release all msix queue resources:
   4643 	 */
   4644 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4645 		if (que->res != NULL) {
   4646 			pci_intr_disestablish(adapter->osdep.pc,
   4647 			    adapter->osdep.ihs[i]);
   4648 			adapter->osdep.ihs[i] = NULL;
   4649 		}
   4650 	}
   4651 
   4652 	/* Clean the Legacy or Link interrupt last */
   4653 	if (adapter->vector) /* we are doing MSIX */
   4654 		rid = adapter->vector;
   4655 	else
   4656 		rid = 0;
   4657 
   4658 	if (adapter->osdep.ihs[rid] != NULL) {
   4659 		pci_intr_disestablish(adapter->osdep.pc,
   4660 		    adapter->osdep.ihs[rid]);
   4661 		adapter->osdep.ihs[rid] = NULL;
   4662 	}
   4663 
   4664 	if (adapter->osdep.intrs != NULL) {
   4665 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4666 		    adapter->osdep.nintrs);
   4667 		adapter->osdep.intrs = NULL;
   4668 	}
   4669 
   4670 	return;
   4671 } /* ixgbe_free_pciintr_resources */
   4672 
   4673 /************************************************************************
   4674  * ixgbe_free_pci_resources
   4675  ************************************************************************/
   4676 static void
   4677 ixgbe_free_pci_resources(struct adapter *adapter)
   4678 {
   4679 
   4680 	ixgbe_free_pciintr_resources(adapter);
   4681 
   4682 	if (adapter->osdep.mem_size != 0) {
   4683 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4684 		    adapter->osdep.mem_bus_space_handle,
   4685 		    adapter->osdep.mem_size);
   4686 	}
   4687 
   4688 	return;
   4689 } /* ixgbe_free_pci_resources */
   4690 
   4691 /************************************************************************
   4692  * ixgbe_set_sysctl_value
   4693  ************************************************************************/
   4694 static void
   4695 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4696     const char *description, int *limit, int value)
   4697 {
   4698 	device_t dev =  adapter->dev;
   4699 	struct sysctllog **log;
   4700 	const struct sysctlnode *rnode, *cnode;
   4701 
   4702 	log = &adapter->sysctllog;
   4703 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4704 		aprint_error_dev(dev, "could not create sysctl root\n");
   4705 		return;
   4706 	}
   4707 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4708 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4709 	    name, SYSCTL_DESCR(description),
   4710 		NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4711 		aprint_error_dev(dev, "could not create sysctl\n");
   4712 	*limit = value;
   4713 } /* ixgbe_set_sysctl_value */
   4714 
   4715 /************************************************************************
   4716  * ixgbe_sysctl_flowcntl
   4717  *
   4718  *   SYSCTL wrapper around setting Flow Control
   4719  ************************************************************************/
   4720 static int
   4721 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4722 {
   4723 	struct sysctlnode node = *rnode;
   4724 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4725 	int error, fc;
   4726 
   4727 	fc = adapter->hw.fc.current_mode;
   4728 	node.sysctl_data = &fc;
   4729 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4730 	if (error != 0 || newp == NULL)
   4731 		return error;
   4732 
   4733 	/* Don't bother if it's not changed */
   4734 	if (fc == adapter->hw.fc.current_mode)
   4735 		return (0);
   4736 
   4737 	return ixgbe_set_flowcntl(adapter, fc);
   4738 } /* ixgbe_sysctl_flowcntl */
   4739 
   4740 /************************************************************************
   4741  * ixgbe_set_flowcntl - Set flow control
   4742  *
   4743  *   Flow control values:
   4744  *     0 - off
   4745  *     1 - rx pause
   4746  *     2 - tx pause
   4747  *     3 - full
   4748  ************************************************************************/
   4749 static int
   4750 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4751 {
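	/*
	 * With multiple queues, per-queue RX drop (see ixgbe_enable_rx_drop)
	 * is only used while flow control is off, so it is toggled together
	 * with the requested flow control mode below.
	 */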
   4752 	switch (fc) {
   4753 		case ixgbe_fc_rx_pause:
   4754 		case ixgbe_fc_tx_pause:
   4755 		case ixgbe_fc_full:
   4756 			adapter->hw.fc.requested_mode = fc;
   4757 			if (adapter->num_queues > 1)
   4758 				ixgbe_disable_rx_drop(adapter);
   4759 			break;
   4760 		case ixgbe_fc_none:
   4761 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4762 			if (adapter->num_queues > 1)
   4763 				ixgbe_enable_rx_drop(adapter);
   4764 			break;
   4765 		default:
   4766 			return (EINVAL);
   4767 	}
   4768 
   4769 #if 0 /* XXX NetBSD */
   4770 	/* Don't autoneg if forcing a value */
   4771 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4772 #endif
   4773 	ixgbe_fc_enable(&adapter->hw);
   4774 
   4775 	return (0);
   4776 } /* ixgbe_set_flowcntl */
   4777 
   4778 /************************************************************************
   4779  * ixgbe_enable_rx_drop
   4780  *
   4781  *   Enable the hardware to drop packets when the buffer is
   4782  *   full. This is useful with multiqueue, so that no single
   4783  *   queue being full stalls the entire RX engine. We only
   4784  *   enable this when Multiqueue is enabled AND Flow Control
   4785  *   is disabled.
   4786  ************************************************************************/
   4787 static void
   4788 ixgbe_enable_rx_drop(struct adapter *adapter)
   4789 {
   4790 	struct ixgbe_hw *hw = &adapter->hw;
   4791 	struct rx_ring  *rxr;
   4792 	u32             srrctl;
   4793 
   4794 	for (int i = 0; i < adapter->num_queues; i++) {
   4795 		rxr = &adapter->rx_rings[i];
   4796 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4797 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4798 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4799 	}
   4800 
   4801 	/* enable drop for each vf */
   4802 	for (int i = 0; i < adapter->num_vfs; i++) {
   4803 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4804 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4805 		    IXGBE_QDE_ENABLE));
   4806 	}
   4807 } /* ixgbe_enable_rx_drop */
   4808 
   4809 /************************************************************************
   4810  * ixgbe_disable_rx_drop
   4811  ************************************************************************/
   4812 static void
   4813 ixgbe_disable_rx_drop(struct adapter *adapter)
   4814 {
   4815 	struct ixgbe_hw *hw = &adapter->hw;
   4816 	struct rx_ring  *rxr;
   4817 	u32             srrctl;
   4818 
   4819 	for (int i = 0; i < adapter->num_queues; i++) {
   4820 		rxr = &adapter->rx_rings[i];
    4821 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4822 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4823 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4824 	}
   4825 
   4826 	/* disable drop for each vf */
   4827 	for (int i = 0; i < adapter->num_vfs; i++) {
   4828 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4829 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4830 	}
   4831 } /* ixgbe_disable_rx_drop */
   4832 
   4833 /************************************************************************
   4834  * ixgbe_sysctl_advertise
   4835  *
   4836  *   SYSCTL wrapper around setting advertised speed
   4837  ************************************************************************/
   4838 static int
   4839 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4840 {
   4841 	struct sysctlnode node = *rnode;
   4842 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4843 	int            error = 0, advertise;
   4844 
   4845 	advertise = adapter->advertise;
   4846 	node.sysctl_data = &advertise;
   4847 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4848 	if (error != 0 || newp == NULL)
   4849 		return error;
   4850 
   4851 	return ixgbe_set_advertise(adapter, advertise);
   4852 } /* ixgbe_sysctl_advertise */
   4853 
   4854 /************************************************************************
   4855  * ixgbe_set_advertise - Control advertised link speed
   4856  *
   4857  *   Flags:
   4858  *     0x00 - Default (all capable link speed)
   4859  *     0x01 - advertise 100 Mb
   4860  *     0x02 - advertise 1G
   4861  *     0x04 - advertise 10G
   4862  *     0x08 - advertise 10 Mb
   4863  *     0x10 - advertise 2.5G
   4864  *     0x20 - advertise 5G
   4865  ************************************************************************/
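/* For example, a value of 0x06 (0x02 | 0x04) advertises both 1G and 10G. */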
   4866 static int
   4867 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4868 {
   4869 	device_t         dev;
   4870 	struct ixgbe_hw  *hw;
   4871 	ixgbe_link_speed speed = 0;
   4872 	ixgbe_link_speed link_caps = 0;
   4873 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4874 	bool             negotiate = FALSE;
   4875 
   4876 	/* Checks to validate new value */
   4877 	if (adapter->advertise == advertise) /* no change */
   4878 		return (0);
   4879 
   4880 	dev = adapter->dev;
   4881 	hw = &adapter->hw;
   4882 
   4883 	/* No speed changes for backplane media */
   4884 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4885 		return (ENODEV);
   4886 
   4887 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4888 	    (hw->phy.multispeed_fiber))) {
   4889 		device_printf(dev,
   4890 		    "Advertised speed can only be set on copper or "
   4891 		    "multispeed fiber media types.\n");
   4892 		return (EINVAL);
   4893 	}
   4894 
    4895 	if (advertise < 0x0 || advertise > 0x3f) {
    4896 		device_printf(dev,
    4897 		    "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
   4898 		return (EINVAL);
   4899 	}
   4900 
   4901 	if (hw->mac.ops.get_link_capabilities) {
   4902 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4903 		    &negotiate);
   4904 		if (err != IXGBE_SUCCESS) {
    4905 			device_printf(dev, "Unable to determine supported advertised speeds\n");
   4906 			return (ENODEV);
   4907 		}
   4908 	}
   4909 
   4910 	/* Set new value and report new advertised mode */
   4911 	if (advertise & 0x1) {
   4912 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4913 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4914 			return (EINVAL);
   4915 		}
   4916 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4917 	}
   4918 	if (advertise & 0x2) {
   4919 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4920 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4921 			return (EINVAL);
   4922 		}
   4923 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4924 	}
   4925 	if (advertise & 0x4) {
   4926 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4927 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4928 			return (EINVAL);
   4929 		}
   4930 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4931 	}
   4932 	if (advertise & 0x8) {
   4933 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4934 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4935 			return (EINVAL);
   4936 		}
   4937 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4938 	}
   4939 	if (advertise & 0x10) {
   4940 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   4941 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   4942 			return (EINVAL);
   4943 		}
   4944 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   4945 	}
   4946 	if (advertise & 0x20) {
   4947 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   4948 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   4949 			return (EINVAL);
   4950 		}
   4951 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   4952 	}
   4953 	if (advertise == 0)
   4954 		speed = link_caps; /* All capable link speed */
   4955 
   4956 	hw->mac.autotry_restart = TRUE;
   4957 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4958 	adapter->advertise = advertise;
   4959 
   4960 	return (0);
   4961 } /* ixgbe_set_advertise */
   4962 
   4963 /************************************************************************
   4964  * ixgbe_get_advertise - Get current advertised speed settings
   4965  *
   4966  *   Formatted for sysctl usage.
   4967  *   Flags:
   4968  *     0x01 - advertise 100 Mb
   4969  *     0x02 - advertise 1G
   4970  *     0x04 - advertise 10G
   4971  *     0x08 - advertise 10 Mb (yes, Mb)
   4972  *     0x10 - advertise 2.5G
   4973  *     0x20 - advertise 5G
   4974  ************************************************************************/
   4975 static int
   4976 ixgbe_get_advertise(struct adapter *adapter)
   4977 {
   4978 	struct ixgbe_hw  *hw = &adapter->hw;
   4979 	int              speed;
   4980 	ixgbe_link_speed link_caps = 0;
   4981 	s32              err;
   4982 	bool             negotiate = FALSE;
   4983 
   4984 	/*
   4985 	 * Advertised speed means nothing unless it's copper or
   4986 	 * multi-speed fiber
   4987 	 */
   4988 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   4989 	    !(hw->phy.multispeed_fiber))
   4990 		return (0);
   4991 
   4992 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   4993 	if (err != IXGBE_SUCCESS)
   4994 		return (0);
   4995 
   4996 	speed =
   4997 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   4998 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   4999 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5000 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5001 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5002 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5003 
   5004 	return speed;
   5005 } /* ixgbe_get_advertise */
   5006 
   5007 /************************************************************************
   5008  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5009  *
   5010  *   Control values:
   5011  *     0/1 - off / on (use default value of 1000)
   5012  *
   5013  *     Legal timer values are:
   5014  *     50,100,250,500,1000,2000,5000,10000
   5015  *
   5016  *     Turning off interrupt moderation will also turn this off.
   5017  ************************************************************************/
   5018 static int
   5019 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5020 {
   5021 	struct sysctlnode node = *rnode;
   5022 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5023 	struct ifnet   *ifp = adapter->ifp;
   5024 	int            error;
   5025 	int            newval;
   5026 
   5027 	newval = adapter->dmac;
   5028 	node.sysctl_data = &newval;
   5029 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5030 	if ((error) || (newp == NULL))
   5031 		return (error);
   5032 
   5033 	switch (newval) {
   5034 	case 0:
   5035 		/* Disabled */
   5036 		adapter->dmac = 0;
   5037 		break;
   5038 	case 1:
   5039 		/* Enable and use default */
   5040 		adapter->dmac = 1000;
   5041 		break;
   5042 	case 50:
   5043 	case 100:
   5044 	case 250:
   5045 	case 500:
   5046 	case 1000:
   5047 	case 2000:
   5048 	case 5000:
   5049 	case 10000:
   5050 		/* Legal values - allow */
   5051 		adapter->dmac = newval;
   5052 		break;
   5053 	default:
   5054 		/* Do nothing, illegal value */
   5055 		return (EINVAL);
   5056 	}
   5057 
   5058 	/* Re-initialize hardware if it's already running */
   5059 	if (ifp->if_flags & IFF_RUNNING)
   5060 		ixgbe_init(ifp);
   5061 
   5062 	return (0);
   5063 }
   5064 
   5065 #ifdef IXGBE_DEBUG
   5066 /************************************************************************
   5067  * ixgbe_sysctl_power_state
   5068  *
   5069  *   Sysctl to test power states
   5070  *   Values:
   5071  *     0      - set device to D0
   5072  *     3      - set device to D3
   5073  *     (none) - get current device power state
   5074  ************************************************************************/
   5075 static int
   5076 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   5077 {
   5078 #ifdef notyet
   5079 	struct sysctlnode node = *rnode;
   5080 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5081 	device_t       dev =  adapter->dev;
   5082 	int            curr_ps, new_ps, error = 0;
   5083 
   5084 	curr_ps = new_ps = pci_get_powerstate(dev);
   5085 
   5086 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5087 	if ((error) || (req->newp == NULL))
   5088 		return (error);
   5089 
   5090 	if (new_ps == curr_ps)
   5091 		return (0);
   5092 
   5093 	if (new_ps == 3 && curr_ps == 0)
   5094 		error = DEVICE_SUSPEND(dev);
   5095 	else if (new_ps == 0 && curr_ps == 3)
   5096 		error = DEVICE_RESUME(dev);
   5097 	else
   5098 		return (EINVAL);
   5099 
   5100 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5101 
   5102 	return (error);
   5103 #else
   5104 	return 0;
   5105 #endif
   5106 } /* ixgbe_sysctl_power_state */
   5107 #endif
   5108 
   5109 /************************************************************************
   5110  * ixgbe_sysctl_wol_enable
   5111  *
   5112  *   Sysctl to enable/disable the WoL capability,
   5113  *   if supported by the adapter.
   5114  *
   5115  *   Values:
   5116  *     0 - disabled
   5117  *     1 - enabled
   5118  ************************************************************************/
   5119 static int
   5120 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5121 {
   5122 	struct sysctlnode node = *rnode;
   5123 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5124 	struct ixgbe_hw *hw = &adapter->hw;
   5125 	bool            new_wol_enabled;
   5126 	int             error = 0;
   5127 
   5128 	new_wol_enabled = hw->wol_enabled;
   5129 	node.sysctl_data = &new_wol_enabled;
   5130 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5131 	if ((error) || (newp == NULL))
   5132 		return (error);
   5133 	if (new_wol_enabled == hw->wol_enabled)
   5134 		return (0);
   5135 
   5136 	if (new_wol_enabled && !adapter->wol_support)
   5137 		return (ENODEV);
   5138 	else
   5139 		hw->wol_enabled = new_wol_enabled;
   5140 
   5141 	return (0);
   5142 } /* ixgbe_sysctl_wol_enable */
   5143 
   5144 /************************************************************************
   5145  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5146  *
   5147  *   Sysctl to enable/disable the types of packets that the
   5148  *   adapter will wake up on upon receipt.
   5149  *   Flags:
   5150  *     0x1  - Link Status Change
   5151  *     0x2  - Magic Packet
   5152  *     0x4  - Direct Exact
   5153  *     0x8  - Directed Multicast
   5154  *     0x10 - Broadcast
   5155  *     0x20 - ARP/IPv4 Request Packet
   5156  *     0x40 - Direct IPv4 Packet
   5157  *     0x80 - Direct IPv6 Packet
   5158  *
   5159  *   Settings not listed above will cause the sysctl to return an error.
   5160  ************************************************************************/
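/* For example, 0x12 (0x2 | 0x10) wakes on Magic Packet and Broadcast. */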
   5161 static int
   5162 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5163 {
   5164 	struct sysctlnode node = *rnode;
   5165 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5166 	int error = 0;
   5167 	u32 new_wufc;
   5168 
   5169 	new_wufc = adapter->wufc;
   5170 	node.sysctl_data = &new_wufc;
   5171 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5172 	if ((error) || (newp == NULL))
   5173 		return (error);
   5174 	if (new_wufc == adapter->wufc)
   5175 		return (0);
   5176 
   5177 	if (new_wufc & 0xffffff00)
   5178 		return (EINVAL);
   5179 
   5180 	new_wufc &= 0xff;
   5181 	new_wufc |= (0xffffff & adapter->wufc);
   5182 	adapter->wufc = new_wufc;
   5183 
   5184 	return (0);
   5185 } /* ixgbe_sysctl_wufc */
   5186 
   5187 #ifdef IXGBE_DEBUG
   5188 /************************************************************************
   5189  * ixgbe_sysctl_print_rss_config
   5190  ************************************************************************/
   5191 static int
   5192 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5193 {
   5194 #ifdef notyet
   5195 	struct sysctlnode node = *rnode;
   5196 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5197 	struct ixgbe_hw *hw = &adapter->hw;
   5198 	device_t        dev = adapter->dev;
   5199 	struct sbuf     *buf;
   5200 	int             error = 0, reta_size;
   5201 	u32             reg;
   5202 
   5203 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5204 	if (!buf) {
   5205 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5206 		return (ENOMEM);
   5207 	}
   5208 
   5209 	// TODO: use sbufs to make a string to print out
   5210 	/* Set multiplier for RETA setup and table size based on MAC */
   5211 	switch (adapter->hw.mac.type) {
   5212 	case ixgbe_mac_X550:
   5213 	case ixgbe_mac_X550EM_x:
   5214 	case ixgbe_mac_X550EM_a:
   5215 		reta_size = 128;
   5216 		break;
   5217 	default:
   5218 		reta_size = 32;
   5219 		break;
   5220 	}
   5221 
   5222 	/* Print out the redirection table */
   5223 	sbuf_cat(buf, "\n");
   5224 	for (int i = 0; i < reta_size; i++) {
   5225 		if (i < 32) {
   5226 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5227 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5228 		} else {
   5229 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5230 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5231 		}
   5232 	}
   5233 
   5234 	// TODO: print more config
   5235 
   5236 	error = sbuf_finish(buf);
   5237 	if (error)
   5238 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5239 
   5240 	sbuf_delete(buf);
   5241 #endif
   5242 	return (0);
   5243 } /* ixgbe_sysctl_print_rss_config */
   5244 #endif /* IXGBE_DEBUG */
   5245 
   5246 /************************************************************************
   5247  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5248  *
   5249  *   For X552/X557-AT devices using an external PHY
   5250  ************************************************************************/
   5251 static int
   5252 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5253 {
   5254 	struct sysctlnode node = *rnode;
   5255 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5256 	struct ixgbe_hw *hw = &adapter->hw;
   5257 	int val;
   5258 	u16 reg;
   5259 	int		error;
   5260 
   5261 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5262 		device_printf(adapter->dev,
   5263 		    "Device has no supported external thermal sensor.\n");
   5264 		return (ENODEV);
   5265 	}
   5266 
   5267 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5268 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5269 		device_printf(adapter->dev,
   5270 		    "Error reading from PHY's current temperature register\n");
   5271 		return (EAGAIN);
   5272 	}
   5273 
   5274 	node.sysctl_data = &val;
   5275 
   5276 	/* Shift temp for output */
   5277 	val = reg >> 8;
   5278 
   5279 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5280 	if ((error) || (newp == NULL))
   5281 		return (error);
   5282 
   5283 	return (0);
   5284 } /* ixgbe_sysctl_phy_temp */
   5285 
   5286 /************************************************************************
   5287  * ixgbe_sysctl_phy_overtemp_occurred
   5288  *
   5289  *   Reports (directly from the PHY) whether the current PHY
   5290  *   temperature is over the overtemp threshold.
   5291  ************************************************************************/
   5292 static int
   5293 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5294 {
   5295 	struct sysctlnode node = *rnode;
   5296 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5297 	struct ixgbe_hw *hw = &adapter->hw;
   5298 	int val, error;
   5299 	u16 reg;
   5300 
   5301 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5302 		device_printf(adapter->dev,
   5303 		    "Device has no supported external thermal sensor.\n");
   5304 		return (ENODEV);
   5305 	}
   5306 
   5307 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5308 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5309 		device_printf(adapter->dev,
   5310 		    "Error reading from PHY's temperature status register\n");
   5311 		return (EAGAIN);
   5312 	}
   5313 
   5314 	node.sysctl_data = &val;
   5315 
   5316 	/* Get occurrence bit */
   5317 	val = !!(reg & 0x4000);
   5318 
   5319 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5320 	if ((error) || (newp == NULL))
   5321 		return (error);
   5322 
   5323 	return (0);
   5324 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5325 
   5326 /************************************************************************
   5327  * ixgbe_sysctl_eee_state
   5328  *
   5329  *   Sysctl to set EEE power saving feature
   5330  *   Values:
   5331  *     0      - disable EEE
   5332  *     1      - enable EEE
   5333  *     (none) - get current device EEE state
   5334  ************************************************************************/
   5335 static int
   5336 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5337 {
   5338 	struct sysctlnode node = *rnode;
   5339 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5340 	struct ifnet   *ifp = adapter->ifp;
   5341 	device_t       dev = adapter->dev;
   5342 	int            curr_eee, new_eee, error = 0;
   5343 	s32            retval;
   5344 
   5345 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5346 	node.sysctl_data = &new_eee;
   5347 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5348 	if ((error) || (newp == NULL))
   5349 		return (error);
   5350 
   5351 	/* Nothing to do */
   5352 	if (new_eee == curr_eee)
   5353 		return (0);
   5354 
   5355 	/* Not supported */
   5356 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5357 		return (EINVAL);
   5358 
   5359 	/* Bounds checking */
   5360 	if ((new_eee < 0) || (new_eee > 1))
   5361 		return (EINVAL);
   5362 
   5363 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5364 	if (retval) {
   5365 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5366 		return (EINVAL);
   5367 	}
   5368 
   5369 	/* Restart auto-neg */
   5370 	ixgbe_init(ifp);
   5371 
   5372 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5373 
   5374 	/* Cache new value */
   5375 	if (new_eee)
   5376 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5377 	else
   5378 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5379 
   5380 	return (error);
   5381 } /* ixgbe_sysctl_eee_state */
   5382 
   5383 /************************************************************************
   5384  * ixgbe_init_device_features
   5385  ************************************************************************/
   5386 static void
   5387 ixgbe_init_device_features(struct adapter *adapter)
   5388 {
   5389 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5390 	                  | IXGBE_FEATURE_RSS
   5391 	                  | IXGBE_FEATURE_MSI
   5392 	                  | IXGBE_FEATURE_MSIX
   5393 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5394 	                  | IXGBE_FEATURE_LEGACY_TX;
   5395 
   5396 	/* Set capabilities first... */
   5397 	switch (adapter->hw.mac.type) {
   5398 	case ixgbe_mac_82598EB:
   5399 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5400 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5401 		break;
   5402 	case ixgbe_mac_X540:
   5403 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5404 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5405 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5406 		    (adapter->hw.bus.func == 0))
   5407 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5408 		break;
   5409 	case ixgbe_mac_X550:
   5410 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5411 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5412 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5413 		break;
   5414 	case ixgbe_mac_X550EM_x:
   5415 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5416 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5417 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5418 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5419 		break;
   5420 	case ixgbe_mac_X550EM_a:
   5421 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5422 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5423 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5424 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5425 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5426 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5427 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5428 		}
   5429 		break;
   5430 	case ixgbe_mac_82599EB:
   5431 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5432 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5433 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5434 		    (adapter->hw.bus.func == 0))
   5435 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5436 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5437 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5438 		break;
   5439 	default:
   5440 		break;
   5441 	}
   5442 
   5443 	/* Enabled by default... */
   5444 	/* Fan failure detection */
   5445 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5446 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5447 	/* Netmap */
   5448 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5449 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5450 	/* EEE */
   5451 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5452 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5453 	/* Thermal Sensor */
   5454 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5455 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5456 
   5457 	/* Enabled via global sysctl... */
   5458 	/* Flow Director */
   5459 	if (ixgbe_enable_fdir) {
   5460 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5461 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5462 		else
    5463 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5464 	}
   5465 	/* Legacy (single queue) transmit */
   5466 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5467 	    ixgbe_enable_legacy_tx)
   5468 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5469 	/*
   5470 	 * Message Signal Interrupts - Extended (MSI-X)
   5471 	 * Normal MSI is only enabled if MSI-X calls fail.
   5472 	 */
   5473 	if (!ixgbe_enable_msix)
   5474 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5475 	/* Receive-Side Scaling (RSS) */
   5476 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5477 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5478 
   5479 	/* Disable features with unmet dependencies... */
   5480 	/* No MSI-X */
   5481 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5482 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5483 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5484 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5485 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5486 	}
   5487 } /* ixgbe_init_device_features */
   5488 
   5489 /************************************************************************
   5490  * ixgbe_probe - Device identification routine
   5491  *
   5492  *   Determines if the driver should be loaded on
   5493  *   adapter based on its PCI vendor/device ID.
   5494  *
   5495  *   return BUS_PROBE_DEFAULT on success, positive on failure
   5496  ************************************************************************/
   5497 static int
   5498 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5499 {
   5500 	const struct pci_attach_args *pa = aux;
   5501 
   5502 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5503 }
   5504 
   5505 static ixgbe_vendor_info_t *
   5506 ixgbe_lookup(const struct pci_attach_args *pa)
   5507 {
   5508 	ixgbe_vendor_info_t *ent;
   5509 	pcireg_t subid;
   5510 
   5511 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5512 
   5513 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5514 		return NULL;
   5515 
   5516 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5517 
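	/* A zero subvendor or subdevice ID in the table acts as a wildcard. */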
   5518 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5519 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5520 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5521 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5522 			(ent->subvendor_id == 0)) &&
   5523 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5524 			(ent->subdevice_id == 0))) {
   5525 			++ixgbe_total_ports;
   5526 			return ent;
   5527 		}
   5528 	}
   5529 	return NULL;
   5530 }
   5531 
   5532 static int
   5533 ixgbe_ifflags_cb(struct ethercom *ec)
   5534 {
   5535 	struct ifnet *ifp = &ec->ec_if;
   5536 	struct adapter *adapter = ifp->if_softc;
   5537 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5538 
   5539 	IXGBE_CORE_LOCK(adapter);
   5540 
   5541 	if (change != 0)
   5542 		adapter->if_flags = ifp->if_flags;
   5543 
   5544 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5545 		rc = ENETRESET;
   5546 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5547 		ixgbe_set_promisc(adapter);
   5548 
   5549 	/* Set up VLAN support and filter */
   5550 	ixgbe_setup_vlan_hw_support(adapter);
   5551 
   5552 	IXGBE_CORE_UNLOCK(adapter);
   5553 
   5554 	return rc;
   5555 }
   5556 
   5557 /************************************************************************
   5558  * ixgbe_ioctl - Ioctl entry point
   5559  *
   5560  *   Called when the user wants to configure the interface.
   5561  *
   5562  *   return 0 on success, positive on failure
   5563  ************************************************************************/
   5564 static int
   5565 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5566 {
   5567 	struct adapter	*adapter = ifp->if_softc;
   5568 	struct ixgbe_hw *hw = &adapter->hw;
   5569 	struct ifcapreq *ifcr = data;
   5570 	struct ifreq	*ifr = data;
   5571 	int             error = 0;
   5572 	int l4csum_en;
   5573 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5574 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5575 
   5576 	switch (command) {
   5577 	case SIOCSIFFLAGS:
   5578 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5579 		break;
   5580 	case SIOCADDMULTI:
   5581 	case SIOCDELMULTI:
   5582 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5583 		break;
   5584 	case SIOCSIFMEDIA:
   5585 	case SIOCGIFMEDIA:
   5586 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5587 		break;
   5588 	case SIOCSIFCAP:
   5589 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5590 		break;
   5591 	case SIOCSIFMTU:
   5592 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5593 		break;
   5594 #ifdef __NetBSD__
   5595 	case SIOCINITIFADDR:
   5596 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5597 		break;
   5598 	case SIOCGIFFLAGS:
   5599 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5600 		break;
   5601 	case SIOCGIFAFLAG_IN:
   5602 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5603 		break;
   5604 	case SIOCGIFADDR:
   5605 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5606 		break;
   5607 	case SIOCGIFMTU:
   5608 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5609 		break;
   5610 	case SIOCGIFCAP:
   5611 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5612 		break;
   5613 	case SIOCGETHERCAP:
   5614 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5615 		break;
   5616 	case SIOCGLIFADDR:
   5617 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5618 		break;
   5619 	case SIOCZIFDATA:
   5620 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5621 		hw->mac.ops.clear_hw_cntrs(hw);
   5622 		ixgbe_clear_evcnt(adapter);
   5623 		break;
   5624 	case SIOCAIFADDR:
   5625 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5626 		break;
   5627 #endif
   5628 	default:
   5629 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5630 		break;
   5631 	}
   5632 
   5633 	switch (command) {
   5634 	case SIOCSIFMEDIA:
   5635 	case SIOCGIFMEDIA:
   5636 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5637 	case SIOCGI2C:
   5638 	{
   5639 		struct ixgbe_i2c_req	i2c;
   5640 
   5641 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5642 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5643 		if (error != 0)
   5644 			break;
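		/*
		 * Per SFF-8472, 0xA0 addresses the module's serial ID EEPROM
		 * and 0xA2 the diagnostic monitoring page; reject anything else.
		 */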
   5645 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5646 			error = EINVAL;
   5647 			break;
   5648 		}
   5649 		if (i2c.len > sizeof(i2c.data)) {
   5650 			error = EINVAL;
   5651 			break;
   5652 		}
   5653 
   5654 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5655 		    i2c.dev_addr, i2c.data);
   5656 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5657 		break;
   5658 	}
   5659 	case SIOCSIFCAP:
   5660 		/* Layer-4 Rx checksum offload has to be turned on and
   5661 		 * off as a unit.
   5662 		 */
   5663 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5664 		if (l4csum_en != l4csum && l4csum_en != 0)
   5665 			return EINVAL;
   5666 		/*FALLTHROUGH*/
   5667 	case SIOCADDMULTI:
   5668 	case SIOCDELMULTI:
   5669 	case SIOCSIFFLAGS:
   5670 	case SIOCSIFMTU:
   5671 	default:
   5672 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5673 			return error;
   5674 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5675 			;
   5676 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5677 			IXGBE_CORE_LOCK(adapter);
   5678 			ixgbe_init_locked(adapter);
   5679 			ixgbe_recalculate_max_frame(adapter);
   5680 			IXGBE_CORE_UNLOCK(adapter);
   5681 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5682 			/*
   5683 			 * Multicast list has changed; set the hardware filter
   5684 			 * accordingly.
   5685 			 */
   5686 			IXGBE_CORE_LOCK(adapter);
   5687 			ixgbe_disable_intr(adapter);
   5688 			ixgbe_set_multi(adapter);
   5689 			ixgbe_enable_intr(adapter);
   5690 			IXGBE_CORE_UNLOCK(adapter);
   5691 		}
   5692 		return 0;
   5693 	}
   5694 
   5695 	return error;
   5696 } /* ixgbe_ioctl */
   5697 
   5698 /************************************************************************
   5699  * ixgbe_check_fan_failure
   5700  ************************************************************************/
   5701 static void
   5702 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5703 {
   5704 	u32 mask;
   5705 
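	/*
	 * In interrupt context 'reg' holds EICR, so test the GPI SDP1 cause
	 * bit; otherwise 'reg' is the ESDP register and SDP1 is read directly.
	 */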
   5706 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5707 	    IXGBE_ESDP_SDP1;
   5708 
   5709 	if (reg & mask)
   5710 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5711 } /* ixgbe_check_fan_failure */
   5712 
   5713 /************************************************************************
   5714  * ixgbe_handle_que
   5715  ************************************************************************/
   5716 static void
   5717 ixgbe_handle_que(void *context)
   5718 {
   5719 	struct ix_queue *que = context;
   5720 	struct adapter  *adapter = que->adapter;
   5721 	struct tx_ring  *txr = que->txr;
   5722 	struct ifnet    *ifp = adapter->ifp;
   5723 	bool		more = false;
   5724 
   5725 	adapter->handleq.ev_count++;
   5726 
   5727 	if (ifp->if_flags & IFF_RUNNING) {
   5728 		more = ixgbe_rxeof(que);
   5729 		IXGBE_TX_LOCK(txr);
   5730 		ixgbe_txeof(txr);
   5731 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5732 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5733 				ixgbe_mq_start_locked(ifp, txr);
   5734 		/* Only for queue 0 */
   5735 		/* NetBSD still needs this for CBQ */
   5736 		if ((&adapter->queues[0] == que)
   5737 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5738 			ixgbe_legacy_start_locked(ifp, txr);
   5739 		IXGBE_TX_UNLOCK(txr);
   5740 	}
   5741 
   5742 	if (more)
   5743 		softint_schedule(que->que_si);
   5744 	else if (que->res != NULL) {
   5745 		/* Re-enable this interrupt */
   5746 		ixgbe_enable_queue(adapter, que->msix);
   5747 	} else
   5748 		ixgbe_enable_intr(adapter);
   5749 
   5750 	return;
   5751 } /* ixgbe_handle_que */
   5752 
   5753 /************************************************************************
   5754  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5755  ************************************************************************/
   5756 static int
   5757 ixgbe_allocate_legacy(struct adapter *adapter,
   5758     const struct pci_attach_args *pa)
   5759 {
   5760 	device_t	dev = adapter->dev;
   5761 	struct ix_queue *que = adapter->queues;
   5762 	struct tx_ring  *txr = adapter->tx_rings;
   5763 	int		counts[PCI_INTR_TYPE_SIZE];
   5764 	pci_intr_type_t intr_type, max_type;
   5765 	char            intrbuf[PCI_INTRSTR_LEN];
   5766 	const char	*intrstr = NULL;
   5767 
   5768 	/* We allocate a single interrupt resource */
   5769 	max_type = PCI_INTR_TYPE_MSI;
   5770 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5771 	counts[PCI_INTR_TYPE_MSI] =
   5772 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
   5773 	/* Check not feat_en but feat_cap to fallback to INTx */
   5774 	counts[PCI_INTR_TYPE_INTX] =
   5775 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5776 
   5777 alloc_retry:
   5778 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5779 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5780 		return ENXIO;
   5781 	}
   5782 	adapter->osdep.nintrs = 1;
   5783 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5784 	    intrbuf, sizeof(intrbuf));
   5785 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5786 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5787 	    device_xname(dev));
   5788 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   5789 	if (adapter->osdep.ihs[0] == NULL) {
   5790 		aprint_error_dev(dev,"unable to establish %s\n",
   5791 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5792 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5793 		adapter->osdep.intrs = NULL;
   5794 		switch (intr_type) {
   5795 		case PCI_INTR_TYPE_MSI:
   5796 			/* The next try is for INTx: Disable MSI */
   5797 			max_type = PCI_INTR_TYPE_INTX;
   5798 			counts[PCI_INTR_TYPE_INTX] = 1;
   5799 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5800 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   5801 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5802 				goto alloc_retry;
   5803 			} else
   5804 				break;
   5805 		case PCI_INTR_TYPE_INTX:
   5806 		default:
   5807 			/* See below */
   5808 			break;
   5809 		}
   5810 	}
   5811 	if (intr_type == PCI_INTR_TYPE_INTX) {
   5812 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5813 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5814 	}
   5815 	if (adapter->osdep.ihs[0] == NULL) {
   5816 		aprint_error_dev(dev,
   5817 		    "couldn't establish interrupt%s%s\n",
   5818 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5819 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5820 		adapter->osdep.intrs = NULL;
   5821 		return ENXIO;
   5822 	}
   5823 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5824 	/*
   5825 	 * Try allocating a fast interrupt and the associated deferred
   5826 	 * processing contexts.
   5827 	 */
   5828 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5829 		txr->txr_si =
   5830 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5831 			ixgbe_deferred_mq_start, txr);
   5832 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5833 	    ixgbe_handle_que, que);
   5834 
   5835 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
    5836 		&& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   5837 		aprint_error_dev(dev,
   5838 		    "could not establish software interrupts\n");
   5839 
   5840 		return ENXIO;
   5841 	}
   5842 	/* For simplicity in the handlers */
   5843 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5844 
   5845 	return (0);
   5846 } /* ixgbe_allocate_legacy */
   5847 
   5848 
   5849 /************************************************************************
   5850  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5851  ************************************************************************/
   5852 static int
   5853 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5854 {
   5855 	device_t        dev = adapter->dev;
    5856 	struct ix_queue *que = adapter->queues;
    5857 	struct tx_ring  *txr = adapter->tx_rings;
   5858 	pci_chipset_tag_t pc;
   5859 	char		intrbuf[PCI_INTRSTR_LEN];
   5860 	char		intr_xname[32];
   5861 	const char	*intrstr = NULL;
   5862 	int 		error, vector = 0;
   5863 	int		cpu_id = 0;
   5864 	kcpuset_t	*affinity;
   5865 #ifdef RSS
   5866 	unsigned int    rss_buckets = 0;
   5867 	kcpuset_t	cpu_mask;
   5868 #endif
   5869 
   5870 	pc = adapter->osdep.pc;
   5871 #ifdef	RSS
   5872 	/*
   5873 	 * If we're doing RSS, the number of queues needs to
   5874 	 * match the number of RSS buckets that are configured.
   5875 	 *
   5876 	 * + If there's more queues than RSS buckets, we'll end
   5877 	 *   up with queues that get no traffic.
   5878 	 *
   5879 	 * + If there's more RSS buckets than queues, we'll end
   5880 	 *   up having multiple RSS buckets map to the same queue,
   5881 	 *   so there'll be some contention.
   5882 	 */
   5883 	rss_buckets = rss_getnumbuckets();
   5884 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5885 	    (adapter->num_queues != rss_buckets)) {
   5886 		device_printf(dev,
   5887 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5888 		    "; performance will be impacted.\n",
   5889 		    __func__, adapter->num_queues, rss_buckets);
   5890 	}
   5891 #endif
   5892 
   5893 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5894 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5895 	    adapter->osdep.nintrs) != 0) {
   5896 		aprint_error_dev(dev,
   5897 		    "failed to allocate MSI-X interrupt\n");
   5898 		return (ENXIO);
   5899 	}
   5900 
   5901 	kcpuset_create(&affinity, false);
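         	/*
         	 * One MSI-X vector per RX/TX queue pair: establish its handler,
         	 * bind it to a CPU, and create the deferred-processing softints.
         	 */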
   5902 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5903 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5904 		    device_xname(dev), i);
   5905 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5906 		    sizeof(intrbuf));
   5907 #ifdef IXGBE_MPSAFE
   5908 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5909 		    true);
   5910 #endif
   5911 		/* Set the handler function */
   5912 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5913 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5914 		    intr_xname);
   5915 		if (que->res == NULL) {
   5916 			aprint_error_dev(dev,
   5917 			    "Failed to register QUE handler\n");
   5918 			error = ENXIO;
   5919 			goto err_out;
   5920 		}
   5921 		que->msix = vector;
    5922 		adapter->active_queues |= ((u64)1 << que->msix);
   5923 
   5924 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5925 #ifdef	RSS
   5926 			/*
   5927 			 * The queue ID is used as the RSS layer bucket ID.
   5928 			 * We look up the queue ID -> RSS CPU ID and select
   5929 			 * that.
   5930 			 */
   5931 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5932 			CPU_SETOF(cpu_id, &cpu_mask);
   5933 #endif
   5934 		} else {
   5935 			/*
   5936 			 * Bind the MSI-X vector, and thus the
   5937 			 * rings to the corresponding CPU.
   5938 			 *
   5939 			 * This just happens to match the default RSS
   5940 			 * round-robin bucket -> queue -> CPU allocation.
   5941 			 */
   5942 			if (adapter->num_queues > 1)
   5943 				cpu_id = i;
   5944 		}
   5945 		/* Round-robin affinity */
   5946 		kcpuset_zero(affinity);
   5947 		kcpuset_set(affinity, cpu_id % ncpu);
   5948 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   5949 		    NULL);
   5950 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   5951 		    intrstr);
   5952 		if (error == 0) {
   5953 #if 1 /* def IXGBE_DEBUG */
   5954 #ifdef	RSS
    5955 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   5956 			    cpu_id % ncpu);
   5957 #else
   5958 			aprint_normal(", bound queue %d to cpu %d", i,
   5959 			    cpu_id % ncpu);
   5960 #endif
   5961 #endif /* IXGBE_DEBUG */
   5962 		}
   5963 		aprint_normal("\n");
   5964 
   5965 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   5966 			txr->txr_si = softint_establish(
   5967 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5968 				ixgbe_deferred_mq_start, txr);
   5969 			if (txr->txr_si == NULL) {
   5970 				aprint_error_dev(dev,
   5971 				    "couldn't establish software interrupt\n");
   5972 				error = ENXIO;
   5973 				goto err_out;
   5974 			}
   5975 		}
   5976 		que->que_si
   5977 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5978 			ixgbe_handle_que, que);
   5979 		if (que->que_si == NULL) {
   5980 			aprint_error_dev(dev,
   5981 			    "couldn't establish software interrupt\n");
   5982 			error = ENXIO;
   5983 			goto err_out;
   5984 		}
   5985 	}
   5986 
   5987 	/* and Link */
   5988 	cpu_id++;
   5989 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   5990 	adapter->vector = vector;
   5991 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   5992 	    sizeof(intrbuf));
   5993 #ifdef IXGBE_MPSAFE
   5994 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   5995 	    true);
   5996 #endif
   5997 	/* Set the link handler function */
   5998 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   5999 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6000 	    intr_xname);
   6001 	if (adapter->osdep.ihs[vector] == NULL) {
   6002 		adapter->res = NULL;
   6003 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6004 		error = ENXIO;
   6005 		goto err_out;
   6006 	}
   6007 	/* Round-robin affinity */
   6008 	kcpuset_zero(affinity);
   6009 	kcpuset_set(affinity, cpu_id % ncpu);
   6010 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6011 	    NULL);
   6012 
   6013 	aprint_normal_dev(dev,
   6014 	    "for link, interrupting at %s", intrstr);
   6015 	if (error == 0)
   6016 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6017 	else
   6018 		aprint_normal("\n");
   6019 
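         	/*
         	 * With SR-IOV capability, a softint services mailbox messages
         	 * from the virtual functions.
         	 */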
   6020 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6021 		adapter->mbx_si =
   6022 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6023 			ixgbe_handle_mbx, adapter);
   6024 		if (adapter->mbx_si == NULL) {
   6025 			aprint_error_dev(dev,
   6026 			    "could not establish software interrupts\n");
   6027 
   6028 			error = ENXIO;
   6029 			goto err_out;
   6030 		}
   6031 	}
   6032 
   6033 	kcpuset_destroy(affinity);
   6034 	aprint_normal_dev(dev,
   6035 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6036 
   6037 	return (0);
   6038 
   6039 err_out:
   6040 	kcpuset_destroy(affinity);
   6041 	ixgbe_free_softint(adapter);
   6042 	ixgbe_free_pciintr_resources(adapter);
   6043 	return (error);
   6044 } /* ixgbe_allocate_msix */
   6045 
   6046 /************************************************************************
   6047  * ixgbe_configure_interrupts
   6048  *
   6049  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6050  *   This will also depend on user settings.
   6051  ************************************************************************/
   6052 static int
   6053 ixgbe_configure_interrupts(struct adapter *adapter)
   6054 {
   6055 	device_t dev = adapter->dev;
   6056 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6057 	int want, queues, msgs;
   6058 
   6059 	/* Default to 1 queue if MSI-X setup fails */
   6060 	adapter->num_queues = 1;
   6061 
   6062 	/* Override by tuneable */
   6063 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6064 		goto msi;
   6065 
   6066 	/*
    6067 	 * NetBSD only: Use single-vector MSI when the number of CPUs is 1,
    6068 	 * to save an interrupt slot.
   6069 	 */
   6070 	if (ncpu == 1)
   6071 		goto msi;
   6072 
   6073 	/* First try MSI-X */
   6074 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6075 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6076 	if (msgs < 2)
   6077 		goto msi;
   6078 
   6079 	adapter->msix_mem = (void *)1; /* XXX */
   6080 
   6081 	/* Figure out a reasonable auto config value */
   6082 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6083 
   6084 #ifdef	RSS
   6085 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6086 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6087 		queues = min(queues, rss_getnumbuckets());
   6088 #endif
   6089 	if (ixgbe_num_queues > queues) {
    6090 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is too "
		    "large, using reduced amount (%d).\n",
		    ixgbe_num_queues, queues);
   6091 		ixgbe_num_queues = queues;
   6092 	}
   6093 
   6094 	if (ixgbe_num_queues != 0)
   6095 		queues = ixgbe_num_queues;
   6096 	else
   6097 		queues = min(queues,
   6098 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6099 
   6100 	/* reflect correct sysctl value */
   6101 	ixgbe_num_queues = queues;
   6102 
   6103 	/*
   6104 	 * Want one vector (RX/TX pair) per queue
   6105 	 * plus an additional for Link.
   6106 	 */
   6107 	want = queues + 1;
   6108 	if (msgs >= want)
   6109 		msgs = want;
   6110 	else {
    6111 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
    6112 		    "%d vectors available but %d vectors wanted!\n",
    6113 		    msgs, want);
   6114 		goto msi;
   6115 	}
   6116 	adapter->num_queues = queues;
   6117 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6118 	return (0);
   6119 
   6120 	/*
    6121 	 * MSI-X allocation failed or provided fewer vectors than needed.
    6122 	 * Free the MSI-X resources and fall back to trying MSI.
   6124 	 */
   6125 msi:
   6126 	/* Without MSI-X, some features are no longer supported */
   6127 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6128 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6129 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6130 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6131 
    6132 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6133 	adapter->msix_mem = NULL; /* XXX */
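         	/* Only a single MSI vector is ever used. */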
   6134 	if (msgs > 1)
   6135 		msgs = 1;
   6136 	if (msgs != 0) {
   6137 		msgs = 1;
   6138 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6139 		return (0);
   6140 	}
   6141 
   6142 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6143 		aprint_error_dev(dev,
   6144 		    "Device does not support legacy interrupts.\n");
    6145 		return (ENXIO);
   6146 	}
   6147 
   6148 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6149 
   6150 	return (0);
   6151 } /* ixgbe_configure_interrupts */
   6152 
   6153 
   6154 /************************************************************************
   6155  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6156  *
   6157  *   Done outside of interrupt context since the driver might sleep
   6158  ************************************************************************/
   6159 static void
   6160 ixgbe_handle_link(void *context)
   6161 {
   6162 	struct adapter  *adapter = context;
   6163 	struct ixgbe_hw *hw = &adapter->hw;
   6164 
   6165 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6166 	ixgbe_update_link_status(adapter);
   6167 
   6168 	/* Re-enable link interrupts */
   6169 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6170 } /* ixgbe_handle_link */
   6171 
   6172 /************************************************************************
   6173  * ixgbe_rearm_queues
   6174  ************************************************************************/
   6175 static void
   6176 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6177 {
   6178 	u32 mask;
   6179 
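         	/*
         	 * 82598 exposes only the low queue bits via EICS; newer MACs
         	 * write the 64-bit queue mask via EICS_EX(0) and EICS_EX(1).
         	 */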
   6180 	switch (adapter->hw.mac.type) {
   6181 	case ixgbe_mac_82598EB:
   6182 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6183 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6184 		break;
   6185 	case ixgbe_mac_82599EB:
   6186 	case ixgbe_mac_X540:
   6187 	case ixgbe_mac_X550:
   6188 	case ixgbe_mac_X550EM_x:
   6189 	case ixgbe_mac_X550EM_a:
   6190 		mask = (queues & 0xFFFFFFFF);
   6191 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6192 		mask = (queues >> 32);
   6193 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6194 		break;
   6195 	default:
   6196 		break;
   6197 	}
   6198 } /* ixgbe_rearm_queues */
   6199