      1 /* $NetBSD: ixgbe.c,v 1.127 2018/02/26 04:19:00 knakahara Exp $ */
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 320916 2017-07-12 17:35:32Z sbruno $*/
     36 
     37 /*
     38  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     39  * All rights reserved.
     40  *
     41  * This code is derived from software contributed to The NetBSD Foundation
     42  * by Coyote Point Systems, Inc.
     43  *
     44  * Redistribution and use in source and binary forms, with or without
     45  * modification, are permitted provided that the following conditions
     46  * are met:
     47  * 1. Redistributions of source code must retain the above copyright
     48  *    notice, this list of conditions and the following disclaimer.
     49  * 2. Redistributions in binary form must reproduce the above copyright
     50  *    notice, this list of conditions and the following disclaimer in the
     51  *    documentation and/or other materials provided with the distribution.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 #ifdef _KERNEL_OPT
     67 #include "opt_inet.h"
     68 #include "opt_inet6.h"
     69 #include "opt_net_mpsafe.h"
     70 #endif
     71 
     72 #include "ixgbe.h"
     73 #include "vlan.h"
     74 
     75 #include <sys/cprng.h>
     76 #include <dev/mii/mii.h>
     77 #include <dev/mii/miivar.h>
     78 
     79 /************************************************************************
     80  * Driver version
     81  ************************************************************************/
     82 char ixgbe_driver_version[] = "3.2.12-k";
     83 
     84 
     85 /************************************************************************
     86  * PCI Device ID Table
     87  *
     88  *   Used by probe to select the devices to attach to
     89  *   Last field stores an index into ixgbe_strings
     90  *   Last entry must be all 0s
     91  *
     92  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     93  ************************************************************************/
     94 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     95 {
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
    124 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    125 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    126 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    127 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
    128 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
    129 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
    130 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
    131 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
    132 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
    133 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
    134 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
    135 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
    136 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
    137 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
    138 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
    139 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
    140 	/* required last entry */
    141 	{0, 0, 0, 0, 0}
    142 };
    143 
    144 /************************************************************************
    145  * Table of branding strings
    146  ************************************************************************/
    147 static const char    *ixgbe_strings[] = {
    148 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    149 };
    150 
    151 /************************************************************************
    152  * Function prototypes
    153  ************************************************************************/
    154 static int      ixgbe_probe(device_t, cfdata_t, void *);
    155 static void     ixgbe_attach(device_t, device_t, void *);
    156 static int      ixgbe_detach(device_t, int);
    157 #if 0
    158 static int      ixgbe_shutdown(device_t);
    159 #endif
    160 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    161 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    162 static int	ixgbe_ifflags_cb(struct ethercom *);
    163 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    164 static void	ixgbe_ifstop(struct ifnet *, int);
    165 static int	ixgbe_init(struct ifnet *);
    166 static void	ixgbe_init_locked(struct adapter *);
    167 static void     ixgbe_stop(void *);
    168 static void     ixgbe_init_device_features(struct adapter *);
    169 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
    170 static void	ixgbe_add_media_types(struct adapter *);
    171 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    172 static int      ixgbe_media_change(struct ifnet *);
    173 static int      ixgbe_allocate_pci_resources(struct adapter *,
    174 		    const struct pci_attach_args *);
    175 static void      ixgbe_free_softint(struct adapter *);
    176 static void	ixgbe_get_slot_info(struct adapter *);
    177 static int      ixgbe_allocate_msix(struct adapter *,
    178 		    const struct pci_attach_args *);
    179 static int      ixgbe_allocate_legacy(struct adapter *,
    180 		    const struct pci_attach_args *);
    181 static int      ixgbe_configure_interrupts(struct adapter *);
    182 static void	ixgbe_free_pciintr_resources(struct adapter *);
    183 static void	ixgbe_free_pci_resources(struct adapter *);
    184 static void	ixgbe_local_timer(void *);
    185 static void	ixgbe_local_timer1(void *);
    186 static int	ixgbe_setup_interface(device_t, struct adapter *);
    187 static void	ixgbe_config_gpie(struct adapter *);
    188 static void	ixgbe_config_dmac(struct adapter *);
    189 static void	ixgbe_config_delay_values(struct adapter *);
    190 static void	ixgbe_config_link(struct adapter *);
    191 static void	ixgbe_check_wol_support(struct adapter *);
    192 static int	ixgbe_setup_low_power_mode(struct adapter *);
    193 static void	ixgbe_rearm_queues(struct adapter *, u64);
    194 
    195 static void     ixgbe_initialize_transmit_units(struct adapter *);
    196 static void     ixgbe_initialize_receive_units(struct adapter *);
    197 static void	ixgbe_enable_rx_drop(struct adapter *);
    198 static void	ixgbe_disable_rx_drop(struct adapter *);
    199 static void	ixgbe_initialize_rss_mapping(struct adapter *);
    200 
    201 static void     ixgbe_enable_intr(struct adapter *);
    202 static void     ixgbe_disable_intr(struct adapter *);
    203 static void     ixgbe_update_stats_counters(struct adapter *);
    204 static void     ixgbe_set_promisc(struct adapter *);
    205 static void     ixgbe_set_multi(struct adapter *);
    206 static void     ixgbe_update_link_status(struct adapter *);
    207 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    208 static void	ixgbe_configure_ivars(struct adapter *);
    209 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    210 static void	ixgbe_eitr_write(struct ix_queue *, uint32_t);
    211 
    212 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    213 #if 0
    214 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    215 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    216 #endif
    217 
    218 static void	ixgbe_add_device_sysctls(struct adapter *);
    219 static void     ixgbe_add_hw_stats(struct adapter *);
    220 static void	ixgbe_clear_evcnt(struct adapter *);
    221 static int	ixgbe_set_flowcntl(struct adapter *, int);
    222 static int	ixgbe_set_advertise(struct adapter *, int);
    223 static int      ixgbe_get_advertise(struct adapter *);
    224 
    225 /* Sysctl handlers */
    226 static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
    227 		     const char *, int *, int);
    228 static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
    229 static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
    230 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    231 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    232 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    233 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    234 #ifdef IXGBE_DEBUG
    235 static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
    236 static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
    237 #endif
    238 static int      ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
    239 static int      ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
    240 static int      ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
    241 static int      ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
    242 static int      ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
    243 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    244 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    245 
    246 /* Support for pluggable optic modules */
    247 static bool	ixgbe_sfp_probe(struct adapter *);
    248 
    249 /* Legacy (single vector) interrupt handler */
    250 static int	ixgbe_legacy_irq(void *);
    251 
    252 /* The MSI/MSI-X Interrupt handlers */
    253 static int	ixgbe_msix_que(void *);
    254 static int	ixgbe_msix_link(void *);
    255 
    256 /* Software interrupts for deferred work */
    257 static void	ixgbe_handle_que(void *);
    258 static void	ixgbe_handle_link(void *);
    259 static void	ixgbe_handle_msf(void *);
    260 static void	ixgbe_handle_mod(void *);
    261 static void	ixgbe_handle_phy(void *);
    262 
    263 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    264 
    265 /************************************************************************
    266  *  NetBSD Device Interface Entry Points
    267  ************************************************************************/
    268 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    269     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    270     DVF_DETACH_SHUTDOWN);
    271 
    272 #if 0
    273 devclass_t ix_devclass;
    274 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    275 
    276 MODULE_DEPEND(ix, pci, 1, 1, 1);
    277 MODULE_DEPEND(ix, ether, 1, 1, 1);
    278 #ifdef DEV_NETMAP
    279 MODULE_DEPEND(ix, netmap, 1, 1, 1);
    280 #endif
    281 #endif
    282 
    283 /*
    284  * TUNEABLE PARAMETERS:
    285  */
    286 
    287 /*
    288  * AIM: Adaptive Interrupt Moderation
    289  * which means that the interrupt rate
    290  * is varied over time based on the
    291  * traffic for that interrupt vector
    292  */
    293 static bool ixgbe_enable_aim = true;
    294 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
    295 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    296     "Enable adaptive interrupt moderation");
    297 
    298 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    299 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    300     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
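/*
 * Illustrative note (not part of the original source): assuming
 * IXGBE_LOW_LATENCY is 128, as in the shared ixgbe headers, the
 * default above works out to 4000000 / 128 = 31250 interrupts per
 * second per vector.
 */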
    301 
    302 /* How many packets rxeof tries to clean at a time */
    303 static int ixgbe_rx_process_limit = 256;
    304 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    305     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
    306 
    307 /* How many packets txeof tries to clean at a time */
    308 static int ixgbe_tx_process_limit = 256;
    309 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    310     &ixgbe_tx_process_limit, 0,
    311     "Maximum number of sent packets to process at a time, -1 means unlimited");
    312 
    313 /* Flow control setting, default to full */
    314 static int ixgbe_flow_control = ixgbe_fc_full;
    315 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    316     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
    317 
    318 /*
    319  * Smart speed setting, default to on.
    320  * This only works as a compile-time option
    321  * right now since it is set during attach;
    322  * set this to 'ixgbe_smart_speed_off' to
    323  * disable it.
    324  */
    325 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    326 
    327 /*
    328  * MSI-X should be the default for best performance,
    329  * but this allows it to be forced off for testing.
    330  */
    331 static int ixgbe_enable_msix = 1;
    332 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    333     "Enable MSI-X interrupts");
    334 
    335 /*
    336  * Number of queues: can be set to 0,
    337  * in which case it autoconfigures based
    338  * on the number of CPUs, with a max of 8.
    339  * It can be overridden manually here.
    340  */
    341 static int ixgbe_num_queues = 0;
    342 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    343     "Number of queues to configure, 0 indicates autoconfigure");
    344 
    345 /*
    346  * Number of TX descriptors per ring;
    347  * set higher than RX as this seems to be
    348  * the better-performing choice.
    349  */
    350 static int ixgbe_txd = PERFORM_TXD;
    351 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    352     "Number of transmit descriptors per queue");
    353 
    354 /* Number of RX descriptors per ring */
    355 static int ixgbe_rxd = PERFORM_RXD;
    356 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    357     "Number of receive descriptors per queue");
    358 
    359 /*
    360  * Turning this on allows the use of
    361  * unsupported SFP+ modules; note that
    362  * in doing so you are on your own :)
    363  */
    364 static int allow_unsupported_sfp = false;
    365 #define TUNABLE_INT(__x, __y)
    366 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    367 
    368 /*
    369  * Not sure if Flow Director is fully baked,
    370  * so we'll default to turning it off.
    371  */
    372 static int ixgbe_enable_fdir = 0;
    373 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    374     "Enable Flow Director");
    375 
    376 /* Legacy Transmit (single queue) */
    377 static int ixgbe_enable_legacy_tx = 0;
    378 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    379     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
    380 
    381 /* Receive-Side Scaling */
    382 static int ixgbe_enable_rss = 1;
    383 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    384     "Enable Receive-Side Scaling (RSS)");
    385 
    386 /* Keep a running tab on them for sanity checks */
    387 static int ixgbe_total_ports;
    388 
    389 #if 0
    390 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
    391 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
    392 #endif
    393 
    394 #ifdef NET_MPSAFE
    395 #define IXGBE_MPSAFE		1
    396 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    397 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    398 #else
    399 #define IXGBE_CALLOUT_FLAGS	0
    400 #define IXGBE_SOFTINFT_FLAGS	0
    401 #endif
    402 
    403 /************************************************************************
    404  * ixgbe_initialize_rss_mapping
    405  ************************************************************************/
    406 static void
    407 ixgbe_initialize_rss_mapping(struct adapter *adapter)
    408 {
    409 	struct ixgbe_hw	*hw = &adapter->hw;
    410 	u32             reta = 0, mrqc, rss_key[10];
    411 	int             queue_id, table_size, index_mult;
    412 	int             i, j;
    413 	u32             rss_hash_config;
    414 
    415 	/* Force use of the default RSS key; NetBSD always uses rss_getkey(). */
    416 #ifdef __NetBSD__
    417 	rss_getkey((uint8_t *) &rss_key);
    418 #else
    419 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    420 		/* Fetch the configured RSS key */
    421 		rss_getkey((uint8_t *) &rss_key);
    422 	} else {
    423 		/* set up random bits */
    424 		cprng_fast(&rss_key, sizeof(rss_key));
    425 	}
    426 #endif
    427 
    428 	/* Set multiplier for RETA setup and table size based on MAC */
    429 	index_mult = 0x1;
    430 	table_size = 128;
    431 	switch (adapter->hw.mac.type) {
    432 	case ixgbe_mac_82598EB:
    433 		index_mult = 0x11;
    434 		break;
    435 	case ixgbe_mac_X550:
    436 	case ixgbe_mac_X550EM_x:
    437 	case ixgbe_mac_X550EM_a:
    438 		table_size = 512;
    439 		break;
    440 	default:
    441 		break;
    442 	}
    443 
    444 	/* Set up the redirection table */
    445 	for (i = 0, j = 0; i < table_size; i++, j++) {
    446 		if (j == adapter->num_queues)
    447 			j = 0;
    448 
    449 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
    450 			/*
    451 			 * Fetch the RSS bucket id for the given indirection
    452 			 * entry. Cap it at the number of configured buckets
    453 			 * (which is num_queues.)
    454 			 */
    455 			queue_id = rss_get_indirection_to_bucket(i);
    456 			queue_id = queue_id % adapter->num_queues;
    457 		} else
    458 			queue_id = (j * index_mult);
    459 
    460 		/*
    461 		 * The low 8 bits are for hash value (n+0);
    462 		 * The next 8 bits are for hash value (n+1), etc.
    463 		 */
    464 		reta = reta >> 8;
    465 		reta = reta | (((uint32_t) queue_id) << 24);
    466 		if ((i & 3) == 3) {
    467 			if (i < 128)
    468 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    469 			else
    470 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
    471 				    reta);
    472 			reta = 0;
    473 		}
    474 	}
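	/*
	 * Illustrative example of the packing above (not part of the
	 * original source): with four queues, the software RSS feature
	 * disabled and index_mult == 1, entries 0..3 hold queue ids
	 * 0, 1, 2 and 3, so after the fourth iteration reta holds
	 * 0x03020100 and is written to RETA(0).  Entries 128 and above
	 * (X550 class only) go to the ERETA registers instead.
	 */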
    475 
    476 	/* Now fill our hash function seeds */
    477 	for (i = 0; i < 10; i++)
    478 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
    479 
    480 	/* Perform hash on these packet types */
    481 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
    482 		rss_hash_config = rss_gethashconfig();
    483 	else {
    484 		/*
    485 		 * Disable UDP - IP fragments aren't currently being handled
    486 		 * and so we end up with a mix of 2-tuple and 4-tuple
    487 		 * traffic.
    488 		 */
    489 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
    490 		                | RSS_HASHTYPE_RSS_TCP_IPV4
    491 		                | RSS_HASHTYPE_RSS_IPV6
    492 		                | RSS_HASHTYPE_RSS_TCP_IPV6
    493 		                | RSS_HASHTYPE_RSS_IPV6_EX
    494 		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
    495 	}
    496 
    497 	mrqc = IXGBE_MRQC_RSSEN;
    498 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
    499 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    500 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
    501 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    502 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
    503 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    504 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
    505 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    506 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
    507 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
    508 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
    509 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
    510 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
    511 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    512 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
    513 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    514 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
    515 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    516 	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
    517 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
    518 } /* ixgbe_initialize_rss_mapping */
    519 
    520 /************************************************************************
    521  * ixgbe_initialize_receive_units - Setup receive registers and features.
    522  ************************************************************************/
    523 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
    524 
    525 static void
    526 ixgbe_initialize_receive_units(struct adapter *adapter)
    527 {
    528 	struct	rx_ring	*rxr = adapter->rx_rings;
    529 	struct ixgbe_hw	*hw = &adapter->hw;
    530 	struct ifnet    *ifp = adapter->ifp;
    531 	int             i, j;
    532 	u32		bufsz, fctrl, srrctl, rxcsum;
    533 	u32		hlreg;
    534 
    535 	/*
    536 	 * Make sure receives are disabled while
    537 	 * setting up the descriptor ring
    538 	 */
    539 	ixgbe_disable_rx(hw);
    540 
    541 	/* Enable broadcasts */
    542 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    543 	fctrl |= IXGBE_FCTRL_BAM;
    544 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
    545 		fctrl |= IXGBE_FCTRL_DPF;
    546 		fctrl |= IXGBE_FCTRL_PMCF;
    547 	}
    548 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
    549 
    550 	/* Set for Jumbo Frames? */
    551 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    552 	if (ifp->if_mtu > ETHERMTU)
    553 		hlreg |= IXGBE_HLREG0_JUMBOEN;
    554 	else
    555 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
    556 
    557 #ifdef DEV_NETMAP
    558 	/* CRC stripping is conditional in Netmap */
    559 	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
    560 	    (ifp->if_capenable & IFCAP_NETMAP) &&
    561 	    !ix_crcstrip)
    562 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
    563 	else
    564 #endif /* DEV_NETMAP */
    565 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
    566 
    567 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
    568 
    569 	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
    570 	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
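	/*
	 * Illustrative note (not part of the original source): assuming
	 * IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 and rx_mbuf_sz is MCLBYTES
	 * (2048 on most ports), bufsz = (2048 + 1023) >> 10 = 2, i.e.
	 * SRRCTL will be programmed with a 2 * 1KB receive buffer size.
	 */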
    571 
    572 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
    573 		u64 rdba = rxr->rxdma.dma_paddr;
    574 		u32 tqsmreg, reg;
    575 		int regnum = i / 4;	/* 1 register per 4 queues */
    576 		int regshift = i % 4;	/* 4 bits per 1 queue */
    577 		j = rxr->me;
    578 
    579 		/* Setup the Base and Length of the Rx Descriptor Ring */
    580 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
    581 		    (rdba & 0x00000000ffffffffULL));
    582 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
    583 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
    584 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
    585 
    586 		/* Set up the SRRCTL register */
    587 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
    588 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
    589 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    590 		srrctl |= bufsz;
    591 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    592 
    593 		/* Set RQSMR (Receive Queue Statistic Mapping) register */
    594 		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
    595 		reg &= ~(0x000000ff << (regshift * 8));
    596 		reg |= i << (regshift * 8);
    597 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
    598 
    599 		/*
    600 		 * Set the TQSMR/TQSM (Transmit Queue Statistic Mapping) register.
    601 		 * The register location for queues 0...7 differs between the
    602 		 * 82598 and newer devices.
    603 		 */
    604 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    605 			tqsmreg = IXGBE_TQSMR(regnum);
    606 		else
    607 			tqsmreg = IXGBE_TQSM(regnum);
    608 		reg = IXGBE_READ_REG(hw, tqsmreg);
    609 		reg &= ~(0x000000ff << (regshift * 8));
    610 		reg |= i << (regshift * 8);
    611 		IXGBE_WRITE_REG(hw, tqsmreg, reg);
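		/*
		 * Illustrative example (not part of the original source):
		 * for queue i == 5, regnum = 5 / 4 = 1 and regshift =
		 * 5 % 4 = 1, so statistic index 5 lands in bits 15:8 of
		 * both RQSMR(1) and TQSM(1)/TQSMR(1).
		 */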
    612 
    613 		/*
    614 		 * Set DROP_EN iff we have no flow control and >1 queue.
    615 		 * Note that srrctl was cleared shortly before during reset,
    616 		 * so we do not need to clear the bit, but do it just in case
    617 		 * this code is moved elsewhere.
    618 		 */
    619 		if (adapter->num_queues > 1 &&
    620 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
    621 			srrctl |= IXGBE_SRRCTL_DROP_EN;
    622 		} else {
    623 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    624 		}
    625 
    626 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
    627 
    628 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
    629 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
    630 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
    631 
    632 		/* Set the driver rx tail address */
    633 		rxr->tail =  IXGBE_RDT(rxr->me);
    634 	}
    635 
    636 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
    637 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
    638 		            | IXGBE_PSRTYPE_UDPHDR
    639 		            | IXGBE_PSRTYPE_IPV4HDR
    640 		            | IXGBE_PSRTYPE_IPV6HDR;
    641 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
    642 	}
    643 
    644 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    645 
    646 	ixgbe_initialize_rss_mapping(adapter);
    647 
    648 	if (adapter->num_queues > 1) {
    649 		/* RSS and RX IPP Checksum are mutually exclusive */
    650 		rxcsum |= IXGBE_RXCSUM_PCSD;
    651 	}
    652 
    653 	if (ifp->if_capenable & IFCAP_RXCSUM)
    654 		rxcsum |= IXGBE_RXCSUM_PCSD;
    655 
    656 	/* This is useful for calculating UDP/IP fragment checksums */
    657 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
    658 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
    659 
    660 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
    661 
    662 	return;
    663 } /* ixgbe_initialize_receive_units */
    664 
    665 /************************************************************************
    666  * ixgbe_initialize_transmit_units - Enable transmit units.
    667  ************************************************************************/
    668 static void
    669 ixgbe_initialize_transmit_units(struct adapter *adapter)
    670 {
    671 	struct tx_ring  *txr = adapter->tx_rings;
    672 	struct ixgbe_hw	*hw = &adapter->hw;
    673 
    674 	/* Setup the Base and Length of the Tx Descriptor Ring */
    675 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
    676 		u64 tdba = txr->txdma.dma_paddr;
    677 		u32 txctrl = 0;
    678 		int j = txr->me;
    679 
    680 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
    681 		    (tdba & 0x00000000ffffffffULL));
    682 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
    683 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
    684 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
    685 
    686 		/* Setup the HW Tx Head and Tail descriptor pointers */
    687 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
    688 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
    689 
    690 		/* Cache the tail address */
    691 		txr->tail = IXGBE_TDT(j);
    692 
    693 		/* Disable Head Writeback */
    694 		/*
    695 		 * Note: for X550 series devices, these registers are actually
    696 		 * prefixed with TPH_ instead of DCA_, but the addresses and
    697 		 * fields remain the same.
    698 		 */
    699 		switch (hw->mac.type) {
    700 		case ixgbe_mac_82598EB:
    701 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
    702 			break;
    703 		default:
    704 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
    705 			break;
    706 		}
    707 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    708 		switch (hw->mac.type) {
    709 		case ixgbe_mac_82598EB:
    710 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
    711 			break;
    712 		default:
    713 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
    714 			break;
    715 		}
    716 
    717 	}
    718 
    719 	if (hw->mac.type != ixgbe_mac_82598EB) {
    720 		u32 dmatxctl, rttdcs;
    721 
    722 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
    723 		dmatxctl |= IXGBE_DMATXCTL_TE;
    724 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
    725 		/* Disable arbiter to set MTQC */
    726 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
    727 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
    728 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    729 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
    730 		    ixgbe_get_mtqc(adapter->iov_mode));
    731 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
    732 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
    733 	}
    734 
    735 	return;
    736 } /* ixgbe_initialize_transmit_units */
    737 
    738 /************************************************************************
    739  * ixgbe_attach - Device initialization routine
    740  *
    741  *   Called when the driver is being loaded.
    742  *   Identifies the type of hardware, allocates all resources
    743  *   and initializes the hardware.
    744  *
    745  *   return 0 on success, positive on failure
    746  ************************************************************************/
    747 static void
    748 ixgbe_attach(device_t parent, device_t dev, void *aux)
    749 {
    750 	struct adapter  *adapter;
    751 	struct ixgbe_hw *hw;
    752 	int             error = -1;
    753 	u32		ctrl_ext;
    754 	u16		high, low, nvmreg;
    755 	pcireg_t	id, subid;
    756 	ixgbe_vendor_info_t *ent;
    757 	struct pci_attach_args *pa = aux;
    758 	const char *str;
    759 	char buf[256];
    760 
    761 	INIT_DEBUGOUT("ixgbe_attach: begin");
    762 
    763 	/* Allocate, clear, and link in our adapter structure */
    764 	adapter = device_private(dev);
    765 	adapter->hw.back = adapter;
    766 	adapter->dev = dev;
    767 	hw = &adapter->hw;
    768 	adapter->osdep.pc = pa->pa_pc;
    769 	adapter->osdep.tag = pa->pa_tag;
    770 	if (pci_dma64_available(pa))
    771 		adapter->osdep.dmat = pa->pa_dmat64;
    772 	else
    773 		adapter->osdep.dmat = pa->pa_dmat;
    774 	adapter->osdep.attached = false;
    775 
    776 	ent = ixgbe_lookup(pa);
    777 
    778 	KASSERT(ent != NULL);
    779 
    780 	aprint_normal(": %s, Version - %s\n",
    781 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    782 
    783 	/* Core Lock Init*/
    784 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    785 
    786 	/* Set up the timer callout */
    787 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    788 
    789 	/* Determine hardware revision */
    790 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    791 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    792 
    793 	hw->vendor_id = PCI_VENDOR(id);
    794 	hw->device_id = PCI_PRODUCT(id);
    795 	hw->revision_id =
    796 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    797 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    798 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    799 
    800 	/*
    801 	 * Make sure BUSMASTER is set
    802 	 */
    803 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    804 
    805 	/* Do base PCI setup - map BAR0 */
    806 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    807 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    808 		error = ENXIO;
    809 		goto err_out;
    810 	}
    811 
    812 	/* let hardware know driver is loaded */
    813 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    814 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    815 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    816 
    817 	/*
    818 	 * Initialize the shared code
    819 	 */
    820 	if (ixgbe_init_shared_code(hw)) {
    821 		aprint_error_dev(dev, "Unable to initialize the shared code\n");
    822 		error = ENXIO;
    823 		goto err_out;
    824 	}
    825 
    826 	switch (hw->mac.type) {
    827 	case ixgbe_mac_82598EB:
    828 		str = "82598EB";
    829 		break;
    830 	case ixgbe_mac_82599EB:
    831 		str = "82599EB";
    832 		break;
    833 	case ixgbe_mac_X540:
    834 		str = "X540";
    835 		break;
    836 	case ixgbe_mac_X550:
    837 		str = "X550";
    838 		break;
    839 	case ixgbe_mac_X550EM_x:
    840 		str = "X550EM";
    841 		break;
    842 	case ixgbe_mac_X550EM_a:
    843 		str = "X550EM A";
    844 		break;
    845 	default:
    846 		str = "Unknown";
    847 		break;
    848 	}
    849 	aprint_normal_dev(dev, "device %s\n", str);
    850 
    851 	if (hw->mbx.ops.init_params)
    852 		hw->mbx.ops.init_params(hw);
    853 
    854 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    855 
    856 	/* Pick up the 82599 settings */
    857 	if (hw->mac.type != ixgbe_mac_82598EB) {
    858 		hw->phy.smart_speed = ixgbe_smart_speed;
    859 		adapter->num_segs = IXGBE_82599_SCATTER;
    860 	} else
    861 		adapter->num_segs = IXGBE_82598_SCATTER;
    862 
    863 	hw->mac.ops.set_lan_id(hw);
    864 	ixgbe_init_device_features(adapter);
    865 
    866 	if (ixgbe_configure_interrupts(adapter)) {
    867 		error = ENXIO;
    868 		goto err_out;
    869 	}
    870 
    871 	/* Allocate multicast array memory. */
    872 	adapter->mta = malloc(sizeof(*adapter->mta) *
    873 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    874 	if (adapter->mta == NULL) {
    875 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    876 		error = ENOMEM;
    877 		goto err_out;
    878 	}
    879 
    880 	/* Enable WoL (if supported) */
    881 	ixgbe_check_wol_support(adapter);
    882 
    883 	/* Verify adapter fan is still functional (if applicable) */
    884 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
    885 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
    886 		ixgbe_check_fan_failure(adapter, esdp, FALSE);
    887 	}
    888 
    889 	/* Ensure SW/FW semaphore is free */
    890 	ixgbe_init_swfw_semaphore(hw);
    891 
    892 	/* Enable EEE power saving */
    893 	if (adapter->feat_en & IXGBE_FEATURE_EEE)
    894 		hw->mac.ops.setup_eee(hw, TRUE);
    895 
    896 	/* Set an initial default flow control value */
    897 	hw->fc.requested_mode = ixgbe_flow_control;
    898 
    899 	/* Sysctls for limiting the amount of work done in the taskqueues */
    900 	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
    901 	    "max number of rx packets to process",
    902 	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
    903 
    904 	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
    905 	    "max number of tx packets to process",
    906 	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
    907 
    908 	/* Do descriptor calc and sanity checks */
    909 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    910 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    911 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    912 		adapter->num_tx_desc = DEFAULT_TXD;
    913 	} else
    914 		adapter->num_tx_desc = ixgbe_txd;
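	/*
	 * Illustrative note (not part of the original source): assuming
	 * DBA_ALIGN is 128 and sizeof(union ixgbe_adv_tx_desc) is 16,
	 * ixgbe_txd must be a multiple of 8 and lie within
	 * [MIN_TXD, MAX_TXD]; a value such as hw.ix.txd=1020 would be
	 * rejected here and DEFAULT_TXD used instead.
	 */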
    915 
    916 	/*
    917 	 * With many RX rings it is easy to exceed the
    918 	 * system mbuf allocation. Tuning nmbclusters
    919 	 * can alleviate this.
    920 	 */
    921 	if (nmbclusters > 0) {
    922 		int s;
    923 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    924 		if (s > nmbclusters) {
    925 			aprint_error_dev(dev, "RX Descriptors exceed "
    926 			    "system mbuf max, using default instead!\n");
    927 			ixgbe_rxd = DEFAULT_RXD;
    928 		}
    929 	}
    930 
    931 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    932 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    933 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    934 		adapter->num_rx_desc = DEFAULT_RXD;
    935 	} else
    936 		adapter->num_rx_desc = ixgbe_rxd;
    937 
    938 	/* Allocate our TX/RX Queues */
    939 	if (ixgbe_allocate_queues(adapter)) {
    940 		error = ENOMEM;
    941 		goto err_out;
    942 	}
    943 
    944 	hw->phy.reset_if_overtemp = TRUE;
    945 	error = ixgbe_reset_hw(hw);
    946 	hw->phy.reset_if_overtemp = FALSE;
    947 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    948 		/*
    949 		 * No optics in this port, set up
    950 		 * so the timer routine will probe
    951 		 * for later insertion.
    952 		 */
    953 		adapter->sfp_probe = TRUE;
    954 		error = IXGBE_SUCCESS;
    955 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    956 		aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
    957 		error = EIO;
    958 		goto err_late;
    959 	} else if (error) {
    960 		aprint_error_dev(dev, "Hardware initialization failed\n");
    961 		error = EIO;
    962 		goto err_late;
    963 	}
    964 
    965 	/* Make sure we have a good EEPROM before we read from it */
    966 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
    967 		aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
    968 		error = EIO;
    969 		goto err_late;
    970 	}
    971 
    972 	aprint_normal("%s:", device_xname(dev));
    973 	/* NVM Image Version */
    974 	switch (hw->mac.type) {
    975 	case ixgbe_mac_X540:
    976 	case ixgbe_mac_X550EM_a:
    977 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    978 		if (nvmreg == 0xffff)
    979 			break;
    980 		high = (nvmreg >> 12) & 0x0f;
    981 		low = (nvmreg >> 4) & 0xff;
    982 		id = nvmreg & 0x0f;
    983 		aprint_normal(" NVM Image Version %u.", high);
    984 		if (hw->mac.type == ixgbe_mac_X540)
    985 			str = "%x";
    986 		else
    987 			str = "%02x";
    988 		aprint_normal(str, low);
    989 		aprint_normal(" ID 0x%x,", id);
    990 		break;
    991 	case ixgbe_mac_X550EM_x:
    992 	case ixgbe_mac_X550:
    993 		hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
    994 		if (nvmreg == 0xffff)
    995 			break;
    996 		high = (nvmreg >> 12) & 0x0f;
    997 		low = nvmreg & 0xff;
    998 		aprint_normal(" NVM Image Version %u.%02x,", high, low);
    999 		break;
   1000 	default:
   1001 		break;
   1002 	}
   1003 
   1004 	/* PHY firmware revision */
   1005 	switch (hw->mac.type) {
   1006 	case ixgbe_mac_X540:
   1007 	case ixgbe_mac_X550:
   1008 		hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
   1009 		if (nvmreg == 0xffff)
   1010 			break;
   1011 		high = (nvmreg >> 12) & 0x0f;
   1012 		low = (nvmreg >> 4) & 0xff;
   1013 		id = nvmreg & 0x000f;
   1014 		aprint_normal(" PHY FW Revision %u.", high);
   1015 		if (hw->mac.type == ixgbe_mac_X540)
   1016 			str = "%x";
   1017 		else
   1018 			str = "%02x";
   1019 		aprint_normal(str, low);
   1020 		aprint_normal(" ID 0x%x,", id);
   1021 		break;
   1022 	default:
   1023 		break;
   1024 	}
   1025 
   1026 	/* NVM Map version & OEM NVM Image version */
   1027 	switch (hw->mac.type) {
   1028 	case ixgbe_mac_X550:
   1029 	case ixgbe_mac_X550EM_x:
   1030 	case ixgbe_mac_X550EM_a:
   1031 		hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
   1032 		if (nvmreg != 0xffff) {
   1033 			high = (nvmreg >> 12) & 0x0f;
   1034 			low = nvmreg & 0x00ff;
   1035 			aprint_normal(" NVM Map version %u.%02x,", high, low);
   1036 		}
   1037 		hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
   1038 		if (nvmreg != 0xffff) {
   1039 			high = (nvmreg >> 12) & 0x0f;
   1040 			low = nvmreg & 0x00ff;
   1041 			aprint_verbose(" OEM NVM Image version %u.%02x,", high,
   1042 			    low);
   1043 		}
   1044 		break;
   1045 	default:
   1046 		break;
   1047 	}
   1048 
   1049 	/* Print the ETrackID */
   1050 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
   1051 	hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
   1052 	aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
   1053 
   1054 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   1055 		error = ixgbe_allocate_msix(adapter, pa);
   1056 		if (error) {
   1057 			/* Free allocated queue structures first */
   1058 			ixgbe_free_transmit_structures(adapter);
   1059 			ixgbe_free_receive_structures(adapter);
   1060 			free(adapter->queues, M_DEVBUF);
   1061 
   1062 			/* Fallback to legacy interrupt */
   1063 			adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
   1064 			if (adapter->feat_cap & IXGBE_FEATURE_MSI)
   1065 				adapter->feat_en |= IXGBE_FEATURE_MSI;
   1066 			adapter->num_queues = 1;
   1067 
   1068 			/* Allocate our TX/RX Queues again */
   1069 			if (ixgbe_allocate_queues(adapter)) {
   1070 				error = ENOMEM;
   1071 				goto err_out;
   1072 			}
   1073 		}
   1074 	}
   1075 	if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
   1076 		error = ixgbe_allocate_legacy(adapter, pa);
   1077 	if (error)
   1078 		goto err_late;
   1079 
   1080 	/* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
   1081 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   1082 	    ixgbe_handle_link, adapter);
   1083 	adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1084 	    ixgbe_handle_mod, adapter);
   1085 	adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1086 	    ixgbe_handle_msf, adapter);
   1087 	adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1088 	    ixgbe_handle_phy, adapter);
   1089 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   1090 		adapter->fdir_si =
   1091 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   1092 			ixgbe_reinit_fdir, adapter);
   1093 	if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
   1094 	    || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
   1095 	    || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
   1096 		&& (adapter->fdir_si == NULL))) {
   1097 		aprint_error_dev(dev,
   1098 		    "could not establish software interrupts\n");
   1099 		goto err_out;
   1100 	}
   1101 
   1102 	error = ixgbe_start_hw(hw);
   1103 	switch (error) {
   1104 	case IXGBE_ERR_EEPROM_VERSION:
   1105 		aprint_error_dev(dev, "This device is a pre-production adapter/"
   1106 		    "LOM.  Please be aware there may be issues associated "
   1107 		    "with your hardware.\nIf you are experiencing problems "
   1108 		    "please contact your Intel or hardware representative "
   1109 		    "who provided you with this hardware.\n");
   1110 		break;
   1111 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
   1112 		aprint_error_dev(dev, "Unsupported SFP+ Module\n");
   1113 		error = EIO;
   1114 		goto err_late;
   1115 	case IXGBE_ERR_SFP_NOT_PRESENT:
   1116 		aprint_error_dev(dev, "No SFP+ Module found\n");
   1117 		/* falls thru */
   1118 	default:
   1119 		break;
   1120 	}
   1121 
   1122 	/* Setup OS specific network interface */
   1123 	if (ixgbe_setup_interface(dev, adapter) != 0)
   1124 		goto err_late;
   1125 
   1126 	/*
   1127 	 * Print the PHY ID only for copper PHYs. On devices that have an SFP(+)
   1128 	 * cage with a module inserted, phy.id is not an MII PHY ID but an SFF-8024 ID.
   1129 	 */
   1130 	if (hw->phy.media_type == ixgbe_media_type_copper) {
   1131 		uint16_t id1, id2;
   1132 		int oui, model, rev;
   1133 		const char *descr;
   1134 
   1135 		id1 = hw->phy.id >> 16;
   1136 		id2 = hw->phy.id & 0xffff;
   1137 		oui = MII_OUI(id1, id2);
   1138 		model = MII_MODEL(id2);
   1139 		rev = MII_REV(id2);
   1140 		if ((descr = mii_get_descr(oui, model)) != NULL)
   1141 			aprint_normal_dev(dev,
   1142 			    "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
   1143 			    descr, oui, model, rev);
   1144 		else
   1145 			aprint_normal_dev(dev,
   1146 			    "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
   1147 			    oui, model, rev);
   1148 	}
   1149 
   1150 	/* Enable the optics for 82599 SFP+ fiber */
   1151 	ixgbe_enable_tx_laser(hw);
   1152 
   1153 	/* Enable power to the phy. */
   1154 	ixgbe_set_phy_power(hw, TRUE);
   1155 
   1156 	/* Initialize statistics */
   1157 	ixgbe_update_stats_counters(adapter);
   1158 
   1159 	/* Check PCIE slot type/speed/width */
   1160 	ixgbe_get_slot_info(adapter);
   1161 
   1162 	/*
   1163 	 * Do time init and sysctl init here, but
   1164 	 * only on the first port of a bypass adapter.
   1165 	 */
   1166 	ixgbe_bypass_init(adapter);
   1167 
   1168 	/* Set an initial dmac value */
   1169 	adapter->dmac = 0;
   1170 	/* Set initial advertised speeds (if applicable) */
   1171 	adapter->advertise = ixgbe_get_advertise(adapter);
   1172 
   1173 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   1174 		ixgbe_define_iov_schemas(dev, &error);
   1175 
   1176 	/* Add sysctls */
   1177 	ixgbe_add_device_sysctls(adapter);
   1178 	ixgbe_add_hw_stats(adapter);
   1179 
   1180 	/* For Netmap */
   1181 	adapter->init_locked = ixgbe_init_locked;
   1182 	adapter->stop_locked = ixgbe_stop;
   1183 
   1184 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   1185 		ixgbe_netmap_attach(adapter);
   1186 
   1187 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
   1188 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
   1189 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
   1190 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
   1191 
   1192 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
   1193 		pmf_class_network_register(dev, adapter->ifp);
   1194 	else
   1195 		aprint_error_dev(dev, "couldn't establish power handler\n");
   1196 
   1197 	INIT_DEBUGOUT("ixgbe_attach: end");
   1198 	adapter->osdep.attached = true;
   1199 
   1200 	return;
   1201 
   1202 err_late:
   1203 	ixgbe_free_transmit_structures(adapter);
   1204 	ixgbe_free_receive_structures(adapter);
   1205 	free(adapter->queues, M_DEVBUF);
   1206 err_out:
   1207 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   1208 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   1209 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   1210 	ixgbe_free_softint(adapter);
   1211 	ixgbe_free_pci_resources(adapter);
   1212 	if (adapter->mta != NULL)
   1213 		free(adapter->mta, M_DEVBUF);
   1214 	IXGBE_CORE_LOCK_DESTROY(adapter);
   1215 
   1216 	return;
   1217 } /* ixgbe_attach */
   1218 
   1219 /************************************************************************
   1220  * ixgbe_check_wol_support
   1221  *
   1222  *   Checks whether the adapter's ports are capable of
   1223  *   Wake On LAN by reading the adapter's NVM.
   1224  *
   1225  *   Sets each port's hw->wol_enabled value depending
   1226  *   on the value read here.
   1227  ************************************************************************/
   1228 static void
   1229 ixgbe_check_wol_support(struct adapter *adapter)
   1230 {
   1231 	struct ixgbe_hw *hw = &adapter->hw;
   1232 	u16             dev_caps = 0;
   1233 
   1234 	/* Find out WoL support for port */
   1235 	adapter->wol_support = hw->wol_enabled = 0;
   1236 	ixgbe_get_device_caps(hw, &dev_caps);
   1237 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   1238 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   1239 	     hw->bus.func == 0))
   1240 		adapter->wol_support = hw->wol_enabled = 1;
   1241 
   1242 	/* Save initial wake up filter configuration */
   1243 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   1244 
   1245 	return;
   1246 } /* ixgbe_check_wol_support */
   1247 
   1248 /************************************************************************
   1249  * ixgbe_setup_interface
   1250  *
   1251  *   Setup networking device structure and register an interface.
   1252  ************************************************************************/
   1253 static int
   1254 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   1255 {
   1256 	struct ethercom *ec = &adapter->osdep.ec;
   1257 	struct ifnet   *ifp;
   1258 	int rv;
   1259 
   1260 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   1261 
   1262 	ifp = adapter->ifp = &ec->ec_if;
   1263 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1264 	ifp->if_baudrate = IF_Gbps(10);
   1265 	ifp->if_init = ixgbe_init;
   1266 	ifp->if_stop = ixgbe_ifstop;
   1267 	ifp->if_softc = adapter;
   1268 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1269 #ifdef IXGBE_MPSAFE
   1270 	ifp->if_extflags = IFEF_MPSAFE;
   1271 #endif
   1272 	ifp->if_ioctl = ixgbe_ioctl;
   1273 #if __FreeBSD_version >= 1100045
   1274 	/* TSO parameters */
   1275 	ifp->if_hw_tsomax = 65518;
   1276 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   1277 	ifp->if_hw_tsomaxsegsize = 2048;
   1278 #endif
   1279 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1280 #if 0
   1281 		ixgbe_start_locked = ixgbe_legacy_start_locked;
   1282 #endif
   1283 	} else {
   1284 		ifp->if_transmit = ixgbe_mq_start;
   1285 #if 0
   1286 		ixgbe_start_locked = ixgbe_mq_start_locked;
   1287 #endif
   1288 	}
   1289 	ifp->if_start = ixgbe_legacy_start;
   1290 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1291 	IFQ_SET_READY(&ifp->if_snd);
   1292 
   1293 	rv = if_initialize(ifp);
   1294 	if (rv != 0) {
   1295 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1296 		return rv;
   1297 	}
   1298 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1299 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1300 	/*
   1301 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
   1302 	 * used.
   1303 	 */
   1304 	if_register(ifp);
   1305 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   1306 
   1307 	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
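	/*
	 * Illustrative note (not part of the original source): with the
	 * standard MTU of 1500, max_frame_size = 1500 + ETHER_HDR_LEN (14)
	 * + ETHER_CRC_LEN (4) = 1518 bytes.
	 */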
   1308 
   1309 	/*
   1310 	 * Tell the upper layer(s) we support long frames.
   1311 	 */
   1312 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1313 
   1314 	/* Set capability flags */
   1315 	ifp->if_capabilities |= IFCAP_RXCSUM
   1316 			     |  IFCAP_TXCSUM
   1317 			     |  IFCAP_TSOv4
   1318 			     |  IFCAP_TSOv6
   1319 			     |  IFCAP_LRO;
   1320 	ifp->if_capenable = 0;
   1321 
   1322 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1323 	    		    |  ETHERCAP_VLAN_HWCSUM
   1324 	    		    |  ETHERCAP_JUMBO_MTU
   1325 	    		    |  ETHERCAP_VLAN_MTU;
   1326 
   1327 	/* Enable the above capabilities by default */
   1328 	ec->ec_capenable = ec->ec_capabilities;
   1329 
   1330 	/*
   1331 	 * Don't turn this on by default: if VLANs are
   1332 	 * created on another pseudo device (e.g. lagg),
   1333 	 * VLAN events are not passed through, breaking
   1334 	 * operation, but with HW FILTER off it works. If
   1335 	 * you use VLANs directly on the ixgbe driver, you
   1336 	 * can enable this and get full hardware tag filtering.
   1337 	 */
   1338 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   1339 
   1340 	/*
   1341 	 * Specify the media types supported by this adapter and register
   1342 	 * callbacks to update media and link information
   1343 	 */
   1344 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   1345 	    ixgbe_media_status);
   1346 
   1347 	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
   1348 	ixgbe_add_media_types(adapter);
   1349 
   1350 	/* Set autoselect media by default */
   1351 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1352 
   1353 	return (0);
   1354 } /* ixgbe_setup_interface */
   1355 
   1356 /************************************************************************
   1357  * ixgbe_add_media_types
   1358  ************************************************************************/
   1359 static void
   1360 ixgbe_add_media_types(struct adapter *adapter)
   1361 {
   1362 	struct ixgbe_hw *hw = &adapter->hw;
   1363 	device_t        dev = adapter->dev;
   1364 	u64             layer;
   1365 
   1366 	layer = adapter->phy_layer;
   1367 
   1368 #define	ADD(mm, dd)							\
   1369 	ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
   1370 
   1371 	/* Media types with matching NetBSD media defines */
   1372 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   1373 		ADD(IFM_10G_T | IFM_FDX, 0);
   1374 	}
   1375 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   1376 		ADD(IFM_1000_T | IFM_FDX, 0);
   1377 	}
   1378 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
   1379 		ADD(IFM_100_TX | IFM_FDX, 0);
   1380 	}
   1381 	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
   1382 		ADD(IFM_10_T | IFM_FDX, 0);
   1383 	}
   1384 
   1385 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1386 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
   1387 		ADD(IFM_10G_TWINAX | IFM_FDX, 0);
   1388 	}
   1389 
   1390 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
   1391 		ADD(IFM_10G_LR | IFM_FDX, 0);
   1392 		if (hw->phy.multispeed_fiber) {
   1393 			ADD(IFM_1000_LX | IFM_FDX, 0);
   1394 		}
   1395 	}
   1396 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   1397 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1398 		if (hw->phy.multispeed_fiber) {
   1399 			ADD(IFM_1000_SX | IFM_FDX, 0);
   1400 		}
   1401 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   1402 		ADD(IFM_1000_SX | IFM_FDX, 0);
   1403 	}
   1404 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
   1405 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1406 	}
   1407 
   1408 #ifdef IFM_ETH_XTYPE
   1409 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1410 		ADD(IFM_10G_KR | IFM_FDX, 0);
   1411 	}
   1412 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
    1413 		ADD(IFM_10G_KX4 | IFM_FDX, 0);
   1414 	}
   1415 #else
   1416 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   1417 		device_printf(dev, "Media supported: 10GbaseKR\n");
   1418 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   1419 		ADD(IFM_10G_SR | IFM_FDX, 0);
   1420 	}
   1421 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   1422 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   1423 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   1424 		ADD(IFM_10G_CX4 | IFM_FDX, 0);
   1425 	}
   1426 #endif
   1427 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   1428 		ADD(IFM_1000_KX | IFM_FDX, 0);
   1429 	}
   1430 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
   1431 		ADD(IFM_2500_KX | IFM_FDX, 0);
   1432 	}
   1433 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
   1434 		ADD(IFM_2500_T | IFM_FDX, 0);
   1435 	}
   1436 	if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
   1437 		ADD(IFM_5000_T | IFM_FDX, 0);
   1438 	}
   1439 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
   1440 		device_printf(dev, "Media supported: 1000baseBX\n");
   1441 	/* XXX no ifmedia_set? */
   1442 
   1443 	ADD(IFM_AUTO, 0);
   1444 
   1445 #undef ADD
   1446 } /* ixgbe_add_media_types */
   1447 
   1448 /************************************************************************
   1449  * ixgbe_is_sfp
   1450  ************************************************************************/
   1451 static inline bool
   1452 ixgbe_is_sfp(struct ixgbe_hw *hw)
   1453 {
   1454 	switch (hw->mac.type) {
   1455 	case ixgbe_mac_82598EB:
   1456 		if (hw->phy.type == ixgbe_phy_nl)
   1457 			return TRUE;
   1458 		return FALSE;
   1459 	case ixgbe_mac_82599EB:
   1460 		switch (hw->mac.ops.get_media_type(hw)) {
   1461 		case ixgbe_media_type_fiber:
   1462 		case ixgbe_media_type_fiber_qsfp:
   1463 			return TRUE;
   1464 		default:
   1465 			return FALSE;
   1466 		}
   1467 	case ixgbe_mac_X550EM_x:
   1468 	case ixgbe_mac_X550EM_a:
   1469 		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
   1470 			return TRUE;
   1471 		return FALSE;
   1472 	default:
   1473 		return FALSE;
   1474 	}
   1475 } /* ixgbe_is_sfp */
   1476 
   1477 /************************************************************************
   1478  * ixgbe_config_link
   1479  ************************************************************************/
   1480 static void
   1481 ixgbe_config_link(struct adapter *adapter)
   1482 {
   1483 	struct ixgbe_hw *hw = &adapter->hw;
   1484 	u32             autoneg, err = 0;
   1485 	bool            sfp, negotiate = false;
   1486 
   1487 	sfp = ixgbe_is_sfp(hw);
   1488 
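	/*
	 * For SFP/SFP+ ports, defer link bring-up to the module
	 * (mod_si) or multispeed-fiber (msf_si) softints; for other
	 * media, query the MAC and set up the link directly below.
	 */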
   1489 	if (sfp) {
   1490 		if (hw->phy.multispeed_fiber) {
   1491 			hw->mac.ops.setup_sfp(hw);
   1492 			ixgbe_enable_tx_laser(hw);
   1493 			kpreempt_disable();
   1494 			softint_schedule(adapter->msf_si);
   1495 			kpreempt_enable();
   1496 		} else {
   1497 			kpreempt_disable();
   1498 			softint_schedule(adapter->mod_si);
   1499 			kpreempt_enable();
   1500 		}
   1501 	} else {
   1502 		if (hw->mac.ops.check_link)
   1503 			err = ixgbe_check_link(hw, &adapter->link_speed,
   1504 			    &adapter->link_up, FALSE);
   1505 		if (err)
   1506 			goto out;
   1507 		autoneg = hw->phy.autoneg_advertised;
   1508 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
    1509 			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
   1510 			    &negotiate);
   1511 		if (err)
   1512 			goto out;
   1513 		if (hw->mac.ops.setup_link)
    1514 			err = hw->mac.ops.setup_link(hw, autoneg,
   1515 			    adapter->link_up);
   1516 	}
   1517 out:
   1518 
   1519 	return;
   1520 } /* ixgbe_config_link */
   1521 
   1522 /************************************************************************
   1523  * ixgbe_update_stats_counters - Update board statistics counters.
   1524  ************************************************************************/
   1525 static void
   1526 ixgbe_update_stats_counters(struct adapter *adapter)
   1527 {
   1528 	struct ifnet          *ifp = adapter->ifp;
   1529 	struct ixgbe_hw       *hw = &adapter->hw;
   1530 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1531 	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
   1532 	u64                   total_missed_rx = 0;
   1533 	uint64_t              crcerrs, rlec;
   1534 
   1535 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   1536 	stats->crcerrs.ev_count += crcerrs;
   1537 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   1538 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   1539 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   1540 	if (hw->mac.type == ixgbe_mac_X550)
   1541 		stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
   1542 
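	/*
	 * Accumulate the per-register queue statistics into the
	 * configured queues; the register index modulo num_queues
	 * selects the driver counter to update.
	 */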
   1543 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   1544 		int j = i % adapter->num_queues;
   1545 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   1546 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   1547 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   1548 	}
   1549 	for (int i = 0; i < __arraycount(stats->mpc); i++) {
   1550 		uint32_t mp;
   1551 		int j = i % adapter->num_queues;
   1552 
   1553 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
   1554 		/* global total per queue */
   1555 		stats->mpc[j].ev_count += mp;
   1556 		/* running comprehensive total for stats display */
   1557 		total_missed_rx += mp;
   1558 
   1559 		if (hw->mac.type == ixgbe_mac_82598EB)
   1560 			stats->rnbc[j].ev_count
   1561 			    += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
   1562 
   1563 	}
   1564 	stats->mpctotal.ev_count += total_missed_rx;
   1565 
    1566 	/* The datasheet says M[LR]FC are valid only when the link is up at 10Gbps */
   1567 	if ((adapter->link_active == TRUE)
   1568 	    && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
   1569 		stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   1570 		stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   1571 	}
   1572 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   1573 	stats->rlec.ev_count += rlec;
   1574 
    1575 	/* Hardware workaround: GPRC also counts missed packets, so back them out */
   1576 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   1577 
   1578 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   1579 	stats->lxontxc.ev_count += lxon;
   1580 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   1581 	stats->lxofftxc.ev_count += lxoff;
   1582 	total = lxon + lxoff;
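	/*
	 * total (lxon + lxoff) is backed out of the good TX packet and
	 * octet counters below, since the MAC apparently includes the
	 * pause frames it sends; pause frames are minimum-size, hence
	 * the ETHER_MIN_LEN scaling for the octet counters.
	 */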
   1583 
   1584 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1585 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   1586 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   1587 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   1588 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   1589 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   1590 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   1591 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   1592 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   1593 	} else {
   1594 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   1595 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   1596 		/* 82598 only has a counter in the high register */
   1597 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   1598 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   1599 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   1600 	}
   1601 
   1602 	/*
   1603 	 * Workaround: mprc hardware is incorrectly counting
   1604 	 * broadcasts, so for now we subtract those.
   1605 	 */
   1606 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   1607 	stats->bprc.ev_count += bprc;
   1608 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
   1609 	    - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   1610 
   1611 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   1612 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   1613 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   1614 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   1615 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   1616 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   1617 
   1618 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   1619 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   1620 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   1621 
   1622 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   1623 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   1624 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   1625 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   1626 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   1627 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   1628 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   1629 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   1630 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   1631 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   1632 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   1633 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   1634 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   1635 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   1636 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   1637 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   1638 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   1639 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
    1640 	/* Only read FCoE counters on MACs that support FCoE (not 82598) */
   1641 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1642 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   1643 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   1644 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   1645 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   1646 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   1647 	}
   1648 
   1649 	/* Fill out the OS statistics structure */
   1650 	/*
   1651 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1652 	 * adapter->stats counters. It's required to make ifconfig -z
   1653 	 * (SOICZIFDATA) work.
   1654 	 */
   1655 	ifp->if_collisions = 0;
   1656 
   1657 	/* Rx Errors */
   1658 	ifp->if_iqdrops += total_missed_rx;
   1659 	ifp->if_ierrors += crcerrs + rlec;
   1660 } /* ixgbe_update_stats_counters */
   1661 
   1662 /************************************************************************
   1663  * ixgbe_add_hw_stats
   1664  *
   1665  *   Add sysctl variables, one per statistic, to the system.
   1666  ************************************************************************/
   1667 static void
   1668 ixgbe_add_hw_stats(struct adapter *adapter)
   1669 {
   1670 	device_t dev = adapter->dev;
   1671 	const struct sysctlnode *rnode, *cnode;
   1672 	struct sysctllog **log = &adapter->sysctllog;
   1673 	struct tx_ring *txr = adapter->tx_rings;
   1674 	struct rx_ring *rxr = adapter->rx_rings;
   1675 	struct ixgbe_hw *hw = &adapter->hw;
   1676 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1677 	const char *xname = device_xname(dev);
   1678 
   1679 	/* Driver Statistics */
   1680 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   1681 	    NULL, xname, "Handled queue in softint");
   1682 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   1683 	    NULL, xname, "Requeued in softint");
   1684 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   1685 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   1686 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   1687 	    NULL, xname, "m_defrag() failed");
   1688 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   1689 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   1690 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   1691 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   1692 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   1693 	    NULL, xname, "Driver tx dma hard fail other");
   1694 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   1695 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   1696 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   1697 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   1698 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   1699 	    NULL, xname, "Watchdog timeouts");
   1700 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   1701 	    NULL, xname, "TSO errors");
   1702 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   1703 	    NULL, xname, "Link MSI-X IRQ Handled");
   1704 
   1705 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1706 		snprintf(adapter->queues[i].evnamebuf,
   1707 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   1708 		    xname, i);
   1709 		snprintf(adapter->queues[i].namebuf,
   1710 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   1711 
   1712 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   1713 			aprint_error_dev(dev, "could not create sysctl root\n");
   1714 			break;
   1715 		}
   1716 
   1717 		if (sysctl_createv(log, 0, &rnode, &rnode,
   1718 		    0, CTLTYPE_NODE,
   1719 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   1720 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   1721 			break;
   1722 
   1723 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1724 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   1725 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   1726 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   1727 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   1728 			break;
   1729 
   1730 #if 0 /* XXX msaitoh */
   1731 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1732 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   1733 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   1734 			NULL, 0, &(adapter->queues[i].irqs),
   1735 		    0, CTL_CREATE, CTL_EOL) != 0)
   1736 			break;
   1737 #endif
   1738 
   1739 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1740 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1741 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   1742 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   1743 		    0, CTL_CREATE, CTL_EOL) != 0)
   1744 			break;
   1745 
   1746 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1747 		    CTLFLAG_READONLY, CTLTYPE_INT,
   1748 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   1749 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   1750 		    0, CTL_CREATE, CTL_EOL) != 0)
   1751 			break;
   1752 
   1753 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   1754 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   1755 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   1756 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   1757 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   1758 		    NULL, adapter->queues[i].evnamebuf,
   1759 		    "Queue No Descriptor Available");
   1760 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   1761 		    NULL, adapter->queues[i].evnamebuf,
   1762 		    "Queue Packets Transmitted");
   1763 #ifndef IXGBE_LEGACY_TX
   1764 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   1765 		    NULL, adapter->queues[i].evnamebuf,
   1766 		    "Packets dropped in pcq");
   1767 #endif
   1768 
   1769 #ifdef LRO
   1770 		struct lro_ctrl *lro = &rxr->lro;
   1771 #endif /* LRO */
   1772 
   1773 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1774 		    CTLFLAG_READONLY,
   1775 		    CTLTYPE_INT,
   1776 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   1777 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   1778 		    CTL_CREATE, CTL_EOL) != 0)
   1779 			break;
   1780 
   1781 		if (sysctl_createv(log, 0, &rnode, &cnode,
   1782 		    CTLFLAG_READONLY,
   1783 		    CTLTYPE_INT,
   1784 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   1785 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   1786 		    CTL_CREATE, CTL_EOL) != 0)
   1787 			break;
   1788 
   1789 		if (i < __arraycount(stats->mpc)) {
   1790 			evcnt_attach_dynamic(&stats->mpc[i],
   1791 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1792 			    "RX Missed Packet Count");
   1793 			if (hw->mac.type == ixgbe_mac_82598EB)
   1794 				evcnt_attach_dynamic(&stats->rnbc[i],
   1795 				    EVCNT_TYPE_MISC, NULL,
   1796 				    adapter->queues[i].evnamebuf,
   1797 				    "Receive No Buffers");
   1798 		}
   1799 		if (i < __arraycount(stats->pxontxc)) {
   1800 			evcnt_attach_dynamic(&stats->pxontxc[i],
   1801 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1802 			    "pxontxc");
   1803 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   1804 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1805 			    "pxonrxc");
   1806 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   1807 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1808 			    "pxofftxc");
   1809 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   1810 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1811 			    "pxoffrxc");
   1812 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   1813 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1814 			    "pxon2offc");
   1815 		}
   1816 		if (i < __arraycount(stats->qprc)) {
   1817 			evcnt_attach_dynamic(&stats->qprc[i],
   1818 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1819 			    "qprc");
   1820 			evcnt_attach_dynamic(&stats->qptc[i],
   1821 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1822 			    "qptc");
   1823 			evcnt_attach_dynamic(&stats->qbrc[i],
   1824 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1825 			    "qbrc");
   1826 			evcnt_attach_dynamic(&stats->qbtc[i],
   1827 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1828 			    "qbtc");
   1829 			evcnt_attach_dynamic(&stats->qprdc[i],
   1830 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   1831 			    "qprdc");
   1832 		}
   1833 
   1834 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   1835 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   1836 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   1837 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   1838 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   1839 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   1840 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   1841 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   1842 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   1843 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   1844 #ifdef LRO
   1845 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   1846 				CTLFLAG_RD, &lro->lro_queued, 0,
   1847 				"LRO Queued");
   1848 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   1849 				CTLFLAG_RD, &lro->lro_flushed, 0,
   1850 				"LRO Flushed");
   1851 #endif /* LRO */
   1852 	}
   1853 
   1854 	/* MAC stats get their own sub node */
   1855 
   1856 	snprintf(stats->namebuf,
   1857 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   1858 
   1859 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   1860 	    stats->namebuf, "rx csum offload - IP");
   1861 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   1862 	    stats->namebuf, "rx csum offload - L4");
   1863 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   1864 	    stats->namebuf, "rx csum offload - IP bad");
   1865 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   1866 	    stats->namebuf, "rx csum offload - L4 bad");
   1867 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   1868 	    stats->namebuf, "Interrupt conditions zero");
   1869 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   1870 	    stats->namebuf, "Legacy interrupts");
   1871 
   1872 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   1873 	    stats->namebuf, "CRC Errors");
   1874 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   1875 	    stats->namebuf, "Illegal Byte Errors");
   1876 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   1877 	    stats->namebuf, "Byte Errors");
   1878 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   1879 	    stats->namebuf, "MAC Short Packets Discarded");
   1880 	if (hw->mac.type >= ixgbe_mac_X550)
   1881 		evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
   1882 		    stats->namebuf, "Bad SFD");
   1883 	evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
   1884 	    stats->namebuf, "Total Packets Missed");
   1885 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   1886 	    stats->namebuf, "MAC Local Faults");
   1887 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   1888 	    stats->namebuf, "MAC Remote Faults");
   1889 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   1890 	    stats->namebuf, "Receive Length Errors");
   1891 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   1892 	    stats->namebuf, "Link XON Transmitted");
   1893 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   1894 	    stats->namebuf, "Link XON Received");
   1895 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   1896 	    stats->namebuf, "Link XOFF Transmitted");
   1897 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   1898 	    stats->namebuf, "Link XOFF Received");
   1899 
   1900 	/* Packet Reception Stats */
   1901 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   1902 	    stats->namebuf, "Total Octets Received");
   1903 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   1904 	    stats->namebuf, "Good Octets Received");
   1905 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   1906 	    stats->namebuf, "Total Packets Received");
   1907 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   1908 	    stats->namebuf, "Good Packets Received");
   1909 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   1910 	    stats->namebuf, "Multicast Packets Received");
   1911 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   1912 	    stats->namebuf, "Broadcast Packets Received");
   1913 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   1914 	    stats->namebuf, "64 byte frames received ");
   1915 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   1916 	    stats->namebuf, "65-127 byte frames received");
   1917 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   1918 	    stats->namebuf, "128-255 byte frames received");
   1919 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   1920 	    stats->namebuf, "256-511 byte frames received");
   1921 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   1922 	    stats->namebuf, "512-1023 byte frames received");
   1923 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   1924 	    stats->namebuf, "1023-1522 byte frames received");
   1925 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   1926 	    stats->namebuf, "Receive Undersized");
   1927 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
   1928 	    stats->namebuf, "Fragmented Packets Received ");
   1929 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   1930 	    stats->namebuf, "Oversized Packets Received");
   1931 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   1932 	    stats->namebuf, "Received Jabber");
   1933 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   1934 	    stats->namebuf, "Management Packets Received");
   1935 	evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
   1936 	    stats->namebuf, "Management Packets Dropped");
   1937 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   1938 	    stats->namebuf, "Checksum Errors");
   1939 
   1940 	/* Packet Transmission Stats */
   1941 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   1942 	    stats->namebuf, "Good Octets Transmitted");
   1943 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   1944 	    stats->namebuf, "Total Packets Transmitted");
   1945 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   1946 	    stats->namebuf, "Good Packets Transmitted");
   1947 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   1948 	    stats->namebuf, "Broadcast Packets Transmitted");
   1949 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   1950 	    stats->namebuf, "Multicast Packets Transmitted");
   1951 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   1952 	    stats->namebuf, "Management Packets Transmitted");
   1953 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
   1954 	    stats->namebuf, "64 byte frames transmitted ");
   1955 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   1956 	    stats->namebuf, "65-127 byte frames transmitted");
   1957 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   1958 	    stats->namebuf, "128-255 byte frames transmitted");
   1959 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   1960 	    stats->namebuf, "256-511 byte frames transmitted");
   1961 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   1962 	    stats->namebuf, "512-1023 byte frames transmitted");
   1963 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   1964 	    stats->namebuf, "1024-1522 byte frames transmitted");
   1965 } /* ixgbe_add_hw_stats */
   1966 
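/************************************************************************
 * ixgbe_clear_evcnt
 *
 *   Reset all of the driver's event counters, e.g. when the interface
 *   statistics are zeroed with ifconfig -z (SIOCZIFDATA).
 ************************************************************************/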
   1967 static void
   1968 ixgbe_clear_evcnt(struct adapter *adapter)
   1969 {
   1970 	struct tx_ring *txr = adapter->tx_rings;
   1971 	struct rx_ring *rxr = adapter->rx_rings;
   1972 	struct ixgbe_hw *hw = &adapter->hw;
   1973 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   1974 
   1975 	adapter->handleq.ev_count = 0;
   1976 	adapter->req.ev_count = 0;
   1977 	adapter->efbig_tx_dma_setup.ev_count = 0;
   1978 	adapter->mbuf_defrag_failed.ev_count = 0;
   1979 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   1980 	adapter->einval_tx_dma_setup.ev_count = 0;
   1981 	adapter->other_tx_dma_setup.ev_count = 0;
   1982 	adapter->eagain_tx_dma_setup.ev_count = 0;
   1983 	adapter->enomem_tx_dma_setup.ev_count = 0;
   1984 	adapter->watchdog_events.ev_count = 0;
   1985 	adapter->tso_err.ev_count = 0;
   1986 	adapter->link_irq.ev_count = 0;
   1987 
   1988 	txr = adapter->tx_rings;
   1989 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   1990 		adapter->queues[i].irqs.ev_count = 0;
   1991 		txr->no_desc_avail.ev_count = 0;
   1992 		txr->total_packets.ev_count = 0;
   1993 		txr->tso_tx.ev_count = 0;
   1994 #ifndef IXGBE_LEGACY_TX
   1995 		txr->pcq_drops.ev_count = 0;
   1996 #endif
   1997 
   1998 		if (i < __arraycount(stats->mpc)) {
   1999 			stats->mpc[i].ev_count = 0;
   2000 			if (hw->mac.type == ixgbe_mac_82598EB)
   2001 				stats->rnbc[i].ev_count = 0;
   2002 		}
   2003 		if (i < __arraycount(stats->pxontxc)) {
   2004 			stats->pxontxc[i].ev_count = 0;
   2005 			stats->pxonrxc[i].ev_count = 0;
   2006 			stats->pxofftxc[i].ev_count = 0;
   2007 			stats->pxoffrxc[i].ev_count = 0;
   2008 			stats->pxon2offc[i].ev_count = 0;
   2009 		}
   2010 		if (i < __arraycount(stats->qprc)) {
   2011 			stats->qprc[i].ev_count = 0;
   2012 			stats->qptc[i].ev_count = 0;
   2013 			stats->qbrc[i].ev_count = 0;
   2014 			stats->qbtc[i].ev_count = 0;
   2015 			stats->qprdc[i].ev_count = 0;
   2016 		}
   2017 
   2018 		rxr->rx_packets.ev_count = 0;
   2019 		rxr->rx_bytes.ev_count = 0;
   2020 		rxr->rx_copies.ev_count = 0;
   2021 		rxr->no_jmbuf.ev_count = 0;
   2022 		rxr->rx_discarded.ev_count = 0;
   2023 	}
   2024 	stats->ipcs.ev_count = 0;
   2025 	stats->l4cs.ev_count = 0;
   2026 	stats->ipcs_bad.ev_count = 0;
   2027 	stats->l4cs_bad.ev_count = 0;
   2028 	stats->intzero.ev_count = 0;
   2029 	stats->legint.ev_count = 0;
   2030 	stats->crcerrs.ev_count = 0;
   2031 	stats->illerrc.ev_count = 0;
   2032 	stats->errbc.ev_count = 0;
   2033 	stats->mspdc.ev_count = 0;
   2034 	stats->mbsdc.ev_count = 0;
   2035 	stats->mpctotal.ev_count = 0;
   2036 	stats->mlfc.ev_count = 0;
   2037 	stats->mrfc.ev_count = 0;
   2038 	stats->rlec.ev_count = 0;
   2039 	stats->lxontxc.ev_count = 0;
   2040 	stats->lxonrxc.ev_count = 0;
   2041 	stats->lxofftxc.ev_count = 0;
   2042 	stats->lxoffrxc.ev_count = 0;
   2043 
   2044 	/* Packet Reception Stats */
   2045 	stats->tor.ev_count = 0;
   2046 	stats->gorc.ev_count = 0;
   2047 	stats->tpr.ev_count = 0;
   2048 	stats->gprc.ev_count = 0;
   2049 	stats->mprc.ev_count = 0;
   2050 	stats->bprc.ev_count = 0;
   2051 	stats->prc64.ev_count = 0;
   2052 	stats->prc127.ev_count = 0;
   2053 	stats->prc255.ev_count = 0;
   2054 	stats->prc511.ev_count = 0;
   2055 	stats->prc1023.ev_count = 0;
   2056 	stats->prc1522.ev_count = 0;
   2057 	stats->ruc.ev_count = 0;
   2058 	stats->rfc.ev_count = 0;
   2059 	stats->roc.ev_count = 0;
   2060 	stats->rjc.ev_count = 0;
   2061 	stats->mngprc.ev_count = 0;
   2062 	stats->mngpdc.ev_count = 0;
   2063 	stats->xec.ev_count = 0;
   2064 
   2065 	/* Packet Transmission Stats */
   2066 	stats->gotc.ev_count = 0;
   2067 	stats->tpt.ev_count = 0;
   2068 	stats->gptc.ev_count = 0;
   2069 	stats->bptc.ev_count = 0;
   2070 	stats->mptc.ev_count = 0;
   2071 	stats->mngptc.ev_count = 0;
   2072 	stats->ptc64.ev_count = 0;
   2073 	stats->ptc127.ev_count = 0;
   2074 	stats->ptc255.ev_count = 0;
   2075 	stats->ptc511.ev_count = 0;
   2076 	stats->ptc1023.ev_count = 0;
   2077 	stats->ptc1522.ev_count = 0;
   2078 }
   2079 
   2080 /************************************************************************
   2081  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
   2082  *
   2083  *   Retrieves the TDH value from the hardware
   2084  ************************************************************************/
   2085 static int
   2086 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   2087 {
   2088 	struct sysctlnode node = *rnode;
   2089 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2090 	uint32_t val;
   2091 
   2092 	if (!txr)
   2093 		return (0);
   2094 
   2095 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   2096 	node.sysctl_data = &val;
   2097 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2098 } /* ixgbe_sysctl_tdh_handler */
   2099 
   2100 /************************************************************************
   2101  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   2102  *
   2103  *   Retrieves the TDT value from the hardware
   2104  ************************************************************************/
   2105 static int
   2106 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   2107 {
   2108 	struct sysctlnode node = *rnode;
   2109 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   2110 	uint32_t val;
   2111 
   2112 	if (!txr)
   2113 		return (0);
   2114 
   2115 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   2116 	node.sysctl_data = &val;
   2117 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2118 } /* ixgbe_sysctl_tdt_handler */
   2119 
   2120 /************************************************************************
   2121  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
   2122  *
   2123  *   Retrieves the RDH value from the hardware
   2124  ************************************************************************/
   2125 static int
   2126 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2127 {
   2128 	struct sysctlnode node = *rnode;
   2129 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2130 	uint32_t val;
   2131 
   2132 	if (!rxr)
   2133 		return (0);
   2134 
   2135 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   2136 	node.sysctl_data = &val;
   2137 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2138 } /* ixgbe_sysctl_rdh_handler */
   2139 
   2140 /************************************************************************
   2141  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2142  *
   2143  *   Retrieves the RDT value from the hardware
   2144  ************************************************************************/
   2145 static int
   2146 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2147 {
   2148 	struct sysctlnode node = *rnode;
   2149 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2150 	uint32_t val;
   2151 
   2152 	if (!rxr)
   2153 		return (0);
   2154 
   2155 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   2156 	node.sysctl_data = &val;
   2157 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2158 } /* ixgbe_sysctl_rdt_handler */
   2159 
   2160 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2161 /************************************************************************
   2162  * ixgbe_register_vlan
   2163  *
   2164  *   Run via vlan config EVENT, it enables us to use the
   2165  *   HW Filter table since we can get the vlan id. This
   2166  *   just creates the entry in the soft version of the
   2167  *   VFTA, init will repopulate the real table.
   2168  ************************************************************************/
   2169 static void
   2170 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2171 {
   2172 	struct adapter	*adapter = ifp->if_softc;
   2173 	u16		index, bit;
   2174 
   2175 	if (ifp->if_softc != arg)   /* Not our event */
   2176 		return;
   2177 
   2178 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2179 		return;
   2180 
   2181 	IXGBE_CORE_LOCK(adapter);
   2182 	index = (vtag >> 5) & 0x7F;
   2183 	bit = vtag & 0x1F;
   2184 	adapter->shadow_vfta[index] |= (1 << bit);
   2185 	ixgbe_setup_vlan_hw_support(adapter);
   2186 	IXGBE_CORE_UNLOCK(adapter);
   2187 } /* ixgbe_register_vlan */
   2188 
   2189 /************************************************************************
   2190  * ixgbe_unregister_vlan
   2191  *
   2192  *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
   2193  ************************************************************************/
   2194 static void
   2195 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2196 {
   2197 	struct adapter	*adapter = ifp->if_softc;
   2198 	u16		index, bit;
   2199 
   2200 	if (ifp->if_softc != arg)
   2201 		return;
   2202 
   2203 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   2204 		return;
   2205 
   2206 	IXGBE_CORE_LOCK(adapter);
   2207 	index = (vtag >> 5) & 0x7F;
   2208 	bit = vtag & 0x1F;
   2209 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2210 	/* Re-init to load the changes */
   2211 	ixgbe_setup_vlan_hw_support(adapter);
   2212 	IXGBE_CORE_UNLOCK(adapter);
   2213 } /* ixgbe_unregister_vlan */
   2214 #endif
   2215 
   2216 static void
   2217 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   2218 {
   2219 	struct ethercom *ec = &adapter->osdep.ec;
   2220 	struct ixgbe_hw *hw = &adapter->hw;
   2221 	struct rx_ring	*rxr;
   2222 	int             i;
   2223 	u32		ctrl;
   2224 
   2225 
   2226 	/*
    2227 	 * We get here through init_locked, meaning a soft
    2228 	 * reset; that has already cleared the VFTA and
    2229 	 * other state, so if no vlans have been registered
    2230 	 * there is nothing to do.
   2231 	 */
   2232 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   2233 		return;
   2234 
   2235 	/* Setup the queues for vlans */
   2236 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
   2237 		for (i = 0; i < adapter->num_queues; i++) {
   2238 			rxr = &adapter->rx_rings[i];
   2239 			/* On 82599 the VLAN enable is per/queue in RXDCTL */
   2240 			if (hw->mac.type != ixgbe_mac_82598EB) {
   2241 				ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   2242 				ctrl |= IXGBE_RXDCTL_VME;
   2243 				IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
   2244 			}
   2245 			rxr->vtag_strip = TRUE;
   2246 		}
   2247 	}
   2248 
   2249 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   2250 		return;
   2251 	/*
    2252 	 * A soft reset zeroes out the VFTA, so
   2253 	 * we need to repopulate it now.
   2254 	 */
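	/*
	 * The VFTA has one bit per possible VLAN ID, stored as
	 * IXGBE_VFTA_SIZE 32-bit words; shadow_vfta is the software
	 * copy written back here.
	 */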
   2255 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
   2256 		if (adapter->shadow_vfta[i] != 0)
   2257 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   2258 			    adapter->shadow_vfta[i]);
   2259 
   2260 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   2261 	/* Enable the Filter Table if enabled */
   2262 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   2263 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   2264 		ctrl |= IXGBE_VLNCTRL_VFE;
   2265 	}
   2266 	if (hw->mac.type == ixgbe_mac_82598EB)
   2267 		ctrl |= IXGBE_VLNCTRL_VME;
   2268 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   2269 } /* ixgbe_setup_vlan_hw_support */
   2270 
   2271 /************************************************************************
   2272  * ixgbe_get_slot_info
   2273  *
   2274  *   Get the width and transaction speed of
   2275  *   the slot this adapter is plugged into.
   2276  ************************************************************************/
   2277 static void
   2278 ixgbe_get_slot_info(struct adapter *adapter)
   2279 {
   2280 	device_t		dev = adapter->dev;
   2281 	struct ixgbe_hw		*hw = &adapter->hw;
   2282 	u32                   offset;
   2283 //	struct ixgbe_mac_info	*mac = &hw->mac;
   2284 	u16			link;
   2285 	int                   bus_info_valid = TRUE;
   2286 
   2287 	/* Some devices are behind an internal bridge */
   2288 	switch (hw->device_id) {
   2289 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
   2290 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
   2291 		goto get_parent_info;
   2292 	default:
   2293 		break;
   2294 	}
   2295 
   2296 	ixgbe_get_bus_info(hw);
   2297 
   2298 	/*
    2299 	 * Some devices don't use PCI-E, so there is no point in
    2300 	 * displaying "Unknown" for their bus speed and width.
   2301 	 */
   2302 	switch (hw->mac.type) {
   2303 	case ixgbe_mac_X550EM_x:
   2304 	case ixgbe_mac_X550EM_a:
   2305 		return;
   2306 	default:
   2307 		goto display;
   2308 	}
   2309 
   2310 get_parent_info:
   2311 	/*
    2312 	 * For the Quad port adapter we need to walk back up
    2313 	 * the PCI tree to find the speed of the expansion slot
    2314 	 * into which this adapter is plugged.  A bit more work.
   2315 	 */
   2316 	dev = device_parent(device_parent(dev));
   2317 #if 0
   2318 #ifdef IXGBE_DEBUG
   2319 	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
   2320 	    pci_get_slot(dev), pci_get_function(dev));
   2321 #endif
   2322 	dev = device_parent(device_parent(dev));
   2323 #ifdef IXGBE_DEBUG
   2324 	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
   2325 	    pci_get_slot(dev), pci_get_function(dev));
   2326 #endif
   2327 #endif
   2328 	/* Now get the PCI Express Capabilities offset */
   2329 	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
   2330 	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
   2331 		/*
   2332 		 * Hmm...can't get PCI-Express capabilities.
   2333 		 * Falling back to default method.
   2334 		 */
   2335 		bus_info_valid = FALSE;
   2336 		ixgbe_get_bus_info(hw);
   2337 		goto display;
   2338 	}
    2339 	/* ...and read the Link Status Register (upper 16 bits of LCSR) */
   2340 	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
   2341 	    offset + PCIE_LCSR) >> 16;
   2342 	ixgbe_set_pci_config_data_generic(hw, link);
   2343 
   2344 display:
   2345 	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
   2346 	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s" :
   2347 	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s" :
   2348 	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s" :
   2349 	     "Unknown"),
   2350 	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
   2351 	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
   2352 	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
   2353 	     "Unknown"));
   2354 
   2355 	if (bus_info_valid) {
   2356 		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2357 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   2358 			(hw->bus.speed == ixgbe_bus_speed_2500))) {
   2359 			device_printf(dev, "PCI-Express bandwidth available"
   2360 			    " for this card\n     is not sufficient for"
   2361 			    " optimal performance.\n");
   2362 			device_printf(dev, "For optimal performance a x8 "
   2363 			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
   2364 		}
   2365 		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   2366 		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   2367 			(hw->bus.speed < ixgbe_bus_speed_8000))) {
   2368 			device_printf(dev, "PCI-Express bandwidth available"
   2369 			    " for this card\n     is not sufficient for"
   2370 			    " optimal performance.\n");
   2371 			device_printf(dev, "For optimal performance a x8 "
   2372 			    "PCIE Gen3 slot is required.\n");
   2373 		}
   2374 	} else
   2375 		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
   2376 
   2377 	return;
   2378 } /* ixgbe_get_slot_info */
   2379 
   2380 /************************************************************************
   2381  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
   2382  ************************************************************************/
   2383 static inline void
   2384 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   2385 {
   2386 	struct ixgbe_hw *hw = &adapter->hw;
   2387 	struct ix_queue *que = &adapter->queues[vector];
   2388 	u64             queue = (u64)(1ULL << vector);
   2389 	u32             mask;
   2390 
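	/*
	 * im_nest counts nested ixgbe_disable_queue() calls; the
	 * interrupt is only re-enabled once the outermost disable
	 * has been undone.
	 */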
   2391 	mutex_enter(&que->im_mtx);
   2392 	if (que->im_nest > 0 && --que->im_nest > 0)
   2393 		goto out;
   2394 
   2395 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2396 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2397 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   2398 	} else {
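		/*
		 * Later MACs spread the per-queue enable bits across
		 * two EIMS_EX registers, 32 queues each.
		 */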
   2399 		mask = (queue & 0xFFFFFFFF);
   2400 		if (mask)
   2401 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   2402 		mask = (queue >> 32);
   2403 		if (mask)
   2404 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   2405 	}
   2406 out:
   2407 	mutex_exit(&que->im_mtx);
   2408 } /* ixgbe_enable_queue */
   2409 
   2410 /************************************************************************
   2411  * ixgbe_disable_queue
   2412  ************************************************************************/
   2413 static inline void
   2414 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   2415 {
   2416 	struct ixgbe_hw *hw = &adapter->hw;
   2417 	struct ix_queue *que = &adapter->queues[vector];
   2418 	u64             queue = (u64)(1ULL << vector);
   2419 	u32             mask;
   2420 
   2421 	mutex_enter(&que->im_mtx);
   2422 	if (que->im_nest++ > 0)
    2423 		goto out;
   2424 
   2425 	if (hw->mac.type == ixgbe_mac_82598EB) {
   2426 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   2427 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   2428 	} else {
   2429 		mask = (queue & 0xFFFFFFFF);
   2430 		if (mask)
   2431 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   2432 		mask = (queue >> 32);
   2433 		if (mask)
   2434 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   2435 	}
   2436 out:
   2437 	mutex_exit(&que->im_mtx);
   2438 } /* ixgbe_disable_queue */
   2439 
   2440 /************************************************************************
   2441  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
   2442  ************************************************************************/
   2443 static int
   2444 ixgbe_msix_que(void *arg)
   2445 {
   2446 	struct ix_queue	*que = arg;
   2447 	struct adapter  *adapter = que->adapter;
   2448 	struct ifnet    *ifp = adapter->ifp;
   2449 	struct tx_ring	*txr = que->txr;
   2450 	struct rx_ring	*rxr = que->rxr;
   2451 	bool		more;
   2452 	u32		newitr = 0;
   2453 
   2454 	/* Protect against spurious interrupts */
   2455 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   2456 		return 0;
   2457 
   2458 	ixgbe_disable_queue(adapter, que->msix);
   2459 	++que->irqs.ev_count;
   2460 
   2461 #ifdef __NetBSD__
   2462 	/* Don't run ixgbe_rxeof in interrupt context */
   2463 	more = true;
   2464 #else
   2465 	more = ixgbe_rxeof(que);
   2466 #endif
   2467 
   2468 	IXGBE_TX_LOCK(txr);
   2469 	ixgbe_txeof(txr);
   2470 	IXGBE_TX_UNLOCK(txr);
   2471 
   2472 	/* Do AIM now? */
   2473 
   2474 	if (adapter->enable_aim == false)
   2475 		goto no_calc;
   2476 	/*
   2477 	 * Do Adaptive Interrupt Moderation:
   2478 	 *  - Write out last calculated setting
   2479 	 *  - Calculate based on average size over
   2480 	 *    the last interval.
   2481 	 */
   2482 	if (que->eitr_setting)
   2483 		ixgbe_eitr_write(que, que->eitr_setting);
   2484 
   2485 	que->eitr_setting = 0;
   2486 
   2487 	/* Idle, do nothing */
    2488 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    2489 		goto no_calc;
   2490 
   2491 	if ((txr->bytes) && (txr->packets))
   2492 		newitr = txr->bytes/txr->packets;
   2493 	if ((rxr->bytes) && (rxr->packets))
   2494 		newitr = max(newitr, (rxr->bytes / rxr->packets));
   2495 	newitr += 24; /* account for hardware frame, crc */
   2496 
   2497 	/* set an upper boundary */
   2498 	newitr = min(newitr, 3000);
   2499 
   2500 	/* Be nice to the mid range */
   2501 	if ((newitr > 300) && (newitr < 1200))
   2502 		newitr = (newitr / 3);
   2503 	else
   2504 		newitr = (newitr / 2);
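	/*
	 * For example, with an average frame size of 1500 bytes this
	 * gives (1500 + 24) / 2 = 762, i.e. a longer EITR interval
	 * (fewer interrupts) for bulk traffic than for small packets.
	 */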
   2505 
   2506 	/*
    2507 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
    2508 	 * Currently we use 2us for RSC_DELAY.  The minimum ITR interval is
    2509 	 * always greater than 2us at 100M (and 10M? - not documented), but
    2510 	 * not at 1G and higher, so enforce the floor here.
   2511 	 */
   2512 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2513 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2514 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
   2515 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
   2516 	}
   2517 
    2518 	/* Save for next interrupt */
    2519 	que->eitr_setting = newitr;
   2520 
   2521 	/* Reset state */
   2522 	txr->bytes = 0;
   2523 	txr->packets = 0;
   2524 	rxr->bytes = 0;
   2525 	rxr->packets = 0;
   2526 
   2527 no_calc:
   2528 	if (more)
   2529 		softint_schedule(que->que_si);
   2530 	else
   2531 		ixgbe_enable_queue(adapter, que->msix);
   2532 
   2533 	return 1;
   2534 } /* ixgbe_msix_que */
   2535 
   2536 /************************************************************************
   2537  * ixgbe_media_status - Media Ioctl callback
   2538  *
   2539  *   Called whenever the user queries the status of
   2540  *   the interface using ifconfig.
   2541  ************************************************************************/
   2542 static void
   2543 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   2544 {
   2545 	struct adapter *adapter = ifp->if_softc;
   2546 	struct ixgbe_hw *hw = &adapter->hw;
   2547 	int layer;
   2548 
   2549 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   2550 	IXGBE_CORE_LOCK(adapter);
   2551 	ixgbe_update_link_status(adapter);
   2552 
   2553 	ifmr->ifm_status = IFM_AVALID;
   2554 	ifmr->ifm_active = IFM_ETHER;
   2555 
   2556 	if (!adapter->link_active) {
   2557 		ifmr->ifm_active |= IFM_NONE;
   2558 		IXGBE_CORE_UNLOCK(adapter);
   2559 		return;
   2560 	}
   2561 
   2562 	ifmr->ifm_status |= IFM_ACTIVE;
   2563 	layer = adapter->phy_layer;
   2564 
   2565 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   2566 	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
   2567 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
   2568 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   2569 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
   2570 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
   2571 		switch (adapter->link_speed) {
   2572 		case IXGBE_LINK_SPEED_10GB_FULL:
   2573 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   2574 			break;
   2575 		case IXGBE_LINK_SPEED_5GB_FULL:
   2576 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   2577 			break;
   2578 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2579 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   2580 			break;
   2581 		case IXGBE_LINK_SPEED_1GB_FULL:
   2582 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   2583 			break;
   2584 		case IXGBE_LINK_SPEED_100_FULL:
   2585 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   2586 			break;
   2587 		case IXGBE_LINK_SPEED_10_FULL:
   2588 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   2589 			break;
   2590 		}
   2591 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2592 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2593 		switch (adapter->link_speed) {
   2594 		case IXGBE_LINK_SPEED_10GB_FULL:
   2595 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   2596 			break;
   2597 		}
   2598 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2599 		switch (adapter->link_speed) {
   2600 		case IXGBE_LINK_SPEED_10GB_FULL:
   2601 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   2602 			break;
   2603 		case IXGBE_LINK_SPEED_1GB_FULL:
   2604 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2605 			break;
   2606 		}
   2607 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   2608 		switch (adapter->link_speed) {
   2609 		case IXGBE_LINK_SPEED_10GB_FULL:
   2610 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   2611 			break;
   2612 		case IXGBE_LINK_SPEED_1GB_FULL:
   2613 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   2614 			break;
   2615 		}
   2616 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   2617 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2618 		switch (adapter->link_speed) {
   2619 		case IXGBE_LINK_SPEED_10GB_FULL:
   2620 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2621 			break;
   2622 		case IXGBE_LINK_SPEED_1GB_FULL:
   2623 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   2624 			break;
   2625 		}
   2626 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2627 		switch (adapter->link_speed) {
   2628 		case IXGBE_LINK_SPEED_10GB_FULL:
   2629 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2630 			break;
   2631 		}
   2632 	/*
   2633 	 * XXX: These need to use the proper media types once
   2634 	 * they're added.
   2635 	 */
   2636 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   2637 		switch (adapter->link_speed) {
   2638 		case IXGBE_LINK_SPEED_10GB_FULL:
   2639 #ifndef IFM_ETH_XTYPE
   2640 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   2641 #else
   2642 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
   2643 #endif
   2644 			break;
   2645 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2646 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2647 			break;
   2648 		case IXGBE_LINK_SPEED_1GB_FULL:
   2649 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2650 			break;
   2651 		}
   2652 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
   2653 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
   2654 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   2655 		switch (adapter->link_speed) {
   2656 		case IXGBE_LINK_SPEED_10GB_FULL:
   2657 #ifndef IFM_ETH_XTYPE
   2658 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   2659 #else
   2660 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
   2661 #endif
   2662 			break;
   2663 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   2664 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
   2665 			break;
   2666 		case IXGBE_LINK_SPEED_1GB_FULL:
   2667 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
   2668 			break;
   2669 		}
   2670 
   2671 	/* If nothing is recognized... */
   2672 #if 0
   2673 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   2674 		ifmr->ifm_active |= IFM_UNKNOWN;
   2675 #endif
   2676 
   2677 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   2678 
   2679 	/* Display current flow control setting used on link */
   2680 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   2681 	    hw->fc.current_mode == ixgbe_fc_full)
   2682 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   2683 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   2684 	    hw->fc.current_mode == ixgbe_fc_full)
   2685 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   2686 
   2687 	IXGBE_CORE_UNLOCK(adapter);
   2688 
   2689 	return;
   2690 } /* ixgbe_media_status */
   2691 
   2692 /************************************************************************
   2693  * ixgbe_media_change - Media Ioctl callback
   2694  *
   2695  *   Called when the user changes speed/duplex using
   2696  *   media/mediopt option with ifconfig.
   2697  ************************************************************************/
   2698 static int
   2699 ixgbe_media_change(struct ifnet *ifp)
   2700 {
   2701 	struct adapter   *adapter = ifp->if_softc;
   2702 	struct ifmedia   *ifm = &adapter->media;
   2703 	struct ixgbe_hw  *hw = &adapter->hw;
   2704 	ixgbe_link_speed speed = 0;
   2705 	ixgbe_link_speed link_caps = 0;
   2706 	bool negotiate = false;
   2707 	s32 err = IXGBE_NOT_IMPLEMENTED;
   2708 
   2709 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   2710 
   2711 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   2712 		return (EINVAL);
   2713 
   2714 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   2715 		return (ENODEV);
   2716 
   2717 	/*
   2718 	 * We don't actually need to check against the supported
   2719 	 * media types of the adapter; ifmedia will take care of
   2720 	 * that for us.
   2721 	 */
   2722 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   2723 	case IFM_AUTO:
   2724 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   2725 		    &negotiate);
   2726 		if (err != IXGBE_SUCCESS) {
   2727 			device_printf(adapter->dev, "Unable to determine "
   2728 			    "supported advertise speeds\n");
   2729 			return (ENODEV);
   2730 		}
   2731 		speed |= link_caps;
   2732 		break;
   2733 	case IFM_10G_T:
   2734 	case IFM_10G_LRM:
   2735 	case IFM_10G_LR:
   2736 	case IFM_10G_TWINAX:
   2737 #ifndef IFM_ETH_XTYPE
   2738 	case IFM_10G_SR: /* KR, too */
   2739 	case IFM_10G_CX4: /* KX4 */
   2740 #else
   2741 	case IFM_10G_KR:
   2742 	case IFM_10G_KX4:
   2743 #endif
   2744 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   2745 		break;
   2746 	case IFM_5000_T:
   2747 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   2748 		break;
   2749 	case IFM_2500_T:
   2750 	case IFM_2500_KX:
   2751 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   2752 		break;
   2753 	case IFM_1000_T:
   2754 	case IFM_1000_LX:
   2755 	case IFM_1000_SX:
   2756 	case IFM_1000_KX:
   2757 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   2758 		break;
   2759 	case IFM_100_TX:
   2760 		speed |= IXGBE_LINK_SPEED_100_FULL;
   2761 		break;
   2762 	case IFM_10_T:
   2763 		speed |= IXGBE_LINK_SPEED_10_FULL;
   2764 		break;
   2765 	default:
   2766 		goto invalid;
   2767 	}
   2768 
   2769 	hw->mac.autotry_restart = TRUE;
   2770 	hw->mac.ops.setup_link(hw, speed, TRUE);
   2771 	adapter->advertise = 0;
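	/*
	 * Record the requested speeds in the advertise bitmap:
	 * bit 0 = 100M, bit 1 = 1G, bit 2 = 10G, bit 3 = 10M,
	 * bit 4 = 2.5G, bit 5 = 5G.
	 */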
   2772 	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
   2773 		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
   2774 			adapter->advertise |= 1 << 2;
   2775 		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
   2776 			adapter->advertise |= 1 << 1;
   2777 		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
   2778 			adapter->advertise |= 1 << 0;
   2779 		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
   2780 			adapter->advertise |= 1 << 3;
   2781 		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
   2782 			adapter->advertise |= 1 << 4;
   2783 		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
   2784 			adapter->advertise |= 1 << 5;
   2785 	}
   2786 
   2787 	return (0);
   2788 
   2789 invalid:
   2790 	device_printf(adapter->dev, "Invalid media type!\n");
   2791 
   2792 	return (EINVAL);
   2793 } /* ixgbe_media_change */
   2794 
   2795 /************************************************************************
   2796  * ixgbe_set_promisc
   2797  ************************************************************************/
   2798 static void
   2799 ixgbe_set_promisc(struct adapter *adapter)
   2800 {
   2801 	struct ifnet *ifp = adapter->ifp;
   2802 	int          mcnt = 0;
   2803 	u32          rctl;
   2804 	struct ether_multi *enm;
   2805 	struct ether_multistep step;
   2806 	struct ethercom *ec = &adapter->osdep.ec;
   2807 
   2808 	KASSERT(mutex_owned(&adapter->core_mtx));
   2809 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   2810 	rctl &= (~IXGBE_FCTRL_UPE);
   2811 	if (ifp->if_flags & IFF_ALLMULTI)
   2812 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   2813 	else {
   2814 		ETHER_LOCK(ec);
   2815 		ETHER_FIRST_MULTI(step, ec, enm);
   2816 		while (enm != NULL) {
   2817 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   2818 				break;
   2819 			mcnt++;
   2820 			ETHER_NEXT_MULTI(step, enm);
   2821 		}
   2822 		ETHER_UNLOCK(ec);
   2823 	}
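         	/* Only clear MPE if the multicast list fits in the hardware filter */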
   2824 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   2825 		rctl &= (~IXGBE_FCTRL_MPE);
   2826 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2827 
   2828 	if (ifp->if_flags & IFF_PROMISC) {
   2829 		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   2830 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2831 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   2832 		rctl |= IXGBE_FCTRL_MPE;
   2833 		rctl &= ~IXGBE_FCTRL_UPE;
   2834 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
   2835 	}
   2836 } /* ixgbe_set_promisc */
   2837 
   2838 /************************************************************************
   2839  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
   2840  ************************************************************************/
   2841 static int
   2842 ixgbe_msix_link(void *arg)
   2843 {
   2844 	struct adapter	*adapter = arg;
   2845 	struct ixgbe_hw *hw = &adapter->hw;
   2846 	u32		eicr, eicr_mask;
   2847 	s32             retval;
   2848 
   2849 	++adapter->link_irq.ev_count;
   2850 
   2851 	/* Pause other interrupts */
   2852 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
   2853 
   2854 	/* First get the cause */
    2855 	/*
    2856 	 * The 82598, 82599, X540 and X550 specifications say the EICS
    2857 	 * register is write only.  However, Linux reads EICS instead of
    2858 	 * EICR to get the interrupt cause, as a workaround for silicon
    2859 	 * errata; the read-to-clear behaviour of EICR seems unreliable.
    2860 	 */
   2861 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   2862 	/* Be sure the queue bits are not cleared */
   2863 	eicr &= ~IXGBE_EICR_RTX_QUEUE;
   2864 	/* Clear interrupt with write */
   2865 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
   2866 
   2867 	/* Link status change */
   2868 	if (eicr & IXGBE_EICR_LSC) {
   2869 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
   2870 		softint_schedule(adapter->link_si);
   2871 	}
   2872 
   2873 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   2874 		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
   2875 		    (eicr & IXGBE_EICR_FLOW_DIR)) {
   2876 			/* This is probably overkill :) */
   2877 			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
   2878 				return 1;
   2879 			/* Disable the interrupt */
   2880 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
   2881 			softint_schedule(adapter->fdir_si);
   2882 		}
   2883 
   2884 		if (eicr & IXGBE_EICR_ECC) {
   2885 			device_printf(adapter->dev,
   2886 			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
   2887 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   2888 		}
   2889 
   2890 		/* Check for over temp condition */
   2891 		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
   2892 			switch (adapter->hw.mac.type) {
   2893 			case ixgbe_mac_X550EM_a:
   2894 				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
   2895 					break;
   2896 				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
   2897 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2898 				IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2899 				    IXGBE_EICR_GPI_SDP0_X550EM_a);
   2900 				retval = hw->phy.ops.check_overtemp(hw);
   2901 				if (retval != IXGBE_ERR_OVERTEMP)
   2902 					break;
   2903 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2904 				device_printf(adapter->dev, "System shutdown required!\n");
   2905 				break;
   2906 			default:
   2907 				if (!(eicr & IXGBE_EICR_TS))
   2908 					break;
   2909 				retval = hw->phy.ops.check_overtemp(hw);
   2910 				if (retval != IXGBE_ERR_OVERTEMP)
   2911 					break;
   2912 				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
   2913 				device_printf(adapter->dev, "System shutdown required!\n");
   2914 				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   2915 				break;
   2916 			}
   2917 		}
   2918 
   2919 		/* Check for VF message */
   2920 		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
   2921 		    (eicr & IXGBE_EICR_MAILBOX))
   2922 			softint_schedule(adapter->mbx_si);
   2923 	}
   2924 
   2925 	if (ixgbe_is_sfp(hw)) {
   2926 		/* Pluggable optics-related interrupt */
   2927 		if (hw->mac.type >= ixgbe_mac_X540)
   2928 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   2929 		else
   2930 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   2931 
   2932 		if (eicr & eicr_mask) {
   2933 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   2934 			softint_schedule(adapter->mod_si);
   2935 		}
   2936 
   2937 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   2938 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   2939 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   2940 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2941 			softint_schedule(adapter->msf_si);
   2942 		}
   2943 	}
   2944 
   2945 	/* Check for fan failure */
   2946 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   2947 		ixgbe_check_fan_failure(adapter, eicr, TRUE);
   2948 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   2949 	}
   2950 
   2951 	/* External PHY interrupt */
   2952 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   2953 	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   2954 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   2955 		softint_schedule(adapter->phy_si);
    2956 	}
   2957 
   2958 	/* Re-enable other interrupts */
   2959 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   2960 	return 1;
   2961 } /* ixgbe_msix_link */
   2962 
   2963 static void
   2964 ixgbe_eitr_write(struct ix_queue *que, uint32_t itr)
   2965 {
   2966 	struct adapter *adapter = que->adapter;
   2967 
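         	/*
         	 * The 82598 wants the interval written into both 16-bit halves
         	 * of EITR, apparently so the internal counter is reset; newer
         	 * MACs instead need CNT_WDIS set so the write does not disturb
         	 * the running counter.
         	 */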
    2968 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    2969 		itr |= itr << 16;
    2970 	else
    2971 		itr |= IXGBE_EITR_CNT_WDIS;
   2972 
   2973 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
   2974 	    itr);
   2975 }
   2976 
   2977 
   2978 /************************************************************************
   2979  * ixgbe_sysctl_interrupt_rate_handler
   2980  ************************************************************************/
   2981 static int
   2982 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2983 {
   2984 	struct sysctlnode node = *rnode;
   2985 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2986 	struct adapter  *adapter = que->adapter;
   2987 	uint32_t reg, usec, rate;
   2988 	int error;
   2989 
   2990 	if (que == NULL)
   2991 		return 0;
   2992 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
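         	/*
         	 * Bits 3..11 (mask 0x0FF8) hold the ITR interval in 2us units,
         	 * so the interrupt rate is 1000000 / (2 * interval), i.e.
         	 * 500000 / interval.
         	 */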
   2993 	usec = ((reg & 0x0FF8) >> 3);
   2994 	if (usec > 0)
   2995 		rate = 500000 / usec;
   2996 	else
   2997 		rate = 0;
   2998 	node.sysctl_data = &rate;
   2999 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   3000 	if (error || newp == NULL)
   3001 		return error;
   3002 	reg &= ~0xfff; /* default, no limitation */
   3003 	if (rate > 0 && rate < 500000) {
   3004 		if (rate < 1000)
   3005 			rate = 1000;
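         		/*
         		 * 4000000 / rate is the 2us-unit interval already shifted
         		 * into bits 3..11: ((1000000 / rate) / 2) << 3.
         		 */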
   3006 		reg |= ((4000000/rate) & 0xff8);
    3007 		/*
    3008 		 * When RSC is used, the ITR interval must be larger than
    3009 		 * RSC_DELAY (currently 2us).  On 100M (and presumably 10M,
    3010 		 * though that is not documented) the minimum interval is
    3011 		 * always greater than 2us, but on 1G and higher it is not.
    3012 		 */
   3013 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   3014 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   3015 			if ((adapter->num_queues > 1)
   3016 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   3017 				return EINVAL;
   3018 		}
   3019 		ixgbe_max_interrupt_rate = rate;
   3020 	} else
   3021 		ixgbe_max_interrupt_rate = 0;
   3022 	ixgbe_eitr_write(que, reg);
   3023 
   3024 	return (0);
   3025 } /* ixgbe_sysctl_interrupt_rate_handler */
   3026 
   3027 const struct sysctlnode *
   3028 ixgbe_sysctl_instance(struct adapter *adapter)
   3029 {
   3030 	const char *dvname;
   3031 	struct sysctllog **log;
   3032 	int rc;
   3033 	const struct sysctlnode *rnode;
   3034 
   3035 	if (adapter->sysctltop != NULL)
   3036 		return adapter->sysctltop;
   3037 
   3038 	log = &adapter->sysctllog;
   3039 	dvname = device_xname(adapter->dev);
   3040 
   3041 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   3042 	    0, CTLTYPE_NODE, dvname,
   3043 	    SYSCTL_DESCR("ixgbe information and settings"),
   3044 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   3045 		goto err;
   3046 
   3047 	return rnode;
   3048 err:
   3049 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   3050 	return NULL;
   3051 }
   3052 
   3053 /************************************************************************
   3054  * ixgbe_add_device_sysctls
   3055  ************************************************************************/
   3056 static void
   3057 ixgbe_add_device_sysctls(struct adapter *adapter)
   3058 {
   3059 	device_t               dev = adapter->dev;
   3060 	struct ixgbe_hw        *hw = &adapter->hw;
   3061 	struct sysctllog **log;
   3062 	const struct sysctlnode *rnode, *cnode;
   3063 
   3064 	log = &adapter->sysctllog;
   3065 
   3066 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   3067 		aprint_error_dev(dev, "could not create sysctl root\n");
   3068 		return;
   3069 	}
   3070 
   3071 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3072 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3073 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   3074 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   3075 		aprint_error_dev(dev, "could not create sysctl\n");
   3076 
   3077 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3078 	    CTLFLAG_READONLY, CTLTYPE_INT,
   3079 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   3080 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   3081 		aprint_error_dev(dev, "could not create sysctl\n");
   3082 
   3083 	/* Sysctls for all devices */
   3084 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3085 	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   3086 	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
   3087 	    CTL_EOL) != 0)
   3088 		aprint_error_dev(dev, "could not create sysctl\n");
   3089 
   3090 	adapter->enable_aim = ixgbe_enable_aim;
   3091 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3092 	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   3093 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   3094 		aprint_error_dev(dev, "could not create sysctl\n");
   3095 
   3096 	if (sysctl_createv(log, 0, &rnode, &cnode,
   3097 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   3098 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   3099 	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
   3100 	    CTL_EOL) != 0)
   3101 		aprint_error_dev(dev, "could not create sysctl\n");
   3102 
   3103 #ifdef IXGBE_DEBUG
   3104 	/* testing sysctls (for all devices) */
   3105 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3106 	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
   3107 	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
   3108 	    CTL_EOL) != 0)
   3109 		aprint_error_dev(dev, "could not create sysctl\n");
   3110 
   3111 	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
   3112 	    CTLTYPE_STRING, "print_rss_config",
   3113 	    SYSCTL_DESCR("Prints RSS Configuration"),
   3114 	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
   3115 	    CTL_EOL) != 0)
   3116 		aprint_error_dev(dev, "could not create sysctl\n");
   3117 #endif
   3118 	/* for X550 series devices */
   3119 	if (hw->mac.type >= ixgbe_mac_X550)
   3120 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3121 		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
   3122 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
   3123 		    CTL_EOL) != 0)
   3124 			aprint_error_dev(dev, "could not create sysctl\n");
   3125 
   3126 	/* for WoL-capable devices */
   3127 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3128 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3129 		    CTLTYPE_BOOL, "wol_enable",
   3130 		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   3131 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
   3132 		    CTL_EOL) != 0)
   3133 			aprint_error_dev(dev, "could not create sysctl\n");
   3134 
   3135 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3136 		    CTLTYPE_INT, "wufc",
   3137 		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   3138 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
   3139 		    CTL_EOL) != 0)
   3140 			aprint_error_dev(dev, "could not create sysctl\n");
   3141 	}
   3142 
   3143 	/* for X552/X557-AT devices */
   3144 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   3145 		const struct sysctlnode *phy_node;
   3146 
   3147 		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
   3148 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   3149 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   3150 			aprint_error_dev(dev, "could not create sysctl\n");
   3151 			return;
   3152 		}
   3153 
   3154 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3155 		    CTLTYPE_INT, "temp",
   3156 		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   3157 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
   3158 		    CTL_EOL) != 0)
   3159 			aprint_error_dev(dev, "could not create sysctl\n");
   3160 
   3161 		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
   3162 		    CTLTYPE_INT, "overtemp_occurred",
   3163 		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   3164 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
   3165 		    CTL_CREATE, CTL_EOL) != 0)
   3166 			aprint_error_dev(dev, "could not create sysctl\n");
   3167 	}
   3168 
   3169 	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
   3170 		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
   3171 		    CTLTYPE_INT, "eee_state",
   3172 		    SYSCTL_DESCR("EEE Power Save State"),
   3173 		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
   3174 		    CTL_EOL) != 0)
   3175 			aprint_error_dev(dev, "could not create sysctl\n");
   3176 	}
   3177 } /* ixgbe_add_device_sysctls */
   3178 
   3179 /************************************************************************
   3180  * ixgbe_allocate_pci_resources
   3181  ************************************************************************/
   3182 static int
   3183 ixgbe_allocate_pci_resources(struct adapter *adapter,
   3184     const struct pci_attach_args *pa)
   3185 {
   3186 	pcireg_t	memtype;
   3187 	device_t dev = adapter->dev;
   3188 	bus_addr_t addr;
   3189 	int flags;
   3190 
   3191 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   3192 	switch (memtype) {
   3193 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   3194 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   3195 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   3196 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
    3197 		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   3198 			goto map_err;
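         		/*
         		 * Device registers need strictly ordered, non-prefetched
         		 * access, so map BAR0 non-prefetchable even if the BAR
         		 * advertises the prefetchable attribute.
         		 */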
   3199 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   3200 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   3201 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   3202 		}
   3203 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   3204 		     adapter->osdep.mem_size, flags,
   3205 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   3206 map_err:
   3207 			adapter->osdep.mem_size = 0;
   3208 			aprint_error_dev(dev, "unable to map BAR0\n");
   3209 			return ENXIO;
   3210 		}
   3211 		break;
   3212 	default:
   3213 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   3214 		return ENXIO;
   3215 	}
   3216 
   3217 	return (0);
   3218 } /* ixgbe_allocate_pci_resources */
   3219 
   3220 static void
   3221 ixgbe_free_softint(struct adapter *adapter)
   3222 {
   3223 	struct ix_queue *que = adapter->queues;
   3224 	struct tx_ring *txr = adapter->tx_rings;
   3225 	int i;
   3226 
   3227 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   3228 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   3229 			if (txr->txr_si != NULL)
   3230 				softint_disestablish(txr->txr_si);
   3231 		}
   3232 		if (que->que_si != NULL)
   3233 			softint_disestablish(que->que_si);
   3234 	}
   3235 
   3236 	/* Drain the Link queue */
   3237 	if (adapter->link_si != NULL) {
   3238 		softint_disestablish(adapter->link_si);
   3239 		adapter->link_si = NULL;
   3240 	}
   3241 	if (adapter->mod_si != NULL) {
   3242 		softint_disestablish(adapter->mod_si);
   3243 		adapter->mod_si = NULL;
   3244 	}
   3245 	if (adapter->msf_si != NULL) {
   3246 		softint_disestablish(adapter->msf_si);
   3247 		adapter->msf_si = NULL;
   3248 	}
   3249 	if (adapter->phy_si != NULL) {
   3250 		softint_disestablish(adapter->phy_si);
   3251 		adapter->phy_si = NULL;
   3252 	}
   3253 	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
   3254 		if (adapter->fdir_si != NULL) {
   3255 			softint_disestablish(adapter->fdir_si);
   3256 			adapter->fdir_si = NULL;
   3257 		}
   3258 	}
   3259 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   3260 		if (adapter->mbx_si != NULL) {
   3261 			softint_disestablish(adapter->mbx_si);
   3262 			adapter->mbx_si = NULL;
   3263 		}
   3264 	}
   3265 } /* ixgbe_free_softint */
   3266 
   3267 /************************************************************************
   3268  * ixgbe_detach - Device removal routine
   3269  *
   3270  *   Called when the driver is being removed.
   3271  *   Stops the adapter and deallocates all the resources
   3272  *   that were allocated for driver operation.
   3273  *
   3274  *   return 0 on success, positive on failure
   3275  ************************************************************************/
   3276 static int
   3277 ixgbe_detach(device_t dev, int flags)
   3278 {
   3279 	struct adapter *adapter = device_private(dev);
   3280 	struct rx_ring *rxr = adapter->rx_rings;
   3281 	struct tx_ring *txr = adapter->tx_rings;
   3282 	struct ixgbe_hw *hw = &adapter->hw;
   3283 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3284 	u32	ctrl_ext;
   3285 
   3286 	INIT_DEBUGOUT("ixgbe_detach: begin");
   3287 	if (adapter->osdep.attached == false)
   3288 		return 0;
   3289 
   3290 	if (ixgbe_pci_iov_detach(dev) != 0) {
   3291 		device_printf(dev, "SR-IOV in use; detach first.\n");
   3292 		return (EBUSY);
   3293 	}
   3294 
   3295 	/* Stop the interface. Callouts are stopped in it. */
   3296 	ixgbe_ifstop(adapter->ifp, 1);
   3297 #if NVLAN > 0
   3298 	/* Make sure VLANs are not using driver */
   3299 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3300 		;	/* nothing to do: no VLANs */
   3301 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
   3302 		vlan_ifdetach(adapter->ifp);
   3303 	else {
   3304 		aprint_error_dev(dev, "VLANs in use, detach first\n");
   3305 		return (EBUSY);
   3306 	}
   3307 #endif
   3308 
   3309 	pmf_device_deregister(dev);
   3310 
   3311 	ether_ifdetach(adapter->ifp);
   3312 	/* Stop the adapter */
   3313 	IXGBE_CORE_LOCK(adapter);
   3314 	ixgbe_setup_low_power_mode(adapter);
   3315 	IXGBE_CORE_UNLOCK(adapter);
   3316 
   3317 	ixgbe_free_softint(adapter);
   3318 
   3319 	/* let hardware know driver is unloading */
   3320 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
   3321 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
   3322 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
   3323 
   3324 	callout_halt(&adapter->timer, NULL);
   3325 
   3326 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
   3327 		netmap_detach(adapter->ifp);
   3328 
   3329 	ixgbe_free_pci_resources(adapter);
   3330 #if 0	/* XXX the NetBSD port is probably missing something here */
   3331 	bus_generic_detach(dev);
   3332 #endif
   3333 	if_detach(adapter->ifp);
   3334 	if_percpuq_destroy(adapter->ipq);
   3335 
   3336 	sysctl_teardown(&adapter->sysctllog);
   3337 	evcnt_detach(&adapter->handleq);
   3338 	evcnt_detach(&adapter->req);
   3339 	evcnt_detach(&adapter->efbig_tx_dma_setup);
   3340 	evcnt_detach(&adapter->mbuf_defrag_failed);
   3341 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
   3342 	evcnt_detach(&adapter->einval_tx_dma_setup);
   3343 	evcnt_detach(&adapter->other_tx_dma_setup);
   3344 	evcnt_detach(&adapter->eagain_tx_dma_setup);
   3345 	evcnt_detach(&adapter->enomem_tx_dma_setup);
   3346 	evcnt_detach(&adapter->watchdog_events);
   3347 	evcnt_detach(&adapter->tso_err);
   3348 	evcnt_detach(&adapter->link_irq);
   3349 
   3350 	txr = adapter->tx_rings;
   3351 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   3352 		evcnt_detach(&adapter->queues[i].irqs);
   3353 		evcnt_detach(&txr->no_desc_avail);
   3354 		evcnt_detach(&txr->total_packets);
   3355 		evcnt_detach(&txr->tso_tx);
   3356 #ifndef IXGBE_LEGACY_TX
   3357 		evcnt_detach(&txr->pcq_drops);
   3358 #endif
   3359 
   3360 		if (i < __arraycount(stats->mpc)) {
   3361 			evcnt_detach(&stats->mpc[i]);
   3362 			if (hw->mac.type == ixgbe_mac_82598EB)
   3363 				evcnt_detach(&stats->rnbc[i]);
   3364 		}
   3365 		if (i < __arraycount(stats->pxontxc)) {
   3366 			evcnt_detach(&stats->pxontxc[i]);
   3367 			evcnt_detach(&stats->pxonrxc[i]);
   3368 			evcnt_detach(&stats->pxofftxc[i]);
   3369 			evcnt_detach(&stats->pxoffrxc[i]);
   3370 			evcnt_detach(&stats->pxon2offc[i]);
   3371 		}
   3372 		if (i < __arraycount(stats->qprc)) {
   3373 			evcnt_detach(&stats->qprc[i]);
   3374 			evcnt_detach(&stats->qptc[i]);
   3375 			evcnt_detach(&stats->qbrc[i]);
   3376 			evcnt_detach(&stats->qbtc[i]);
   3377 			evcnt_detach(&stats->qprdc[i]);
   3378 		}
   3379 
   3380 		evcnt_detach(&rxr->rx_packets);
   3381 		evcnt_detach(&rxr->rx_bytes);
   3382 		evcnt_detach(&rxr->rx_copies);
   3383 		evcnt_detach(&rxr->no_jmbuf);
   3384 		evcnt_detach(&rxr->rx_discarded);
   3385 	}
   3386 	evcnt_detach(&stats->ipcs);
   3387 	evcnt_detach(&stats->l4cs);
   3388 	evcnt_detach(&stats->ipcs_bad);
   3389 	evcnt_detach(&stats->l4cs_bad);
   3390 	evcnt_detach(&stats->intzero);
   3391 	evcnt_detach(&stats->legint);
   3392 	evcnt_detach(&stats->crcerrs);
   3393 	evcnt_detach(&stats->illerrc);
   3394 	evcnt_detach(&stats->errbc);
   3395 	evcnt_detach(&stats->mspdc);
   3396 	if (hw->mac.type >= ixgbe_mac_X550)
   3397 		evcnt_detach(&stats->mbsdc);
   3398 	evcnt_detach(&stats->mpctotal);
   3399 	evcnt_detach(&stats->mlfc);
   3400 	evcnt_detach(&stats->mrfc);
   3401 	evcnt_detach(&stats->rlec);
   3402 	evcnt_detach(&stats->lxontxc);
   3403 	evcnt_detach(&stats->lxonrxc);
   3404 	evcnt_detach(&stats->lxofftxc);
   3405 	evcnt_detach(&stats->lxoffrxc);
   3406 
   3407 	/* Packet Reception Stats */
   3408 	evcnt_detach(&stats->tor);
   3409 	evcnt_detach(&stats->gorc);
   3410 	evcnt_detach(&stats->tpr);
   3411 	evcnt_detach(&stats->gprc);
   3412 	evcnt_detach(&stats->mprc);
   3413 	evcnt_detach(&stats->bprc);
   3414 	evcnt_detach(&stats->prc64);
   3415 	evcnt_detach(&stats->prc127);
   3416 	evcnt_detach(&stats->prc255);
   3417 	evcnt_detach(&stats->prc511);
   3418 	evcnt_detach(&stats->prc1023);
   3419 	evcnt_detach(&stats->prc1522);
   3420 	evcnt_detach(&stats->ruc);
   3421 	evcnt_detach(&stats->rfc);
   3422 	evcnt_detach(&stats->roc);
   3423 	evcnt_detach(&stats->rjc);
   3424 	evcnt_detach(&stats->mngprc);
   3425 	evcnt_detach(&stats->mngpdc);
   3426 	evcnt_detach(&stats->xec);
   3427 
   3428 	/* Packet Transmission Stats */
   3429 	evcnt_detach(&stats->gotc);
   3430 	evcnt_detach(&stats->tpt);
   3431 	evcnt_detach(&stats->gptc);
   3432 	evcnt_detach(&stats->bptc);
   3433 	evcnt_detach(&stats->mptc);
   3434 	evcnt_detach(&stats->mngptc);
   3435 	evcnt_detach(&stats->ptc64);
   3436 	evcnt_detach(&stats->ptc127);
   3437 	evcnt_detach(&stats->ptc255);
   3438 	evcnt_detach(&stats->ptc511);
   3439 	evcnt_detach(&stats->ptc1023);
   3440 	evcnt_detach(&stats->ptc1522);
   3441 
   3442 	ixgbe_free_transmit_structures(adapter);
   3443 	ixgbe_free_receive_structures(adapter);
   3444 	for (int i = 0; i < adapter->num_queues; i++) {
   3445 		struct ix_queue * que = &adapter->queues[i];
   3446 		mutex_destroy(&que->im_mtx);
   3447 	}
   3448 	free(adapter->queues, M_DEVBUF);
   3449 	free(adapter->mta, M_DEVBUF);
   3450 
   3451 	IXGBE_CORE_LOCK_DESTROY(adapter);
   3452 
   3453 	return (0);
   3454 } /* ixgbe_detach */
   3455 
   3456 /************************************************************************
   3457  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
   3458  *
   3459  *   Prepare the adapter/port for LPLU and/or WoL
   3460  ************************************************************************/
   3461 static int
   3462 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3463 {
   3464 	struct ixgbe_hw *hw = &adapter->hw;
   3465 	device_t        dev = adapter->dev;
   3466 	s32             error = 0;
   3467 
   3468 	KASSERT(mutex_owned(&adapter->core_mtx));
   3469 
   3470 	/* Limit power management flow to X550EM baseT */
   3471 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   3472 	    hw->phy.ops.enter_lplu) {
   3473 		/* X550EM baseT adapters need a special LPLU flow */
   3474 		hw->phy.reset_disable = true;
   3475 		ixgbe_stop(adapter);
   3476 		error = hw->phy.ops.enter_lplu(hw);
   3477 		if (error)
   3478 			device_printf(dev,
   3479 			    "Error entering LPLU: %d\n", error);
   3480 		hw->phy.reset_disable = false;
   3481 	} else {
   3482 		/* Just stop for other adapters */
   3483 		ixgbe_stop(adapter);
   3484 	}
   3485 
   3486 	if (!hw->wol_enabled) {
   3487 		ixgbe_set_phy_power(hw, FALSE);
   3488 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3489 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
   3490 	} else {
   3491 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3492 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3493 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3494 
   3495 		/*
   3496 		 * Clear Wake Up Status register to prevent any previous wakeup
   3497 		 * events from waking us up immediately after we suspend.
   3498 		 */
   3499 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3500 
   3501 		/*
   3502 		 * Program the Wakeup Filter Control register with user filter
   3503 		 * settings
   3504 		 */
   3505 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3506 
   3507 		/* Enable wakeups and power management in Wakeup Control */
   3508 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3509 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3510 
   3511 	}
   3512 
   3513 	return error;
   3514 } /* ixgbe_setup_low_power_mode */
   3515 
   3516 /************************************************************************
   3517  * ixgbe_shutdown - Shutdown entry point
   3518  ************************************************************************/
   3519 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   3520 static int
   3521 ixgbe_shutdown(device_t dev)
   3522 {
   3523 	struct adapter *adapter = device_private(dev);
   3524 	int error = 0;
   3525 
   3526 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
   3527 
   3528 	IXGBE_CORE_LOCK(adapter);
   3529 	error = ixgbe_setup_low_power_mode(adapter);
   3530 	IXGBE_CORE_UNLOCK(adapter);
   3531 
   3532 	return (error);
   3533 } /* ixgbe_shutdown */
   3534 #endif
   3535 
   3536 /************************************************************************
   3537  * ixgbe_suspend
   3538  *
   3539  *   From D0 to D3
   3540  ************************************************************************/
   3541 static bool
   3542 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
   3543 {
   3544 	struct adapter *adapter = device_private(dev);
   3545 	int            error = 0;
   3546 
   3547 	INIT_DEBUGOUT("ixgbe_suspend: begin");
   3548 
   3549 	IXGBE_CORE_LOCK(adapter);
   3550 
   3551 	error = ixgbe_setup_low_power_mode(adapter);
   3552 
   3553 	IXGBE_CORE_UNLOCK(adapter);
   3554 
   3555 	return (error);
   3556 } /* ixgbe_suspend */
   3557 
   3558 /************************************************************************
   3559  * ixgbe_resume
   3560  *
   3561  *   From D3 to D0
   3562  ************************************************************************/
   3563 static bool
   3564 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
   3565 {
   3566 	struct adapter  *adapter = device_private(dev);
   3567 	struct ifnet    *ifp = adapter->ifp;
   3568 	struct ixgbe_hw *hw = &adapter->hw;
   3569 	u32             wus;
   3570 
   3571 	INIT_DEBUGOUT("ixgbe_resume: begin");
   3572 
   3573 	IXGBE_CORE_LOCK(adapter);
   3574 
   3575 	/* Read & clear WUS register */
   3576 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
   3577 	if (wus)
   3578 		device_printf(dev, "Woken up by (WUS): %#010x\n",
   3579 		    IXGBE_READ_REG(hw, IXGBE_WUS));
   3580 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3581 	/* And clear WUFC until next low-power transition */
   3582 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
   3583 
   3584 	/*
   3585 	 * Required after D3->D0 transition;
   3586 	 * will re-advertise all previous advertised speeds
   3587 	 */
   3588 	if (ifp->if_flags & IFF_UP)
   3589 		ixgbe_init_locked(adapter);
   3590 
   3591 	IXGBE_CORE_UNLOCK(adapter);
   3592 
   3593 	return true;
   3594 } /* ixgbe_resume */
   3595 
   3596 /*
   3597  * Set the various hardware offload abilities.
   3598  *
   3599  * This takes the ifnet's if_capenable flags (e.g. set by the user using
   3600  * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
   3601  * mbuf offload flags the driver will understand.
   3602  */
   3603 static void
   3604 ixgbe_set_if_hwassist(struct adapter *adapter)
   3605 {
   3606 	/* XXX */
   3607 }
   3608 
   3609 /************************************************************************
   3610  * ixgbe_init_locked - Init entry point
   3611  *
   3612  *   Used in two ways: It is used by the stack as an init
   3613  *   entry point in network interface structure. It is also
   3614  *   used by the driver as a hw/sw initialization routine to
   3615  *   get to a consistent state.
   3616  *
    3617  *   No return value; on failure the adapter is simply stopped.
   3618  ************************************************************************/
   3619 static void
   3620 ixgbe_init_locked(struct adapter *adapter)
   3621 {
   3622 	struct ifnet   *ifp = adapter->ifp;
   3623 	device_t 	dev = adapter->dev;
   3624 	struct ixgbe_hw *hw = &adapter->hw;
   3625 	struct tx_ring  *txr;
   3626 	struct rx_ring  *rxr;
   3627 	u32		txdctl, mhadd;
   3628 	u32		rxdctl, rxctrl;
   3629 	u32             ctrl_ext;
   3630 	int             err = 0;
   3631 
   3632 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   3633 
   3634 	KASSERT(mutex_owned(&adapter->core_mtx));
   3635 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   3636 
   3637 	hw->adapter_stopped = FALSE;
   3638 	ixgbe_stop_adapter(hw);
    3639 	callout_stop(&adapter->timer);
   3640 
   3641 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   3642 	adapter->max_frame_size =
   3643 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   3644 
   3645 	/* Queue indices may change with IOV mode */
   3646 	ixgbe_align_all_queue_indices(adapter);
   3647 
   3648 	/* reprogram the RAR[0] in case user changed it. */
   3649 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
   3650 
   3651 	/* Get the latest mac address, User can use a LAA */
   3652 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
   3653 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   3654 	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
   3655 	hw->addr_ctrl.rar_used_count = 1;
   3656 
   3657 	/* Set hardware offload abilities from ifnet flags */
   3658 	ixgbe_set_if_hwassist(adapter);
   3659 
   3660 	/* Prepare transmit descriptors and buffers */
   3661 	if (ixgbe_setup_transmit_structures(adapter)) {
   3662 		device_printf(dev, "Could not setup transmit structures\n");
   3663 		ixgbe_stop(adapter);
   3664 		return;
   3665 	}
   3666 
   3667 	ixgbe_init_hw(hw);
   3668 	ixgbe_initialize_iov(adapter);
   3669 	ixgbe_initialize_transmit_units(adapter);
   3670 
   3671 	/* Setup Multicast table */
   3672 	ixgbe_set_multi(adapter);
   3673 
   3674 	/* Determine the correct mbuf pool, based on frame size */
   3675 	if (adapter->max_frame_size <= MCLBYTES)
   3676 		adapter->rx_mbuf_sz = MCLBYTES;
   3677 	else
   3678 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   3679 
   3680 	/* Prepare receive descriptors and buffers */
   3681 	if (ixgbe_setup_receive_structures(adapter)) {
   3682 		device_printf(dev, "Could not setup receive structures\n");
   3683 		ixgbe_stop(adapter);
   3684 		return;
   3685 	}
   3686 
   3687 	/* Configure RX settings */
   3688 	ixgbe_initialize_receive_units(adapter);
   3689 
   3690 	/* Enable SDP & MSI-X interrupts based on adapter */
   3691 	ixgbe_config_gpie(adapter);
   3692 
   3693 	/* Set MTU size */
   3694 	if (ifp->if_mtu > ETHERMTU) {
   3695 		/* aka IXGBE_MAXFRS on 82599 and newer */
   3696 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   3697 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   3698 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   3699 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   3700 	}
   3701 
   3702 	/* Now enable all the queues */
   3703 	for (int i = 0; i < adapter->num_queues; i++) {
   3704 		txr = &adapter->tx_rings[i];
   3705 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
   3706 		txdctl |= IXGBE_TXDCTL_ENABLE;
   3707 		/* Set WTHRESH to 8, burst writeback */
   3708 		txdctl |= (8 << 16);
   3709 		/*
   3710 		 * When the internal queue falls below PTHRESH (32),
   3711 		 * start prefetching as long as there are at least
   3712 		 * HTHRESH (1) buffers ready. The values are taken
   3713 		 * from the Intel linux driver 3.8.21.
   3714 		 * Prefetching enables tx line rate even with 1 queue.
   3715 		 */
   3716 		txdctl |= (32 << 0) | (1 << 8);
   3717 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
   3718 	}
   3719 
   3720 	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
   3721 		rxr = &adapter->rx_rings[i];
   3722 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
   3723 		if (hw->mac.type == ixgbe_mac_82598EB) {
   3724 			/*
   3725 			 * PTHRESH = 21
   3726 			 * HTHRESH = 4
   3727 			 * WTHRESH = 8
   3728 			 */
   3729 			rxdctl &= ~0x3FFFFF;
   3730 			rxdctl |= 0x080420;
   3731 		}
   3732 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   3733 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
   3734 		for (; j < 10; j++) {
   3735 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
   3736 			    IXGBE_RXDCTL_ENABLE)
   3737 				break;
   3738 			else
   3739 				msec_delay(1);
   3740 		}
   3741 		wmb();
   3742 
   3743 		/*
   3744 		 * In netmap mode, we must preserve the buffers made
   3745 		 * available to userspace before the if_init()
   3746 		 * (this is true by default on the TX side, because
   3747 		 * init makes all buffers available to userspace).
   3748 		 *
   3749 		 * netmap_reset() and the device specific routines
   3750 		 * (e.g. ixgbe_setup_receive_rings()) map these
   3751 		 * buffers at the end of the NIC ring, so here we
   3752 		 * must set the RDT (tail) register to make sure
   3753 		 * they are not overwritten.
   3754 		 *
   3755 		 * In this driver the NIC ring starts at RDH = 0,
   3756 		 * RDT points to the last slot available for reception (?),
   3757 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   3758 		 */
   3759 #ifdef DEV_NETMAP
   3760 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   3761 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   3762 			struct netmap_adapter *na = NA(adapter->ifp);
   3763 			struct netmap_kring *kring = &na->rx_rings[i];
   3764 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   3765 
   3766 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
   3767 		} else
   3768 #endif /* DEV_NETMAP */
   3769 			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
   3770 			    adapter->num_rx_desc - 1);
   3771 	}
   3772 
   3773 	/* Enable Receive engine */
   3774 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   3775 	if (hw->mac.type == ixgbe_mac_82598EB)
   3776 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   3777 	rxctrl |= IXGBE_RXCTRL_RXEN;
   3778 	ixgbe_enable_rx_dma(hw, rxctrl);
   3779 
   3780 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   3781 
   3782 	/* Set up MSI-X routing */
   3783 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3784 		ixgbe_configure_ivars(adapter);
   3785 		/* Set up auto-mask */
   3786 		if (hw->mac.type == ixgbe_mac_82598EB)
   3787 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3788 		else {
   3789 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   3790 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   3791 		}
   3792 	} else {  /* Simple settings for Legacy/MSI */
   3793 		ixgbe_set_ivar(adapter, 0, 0, 0);
   3794 		ixgbe_set_ivar(adapter, 0, 0, 1);
   3795 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   3796 	}
   3797 
   3798 	ixgbe_init_fdir(adapter);
   3799 
   3800 	/*
   3801 	 * Check on any SFP devices that
   3802 	 * need to be kick-started
   3803 	 */
   3804 	if (hw->phy.type == ixgbe_phy_none) {
   3805 		err = hw->phy.ops.identify(hw);
   3806 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3807 			device_printf(dev,
   3808 			    "Unsupported SFP+ module type was detected.\n");
   3809 			return;
    3810 		}
   3811 	}
   3812 
   3813 	/* Set moderation on the Link interrupt */
   3814 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   3815 
   3816 	/* Config/Enable Link */
   3817 	ixgbe_config_link(adapter);
   3818 
   3819 	/* Hardware Packet Buffer & Flow Control setup */
   3820 	ixgbe_config_delay_values(adapter);
   3821 
   3822 	/* Initialize the FC settings */
   3823 	ixgbe_start_hw(hw);
   3824 
   3825 	/* Set up VLAN support and filter */
   3826 	ixgbe_setup_vlan_hw_support(adapter);
   3827 
   3828 	/* Setup DMA Coalescing */
   3829 	ixgbe_config_dmac(adapter);
   3830 
   3831 	/* And now turn on interrupts */
   3832 	ixgbe_enable_intr(adapter);
   3833 
   3834 	/* Enable the use of the MBX by the VF's */
   3835 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
   3836 		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
   3837 		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
   3838 		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
   3839 	}
   3840 
   3841 	/* Update saved flags. See ixgbe_ifflags_cb() */
   3842 	adapter->if_flags = ifp->if_flags;
   3843 
   3844 	/* Now inform the stack we're ready */
   3845 	ifp->if_flags |= IFF_RUNNING;
   3846 
   3847 	return;
   3848 } /* ixgbe_init_locked */
   3849 
   3850 /************************************************************************
   3851  * ixgbe_init
   3852  ************************************************************************/
   3853 static int
   3854 ixgbe_init(struct ifnet *ifp)
   3855 {
   3856 	struct adapter *adapter = ifp->if_softc;
   3857 
   3858 	IXGBE_CORE_LOCK(adapter);
   3859 	ixgbe_init_locked(adapter);
   3860 	IXGBE_CORE_UNLOCK(adapter);
   3861 
   3862 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   3863 } /* ixgbe_init */
   3864 
   3865 /************************************************************************
   3866  * ixgbe_set_ivar
   3867  *
   3868  *   Setup the correct IVAR register for a particular MSI-X interrupt
   3869  *     (yes this is all very magic and confusing :)
   3870  *    - entry is the register array entry
   3871  *    - vector is the MSI-X vector for this queue
   3872  *    - type is RX/TX/MISC
   3873  ************************************************************************/
   3874 static void
   3875 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3876 {
   3877 	struct ixgbe_hw *hw = &adapter->hw;
   3878 	u32 ivar, index;
   3879 
   3880 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3881 
   3882 	switch (hw->mac.type) {
   3883 
   3884 	case ixgbe_mac_82598EB:
   3885 		if (type == -1)
   3886 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3887 		else
   3888 			entry += (type * 64);
   3889 		index = (entry >> 2) & 0x1F;
   3890 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3891 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3892 		ivar |= (vector << (8 * (entry & 0x3)));
   3893 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3894 		break;
   3895 
   3896 	case ixgbe_mac_82599EB:
   3897 	case ixgbe_mac_X540:
   3898 	case ixgbe_mac_X550:
   3899 	case ixgbe_mac_X550EM_x:
   3900 	case ixgbe_mac_X550EM_a:
   3901 		if (type == -1) { /* MISC IVAR */
   3902 			index = (entry & 1) * 8;
   3903 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3904 			ivar &= ~(0xFF << index);
   3905 			ivar |= (vector << index);
   3906 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3907 		} else {	/* RX/TX IVARS */
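         			/*
         			 * Each IVAR register holds four 8-bit entries:
         			 * RX and TX for an even-numbered queue in the low
         			 * half, RX and TX for the following odd queue in
         			 * the high half.
         			 */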
   3908 			index = (16 * (entry & 1)) + (8 * type);
   3909 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3910 			ivar &= ~(0xFF << index);
   3911 			ivar |= (vector << index);
   3912 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
   3913 		}
    3914 		break;
   3915 	default:
   3916 		break;
   3917 	}
   3918 } /* ixgbe_set_ivar */
   3919 
   3920 /************************************************************************
   3921  * ixgbe_configure_ivars
   3922  ************************************************************************/
   3923 static void
   3924 ixgbe_configure_ivars(struct adapter *adapter)
   3925 {
   3926 	struct ix_queue *que = adapter->queues;
   3927 	u32             newitr;
   3928 
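         	/*
         	 * Convert the interrupts/sec limit to the EITR interval format
         	 * (2us units, pre-shifted into bits 3..11), as in
         	 * ixgbe_sysctl_interrupt_rate_handler().
         	 */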
   3929 	if (ixgbe_max_interrupt_rate > 0)
   3930 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3931 	else {
   3932 		/*
   3933 		 * Disable DMA coalescing if interrupt moderation is
   3934 		 * disabled.
   3935 		 */
   3936 		adapter->dmac = 0;
   3937 		newitr = 0;
   3938 	}
   3939 
    3940 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   3941 		struct rx_ring *rxr = &adapter->rx_rings[i];
   3942 		struct tx_ring *txr = &adapter->tx_rings[i];
   3943 		/* First the RX queue entry */
    3944 		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
   3945 		/* ... and the TX */
   3946 		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
   3947 		/* Set an Initial EITR value */
   3948 		ixgbe_eitr_write(que, newitr);
   3949 	}
   3950 
   3951 	/* For the Link interrupt */
    3952 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3953 } /* ixgbe_configure_ivars */
   3954 
   3955 /************************************************************************
   3956  * ixgbe_config_gpie
   3957  ************************************************************************/
   3958 static void
   3959 ixgbe_config_gpie(struct adapter *adapter)
   3960 {
   3961 	struct ixgbe_hw *hw = &adapter->hw;
   3962 	u32             gpie;
   3963 
   3964 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
   3965 
   3966 	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
   3967 		/* Enable Enhanced MSI-X mode */
   3968 		gpie |= IXGBE_GPIE_MSIX_MODE
   3969 		     |  IXGBE_GPIE_EIAME
   3970 		     |  IXGBE_GPIE_PBA_SUPPORT
   3971 		     |  IXGBE_GPIE_OCD;
   3972 	}
   3973 
   3974 	/* Fan Failure Interrupt */
   3975 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   3976 		gpie |= IXGBE_SDP1_GPIEN;
   3977 
   3978 	/* Thermal Sensor Interrupt */
   3979 	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
   3980 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3981 
   3982 	/* Link detection */
   3983 	switch (hw->mac.type) {
   3984 	case ixgbe_mac_82599EB:
   3985 		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
   3986 		break;
   3987 	case ixgbe_mac_X550EM_x:
   3988 	case ixgbe_mac_X550EM_a:
   3989 		gpie |= IXGBE_SDP0_GPIEN_X540;
   3990 		break;
   3991 	default:
   3992 		break;
   3993 	}
   3994 
   3995 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   3996 
   3997 	return;
   3998 } /* ixgbe_config_gpie */
   3999 
   4000 /************************************************************************
   4001  * ixgbe_config_delay_values
   4002  *
   4003  *   Requires adapter->max_frame_size to be set.
   4004  ************************************************************************/
   4005 static void
   4006 ixgbe_config_delay_values(struct adapter *adapter)
   4007 {
   4008 	struct ixgbe_hw *hw = &adapter->hw;
   4009 	u32             rxpb, frame, size, tmp;
   4010 
   4011 	frame = adapter->max_frame_size;
   4012 
   4013 	/* Calculate High Water */
   4014 	switch (hw->mac.type) {
   4015 	case ixgbe_mac_X540:
   4016 	case ixgbe_mac_X550:
   4017 	case ixgbe_mac_X550EM_x:
   4018 	case ixgbe_mac_X550EM_a:
   4019 		tmp = IXGBE_DV_X540(frame, frame);
   4020 		break;
   4021 	default:
   4022 		tmp = IXGBE_DV(frame, frame);
   4023 		break;
   4024 	}
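         	/*
         	 * Convert the delay value to KB and leave that much headroom
         	 * below the top of packet buffer 0 for the XOFF (high water)
         	 * threshold.
         	 */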
   4025 	size = IXGBE_BT2KB(tmp);
   4026 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   4027 	hw->fc.high_water[0] = rxpb - size;
   4028 
   4029 	/* Now calculate Low Water */
   4030 	switch (hw->mac.type) {
   4031 	case ixgbe_mac_X540:
   4032 	case ixgbe_mac_X550:
   4033 	case ixgbe_mac_X550EM_x:
   4034 	case ixgbe_mac_X550EM_a:
   4035 		tmp = IXGBE_LOW_DV_X540(frame);
   4036 		break;
   4037 	default:
   4038 		tmp = IXGBE_LOW_DV(frame);
   4039 		break;
   4040 	}
   4041 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   4042 
   4043 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   4044 	hw->fc.send_xon = TRUE;
   4045 } /* ixgbe_config_delay_values */
   4046 
   4047 /************************************************************************
   4048  * ixgbe_set_multi - Multicast Update
   4049  *
   4050  *   Called whenever multicast address list is updated.
   4051  ************************************************************************/
   4052 static void
   4053 ixgbe_set_multi(struct adapter *adapter)
   4054 {
   4055 	struct ixgbe_mc_addr	*mta;
   4056 	struct ifnet		*ifp = adapter->ifp;
   4057 	u8			*update_ptr;
   4058 	int			mcnt = 0;
   4059 	u32			fctrl;
   4060 	struct ethercom		*ec = &adapter->osdep.ec;
   4061 	struct ether_multi	*enm;
   4062 	struct ether_multistep	step;
   4063 
   4064 	KASSERT(mutex_owned(&adapter->core_mtx));
   4065 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   4066 
   4067 	mta = adapter->mta;
   4068 	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
   4069 
   4070 	ifp->if_flags &= ~IFF_ALLMULTI;
   4071 	ETHER_LOCK(ec);
   4072 	ETHER_FIRST_MULTI(step, ec, enm);
   4073 	while (enm != NULL) {
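         		/*
         		 * A multicast address range cannot be programmed into the
         		 * filter table, so fall back to ALLMULTI; do the same if
         		 * the list overflows the table.
         		 */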
   4074 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   4075 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   4076 			ETHER_ADDR_LEN) != 0)) {
   4077 			ifp->if_flags |= IFF_ALLMULTI;
   4078 			break;
   4079 		}
   4080 		bcopy(enm->enm_addrlo,
   4081 		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
   4082 		mta[mcnt].vmdq = adapter->pool;
   4083 		mcnt++;
   4084 		ETHER_NEXT_MULTI(step, enm);
   4085 	}
   4086 	ETHER_UNLOCK(ec);
   4087 
   4088 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   4089 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4090 	if (ifp->if_flags & IFF_PROMISC)
   4091 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   4092 	else if (ifp->if_flags & IFF_ALLMULTI) {
   4093 		fctrl |= IXGBE_FCTRL_MPE;
   4094 	}
   4095 
   4096 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   4097 
   4098 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   4099 		update_ptr = (u8 *)mta;
   4100 		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   4101 		    ixgbe_mc_array_itr, TRUE);
   4102 	}
   4103 
   4104 	return;
   4105 } /* ixgbe_set_multi */
   4106 
   4107 /************************************************************************
   4108  * ixgbe_mc_array_itr
   4109  *
   4110  *   An iterator function needed by the multicast shared code.
   4111  *   It feeds the shared code routine the addresses in the
   4112  *   array of ixgbe_set_multi() one by one.
   4113  ************************************************************************/
   4114 static u8 *
   4115 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   4116 {
   4117 	struct ixgbe_mc_addr *mta;
   4118 
   4119 	mta = (struct ixgbe_mc_addr *)*update_ptr;
   4120 	*vmdq = mta->vmdq;
   4121 
   4122 	*update_ptr = (u8*)(mta + 1);
   4123 
   4124 	return (mta->addr);
   4125 } /* ixgbe_mc_array_itr */
   4126 
   4127 /************************************************************************
   4128  * ixgbe_local_timer - Timer routine
   4129  *
   4130  *   Checks for link status, updates statistics,
   4131  *   and runs the watchdog check.
   4132  ************************************************************************/
   4133 static void
   4134 ixgbe_local_timer(void *arg)
   4135 {
   4136 	struct adapter *adapter = arg;
   4137 
   4138 	IXGBE_CORE_LOCK(adapter);
   4139 	ixgbe_local_timer1(adapter);
   4140 	IXGBE_CORE_UNLOCK(adapter);
   4141 }
   4142 
   4143 static void
   4144 ixgbe_local_timer1(void *arg)
   4145 {
   4146 	struct adapter	*adapter = arg;
   4147 	device_t	dev = adapter->dev;
   4148 	struct ix_queue *que = adapter->queues;
   4149 	u64		queues = 0;
   4150 	int		hung = 0;
   4151 
   4152 	KASSERT(mutex_owned(&adapter->core_mtx));
   4153 
   4154 	/* Check for pluggable optics */
   4155 	if (adapter->sfp_probe)
   4156 		if (!ixgbe_sfp_probe(adapter))
   4157 			goto out; /* Nothing to do */
   4158 
   4159 	ixgbe_update_link_status(adapter);
   4160 	ixgbe_update_stats_counters(adapter);
   4161 
   4162 	/*
   4163 	 * Check the TX queues status
   4164 	 *      - mark hung queues so we don't schedule on them
   4165 	 *      - watchdog only if all queues show hung
   4166 	 */
   4167 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4168 		/* Keep track of queues with work for soft irq */
   4169 		if (que->txr->busy)
   4170 			queues |= ((u64)1 << que->me);
    4171 		/*
    4172 		 * Each time txeof runs without cleaning anything while
    4173 		 * uncleaned descriptors remain, it increments busy.  Once
    4174 		 * busy reaches the MAX we declare the queue hung.
    4175 		 */
   4176 		if (que->busy == IXGBE_QUEUE_HUNG) {
   4177 			++hung;
   4178 			/* Mark the queue as inactive */
   4179 			adapter->active_queues &= ~((u64)1 << que->me);
   4180 			continue;
   4181 		} else {
   4182 			/* Check if we've come back from hung */
   4183 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   4184 				adapter->active_queues |= ((u64)1 << que->me);
   4185 		}
   4186 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   4187 			device_printf(dev,
   4188 			    "Warning queue %d appears to be hung!\n", i);
   4189 			que->txr->busy = IXGBE_QUEUE_HUNG;
   4190 			++hung;
   4191 		}
   4192 	}
   4193 
    4194 	/* Only truly watchdog if all queues show hung */
   4195 	if (hung == adapter->num_queues)
   4196 		goto watchdog;
   4197 	else if (queues != 0) { /* Force an IRQ on queues with work */
   4198 		ixgbe_rearm_queues(adapter, queues);
   4199 	}
   4200 
   4201 out:
   4202 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   4203 	return;
   4204 
   4205 watchdog:
   4206 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   4207 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   4208 	adapter->watchdog_events.ev_count++;
   4209 	ixgbe_init_locked(adapter);
   4210 } /* ixgbe_local_timer */
   4211 
   4212 /************************************************************************
   4213  * ixgbe_sfp_probe
   4214  *
   4215  *   Determine if a port had optics inserted.
   4216  ************************************************************************/
   4217 static bool
   4218 ixgbe_sfp_probe(struct adapter *adapter)
   4219 {
   4220 	struct ixgbe_hw	*hw = &adapter->hw;
   4221 	device_t	dev = adapter->dev;
   4222 	bool		result = FALSE;
   4223 
   4224 	if ((hw->phy.type == ixgbe_phy_nl) &&
   4225 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   4226 		s32 ret = hw->phy.ops.identify_sfp(hw);
   4227 		if (ret)
   4228 			goto out;
   4229 		ret = hw->phy.ops.reset(hw);
   4230 		adapter->sfp_probe = FALSE;
   4231 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    4232 			device_printf(dev, "Unsupported SFP+ module detected!\n");
    4233 			device_printf(dev,
    4234 			    "Reload driver with supported module.\n");
    4235 			goto out;
   4236 		} else
   4237 			device_printf(dev, "SFP+ module detected!\n");
   4238 		/* We now have supported optics */
   4239 		result = TRUE;
   4240 	}
   4241 out:
   4242 
   4243 	return (result);
   4244 } /* ixgbe_sfp_probe */
   4245 
   4246 /************************************************************************
   4247  * ixgbe_handle_mod - Tasklet for SFP module interrupts
   4248  ************************************************************************/
   4249 static void
   4250 ixgbe_handle_mod(void *context)
   4251 {
   4252 	struct adapter  *adapter = context;
   4253 	struct ixgbe_hw *hw = &adapter->hw;
   4254 	device_t	dev = adapter->dev;
   4255 	u32             err, cage_full = 0;
   4256 
   4257 	if (adapter->hw.need_crosstalk_fix) {
   4258 		switch (hw->mac.type) {
   4259 		case ixgbe_mac_82599EB:
   4260 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4261 			    IXGBE_ESDP_SDP2;
   4262 			break;
   4263 		case ixgbe_mac_X550EM_x:
   4264 		case ixgbe_mac_X550EM_a:
   4265 			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
   4266 			    IXGBE_ESDP_SDP0;
   4267 			break;
   4268 		default:
   4269 			break;
   4270 		}
   4271 
   4272 		if (!cage_full)
   4273 			return;
   4274 	}
   4275 
   4276 	err = hw->phy.ops.identify_sfp(hw);
   4277 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4278 		device_printf(dev,
   4279 		    "Unsupported SFP+ module type was detected.\n");
   4280 		return;
   4281 	}
   4282 
   4283 	err = hw->mac.ops.setup_sfp(hw);
   4284 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   4285 		device_printf(dev,
   4286 		    "Setup failure - unsupported SFP+ module type.\n");
   4287 		return;
   4288 	}
   4289 	softint_schedule(adapter->msf_si);
   4290 } /* ixgbe_handle_mod */
   4291 
   4292 
   4293 /************************************************************************
   4294  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
   4295  ************************************************************************/
   4296 static void
   4297 ixgbe_handle_msf(void *context)
   4298 {
   4299 	struct adapter  *adapter = context;
   4300 	struct ixgbe_hw *hw = &adapter->hw;
   4301 	u32             autoneg;
   4302 	bool            negotiate;
   4303 
    4304 	/* identify_sfp() is called via ixgbe_get_supported_physical_layer() */
   4305 	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
   4306 
   4307 	autoneg = hw->phy.autoneg_advertised;
   4308 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   4309 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   4310 	else
   4311 		negotiate = 0;
   4312 	if (hw->mac.ops.setup_link)
   4313 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   4314 
   4315 	/* Adjust media types shown in ifconfig */
   4316 	ifmedia_removeall(&adapter->media);
   4317 	ixgbe_add_media_types(adapter);
   4318 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   4319 } /* ixgbe_handle_msf */
   4320 
   4321 /************************************************************************
   4322  * ixgbe_handle_phy - Tasklet for external PHY interrupts
   4323  ************************************************************************/
   4324 static void
   4325 ixgbe_handle_phy(void *context)
   4326 {
   4327 	struct adapter  *adapter = context;
   4328 	struct ixgbe_hw *hw = &adapter->hw;
   4329 	int error;
   4330 
   4331 	error = hw->phy.ops.handle_lasi(hw);
   4332 	if (error == IXGBE_ERR_OVERTEMP)
   4333 		device_printf(adapter->dev,
   4334 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   4335 		    " PHY will downshift to lower power state!\n");
   4336 	else if (error)
   4337 		device_printf(adapter->dev,
   4338 		    "Error handling LASI interrupt: %d\n", error);
   4339 } /* ixgbe_handle_phy */
   4340 
   4341 static void
   4342 ixgbe_ifstop(struct ifnet *ifp, int disable)
   4343 {
   4344 	struct adapter *adapter = ifp->if_softc;
   4345 
   4346 	IXGBE_CORE_LOCK(adapter);
   4347 	ixgbe_stop(adapter);
   4348 	IXGBE_CORE_UNLOCK(adapter);
   4349 }
   4350 
   4351 /************************************************************************
   4352  * ixgbe_stop - Stop the hardware
   4353  *
   4354  *   Disables all traffic on the adapter by issuing a
   4355  *   global reset on the MAC and deallocates TX/RX buffers.
   4356  ************************************************************************/
   4357 static void
   4358 ixgbe_stop(void *arg)
   4359 {
   4360 	struct ifnet    *ifp;
   4361 	struct adapter  *adapter = arg;
   4362 	struct ixgbe_hw *hw = &adapter->hw;
   4363 
   4364 	ifp = adapter->ifp;
   4365 
   4366 	KASSERT(mutex_owned(&adapter->core_mtx));
   4367 
   4368 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   4369 	ixgbe_disable_intr(adapter);
   4370 	callout_stop(&adapter->timer);
   4371 
   4372 	/* Let the stack know...*/
   4373 	ifp->if_flags &= ~IFF_RUNNING;
   4374 
   4375 	ixgbe_reset_hw(hw);
   4376 	hw->adapter_stopped = FALSE;
   4377 	ixgbe_stop_adapter(hw);
   4378 	if (hw->mac.type == ixgbe_mac_82599EB)
   4379 		ixgbe_stop_mac_link_on_d3_82599(hw);
   4380 	/* Turn off the laser - noop with no optics */
   4381 	ixgbe_disable_tx_laser(hw);
   4382 
   4383 	/* Update the stack */
   4384 	adapter->link_up = FALSE;
   4385 	ixgbe_update_link_status(adapter);
   4386 
   4387 	/* reprogram the RAR[0] in case user changed it. */
   4388 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   4389 
   4390 	return;
   4391 } /* ixgbe_stop */
   4392 
   4393 /************************************************************************
   4394  * ixgbe_update_link_status - Update OS on link state
   4395  *
   4396  * Note: Only updates the OS on the cached link state.
   4397  *       The real check of the hardware only happens with
   4398  *       a link interrupt.
   4399  ************************************************************************/
   4400 static void
   4401 ixgbe_update_link_status(struct adapter *adapter)
   4402 {
   4403 	struct ifnet	*ifp = adapter->ifp;
   4404 	device_t        dev = adapter->dev;
   4405 	struct ixgbe_hw *hw = &adapter->hw;
   4406 
   4407 	if (adapter->link_up) {
   4408 		if (adapter->link_active == FALSE) {
   4409 			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
   4410 				/*
   4411 				 *  Discard count for both MAC Local Fault and
   4412 				 * Remote Fault because those registers are
   4413 				 * valid only when the link speed is up and
   4414 				 * 10Gbps.
   4415 				 */
   4416 				IXGBE_READ_REG(hw, IXGBE_MLFC);
   4417 				IXGBE_READ_REG(hw, IXGBE_MRFC);
   4418 			}
   4419 
   4420 			if (bootverbose) {
   4421 				const char *bpsmsg;
   4422 
   4423 				switch (adapter->link_speed) {
   4424 				case IXGBE_LINK_SPEED_10GB_FULL:
   4425 					bpsmsg = "10 Gbps";
   4426 					break;
   4427 				case IXGBE_LINK_SPEED_5GB_FULL:
   4428 					bpsmsg = "5 Gbps";
   4429 					break;
   4430 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   4431 					bpsmsg = "2.5 Gbps";
   4432 					break;
   4433 				case IXGBE_LINK_SPEED_1GB_FULL:
   4434 					bpsmsg = "1 Gbps";
   4435 					break;
   4436 				case IXGBE_LINK_SPEED_100_FULL:
   4437 					bpsmsg = "100 Mbps";
   4438 					break;
   4439 				case IXGBE_LINK_SPEED_10_FULL:
   4440 					bpsmsg = "10 Mbps";
   4441 					break;
   4442 				default:
   4443 					bpsmsg = "unknown speed";
   4444 					break;
   4445 				}
    4446 				device_printf(dev, "Link is up %s %s\n",
   4447 				    bpsmsg, "Full Duplex");
   4448 			}
   4449 			adapter->link_active = TRUE;
   4450 			/* Update any Flow Control changes */
   4451 			ixgbe_fc_enable(&adapter->hw);
   4452 			/* Update DMA coalescing config */
   4453 			ixgbe_config_dmac(adapter);
   4454 			if_link_state_change(ifp, LINK_STATE_UP);
   4455 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4456 				ixgbe_ping_all_vfs(adapter);
   4457 		}
   4458 	} else { /* Link down */
   4459 		if (adapter->link_active == TRUE) {
   4460 			if (bootverbose)
   4461 				device_printf(dev, "Link is Down\n");
   4462 			if_link_state_change(ifp, LINK_STATE_DOWN);
   4463 			adapter->link_active = FALSE;
   4464 			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4465 				ixgbe_ping_all_vfs(adapter);
   4466 		}
   4467 	}
   4468 
   4469 	return;
   4470 } /* ixgbe_update_link_status */
   4471 
   4472 /************************************************************************
   4473  * ixgbe_config_dmac - Configure DMA Coalescing
   4474  ************************************************************************/
   4475 static void
   4476 ixgbe_config_dmac(struct adapter *adapter)
   4477 {
   4478 	struct ixgbe_hw *hw = &adapter->hw;
   4479 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   4480 
   4481 	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
   4482 		return;
   4483 
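         	/* Only reprogram when the watchdog value or link speed changed */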
   4484 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   4485 	    dcfg->link_speed ^ adapter->link_speed) {
   4486 		dcfg->watchdog_timer = adapter->dmac;
   4487 		dcfg->fcoe_en = false;
   4488 		dcfg->link_speed = adapter->link_speed;
   4489 		dcfg->num_tcs = 1;
   4490 
   4491 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   4492 		    dcfg->watchdog_timer, dcfg->link_speed);
   4493 
   4494 		hw->mac.ops.dmac_config(hw);
   4495 	}
   4496 } /* ixgbe_config_dmac */
   4497 
   4498 /************************************************************************
   4499  * ixgbe_enable_intr
   4500  ************************************************************************/
   4501 static void
   4502 ixgbe_enable_intr(struct adapter *adapter)
   4503 {
   4504 	struct ixgbe_hw	*hw = &adapter->hw;
   4505 	struct ix_queue	*que = adapter->queues;
   4506 	u32		mask, fwsm;
   4507 
   4508 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   4509 
   4510 	switch (adapter->hw.mac.type) {
   4511 	case ixgbe_mac_82599EB:
   4512 		mask |= IXGBE_EIMS_ECC;
   4513 		/* Temperature sensor on some adapters */
   4514 		mask |= IXGBE_EIMS_GPI_SDP0;
   4515 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   4516 		mask |= IXGBE_EIMS_GPI_SDP1;
   4517 		mask |= IXGBE_EIMS_GPI_SDP2;
   4518 		break;
   4519 	case ixgbe_mac_X540:
   4520 		/* Detect if Thermal Sensor is enabled */
   4521 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   4522 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
   4523 			mask |= IXGBE_EIMS_TS;
   4524 		mask |= IXGBE_EIMS_ECC;
   4525 		break;
   4526 	case ixgbe_mac_X550:
   4527 		/* MAC thermal sensor is automatically enabled */
   4528 		mask |= IXGBE_EIMS_TS;
   4529 		mask |= IXGBE_EIMS_ECC;
   4530 		break;
   4531 	case ixgbe_mac_X550EM_x:
   4532 	case ixgbe_mac_X550EM_a:
   4533 		/* Some devices use SDP0 for important information */
   4534 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   4535 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
   4536 		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
   4537 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   4538 			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   4539 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
   4540 			mask |= IXGBE_EICR_GPI_SDP0_X540;
   4541 		mask |= IXGBE_EIMS_ECC;
   4542 		break;
   4543 	default:
   4544 		break;
   4545 	}
   4546 
   4547 	/* Enable Fan Failure detection */
   4548 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
   4549 		mask |= IXGBE_EIMS_GPI_SDP1;
   4550 	/* Enable SR-IOV */
   4551 	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
   4552 		mask |= IXGBE_EIMS_MAILBOX;
   4553 	/* Enable Flow Director */
   4554 	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
   4555 		mask |= IXGBE_EIMS_FLOW_DIR;
   4556 
   4557 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   4558 
   4559 	/* With MSI-X we use auto clear */
   4560 	if (adapter->msix_mem) {
   4561 		mask = IXGBE_EIMS_ENABLE_MASK;
   4562 		/* Don't autoclear Link */
   4563 		mask &= ~IXGBE_EIMS_OTHER;
   4564 		mask &= ~IXGBE_EIMS_LSC;
   4565 		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
   4566 			mask &= ~IXGBE_EIMS_MAILBOX;
   4567 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   4568 	}
   4569 
   4570 	/*
   4571 	 * Now enable all queues, this is done separately to
   4572 	 * allow for handling the extended (beyond 32) MSI-X
   4573 	 * vectors that can be used by 82599
   4574 	 */
    4575 	for (int i = 0; i < adapter->num_queues; i++, que++)
    4576 		ixgbe_enable_queue(adapter, que->msix);
   4577 
   4578 	IXGBE_WRITE_FLUSH(hw);
   4579 
   4580 	return;
   4581 } /* ixgbe_enable_intr */
   4582 
   4583 /************************************************************************
   4584  * ixgbe_disable_intr
   4585  ************************************************************************/
   4586 static void
   4587 ixgbe_disable_intr(struct adapter *adapter)
   4588 {
   4589 	struct ix_queue	*que = adapter->queues;
   4590 
   4591 	/* disable interrupts other than queues */
   4592 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
   4593 
   4594 	if (adapter->msix_mem)
   4595 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   4596 
   4597 	for (int i = 0; i < adapter->num_queues; i++, que++)
   4598 		ixgbe_disable_queue(adapter, que->msix);
   4599 
   4600 	IXGBE_WRITE_FLUSH(&adapter->hw);
   4601 
   4602 	return;
   4603 } /* ixgbe_disable_intr */
   4604 
   4605 /************************************************************************
   4606  * ixgbe_legacy_irq - Legacy Interrupt Service routine
   4607  ************************************************************************/
   4608 static int
   4609 ixgbe_legacy_irq(void *arg)
   4610 {
   4611 	struct ix_queue *que = arg;
   4612 	struct adapter	*adapter = que->adapter;
   4613 	struct ixgbe_hw	*hw = &adapter->hw;
   4614 	struct ifnet    *ifp = adapter->ifp;
    4615 	struct tx_ring	*txr = adapter->tx_rings;
   4616 	bool		more = false;
   4617 	u32             eicr, eicr_mask;
   4618 
   4619 	/* Silicon errata #26 on 82598 */
   4620 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
   4621 
   4622 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   4623 
   4624 	adapter->stats.pf.legint.ev_count++;
   4625 	++que->irqs.ev_count;
   4626 	if (eicr == 0) {
   4627 		adapter->stats.pf.intzero.ev_count++;
   4628 		if ((ifp->if_flags & IFF_UP) != 0)
   4629 			ixgbe_enable_intr(adapter);
   4630 		return 0;
   4631 	}
   4632 
   4633 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   4634 #ifdef __NetBSD__
   4635 		/* Don't run ixgbe_rxeof in interrupt context */
   4636 		more = true;
   4637 #else
   4638 		more = ixgbe_rxeof(que);
   4639 #endif
   4640 
   4641 		IXGBE_TX_LOCK(txr);
   4642 		ixgbe_txeof(txr);
   4643 #ifdef notyet
   4644 		if (!ixgbe_ring_empty(ifp, txr->br))
   4645 			ixgbe_start_locked(ifp, txr);
   4646 #endif
   4647 		IXGBE_TX_UNLOCK(txr);
   4648 	}
   4649 
   4650 	/* Check for fan failure */
   4651 	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
   4652 		ixgbe_check_fan_failure(adapter, eicr, true);
   4653 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4654 	}
   4655 
   4656 	/* Link status change */
   4657 	if (eicr & IXGBE_EICR_LSC)
   4658 		softint_schedule(adapter->link_si);
   4659 
   4660 	if (ixgbe_is_sfp(hw)) {
   4661 		/* Pluggable optics-related interrupt */
   4662 		if (hw->mac.type >= ixgbe_mac_X540)
   4663 			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
   4664 		else
   4665 			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   4666 
   4667 		if (eicr & eicr_mask) {
   4668 			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
   4669 			softint_schedule(adapter->mod_si);
   4670 		}
   4671 
   4672 		if ((hw->mac.type == ixgbe_mac_82599EB) &&
   4673 		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   4674 			IXGBE_WRITE_REG(hw, IXGBE_EICR,
   4675 			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   4676 			softint_schedule(adapter->msf_si);
   4677 		}
   4678 	}
   4679 
   4680 	/* External PHY interrupt */
   4681 	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
   4682 	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
   4683 		softint_schedule(adapter->phy_si);
   4684 
   4685 	if (more)
   4686 		softint_schedule(que->que_si);
   4687 	else
   4688 		ixgbe_enable_intr(adapter);
   4689 
   4690 	return 1;
   4691 } /* ixgbe_legacy_irq */
   4692 
   4693 /************************************************************************
   4694  * ixgbe_free_pciintr_resources
   4695  ************************************************************************/
   4696 static void
   4697 ixgbe_free_pciintr_resources(struct adapter *adapter)
   4698 {
   4699 	struct ix_queue *que = adapter->queues;
   4700 	int		rid;
   4701 
   4702 	/*
   4703 	 * Release all msix queue resources:
   4704 	 */
   4705 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   4706 		if (que->res != NULL) {
   4707 			pci_intr_disestablish(adapter->osdep.pc,
   4708 			    adapter->osdep.ihs[i]);
   4709 			adapter->osdep.ihs[i] = NULL;
   4710 		}
   4711 	}
   4712 
   4713 	/* Clean the Legacy or Link interrupt last */
   4714 	if (adapter->vector) /* we are doing MSIX */
   4715 		rid = adapter->vector;
   4716 	else
   4717 		rid = 0;
   4718 
   4719 	if (adapter->osdep.ihs[rid] != NULL) {
   4720 		pci_intr_disestablish(adapter->osdep.pc,
   4721 		    adapter->osdep.ihs[rid]);
   4722 		adapter->osdep.ihs[rid] = NULL;
   4723 	}
   4724 
   4725 	if (adapter->osdep.intrs != NULL) {
   4726 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   4727 		    adapter->osdep.nintrs);
   4728 		adapter->osdep.intrs = NULL;
   4729 	}
   4730 
   4731 	return;
   4732 } /* ixgbe_free_pciintr_resources */
   4733 
   4734 /************************************************************************
   4735  * ixgbe_free_pci_resources
   4736  ************************************************************************/
   4737 static void
   4738 ixgbe_free_pci_resources(struct adapter *adapter)
   4739 {
   4740 
   4741 	ixgbe_free_pciintr_resources(adapter);
   4742 
   4743 	if (adapter->osdep.mem_size != 0) {
   4744 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   4745 		    adapter->osdep.mem_bus_space_handle,
   4746 		    adapter->osdep.mem_size);
   4747 	}
   4748 
   4749 	return;
   4750 } /* ixgbe_free_pci_resources */
   4751 
   4752 /************************************************************************
   4753  * ixgbe_set_sysctl_value
   4754  ************************************************************************/
   4755 static void
   4756 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
   4757     const char *description, int *limit, int value)
   4758 {
    4759 	device_t dev = adapter->dev;
   4760 	struct sysctllog **log;
   4761 	const struct sysctlnode *rnode, *cnode;
   4762 
   4763 	log = &adapter->sysctllog;
   4764 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4765 		aprint_error_dev(dev, "could not create sysctl root\n");
   4766 		return;
   4767 	}
   4768 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4769 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4770 	    name, SYSCTL_DESCR(description),
    4771 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   4772 		aprint_error_dev(dev, "could not create sysctl\n");
   4773 	*limit = value;
   4774 } /* ixgbe_set_sysctl_value */
   4775 
   4776 /************************************************************************
   4777  * ixgbe_sysctl_flowcntl
   4778  *
   4779  *   SYSCTL wrapper around setting Flow Control
   4780  ************************************************************************/
   4781 static int
   4782 ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
   4783 {
   4784 	struct sysctlnode node = *rnode;
   4785 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4786 	int error, fc;
   4787 
   4788 	fc = adapter->hw.fc.current_mode;
   4789 	node.sysctl_data = &fc;
   4790 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4791 	if (error != 0 || newp == NULL)
   4792 		return error;
   4793 
   4794 	/* Don't bother if it's not changed */
   4795 	if (fc == adapter->hw.fc.current_mode)
   4796 		return (0);
   4797 
   4798 	return ixgbe_set_flowcntl(adapter, fc);
   4799 } /* ixgbe_sysctl_flowcntl */
   4800 
   4801 /************************************************************************
   4802  * ixgbe_set_flowcntl - Set flow control
   4803  *
   4804  *   Flow control values:
   4805  *     0 - off
   4806  *     1 - rx pause
   4807  *     2 - tx pause
   4808  *     3 - full
   4809  ************************************************************************/
   4810 static int
   4811 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
   4812 {
   4813 	switch (fc) {
   4814 		case ixgbe_fc_rx_pause:
   4815 		case ixgbe_fc_tx_pause:
   4816 		case ixgbe_fc_full:
   4817 			adapter->hw.fc.requested_mode = fc;
   4818 			if (adapter->num_queues > 1)
   4819 				ixgbe_disable_rx_drop(adapter);
   4820 			break;
   4821 		case ixgbe_fc_none:
   4822 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4823 			if (adapter->num_queues > 1)
   4824 				ixgbe_enable_rx_drop(adapter);
   4825 			break;
   4826 		default:
   4827 			return (EINVAL);
   4828 	}
   4829 
   4830 #if 0 /* XXX NetBSD */
   4831 	/* Don't autoneg if forcing a value */
   4832 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4833 #endif
   4834 	ixgbe_fc_enable(&adapter->hw);
   4835 
   4836 	return (0);
   4837 } /* ixgbe_set_flowcntl */
   4838 
   4839 /************************************************************************
   4840  * ixgbe_enable_rx_drop
   4841  *
   4842  *   Enable the hardware to drop packets when the buffer is
   4843  *   full. This is useful with multiqueue, so that no single
   4844  *   queue being full stalls the entire RX engine. We only
   4845  *   enable this when Multiqueue is enabled AND Flow Control
   4846  *   is disabled.
   4847  ************************************************************************/
   4848 static void
   4849 ixgbe_enable_rx_drop(struct adapter *adapter)
   4850 {
   4851 	struct ixgbe_hw *hw = &adapter->hw;
   4852 	struct rx_ring  *rxr;
   4853 	u32             srrctl;
   4854 
   4855 	for (int i = 0; i < adapter->num_queues; i++) {
   4856 		rxr = &adapter->rx_rings[i];
   4857 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
   4858 		srrctl |= IXGBE_SRRCTL_DROP_EN;
   4859 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4860 	}
   4861 
   4862 	/* enable drop for each vf */
   4863 	for (int i = 0; i < adapter->num_vfs; i++) {
   4864 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4865 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
   4866 		    IXGBE_QDE_ENABLE));
   4867 	}
   4868 } /* ixgbe_enable_rx_drop */
   4869 
   4870 /************************************************************************
   4871  * ixgbe_disable_rx_drop
   4872  ************************************************************************/
   4873 static void
   4874 ixgbe_disable_rx_drop(struct adapter *adapter)
   4875 {
   4876 	struct ixgbe_hw *hw = &adapter->hw;
   4877 	struct rx_ring  *rxr;
   4878 	u32             srrctl;
   4879 
   4880 	for (int i = 0; i < adapter->num_queues; i++) {
   4881 		rxr = &adapter->rx_rings[i];
    4882 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
    4883 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    4884 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
   4885 	}
   4886 
   4887 	/* disable drop for each vf */
   4888 	for (int i = 0; i < adapter->num_vfs; i++) {
   4889 		IXGBE_WRITE_REG(hw, IXGBE_QDE,
   4890 		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
   4891 	}
   4892 } /* ixgbe_disable_rx_drop */
   4893 
   4894 /************************************************************************
   4895  * ixgbe_sysctl_advertise
   4896  *
   4897  *   SYSCTL wrapper around setting advertised speed
   4898  ************************************************************************/
   4899 static int
   4900 ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
   4901 {
   4902 	struct sysctlnode node = *rnode;
   4903 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4904 	int            error = 0, advertise;
   4905 
   4906 	advertise = adapter->advertise;
   4907 	node.sysctl_data = &advertise;
   4908 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4909 	if (error != 0 || newp == NULL)
   4910 		return error;
   4911 
   4912 	return ixgbe_set_advertise(adapter, advertise);
   4913 } /* ixgbe_sysctl_advertise */
   4914 
   4915 /************************************************************************
   4916  * ixgbe_set_advertise - Control advertised link speed
   4917  *
   4918  *   Flags:
   4919  *     0x00 - Default (all capable link speed)
   4920  *     0x01 - advertise 100 Mb
   4921  *     0x02 - advertise 1G
   4922  *     0x04 - advertise 10G
   4923  *     0x08 - advertise 10 Mb
   4924  *     0x10 - advertise 2.5G
   4925  *     0x20 - advertise 5G
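          *     e.g. 0x06 (0x02 | 0x04) advertises both 1 Gb and 10 Gb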
   4926  ************************************************************************/
   4927 static int
   4928 ixgbe_set_advertise(struct adapter *adapter, int advertise)
   4929 {
   4930 	device_t         dev;
   4931 	struct ixgbe_hw  *hw;
   4932 	ixgbe_link_speed speed = 0;
   4933 	ixgbe_link_speed link_caps = 0;
   4934 	s32              err = IXGBE_NOT_IMPLEMENTED;
   4935 	bool             negotiate = FALSE;
   4936 
   4937 	/* Checks to validate new value */
   4938 	if (adapter->advertise == advertise) /* no change */
   4939 		return (0);
   4940 
   4941 	dev = adapter->dev;
   4942 	hw = &adapter->hw;
   4943 
   4944 	/* No speed changes for backplane media */
   4945 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   4946 		return (ENODEV);
   4947 
   4948 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4949 	    (hw->phy.multispeed_fiber))) {
   4950 		device_printf(dev,
   4951 		    "Advertised speed can only be set on copper or "
   4952 		    "multispeed fiber media types.\n");
   4953 		return (EINVAL);
   4954 	}
   4955 
    4956 	if (advertise < 0x0 || advertise > 0x3f) {
    4957 		device_printf(dev,
    4958 		    "Invalid advertised speed; valid flags are 0x0 through 0x3f\n");
   4959 		return (EINVAL);
   4960 	}
   4961 
   4962 	if (hw->mac.ops.get_link_capabilities) {
   4963 		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
   4964 		    &negotiate);
   4965 		if (err != IXGBE_SUCCESS) {
    4966 			device_printf(dev, "Unable to determine supported advertised speeds\n");
   4967 			return (ENODEV);
   4968 		}
   4969 	}
   4970 
   4971 	/* Set new value and report new advertised mode */
   4972 	if (advertise & 0x1) {
   4973 		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
   4974 			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
   4975 			return (EINVAL);
   4976 		}
   4977 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4978 	}
   4979 	if (advertise & 0x2) {
   4980 		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
   4981 			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
   4982 			return (EINVAL);
   4983 		}
   4984 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4985 	}
   4986 	if (advertise & 0x4) {
   4987 		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
   4988 			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
   4989 			return (EINVAL);
   4990 		}
   4991 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4992 	}
   4993 	if (advertise & 0x8) {
   4994 		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
   4995 			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
   4996 			return (EINVAL);
   4997 		}
   4998 		speed |= IXGBE_LINK_SPEED_10_FULL;
   4999 	}
   5000 	if (advertise & 0x10) {
   5001 		if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
   5002 			device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
   5003 			return (EINVAL);
   5004 		}
   5005 		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
   5006 	}
   5007 	if (advertise & 0x20) {
   5008 		if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
   5009 			device_printf(dev, "Interface does not support 5Gb advertised speed\n");
   5010 			return (EINVAL);
   5011 		}
   5012 		speed |= IXGBE_LINK_SPEED_5GB_FULL;
   5013 	}
   5014 	if (advertise == 0)
   5015 		speed = link_caps; /* All capable link speed */
   5016 
   5017 	hw->mac.autotry_restart = TRUE;
   5018 	hw->mac.ops.setup_link(hw, speed, TRUE);
   5019 	adapter->advertise = advertise;
   5020 
   5021 	return (0);
   5022 } /* ixgbe_set_advertise */
   5023 
   5024 /************************************************************************
   5025  * ixgbe_get_advertise - Get current advertised speed settings
   5026  *
   5027  *   Formatted for sysctl usage.
   5028  *   Flags:
   5029  *     0x01 - advertise 100 Mb
   5030  *     0x02 - advertise 1G
   5031  *     0x04 - advertise 10G
   5032  *     0x08 - advertise 10 Mb (yes, Mb)
   5033  *     0x10 - advertise 2.5G
   5034  *     0x20 - advertise 5G
   5035  ************************************************************************/
   5036 static int
   5037 ixgbe_get_advertise(struct adapter *adapter)
   5038 {
   5039 	struct ixgbe_hw  *hw = &adapter->hw;
   5040 	int              speed;
   5041 	ixgbe_link_speed link_caps = 0;
   5042 	s32              err;
   5043 	bool             negotiate = FALSE;
   5044 
   5045 	/*
   5046 	 * Advertised speed means nothing unless it's copper or
   5047 	 * multi-speed fiber
   5048 	 */
   5049 	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
   5050 	    !(hw->phy.multispeed_fiber))
   5051 		return (0);
   5052 
   5053 	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
   5054 	if (err != IXGBE_SUCCESS)
   5055 		return (0);
   5056 
   5057 	speed =
   5058 	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL)  ? 0x04 : 0) |
   5059 	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)   ? 0x02 : 0) |
   5060 	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)   ? 0x01 : 0) |
   5061 	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)    ? 0x08 : 0) |
   5062 	    ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
   5063 	    ((link_caps & IXGBE_LINK_SPEED_5GB_FULL)   ? 0x20 : 0);
   5064 
   5065 	return speed;
   5066 } /* ixgbe_get_advertise */
   5067 
   5068 /************************************************************************
   5069  * ixgbe_sysctl_dmac - Manage DMA Coalescing
   5070  *
   5071  *   Control values:
   5072  *     0/1 - off / on (use default value of 1000)
   5073  *
   5074  *     Legal timer values are:
   5075  *     50,100,250,500,1000,2000,5000,10000
   5076  *
   5077  *     Turning off interrupt moderation will also turn this off.
   5078  ************************************************************************/
   5079 static int
   5080 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   5081 {
   5082 	struct sysctlnode node = *rnode;
   5083 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5084 	struct ifnet   *ifp = adapter->ifp;
   5085 	int            error;
   5086 	int            newval;
   5087 
   5088 	newval = adapter->dmac;
   5089 	node.sysctl_data = &newval;
   5090 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5091 	if ((error) || (newp == NULL))
   5092 		return (error);
   5093 
   5094 	switch (newval) {
   5095 	case 0:
   5096 		/* Disabled */
   5097 		adapter->dmac = 0;
   5098 		break;
   5099 	case 1:
   5100 		/* Enable and use default */
   5101 		adapter->dmac = 1000;
   5102 		break;
   5103 	case 50:
   5104 	case 100:
   5105 	case 250:
   5106 	case 500:
   5107 	case 1000:
   5108 	case 2000:
   5109 	case 5000:
   5110 	case 10000:
   5111 		/* Legal values - allow */
   5112 		adapter->dmac = newval;
   5113 		break;
   5114 	default:
   5115 		/* Do nothing, illegal value */
   5116 		return (EINVAL);
   5117 	}
   5118 
   5119 	/* Re-initialize hardware if it's already running */
   5120 	if (ifp->if_flags & IFF_RUNNING)
   5121 		ixgbe_init(ifp);
   5122 
   5123 	return (0);
   5124 }
   5125 
   5126 #ifdef IXGBE_DEBUG
   5127 /************************************************************************
   5128  * ixgbe_sysctl_power_state
   5129  *
   5130  *   Sysctl to test power states
   5131  *   Values:
   5132  *     0      - set device to D0
   5133  *     3      - set device to D3
   5134  *     (none) - get current device power state
   5135  ************************************************************************/
   5136 static int
   5137 ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
   5138 {
   5139 #ifdef notyet
   5140 	struct sysctlnode node = *rnode;
   5141 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5142 	device_t       dev =  adapter->dev;
   5143 	int            curr_ps, new_ps, error = 0;
   5144 
   5145 	curr_ps = new_ps = pci_get_powerstate(dev);
   5146 
   5147 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5148 	if ((error) || (req->newp == NULL))
   5149 		return (error);
   5150 
   5151 	if (new_ps == curr_ps)
   5152 		return (0);
   5153 
   5154 	if (new_ps == 3 && curr_ps == 0)
   5155 		error = DEVICE_SUSPEND(dev);
   5156 	else if (new_ps == 0 && curr_ps == 3)
   5157 		error = DEVICE_RESUME(dev);
   5158 	else
   5159 		return (EINVAL);
   5160 
   5161 	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
   5162 
   5163 	return (error);
   5164 #else
   5165 	return 0;
   5166 #endif
   5167 } /* ixgbe_sysctl_power_state */
   5168 #endif
   5169 
   5170 /************************************************************************
   5171  * ixgbe_sysctl_wol_enable
   5172  *
   5173  *   Sysctl to enable/disable the WoL capability,
   5174  *   if supported by the adapter.
   5175  *
   5176  *   Values:
   5177  *     0 - disabled
   5178  *     1 - enabled
   5179  ************************************************************************/
   5180 static int
   5181 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   5182 {
   5183 	struct sysctlnode node = *rnode;
   5184 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5185 	struct ixgbe_hw *hw = &adapter->hw;
   5186 	bool            new_wol_enabled;
   5187 	int             error = 0;
   5188 
   5189 	new_wol_enabled = hw->wol_enabled;
   5190 	node.sysctl_data = &new_wol_enabled;
   5191 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5192 	if ((error) || (newp == NULL))
   5193 		return (error);
   5194 	if (new_wol_enabled == hw->wol_enabled)
   5195 		return (0);
   5196 
   5197 	if (new_wol_enabled && !adapter->wol_support)
   5198 		return (ENODEV);
   5199 	else
   5200 		hw->wol_enabled = new_wol_enabled;
   5201 
   5202 	return (0);
   5203 } /* ixgbe_sysctl_wol_enable */
   5204 
   5205 /************************************************************************
   5206  * ixgbe_sysctl_wufc - Wake Up Filter Control
   5207  *
   5208  *   Sysctl to enable/disable the types of packets that the
   5209  *   adapter will wake up on upon receipt.
   5210  *   Flags:
   5211  *     0x1  - Link Status Change
   5212  *     0x2  - Magic Packet
   5213  *     0x4  - Direct Exact
   5214  *     0x8  - Directed Multicast
   5215  *     0x10 - Broadcast
   5216  *     0x20 - ARP/IPv4 Request Packet
   5217  *     0x40 - Direct IPv4 Packet
   5218  *     0x80 - Direct IPv6 Packet
   5219  *
   5220  *   Settings not listed above will cause the sysctl to return an error.
   5221  ************************************************************************/
   5222 static int
   5223 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5224 {
   5225 	struct sysctlnode node = *rnode;
   5226 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5227 	int error = 0;
   5228 	u32 new_wufc;
   5229 
   5230 	new_wufc = adapter->wufc;
   5231 	node.sysctl_data = &new_wufc;
   5232 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5233 	if ((error) || (newp == NULL))
   5234 		return (error);
   5235 	if (new_wufc == adapter->wufc)
   5236 		return (0);
   5237 
   5238 	if (new_wufc & 0xffffff00)
   5239 		return (EINVAL);
   5240 
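         	/*
         	 * Only the low byte is settable here; the remaining WUFC bits
         	 * are carried over from the current configuration.
         	 */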
   5241 	new_wufc &= 0xff;
   5242 	new_wufc |= (0xffffff & adapter->wufc);
   5243 	adapter->wufc = new_wufc;
   5244 
   5245 	return (0);
   5246 } /* ixgbe_sysctl_wufc */
   5247 
   5248 #ifdef IXGBE_DEBUG
   5249 /************************************************************************
   5250  * ixgbe_sysctl_print_rss_config
   5251  ************************************************************************/
   5252 static int
   5253 ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
   5254 {
   5255 #ifdef notyet
   5256 	struct sysctlnode node = *rnode;
   5257 	struct adapter  *adapter = (struct adapter *)node.sysctl_data;
   5258 	struct ixgbe_hw *hw = &adapter->hw;
   5259 	device_t        dev = adapter->dev;
   5260 	struct sbuf     *buf;
   5261 	int             error = 0, reta_size;
   5262 	u32             reg;
   5263 
   5264 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
   5265 	if (!buf) {
   5266 		device_printf(dev, "Could not allocate sbuf for output.\n");
   5267 		return (ENOMEM);
   5268 	}
   5269 
   5270 	// TODO: use sbufs to make a string to print out
   5271 	/* Set multiplier for RETA setup and table size based on MAC */
   5272 	switch (adapter->hw.mac.type) {
   5273 	case ixgbe_mac_X550:
   5274 	case ixgbe_mac_X550EM_x:
   5275 	case ixgbe_mac_X550EM_a:
   5276 		reta_size = 128;
   5277 		break;
   5278 	default:
   5279 		reta_size = 32;
   5280 		break;
   5281 	}
   5282 
   5283 	/* Print out the redirection table */
   5284 	sbuf_cat(buf, "\n");
   5285 	for (int i = 0; i < reta_size; i++) {
   5286 		if (i < 32) {
   5287 			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
   5288 			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
   5289 		} else {
   5290 			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
   5291 			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
   5292 		}
   5293 	}
   5294 
   5295 	// TODO: print more config
   5296 
   5297 	error = sbuf_finish(buf);
   5298 	if (error)
   5299 		device_printf(dev, "Error finishing sbuf: %d\n", error);
   5300 
   5301 	sbuf_delete(buf);
   5302 #endif
   5303 	return (0);
   5304 } /* ixgbe_sysctl_print_rss_config */
   5305 #endif /* IXGBE_DEBUG */
   5306 
   5307 /************************************************************************
   5308  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
   5309  *
   5310  *   For X552/X557-AT devices using an external PHY
   5311  ************************************************************************/
   5312 static int
   5313 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   5314 {
   5315 	struct sysctlnode node = *rnode;
   5316 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5317 	struct ixgbe_hw *hw = &adapter->hw;
   5318 	int val;
   5319 	u16 reg;
   5320 	int		error;
   5321 
   5322 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5323 		device_printf(adapter->dev,
   5324 		    "Device has no supported external thermal sensor.\n");
   5325 		return (ENODEV);
   5326 	}
   5327 
   5328 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   5329 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5330 		device_printf(adapter->dev,
   5331 		    "Error reading from PHY's current temperature register\n");
   5332 		return (EAGAIN);
   5333 	}
   5334 
   5335 	node.sysctl_data = &val;
   5336 
   5337 	/* Shift temp for output */
   5338 	val = reg >> 8;
   5339 
   5340 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5341 	if ((error) || (newp == NULL))
   5342 		return (error);
   5343 
   5344 	return (0);
   5345 } /* ixgbe_sysctl_phy_temp */
   5346 
   5347 /************************************************************************
   5348  * ixgbe_sysctl_phy_overtemp_occurred
   5349  *
   5350  *   Reports (directly from the PHY) whether the current PHY
   5351  *   temperature is over the overtemp threshold.
   5352  ************************************************************************/
   5353 static int
   5354 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   5355 {
   5356 	struct sysctlnode node = *rnode;
   5357 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   5358 	struct ixgbe_hw *hw = &adapter->hw;
   5359 	int val, error;
   5360 	u16 reg;
   5361 
   5362 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   5363 		device_printf(adapter->dev,
   5364 		    "Device has no supported external thermal sensor.\n");
   5365 		return (ENODEV);
   5366 	}
   5367 
   5368 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   5369 		IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
   5370 		device_printf(adapter->dev,
   5371 		    "Error reading from PHY's temperature status register\n");
   5372 		return (EAGAIN);
   5373 	}
   5374 
   5375 	node.sysctl_data = &val;
   5376 
   5377 	/* Get occurrence bit */
   5378 	val = !!(reg & 0x4000);
   5379 
   5380 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5381 	if ((error) || (newp == NULL))
   5382 		return (error);
   5383 
   5384 	return (0);
   5385 } /* ixgbe_sysctl_phy_overtemp_occurred */
   5386 
   5387 /************************************************************************
   5388  * ixgbe_sysctl_eee_state
   5389  *
   5390  *   Sysctl to set EEE power saving feature
   5391  *   Values:
   5392  *     0      - disable EEE
   5393  *     1      - enable EEE
   5394  *     (none) - get current device EEE state
   5395  ************************************************************************/
   5396 static int
   5397 ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
   5398 {
   5399 	struct sysctlnode node = *rnode;
   5400 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5401 	struct ifnet   *ifp = adapter->ifp;
   5402 	device_t       dev = adapter->dev;
   5403 	int            curr_eee, new_eee, error = 0;
   5404 	s32            retval;
   5405 
   5406 	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
   5407 	node.sysctl_data = &new_eee;
   5408 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5409 	if ((error) || (newp == NULL))
   5410 		return (error);
   5411 
   5412 	/* Nothing to do */
   5413 	if (new_eee == curr_eee)
   5414 		return (0);
   5415 
   5416 	/* Not supported */
   5417 	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
   5418 		return (EINVAL);
   5419 
   5420 	/* Bounds checking */
   5421 	if ((new_eee < 0) || (new_eee > 1))
   5422 		return (EINVAL);
   5423 
   5424 	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
   5425 	if (retval) {
   5426 		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
   5427 		return (EINVAL);
   5428 	}
   5429 
   5430 	/* Restart auto-neg */
   5431 	ixgbe_init(ifp);
   5432 
   5433 	device_printf(dev, "New EEE state: %d\n", new_eee);
   5434 
   5435 	/* Cache new value */
   5436 	if (new_eee)
   5437 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5438 	else
   5439 		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
   5440 
   5441 	return (error);
   5442 } /* ixgbe_sysctl_eee_state */
   5443 
   5444 /************************************************************************
   5445  * ixgbe_init_device_features
   5446  ************************************************************************/
   5447 static void
   5448 ixgbe_init_device_features(struct adapter *adapter)
   5449 {
   5450 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   5451 	                  | IXGBE_FEATURE_RSS
   5452 	                  | IXGBE_FEATURE_MSI
   5453 	                  | IXGBE_FEATURE_MSIX
   5454 	                  | IXGBE_FEATURE_LEGACY_IRQ
   5455 	                  | IXGBE_FEATURE_LEGACY_TX;
   5456 
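         	/*
         	 * feat_cap records what this MAC can do; feat_en is the subset
         	 * actually enabled further below.
         	 */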
   5457 	/* Set capabilities first... */
   5458 	switch (adapter->hw.mac.type) {
   5459 	case ixgbe_mac_82598EB:
   5460 		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
   5461 			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
   5462 		break;
   5463 	case ixgbe_mac_X540:
   5464 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5465 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5466 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
   5467 		    (adapter->hw.bus.func == 0))
   5468 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5469 		break;
   5470 	case ixgbe_mac_X550:
   5471 		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5472 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5473 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5474 		break;
   5475 	case ixgbe_mac_X550EM_x:
   5476 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5477 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5478 		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
   5479 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5480 		break;
   5481 	case ixgbe_mac_X550EM_a:
   5482 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5483 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5484 		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5485 		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
   5486 		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
   5487 			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
   5488 			adapter->feat_cap |= IXGBE_FEATURE_EEE;
   5489 		}
   5490 		break;
   5491 	case ixgbe_mac_82599EB:
   5492 		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
   5493 		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
   5494 		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
   5495 		    (adapter->hw.bus.func == 0))
   5496 			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
   5497 		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
   5498 			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
   5499 		break;
   5500 	default:
   5501 		break;
   5502 	}
   5503 
   5504 	/* Enabled by default... */
   5505 	/* Fan failure detection */
   5506 	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
   5507 		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
   5508 	/* Netmap */
   5509 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   5510 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   5511 	/* EEE */
   5512 	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
   5513 		adapter->feat_en |= IXGBE_FEATURE_EEE;
   5514 	/* Thermal Sensor */
   5515 	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
   5516 		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
   5517 
   5518 	/* Enabled via global sysctl... */
   5519 	/* Flow Director */
   5520 	if (ixgbe_enable_fdir) {
   5521 		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
   5522 			adapter->feat_en |= IXGBE_FEATURE_FDIR;
   5523 		else
    5524 			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
   5525 	}
   5526 	/* Legacy (single queue) transmit */
   5527 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   5528 	    ixgbe_enable_legacy_tx)
   5529 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   5530 	/*
   5531 	 * Message Signal Interrupts - Extended (MSI-X)
   5532 	 * Normal MSI is only enabled if MSI-X calls fail.
   5533 	 */
   5534 	if (!ixgbe_enable_msix)
   5535 		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
   5536 	/* Receive-Side Scaling (RSS) */
   5537 	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
   5538 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   5539 
   5540 	/* Disable features with unmet dependencies... */
   5541 	/* No MSI-X */
   5542 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
   5543 		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   5544 		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   5545 		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
   5546 		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
   5547 	}
   5548 } /* ixgbe_init_device_features */
   5549 
   5550 /************************************************************************
   5551  * ixgbe_probe - Device identification routine
   5552  *
   5553  *   Determines if the driver should be loaded on
   5554  *   adapter based on its PCI vendor/device ID.
   5555  *
   5556  *   return BUS_PROBE_DEFAULT on success, positive on failure
   5557  ************************************************************************/
   5558 static int
   5559 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
   5560 {
   5561 	const struct pci_attach_args *pa = aux;
   5562 
   5563 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
   5564 }
   5565 
   5566 static ixgbe_vendor_info_t *
   5567 ixgbe_lookup(const struct pci_attach_args *pa)
   5568 {
   5569 	ixgbe_vendor_info_t *ent;
   5570 	pcireg_t subid;
   5571 
   5572 	INIT_DEBUGOUT("ixgbe_lookup: begin");
   5573 
   5574 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
   5575 		return NULL;
   5576 
   5577 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
   5578 
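         	/* A zero subvendor/subdevice ID in the table acts as a wildcard */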
   5579 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
   5580 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
   5581 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
   5582 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
   5583 			(ent->subvendor_id == 0)) &&
   5584 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
   5585 			(ent->subdevice_id == 0))) {
   5586 			++ixgbe_total_ports;
   5587 			return ent;
   5588 		}
   5589 	}
   5590 	return NULL;
   5591 }
   5592 
   5593 static int
   5594 ixgbe_ifflags_cb(struct ethercom *ec)
   5595 {
   5596 	struct ifnet *ifp = &ec->ec_if;
   5597 	struct adapter *adapter = ifp->if_softc;
   5598 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   5599 
   5600 	IXGBE_CORE_LOCK(adapter);
   5601 
   5602 	if (change != 0)
   5603 		adapter->if_flags = ifp->if_flags;
   5604 
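         	/*
         	 * ENETRESET asks the caller to reinitialize the interface;
         	 * promiscuous/allmulti changes are applied directly here.
         	 */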
   5605 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   5606 		rc = ENETRESET;
   5607 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   5608 		ixgbe_set_promisc(adapter);
   5609 
   5610 	/* Set up VLAN support and filter */
   5611 	ixgbe_setup_vlan_hw_support(adapter);
   5612 
   5613 	IXGBE_CORE_UNLOCK(adapter);
   5614 
   5615 	return rc;
   5616 }
   5617 
   5618 /************************************************************************
   5619  * ixgbe_ioctl - Ioctl entry point
   5620  *
   5621  *   Called when the user wants to configure the interface.
   5622  *
   5623  *   return 0 on success, positive on failure
   5624  ************************************************************************/
   5625 static int
   5626 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
   5627 {
   5628 	struct adapter	*adapter = ifp->if_softc;
   5629 	struct ixgbe_hw *hw = &adapter->hw;
   5630 	struct ifcapreq *ifcr = data;
   5631 	struct ifreq	*ifr = data;
   5632 	int             error = 0;
   5633 	int l4csum_en;
   5634 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   5635 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   5636 
   5637 	switch (command) {
   5638 	case SIOCSIFFLAGS:
   5639 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   5640 		break;
   5641 	case SIOCADDMULTI:
   5642 	case SIOCDELMULTI:
   5643 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   5644 		break;
   5645 	case SIOCSIFMEDIA:
   5646 	case SIOCGIFMEDIA:
   5647 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   5648 		break;
   5649 	case SIOCSIFCAP:
   5650 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   5651 		break;
   5652 	case SIOCSIFMTU:
   5653 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   5654 		break;
   5655 #ifdef __NetBSD__
   5656 	case SIOCINITIFADDR:
   5657 		IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
   5658 		break;
   5659 	case SIOCGIFFLAGS:
   5660 		IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
   5661 		break;
   5662 	case SIOCGIFAFLAG_IN:
   5663 		IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
   5664 		break;
   5665 	case SIOCGIFADDR:
   5666 		IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
   5667 		break;
   5668 	case SIOCGIFMTU:
   5669 		IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
   5670 		break;
   5671 	case SIOCGIFCAP:
   5672 		IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
   5673 		break;
   5674 	case SIOCGETHERCAP:
   5675 		IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
   5676 		break;
   5677 	case SIOCGLIFADDR:
   5678 		IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
   5679 		break;
   5680 	case SIOCZIFDATA:
   5681 		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
   5682 		hw->mac.ops.clear_hw_cntrs(hw);
   5683 		ixgbe_clear_evcnt(adapter);
   5684 		break;
   5685 	case SIOCAIFADDR:
   5686 		IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
   5687 		break;
   5688 #endif
   5689 	default:
   5690 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   5691 		break;
   5692 	}
   5693 
   5694 	switch (command) {
   5695 	case SIOCSIFMEDIA:
   5696 	case SIOCGIFMEDIA:
   5697 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   5698 	case SIOCGI2C:
   5699 	{
   5700 		struct ixgbe_i2c_req	i2c;
   5701 
   5702 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
   5703 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
   5704 		if (error != 0)
   5705 			break;
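		/* 0xA0 = SFP ID EEPROM, 0xA2 = SFP diagnostics (SFF-8472) */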
   5706 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
   5707 			error = EINVAL;
   5708 			break;
   5709 		}
   5710 		if (i2c.len > sizeof(i2c.data)) {
   5711 			error = EINVAL;
   5712 			break;
   5713 		}
   5714 
   5715 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
   5716 		    i2c.dev_addr, i2c.data);
   5717 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
   5718 		break;
   5719 	}
   5720 	case SIOCSIFCAP:
   5721 		/* Layer-4 Rx checksum offload has to be turned on and
   5722 		 * off as a unit.
   5723 		 */
   5724 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   5725 		if (l4csum_en != l4csum && l4csum_en != 0)
   5726 			return EINVAL;
   5727 		/*FALLTHROUGH*/
   5728 	case SIOCADDMULTI:
   5729 	case SIOCDELMULTI:
   5730 	case SIOCSIFFLAGS:
   5731 	case SIOCSIFMTU:
   5732 	default:
   5733 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   5734 			return error;
   5735 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   5736 			;
   5737 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   5738 			IXGBE_CORE_LOCK(adapter);
   5739 			ixgbe_init_locked(adapter);
   5740 			ixgbe_recalculate_max_frame(adapter);
   5741 			IXGBE_CORE_UNLOCK(adapter);
   5742 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   5743 			/*
   5744 			 * Multicast list has changed; set the hardware filter
   5745 			 * accordingly.
   5746 			 */
   5747 			IXGBE_CORE_LOCK(adapter);
   5748 			ixgbe_disable_intr(adapter);
   5749 			ixgbe_set_multi(adapter);
   5750 			ixgbe_enable_intr(adapter);
   5751 			IXGBE_CORE_UNLOCK(adapter);
   5752 		}
   5753 		return 0;
   5754 	}
   5755 
   5756 	return error;
   5757 } /* ixgbe_ioctl */
   5758 
   5759 /************************************************************************
   5760  * ixgbe_check_fan_failure
   5761  ************************************************************************/
   5762 static void
   5763 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
   5764 {
   5765 	u32 mask;
   5766 
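         	/*
         	 * Fan failure is signalled on SDP1: as an EICR bit when called
         	 * from the interrupt path, as an ESDP bit otherwise.
         	 */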
   5767 	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
   5768 	    IXGBE_ESDP_SDP1;
   5769 
   5770 	if (reg & mask)
   5771 		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
   5772 } /* ixgbe_check_fan_failure */
   5773 
   5774 /************************************************************************
   5775  * ixgbe_handle_que
   5776  ************************************************************************/
   5777 static void
   5778 ixgbe_handle_que(void *context)
   5779 {
   5780 	struct ix_queue *que = context;
   5781 	struct adapter  *adapter = que->adapter;
   5782 	struct tx_ring  *txr = que->txr;
   5783 	struct ifnet    *ifp = adapter->ifp;
   5784 	bool		more = false;
   5785 
   5786 	adapter->handleq.ev_count++;
   5787 
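         	/* Deferred per-queue Rx/Tx cleanup, run in softint context */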
   5788 	if (ifp->if_flags & IFF_RUNNING) {
   5789 		more = ixgbe_rxeof(que);
   5790 		IXGBE_TX_LOCK(txr);
   5791 		more |= ixgbe_txeof(txr);
   5792 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5793 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   5794 				ixgbe_mq_start_locked(ifp, txr);
   5795 		/* Only for queue 0 */
   5796 		/* NetBSD still needs this for CBQ */
   5797 		if ((&adapter->queues[0] == que)
   5798 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   5799 			ixgbe_legacy_start_locked(ifp, txr);
   5800 		IXGBE_TX_UNLOCK(txr);
   5801 	}
   5802 
   5803 	if (more)
   5804 		softint_schedule(que->que_si);
   5805 	else if (que->res != NULL) {
   5806 		/* Re-enable this interrupt */
   5807 		ixgbe_enable_queue(adapter, que->msix);
   5808 	} else
   5809 		ixgbe_enable_intr(adapter);
   5810 
   5811 	return;
   5812 } /* ixgbe_handle_que */
   5813 
   5814 /************************************************************************
   5815  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
   5816  ************************************************************************/
   5817 static int
   5818 ixgbe_allocate_legacy(struct adapter *adapter,
   5819     const struct pci_attach_args *pa)
   5820 {
   5821 	device_t	dev = adapter->dev;
   5822 	struct ix_queue *que = adapter->queues;
   5823 	struct tx_ring  *txr = adapter->tx_rings;
   5824 	int		counts[PCI_INTR_TYPE_SIZE];
   5825 	pci_intr_type_t intr_type, max_type;
   5826 	char            intrbuf[PCI_INTRSTR_LEN];
   5827 	const char	*intrstr = NULL;
   5828 
   5829 	/* We allocate a single interrupt resource */
   5830 	max_type = PCI_INTR_TYPE_MSI;
   5831 	counts[PCI_INTR_TYPE_MSIX] = 0;
   5832 	counts[PCI_INTR_TYPE_MSI] =
   5833 	    (adapter->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
    5834 	/* Check feat_cap, not feat_en, so we can fall back to INTx */
   5835 	counts[PCI_INTR_TYPE_INTX] =
   5836 	    (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
   5837 
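         	/* Prefer MSI; if it cannot be established, retry below with INTx */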
   5838 alloc_retry:
   5839 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   5840 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   5841 		return ENXIO;
   5842 	}
   5843 	adapter->osdep.nintrs = 1;
   5844 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   5845 	    intrbuf, sizeof(intrbuf));
   5846 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   5847 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   5848 	    device_xname(dev));
   5849 	intr_type = pci_intr_type(adapter->osdep.pc, adapter->osdep.intrs[0]);
   5850 	if (adapter->osdep.ihs[0] == NULL) {
    5851 		aprint_error_dev(dev, "unable to establish %s\n",
   5852 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5853 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5854 		adapter->osdep.intrs = NULL;
   5855 		switch (intr_type) {
   5856 		case PCI_INTR_TYPE_MSI:
   5857 			/* The next try is for INTx: Disable MSI */
   5858 			max_type = PCI_INTR_TYPE_INTX;
   5859 			counts[PCI_INTR_TYPE_INTX] = 1;
   5860 			adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5861 			if (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
   5862 				adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5863 				goto alloc_retry;
   5864 			} else
   5865 				break;
   5866 		case PCI_INTR_TYPE_INTX:
   5867 		default:
   5868 			/* See below */
   5869 			break;
   5870 		}
   5871 	}
   5872 	if (intr_type == PCI_INTR_TYPE_INTX) {
   5873 		adapter->feat_en &= ~IXGBE_FEATURE_MSI;
   5874 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   5875 	}
   5876 	if (adapter->osdep.ihs[0] == NULL) {
   5877 		aprint_error_dev(dev,
   5878 		    "couldn't establish interrupt%s%s\n",
   5879 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   5880 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   5881 		adapter->osdep.intrs = NULL;
   5882 		return ENXIO;
   5883 	}
   5884 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   5885 	/*
   5886 	 * Try allocating a fast interrupt and the associated deferred
   5887 	 * processing contexts.
   5888 	 */
   5889 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   5890 		txr->txr_si =
   5891 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5892 			ixgbe_deferred_mq_start, txr);
   5893 	que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   5894 	    ixgbe_handle_que, que);
   5895 
   5896 	if ((!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
   5897 		&& (txr->txr_si == NULL)) || (que->que_si == NULL)) {
   5898 		aprint_error_dev(dev,
   5899 		    "could not establish software interrupts\n");
   5900 
   5901 		return ENXIO;
   5902 	}
   5903 	/* For simplicity in the handlers */
   5904 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   5905 
   5906 	return (0);
   5907 } /* ixgbe_allocate_legacy */
   5908 
   5909 
   5910 /************************************************************************
   5911  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
   5912  ************************************************************************/
   5913 static int
   5914 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   5915 {
   5916 	device_t        dev = adapter->dev;
   5917 	struct ix_queue *que = adapter->queues;
   5918 	struct tx_ring  *txr = adapter->tx_rings;
   5919 	pci_chipset_tag_t pc;
   5920 	char		intrbuf[PCI_INTRSTR_LEN];
   5921 	char		intr_xname[32];
   5922 	const char	*intrstr = NULL;
   5923 	int 		error, vector = 0;
   5924 	int		cpu_id = 0;
   5925 	kcpuset_t	*affinity;
   5926 #ifdef RSS
   5927 	unsigned int    rss_buckets = 0;
   5928 	kcpuset_t	cpu_mask;
   5929 #endif
   5930 
   5931 	pc = adapter->osdep.pc;
   5932 #ifdef	RSS
   5933 	/*
   5934 	 * If we're doing RSS, the number of queues needs to
   5935 	 * match the number of RSS buckets that are configured.
   5936 	 *
   5937 	 * + If there's more queues than RSS buckets, we'll end
   5938 	 *   up with queues that get no traffic.
   5939 	 *
   5940 	 * + If there's more RSS buckets than queues, we'll end
   5941 	 *   up having multiple RSS buckets map to the same queue,
   5942 	 *   so there'll be some contention.
   5943 	 */
   5944 	rss_buckets = rss_getnumbuckets();
   5945 	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
   5946 	    (adapter->num_queues != rss_buckets)) {
   5947 		device_printf(dev,
   5948 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   5949 		    "; performance will be impacted.\n",
   5950 		    __func__, adapter->num_queues, rss_buckets);
   5951 	}
   5952 #endif
   5953 
   5954 	adapter->osdep.nintrs = adapter->num_queues + 1;
   5955 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   5956 	    adapter->osdep.nintrs) != 0) {
   5957 		aprint_error_dev(dev,
   5958 		    "failed to allocate MSI-X interrupt\n");
   5959 		return (ENXIO);
   5960 	}
   5961 
   5962 	kcpuset_create(&affinity, false);
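        	/*
        	 * Set up one MSI-X vector per queue pair (TX/RX) and bind each,
        	 * round-robin, to a CPU; the final vector is reserved for link
        	 * events and is set up after this loop.
        	 */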
   5963 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   5964 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   5965 		    device_xname(dev), i);
   5966 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   5967 		    sizeof(intrbuf));
   5968 #ifdef IXGBE_MPSAFE
   5969 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   5970 		    true);
   5971 #endif
   5972 		/* Set the handler function */
   5973 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   5974 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   5975 		    intr_xname);
   5976 		if (que->res == NULL) {
   5977 			aprint_error_dev(dev,
   5978 			    "Failed to register QUE handler\n");
   5979 			error = ENXIO;
   5980 			goto err_out;
   5981 		}
   5982 		que->msix = vector;
   5983 		adapter->active_queues |= ((u64)1 << que->msix);
   5984 
   5985 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   5986 #ifdef	RSS
   5987 			/*
   5988 			 * The queue ID is used as the RSS layer bucket ID.
   5989 			 * We look up the queue ID -> RSS CPU ID and select
   5990 			 * that.
   5991 			 */
   5992 			cpu_id = rss_getcpu(i % rss_getnumbuckets());
   5993 			CPU_SETOF(cpu_id, &cpu_mask);
   5994 #endif
   5995 		} else {
   5996 			/*
   5997 			 * Bind the MSI-X vector, and thus the
   5998 			 * rings to the corresponding CPU.
   5999 			 *
   6000 			 * This just happens to match the default RSS
   6001 			 * round-robin bucket -> queue -> CPU allocation.
   6002 			 */
   6003 			if (adapter->num_queues > 1)
   6004 				cpu_id = i;
   6005 		}
   6006 		/* Round-robin affinity */
   6007 		kcpuset_zero(affinity);
   6008 		kcpuset_set(affinity, cpu_id % ncpu);
   6009 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   6010 		    NULL);
   6011 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   6012 		    intrstr);
   6013 		if (error == 0) {
   6014 #if 1 /* def IXGBE_DEBUG */
   6015 #ifdef	RSS
   6016 			aprint_normal(", bound RSS bucket %d to CPU %d", i,
   6017 			    cpu_id % ncpu);
   6018 #else
   6019 			aprint_normal(", bound queue %d to cpu %d", i,
   6020 			    cpu_id % ncpu);
   6021 #endif
   6022 #endif /* IXGBE_DEBUG */
   6023 		}
   6024 		aprint_normal("\n");
   6025 
   6026 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   6027 			txr->txr_si = softint_establish(
   6028 				SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6029 				ixgbe_deferred_mq_start, txr);
   6030 			if (txr->txr_si == NULL) {
   6031 				aprint_error_dev(dev,
   6032 				    "couldn't establish software interrupt\n");
   6033 				error = ENXIO;
   6034 				goto err_out;
   6035 			}
   6036 		}
   6037 		que->que_si
   6038 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6039 			ixgbe_handle_que, que);
   6040 		if (que->que_si == NULL) {
   6041 			aprint_error_dev(dev,
   6042 			    "couldn't establish software interrupt\n");
   6043 			error = ENXIO;
   6044 			goto err_out;
   6045 		}
   6046 	}
   6047 
   6048 	/* and Link */
   6049 	cpu_id++;
   6050 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   6051 	adapter->vector = vector;
   6052 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   6053 	    sizeof(intrbuf));
   6054 #ifdef IXGBE_MPSAFE
   6055 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   6056 	    true);
   6057 #endif
   6058 	/* Set the link handler function */
   6059 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   6060 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   6061 	    intr_xname);
   6062 	if (adapter->osdep.ihs[vector] == NULL) {
   6063 		adapter->res = NULL;
   6064 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   6065 		error = ENXIO;
   6066 		goto err_out;
   6067 	}
   6068 	/* Round-robin affinity */
   6069 	kcpuset_zero(affinity);
   6070 	kcpuset_set(affinity, cpu_id % ncpu);
   6071 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   6072 	    NULL);
   6073 
   6074 	aprint_normal_dev(dev,
   6075 	    "for link, interrupting at %s", intrstr);
   6076 	if (error == 0)
   6077 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   6078 	else
   6079 		aprint_normal("\n");
   6080 
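        	/* With SR-IOV, VF mailbox events are deferred to a softint. */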
   6081 	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
   6082 		adapter->mbx_si =
   6083 		    softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   6084 			ixgbe_handle_mbx, adapter);
   6085 		if (adapter->mbx_si == NULL) {
   6086 			aprint_error_dev(dev,
   6087 			    "could not establish software interrupts\n");
   6088 
   6089 			error = ENXIO;
   6090 			goto err_out;
   6091 		}
   6092 	}
   6093 
   6094 	kcpuset_destroy(affinity);
   6095 	aprint_normal_dev(dev,
   6096 	    "Using MSI-X interrupts with %d vectors\n", vector + 1);
   6097 
   6098 	return (0);
   6099 
   6100 err_out:
   6101 	kcpuset_destroy(affinity);
   6102 	ixgbe_free_softint(adapter);
   6103 	ixgbe_free_pciintr_resources(adapter);
   6104 	return (error);
   6105 } /* ixgbe_allocate_msix */
   6106 
   6107 /************************************************************************
   6108  * ixgbe_configure_interrupts
   6109  *
   6110  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
   6111  *   This will also depend on user settings.
   6112  ************************************************************************/
   6113 static int
   6114 ixgbe_configure_interrupts(struct adapter *adapter)
   6115 {
   6116 	device_t dev = adapter->dev;
   6117 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
   6118 	int want, queues, msgs;
   6119 
   6120 	/* Default to 1 queue if MSI-X setup fails */
   6121 	adapter->num_queues = 1;
   6122 
   6123 	/* Override by tuneable */
   6124 	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
   6125 		goto msi;
   6126 
   6127 	/*
   6128 	 * NetBSD only: Use single vector MSI when the number of CPUs is 1
   6129 	 * to save an interrupt slot.
   6130 	 */
   6131 	if (ncpu == 1)
   6132 		goto msi;
   6133 
   6134 	/* First try MSI-X */
   6135 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   6136 	msgs = MIN(msgs, IXG_MAX_NINTR);
   6137 	if (msgs < 2)
   6138 		goto msi;
   6139 
   6140 	adapter->msix_mem = (void *)1; /* XXX */
   6141 
   6142 	/* Figure out a reasonable auto config value */
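        	/* One vector is reserved for the link interrupt, hence msgs - 1. */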
   6143 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   6144 
   6145 #ifdef	RSS
   6146 	/* If we're doing RSS, clamp at the number of RSS buckets */
   6147 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   6148 		queues = min(queues, rss_getnumbuckets());
   6149 #endif
   6150 	if (ixgbe_num_queues > queues) {
   6151 		aprint_error_dev(adapter->dev, "ixgbe_num_queues (%d) is "
        		    "too large, using reduced amount (%d).\n",
        		    ixgbe_num_queues, queues);
   6152 		ixgbe_num_queues = queues;
   6153 	}
   6154 
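        	/*
        	 * Honor an explicit ixgbe_num_queues tunable; otherwise clamp
        	 * to what the MAC supports.
        	 */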
   6155 	if (ixgbe_num_queues != 0)
   6156 		queues = ixgbe_num_queues;
   6157 	else
   6158 		queues = min(queues,
   6159 		    min(mac->max_tx_queues, mac->max_rx_queues));
   6160 
   6161 	/* reflect correct sysctl value */
   6162 	ixgbe_num_queues = queues;
   6163 
   6164 	/*
   6165 	 * Want one vector (RX/TX pair) per queue
   6166 	 * plus an additional for Link.
   6167 	 */
   6168 	want = queues + 1;
   6169 	if (msgs >= want)
   6170 		msgs = want;
   6171 	else {
   6172 		aprint_error_dev(dev, "MSI-X Configuration Problem, "
   6173 		    "%d vectors available but %d wanted!\n",
   6174 		    msgs, want);
   6175 		goto msi;
   6176 	}
   6177 	adapter->num_queues = queues;
   6178 	adapter->feat_en |= IXGBE_FEATURE_MSIX;
   6179 	return (0);
   6180 
   6181 	/*
   6182 	 * MSI-X allocation failed or provided us with
   6183 	 * fewer vectors than needed, so fall back and
   6184 	 * try enabling MSI instead.
   6185 	 */
   6186 msi:
   6187 	/* Without MSI-X, some features are no longer supported */
   6188 	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
   6189 	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
   6190 	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
   6191 	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
   6192 
   6193 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
   6194 	adapter->msix_mem = NULL; /* XXX */
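        	/* Use a single MSI vector if the device advertises any. */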
   6195 	if (msgs > 1)
   6196 		msgs = 1;
   6197 	if (msgs != 0) {
   6198 		msgs = 1;
   6199 		adapter->feat_en |= IXGBE_FEATURE_MSI;
   6200 		return (0);
   6201 	}
   6202 
   6203 	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
   6204 		aprint_error_dev(dev,
   6205 		    "Device does not support legacy interrupts.\n");
   6206 		return 1;
   6207 	}
   6208 
   6209 	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
   6210 
   6211 	return (0);
   6212 } /* ixgbe_configure_interrupts */
   6213 
   6214 
   6215 /************************************************************************
   6216  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
   6217  *
   6218  *   Done outside of interrupt context since the driver might sleep
   6219  ************************************************************************/
   6220 static void
   6221 ixgbe_handle_link(void *context)
   6222 {
   6223 	struct adapter  *adapter = context;
   6224 	struct ixgbe_hw *hw = &adapter->hw;
   6225 
   6226 	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
   6227 	ixgbe_update_link_status(adapter);
   6228 
   6229 	/* Re-enable link interrupts */
   6230 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
   6231 } /* ixgbe_handle_link */
   6232 
   6233 /************************************************************************
   6234  * ixgbe_rearm_queues
   6235  ************************************************************************/
   6236 static void
   6237 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   6238 {
   6239 	u32 mask;
   6240 
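        	/*
        	 * 82598 has a single 32-bit EICS register, while later MACs
        	 * split the 64-bit queue mask across EICS_EX(0) and EICS_EX(1).
        	 */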
   6241 	switch (adapter->hw.mac.type) {
   6242 	case ixgbe_mac_82598EB:
   6243 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   6244 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   6245 		break;
   6246 	case ixgbe_mac_82599EB:
   6247 	case ixgbe_mac_X540:
   6248 	case ixgbe_mac_X550:
   6249 	case ixgbe_mac_X550EM_x:
   6250 	case ixgbe_mac_X550EM_a:
   6251 		mask = (queues & 0xFFFFFFFF);
   6252 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   6253 		mask = (queues >> 32);
   6254 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   6255 		break;
   6256 	default:
   6257 		break;
   6258 	}
   6259 } /* ixgbe_rearm_queues */
   6260