      1 /******************************************************************************
      2 
      3   Copyright (c) 2001-2015, Intel Corporation
      4   All rights reserved.
      5 
      6   Redistribution and use in source and binary forms, with or without
      7   modification, are permitted provided that the following conditions are met:
      8 
      9    1. Redistributions of source code must retain the above copyright notice,
     10       this list of conditions and the following disclaimer.
     11 
     12    2. Redistributions in binary form must reproduce the above copyright
     13       notice, this list of conditions and the following disclaimer in the
     14       documentation and/or other materials provided with the distribution.
     15 
     16    3. Neither the name of the Intel Corporation nor the names of its
     17       contributors may be used to endorse or promote products derived from
     18       this software without specific prior written permission.
     19 
     20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   POSSIBILITY OF SUCH DAMAGE.
     31 
     32 ******************************************************************************/
     33 /*
     34  * Copyright (c) 2011 The NetBSD Foundation, Inc.
     35  * All rights reserved.
     36  *
     37  * This code is derived from software contributed to The NetBSD Foundation
     38  * by Coyote Point Systems, Inc.
     39  *
     40  * Redistribution and use in source and binary forms, with or without
     41  * modification, are permitted provided that the following conditions
     42  * are met:
     43  * 1. Redistributions of source code must retain the above copyright
     44  *    notice, this list of conditions and the following disclaimer.
     45  * 2. Redistributions in binary form must reproduce the above copyright
     46  *    notice, this list of conditions and the following disclaimer in the
     47  *    documentation and/or other materials provided with the distribution.
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     50  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     51  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     52  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     53  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     54  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     55  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     56  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     57  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     58  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     59  * POSSIBILITY OF SUCH DAMAGE.
     60  */
     61 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 282299 2015-05-01 12:10:36Z bz $*/
     62 /*$NetBSD: ixgbe.c,v 1.44 2016/12/01 06:56:28 msaitoh Exp $*/
     63 
     64 #include "opt_inet.h"
     65 #include "opt_inet6.h"
     66 
     67 #include "ixgbe.h"
     68 #include "vlan.h"
     69 
     70 #include <sys/cprng.h>
     71 
     72 /*********************************************************************
     73  *  Set this to one to display debug statistics
     74  *********************************************************************/
     75 int             ixgbe_display_debug_stats = 0;
     76 
     77 /*********************************************************************
     78  *  Driver version
     79  *********************************************************************/
     80 char ixgbe_driver_version[] = "2.8.3";
     81 
     82 /*********************************************************************
     83  *  PCI Device ID Table
     84  *
      85  *  Used by probe to select which devices to load the driver on
     86  *  Last field stores an index into ixgbe_strings
     87  *  Last entry must be all 0s
     88  *
     89  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     90  *********************************************************************/
     91 
     92 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
     93 {
     94 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     95 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     96 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     97 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
     98 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
     99 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
    100 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
    101 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
    102 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
    103 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
    104 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
    105 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
    106 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
    107 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
    108 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
    109 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
    110 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
    111 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
    112 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
    113 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
    114 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
    115 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
    116 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
    117 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
    118 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
    119 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
    120 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
    121 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
    122 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
    123 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
    124 	/* required last entry */
    125 	{0, 0, 0, 0, 0}
    126 };
    127 
    128 /*********************************************************************
    129  *  Table of branding strings
    130  *********************************************************************/
    131 
    132 static const char    *ixgbe_strings[] = {
    133 	"Intel(R) PRO/10GbE PCI-Express Network Driver"
    134 };
    135 
    136 /*********************************************************************
    137  *  Function prototypes
    138  *********************************************************************/
    139 static int      ixgbe_probe(device_t, cfdata_t, void *);
    140 static void     ixgbe_attach(device_t, device_t, void *);
    141 static int      ixgbe_detach(device_t, int);
    142 #if 0
    143 static int      ixgbe_shutdown(device_t);
    144 #endif
    145 static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
    146 static bool	ixgbe_resume(device_t, const pmf_qual_t *);
    147 static int      ixgbe_ioctl(struct ifnet *, u_long, void *);
    148 static void	ixgbe_ifstop(struct ifnet *, int);
    149 static int	ixgbe_init(struct ifnet *);
    150 static void	ixgbe_init_locked(struct adapter *);
    151 static void     ixgbe_stop(void *);
    152 static void	ixgbe_add_media_types(struct adapter *);
    153 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
    154 static int      ixgbe_media_change(struct ifnet *);
    155 static void     ixgbe_identify_hardware(struct adapter *);
    156 static int      ixgbe_allocate_pci_resources(struct adapter *,
    157 		    const struct pci_attach_args *);
    158 static void	ixgbe_get_slot_info(struct ixgbe_hw *);
    159 static int      ixgbe_allocate_msix(struct adapter *,
    160 		    const struct pci_attach_args *);
    161 static int      ixgbe_allocate_legacy(struct adapter *,
    162 		    const struct pci_attach_args *);
    163 static int	ixgbe_setup_msix(struct adapter *);
    164 static void	ixgbe_free_pci_resources(struct adapter *);
    165 static void	ixgbe_local_timer(void *);
    166 static int	ixgbe_setup_interface(device_t, struct adapter *);
    167 static void	ixgbe_config_dmac(struct adapter *);
    168 static void	ixgbe_config_delay_values(struct adapter *);
    169 static void	ixgbe_config_link(struct adapter *);
    170 static void	ixgbe_check_eee_support(struct adapter *);
    171 static void	ixgbe_check_wol_support(struct adapter *);
    172 static int	ixgbe_setup_low_power_mode(struct adapter *);
    173 static void	ixgbe_rearm_queues(struct adapter *, u64);
    174 
    175 static void     ixgbe_initialize_transmit_units(struct adapter *);
    176 static void     ixgbe_initialize_receive_units(struct adapter *);
    177 static void	ixgbe_enable_rx_drop(struct adapter *);
    178 static void	ixgbe_disable_rx_drop(struct adapter *);
    179 
    180 static void     ixgbe_enable_intr(struct adapter *);
    181 static void     ixgbe_disable_intr(struct adapter *);
    182 static void     ixgbe_update_stats_counters(struct adapter *);
    183 static void     ixgbe_set_promisc(struct adapter *);
    184 static void     ixgbe_set_multi(struct adapter *);
    185 static void     ixgbe_update_link_status(struct adapter *);
    186 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
    187 static void	ixgbe_configure_ivars(struct adapter *);
    188 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    189 
    190 static void	ixgbe_setup_vlan_hw_support(struct adapter *);
    191 #if 0
    192 static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
    193 static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
    194 #endif
    195 
    196 static void	ixgbe_add_device_sysctls(struct adapter *);
    197 static void     ixgbe_add_hw_stats(struct adapter *);
    198 
    199 /* Sysctl handlers */
    200 static int	ixgbe_set_flowcntl(SYSCTLFN_PROTO);
    201 static int	ixgbe_set_advertise(SYSCTLFN_PROTO);
    202 static int	ixgbe_sysctl_thermal_test(SYSCTLFN_PROTO);
    203 static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
    204 static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
    205 static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
    206 static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
    207 static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
    208 static int	ixgbe_sysctl_eee_enable(SYSCTLFN_PROTO);
    209 static int	ixgbe_sysctl_eee_negotiated(SYSCTLFN_PROTO);
    210 static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTLFN_PROTO);
    211 static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTLFN_PROTO);
    212 
    213 /* Support for pluggable optic modules */
    214 static bool	ixgbe_sfp_probe(struct adapter *);
    215 static void	ixgbe_setup_optics(struct adapter *);
    216 
     217 /* Legacy (single vector) interrupt handler */
    218 static int	ixgbe_legacy_irq(void *);
    219 
    220 /* The MSI/X Interrupt handlers */
    221 static int	ixgbe_msix_que(void *);
    222 static int	ixgbe_msix_link(void *);
    223 
    224 /* Software interrupts for deferred work */
    225 static void	ixgbe_handle_que(void *);
    226 static void	ixgbe_handle_link(void *);
    227 static void	ixgbe_handle_msf(void *);
    228 static void	ixgbe_handle_mod(void *);
    229 static void	ixgbe_handle_phy(void *);
    230 
    231 const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
    232 static ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
    233 
    234 #ifdef IXGBE_FDIR
    235 static void	ixgbe_reinit_fdir(void *, int);
    236 #endif
    237 
    238 /*********************************************************************
    239  *  FreeBSD Device Interface Entry Points
    240  *********************************************************************/
    241 
    242 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    243     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    244     DVF_DETACH_SHUTDOWN);
    245 
    246 #if 0
    247 devclass_t ix_devclass;
    248 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
    249 
    250 MODULE_DEPEND(ix, pci, 1, 1, 1);
    251 MODULE_DEPEND(ix, ether, 1, 1, 1);
    252 #endif
    253 
    254 /*
    255 ** TUNEABLE PARAMETERS:
    256 */
    257 
    258 /*
    259 ** AIM: Adaptive Interrupt Moderation
    260 ** which means that the interrupt rate
    261 ** is varied over time based on the
    262 ** traffic for that interrupt vector
    263 */
    264 static int ixgbe_enable_aim = TRUE;
    265 #define SYSCTL_INT(__x, __y)
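         /*
          * NB: SYSCTL_INT is defined away just above, so on this NetBSD
          * port the hw.ixgbe.* names below are informational only and
          * these variables act as compile-time defaults.
          */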
    266 SYSCTL_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
    267 
    268 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    269 SYSCTL_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
    270 
    271 /* How many packets rxeof tries to clean at a time */
    272 static int ixgbe_rx_process_limit = 256;
    273 SYSCTL_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
    274 
    275 /* How many packets txeof tries to clean at a time */
    276 static int ixgbe_tx_process_limit = 256;
    277 SYSCTL_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
    278 
    279 /*
     280 ** Smart speed setting, default is on.
     281 ** This only works as a compile-time option
     282 ** right now since it is set during attach;
     283 ** set this to 'ixgbe_smart_speed_off' to
     284 ** disable.
    285 */
    286 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
    287 
    288 /*
    289  * MSIX should be the default for best performance,
    290  * but this allows it to be forced off for testing.
    291  */
    292 static int ixgbe_enable_msix = 1;
    293 SYSCTL_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
    294 
    295 /*
     296  * Number of queues. If set to 0, the number
     297  * is autoconfigured based on the number of
     298  * CPUs, with a maximum of 8. It can be
     299  * overridden manually here.
    300  */
    301 static int ixgbe_num_queues = 1;
    302 SYSCTL_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
    303 
    304 /*
     305 ** Number of TX descriptors per ring; this is
     306 ** set higher than RX as it seems to be the
     307 ** better performing choice.
    308 */
    309 static int ixgbe_txd = PERFORM_TXD;
    310 SYSCTL_INT("hw.ixgbe.txd", &ixgbe_txd);
    311 
    312 /* Number of RX descriptors per ring */
    313 static int ixgbe_rxd = PERFORM_RXD;
    314 SYSCTL_INT("hw.ixgbe.rxd", &ixgbe_rxd);
    315 
    316 /*
     317 ** Setting this to non-zero allows the use
     318 ** of unsupported SFP+ modules; note that
     319 ** if you do so, you are on your own :)
    320 */
    321 static int allow_unsupported_sfp = false;
    322 SYSCTL_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
    323 
     324 /* Keep a running tally of attached ports for sanity checks */
    325 static int ixgbe_total_ports;
    326 
    327 #ifdef IXGBE_FDIR
    328 /*
    329 ** Flow Director actually 'steals'
    330 ** part of the packet buffer as its
     331 ** filter pool; this variable controls
    332 ** how much it uses:
    333 **  0 = 64K, 1 = 128K, 2 = 256K
    334 */
    335 static int fdir_pballoc = 1;
    336 #endif
    337 
    338 #ifdef DEV_NETMAP
    339 /*
    340  * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
    341  * be a reference on how to implement netmap support in a driver.
    342  * Additional comments are in ixgbe_netmap.h .
    343  *
    344  * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
    345  * that extend the standard driver.
    346  */
    347 #include <dev/netmap/ixgbe_netmap.h>
    348 #endif /* DEV_NETMAP */
    349 
    350 /*********************************************************************
    351  *  Device identification routine
    352  *
     353  *  ixgbe_probe determines if the driver should be loaded on the
     354  *  adapter, based on its PCI vendor/device ID.
    355  *
    356  *  return 1 on success, 0 on failure
    357  *********************************************************************/
    358 
    359 static int
    360 ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
    361 {
    362 	const struct pci_attach_args *pa = aux;
    363 
    364 	return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
    365 }
    366 
    367 static ixgbe_vendor_info_t *
    368 ixgbe_lookup(const struct pci_attach_args *pa)
    369 {
    370 	pcireg_t subid;
    371 	ixgbe_vendor_info_t *ent;
    372 
    373 	INIT_DEBUGOUT("ixgbe_probe: begin");
    374 
    375 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    376 		return NULL;
    377 
    378 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    379 
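         	/* A subvendor/subdevice ID of 0 in a table entry acts as a wildcard. */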
    380 	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
    381 		if (PCI_VENDOR(pa->pa_id) == ent->vendor_id &&
    382 		    PCI_PRODUCT(pa->pa_id) == ent->device_id &&
    383 
    384 		    (PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id ||
    385 		     ent->subvendor_id == 0) &&
    386 
    387 		    (PCI_SUBSYS_ID(subid) == ent->subdevice_id ||
    388 		     ent->subdevice_id == 0)) {
    389 			++ixgbe_total_ports;
    390 			return ent;
    391 		}
    392 	}
    393 	return NULL;
    394 }
    395 
    396 
    397 /*********************************************************************
    398  *  Device initialization routine
    399  *
    400  *  The attach entry point is called when the driver is being loaded.
    401  *  This routine identifies the type of hardware, allocates all resources
    402  *  and initializes the hardware.
    403  *
    404  *  return 0 on success, positive on failure
    405  *********************************************************************/
    406 
    407 static void
    408 ixgbe_attach(device_t parent, device_t dev, void *aux)
    409 {
    410 	struct adapter *adapter;
    411 	struct ixgbe_hw *hw;
    412 	int             error = -1;
    413 	u16		csum;
    414 	u32		ctrl_ext;
    415 	ixgbe_vendor_info_t *ent;
    416 	struct pci_attach_args *pa = aux;
    417 
    418 	INIT_DEBUGOUT("ixgbe_attach: begin");
    419 
    420 	/* Allocate, clear, and link in our adapter structure */
    421 	adapter = device_private(dev);
    422 	adapter->dev = adapter->osdep.dev = dev;
    423 	hw = &adapter->hw;
    424 	adapter->osdep.pc = pa->pa_pc;
    425 	adapter->osdep.tag = pa->pa_tag;
    426 	adapter->osdep.dmat = pa->pa_dmat;
    427 	adapter->osdep.attached = false;
    428 
    429 	ent = ixgbe_lookup(pa);
    430 
    431 	KASSERT(ent != NULL);
    432 
    433 	aprint_normal(": %s, Version - %s\n",
    434 	    ixgbe_strings[ent->index], ixgbe_driver_version);
    435 
    436 	/* Core Lock Init*/
    437 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    438 
    439 	/* Set up the timer callout */
    440 	callout_init(&adapter->timer, 0);
    441 
    442 	/* Determine hardware revision */
    443 	ixgbe_identify_hardware(adapter);
    444 
    445 	/* Do base PCI setup - map BAR0 */
    446 	if (ixgbe_allocate_pci_resources(adapter, pa)) {
    447 		aprint_error_dev(dev, "Allocation of PCI resources failed\n");
    448 		error = ENXIO;
    449 		goto err_out;
    450 	}
    451 
    452 	/* Do descriptor calc and sanity checks */
    453 	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    454 	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
    455 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    456 		adapter->num_tx_desc = DEFAULT_TXD;
    457 	} else
    458 		adapter->num_tx_desc = ixgbe_txd;
    459 
    460 	/*
    461 	** With many RX rings it is easy to exceed the
    462 	** system mbuf allocation. Tuning nmbclusters
    463 	** can alleviate this.
    464 	*/
    465 	if (nmbclusters > 0) {
    466 		int s;
    467 		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
    468 		if (s > nmbclusters) {
    469 			aprint_error_dev(dev, "RX Descriptors exceed "
    470 			    "system mbuf max, using default instead!\n");
    471 			ixgbe_rxd = DEFAULT_RXD;
    472 		}
    473 	}
    474 
    475 	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    476 	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
    477 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    478 		adapter->num_rx_desc = DEFAULT_RXD;
    479 	} else
    480 		adapter->num_rx_desc = ixgbe_rxd;
    481 
    482 	/* Allocate our TX/RX Queues */
    483 	if (ixgbe_allocate_queues(adapter)) {
    484 		error = ENOMEM;
    485 		goto err_out;
    486 	}
    487 
    488 	/* Allocate multicast array memory. */
    489 	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
    490 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
    491 	if (adapter->mta == NULL) {
    492 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
    493 		error = ENOMEM;
    494 		goto err_late;
    495 	}
    496 
    497 	/* Initialize the shared code */
    498 	hw->allow_unsupported_sfp = allow_unsupported_sfp;
    499 	error = ixgbe_init_shared_code(hw);
    500 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
    501 		/*
    502 		** No optics in this port, set up
    503 		** so the timer routine will probe
    504 		** for later insertion.
    505 		*/
    506 		adapter->sfp_probe = TRUE;
    507 		error = 0;
    508 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    509 		aprint_error_dev(dev,"Unsupported SFP+ module detected!\n");
    510 		error = EIO;
    511 		goto err_late;
    512 	} else if (error) {
    513 		aprint_error_dev(dev,"Unable to initialize the shared code\n");
    514 		error = EIO;
    515 		goto err_late;
    516 	}
    517 
    518 	/* Make sure we have a good EEPROM before we read from it */
    519 	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
    520 		aprint_error_dev(dev,"The EEPROM Checksum Is Not Valid\n");
    521 		error = EIO;
    522 		goto err_late;
    523 	}
    524 
    525 	error = ixgbe_init_hw(hw);
    526 	switch (error) {
    527 	case IXGBE_ERR_EEPROM_VERSION:
    528 		aprint_error_dev(dev, "This device is a pre-production adapter/"
    529 		    "LOM.  Please be aware there may be issues associated "
    530 		    "with your hardware.\n If you are experiencing problems "
    531 		    "please contact your Intel or hardware representative "
    532 		    "who provided you with this hardware.\n");
    533 		break;
    534 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
    535 		aprint_error_dev(dev,"Unsupported SFP+ Module\n");
    536 		error = EIO;
    537 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
    538 		goto err_late;
    539 	case IXGBE_ERR_SFP_NOT_PRESENT:
    540 		device_printf(dev,"No SFP+ Module found\n");
    541 		/* falls thru */
    542 	default:
    543 		break;
    544 	}
    545 
    546 	/* Detect and set physical type */
    547 	ixgbe_setup_optics(adapter);
    548 
    549 	error = -1;
    550 	if ((adapter->msix > 1) && (ixgbe_enable_msix))
    551 		error = ixgbe_allocate_msix(adapter, pa);
    552 	if (error != 0)
    553 		error = ixgbe_allocate_legacy(adapter, pa);
    554 	if (error)
    555 		goto err_late;
    556 
    557 	/* Setup OS specific network interface */
    558 	if (ixgbe_setup_interface(dev, adapter) != 0)
    559 		goto err_late;
    560 
    561 	/* Initialize statistics */
    562 	ixgbe_update_stats_counters(adapter);
    563 
    564         /* Check PCIE slot type/speed/width */
    565 	ixgbe_get_slot_info(hw);
    566 
    567 
    568 	/* Set an initial default flow control value */
    569 	adapter->fc = ixgbe_fc_full;
    570 
    571 	/* Check for certain supported features */
    572 	ixgbe_check_wol_support(adapter);
    573 	ixgbe_check_eee_support(adapter);
    574 
    575 	/* Add sysctls */
    576 	ixgbe_add_device_sysctls(adapter);
    577 	ixgbe_add_hw_stats(adapter);
    578 
    579 	/* let hardware know driver is loaded */
    580 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
    581 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
    582 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
    583 
    584 #ifdef DEV_NETMAP
    585 	ixgbe_netmap_attach(adapter);
    586 #endif /* DEV_NETMAP */
    587 
    588 	if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
    589 		pmf_class_network_register(dev, adapter->ifp);
    590 	else
    591 		aprint_error_dev(dev, "couldn't establish power handler\n");
    592 
    593 	INIT_DEBUGOUT("ixgbe_attach: end");
    594 	adapter->osdep.attached = true;
    595 	return;
    596 
    597 err_late:
    598 	ixgbe_free_transmit_structures(adapter);
    599 	ixgbe_free_receive_structures(adapter);
    600 err_out:
    601 	if (adapter->ifp != NULL)
    602 		if_free(adapter->ifp);
    603 	ixgbe_free_pci_resources(adapter);
    604 	if (adapter->mta != NULL)
    605 		free(adapter->mta, M_DEVBUF);
    606 	return;
    607 }
    608 
    609 /*********************************************************************
    610  *  Device removal routine
    611  *
    612  *  The detach entry point is called when the driver is being removed.
    613  *  This routine stops the adapter and deallocates all the resources
    614  *  that were allocated for driver operation.
    615  *
    616  *  return 0 on success, positive on failure
    617  *********************************************************************/
    618 
    619 static int
    620 ixgbe_detach(device_t dev, int flags)
    621 {
    622 	struct adapter *adapter = device_private(dev);
    623 	struct rx_ring *rxr = adapter->rx_rings;
    624 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
    625 	struct ix_queue *que = adapter->queues;
    626 	struct tx_ring *txr = adapter->tx_rings;
    627 	u32	ctrl_ext;
    628 
    629 	INIT_DEBUGOUT("ixgbe_detach: begin");
    630 	if (adapter->osdep.attached == false)
    631 		return 0;
    632 
    633 #if NVLAN > 0
    634 	/* Make sure VLANs are not using driver */
    635 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    636 		;	/* nothing to do: no VLANs */
    637 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    638 		vlan_ifdetach(adapter->ifp);
    639 	else {
    640 		aprint_error_dev(dev, "VLANs in use\n");
    641 		return EBUSY;
    642 	}
    643 #endif
    644 
    645 	/* Stop the adapter */
    646 	IXGBE_CORE_LOCK(adapter);
    647 	ixgbe_setup_low_power_mode(adapter);
    648 	IXGBE_CORE_UNLOCK(adapter);
    649 
    650 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    651 #ifndef IXGBE_LEGACY_TX
    652 		softint_disestablish(txr->txq_si);
    653 #endif
    654 		softint_disestablish(que->que_si);
    655 	}
    656 
    657 	/* Drain the Link queue */
    658 	softint_disestablish(adapter->link_si);
    659 	softint_disestablish(adapter->mod_si);
    660 	softint_disestablish(adapter->msf_si);
    661 	softint_disestablish(adapter->phy_si);
    662 #ifdef IXGBE_FDIR
    663 	softint_disestablish(adapter->fdir_si);
    664 #endif
    665 
    666 	/* let hardware know driver is unloading */
    667 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
    668 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
    669 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
    670 
    671 	ether_ifdetach(adapter->ifp);
    672 	callout_halt(&adapter->timer, NULL);
    673 #ifdef DEV_NETMAP
    674 	netmap_detach(adapter->ifp);
    675 #endif /* DEV_NETMAP */
    676 	ixgbe_free_pci_resources(adapter);
    677 #if 0	/* XXX the NetBSD port is probably missing something here */
    678 	bus_generic_detach(dev);
    679 #endif
    680 	if_detach(adapter->ifp);
    681 
    682 	sysctl_teardown(&adapter->sysctllog);
    683 	evcnt_detach(&adapter->handleq);
    684 	evcnt_detach(&adapter->req);
    685 	evcnt_detach(&adapter->morerx);
    686 	evcnt_detach(&adapter->moretx);
    687 	evcnt_detach(&adapter->txloops);
    688 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    689 	evcnt_detach(&adapter->m_defrag_failed);
    690 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    691 	evcnt_detach(&adapter->einval_tx_dma_setup);
    692 	evcnt_detach(&adapter->other_tx_dma_setup);
    693 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    694 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    695 	evcnt_detach(&adapter->watchdog_events);
    696 	evcnt_detach(&adapter->tso_err);
    697 	evcnt_detach(&adapter->link_irq);
    698 
    699 	txr = adapter->tx_rings;
    700 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    701 		evcnt_detach(&txr->no_desc_avail);
    702 		evcnt_detach(&txr->total_packets);
    703 		evcnt_detach(&txr->tso_tx);
    704 
    705 		if (i < __arraycount(adapter->stats.pf.mpc)) {
    706 			evcnt_detach(&adapter->stats.pf.mpc[i]);
    707 		}
    708 		if (i < __arraycount(adapter->stats.pf.pxontxc)) {
    709 			evcnt_detach(&adapter->stats.pf.pxontxc[i]);
    710 			evcnt_detach(&adapter->stats.pf.pxonrxc[i]);
    711 			evcnt_detach(&adapter->stats.pf.pxofftxc[i]);
    712 			evcnt_detach(&adapter->stats.pf.pxoffrxc[i]);
    713 			evcnt_detach(&adapter->stats.pf.pxon2offc[i]);
    714 		}
    715 		if (i < __arraycount(adapter->stats.pf.qprc)) {
    716 			evcnt_detach(&adapter->stats.pf.qprc[i]);
    717 			evcnt_detach(&adapter->stats.pf.qptc[i]);
    718 			evcnt_detach(&adapter->stats.pf.qbrc[i]);
    719 			evcnt_detach(&adapter->stats.pf.qbtc[i]);
    720 			evcnt_detach(&adapter->stats.pf.qprdc[i]);
    721 		}
    722 
    723 		evcnt_detach(&rxr->rx_packets);
    724 		evcnt_detach(&rxr->rx_bytes);
    725 		evcnt_detach(&rxr->rx_copies);
    726 		evcnt_detach(&rxr->no_jmbuf);
    727 		evcnt_detach(&rxr->rx_discarded);
    728 		evcnt_detach(&rxr->rx_irq);
    729 	}
    730 	evcnt_detach(&stats->ipcs);
    731 	evcnt_detach(&stats->l4cs);
    732 	evcnt_detach(&stats->ipcs_bad);
    733 	evcnt_detach(&stats->l4cs_bad);
    734 	evcnt_detach(&stats->intzero);
    735 	evcnt_detach(&stats->legint);
    736 	evcnt_detach(&stats->crcerrs);
    737 	evcnt_detach(&stats->illerrc);
    738 	evcnt_detach(&stats->errbc);
    739 	evcnt_detach(&stats->mspdc);
    740 	evcnt_detach(&stats->mlfc);
    741 	evcnt_detach(&stats->mrfc);
    742 	evcnt_detach(&stats->rlec);
    743 	evcnt_detach(&stats->lxontxc);
    744 	evcnt_detach(&stats->lxonrxc);
    745 	evcnt_detach(&stats->lxofftxc);
    746 	evcnt_detach(&stats->lxoffrxc);
    747 
    748 	/* Packet Reception Stats */
    749 	evcnt_detach(&stats->tor);
    750 	evcnt_detach(&stats->gorc);
    751 	evcnt_detach(&stats->tpr);
    752 	evcnt_detach(&stats->gprc);
    753 	evcnt_detach(&stats->mprc);
    754 	evcnt_detach(&stats->bprc);
    755 	evcnt_detach(&stats->prc64);
    756 	evcnt_detach(&stats->prc127);
    757 	evcnt_detach(&stats->prc255);
    758 	evcnt_detach(&stats->prc511);
    759 	evcnt_detach(&stats->prc1023);
    760 	evcnt_detach(&stats->prc1522);
    761 	evcnt_detach(&stats->ruc);
    762 	evcnt_detach(&stats->rfc);
    763 	evcnt_detach(&stats->roc);
    764 	evcnt_detach(&stats->rjc);
    765 	evcnt_detach(&stats->mngprc);
    766 	evcnt_detach(&stats->xec);
    767 
    768 	/* Packet Transmission Stats */
    769 	evcnt_detach(&stats->gotc);
    770 	evcnt_detach(&stats->tpt);
    771 	evcnt_detach(&stats->gptc);
    772 	evcnt_detach(&stats->bptc);
    773 	evcnt_detach(&stats->mptc);
    774 	evcnt_detach(&stats->mngptc);
    775 	evcnt_detach(&stats->ptc64);
    776 	evcnt_detach(&stats->ptc127);
    777 	evcnt_detach(&stats->ptc255);
    778 	evcnt_detach(&stats->ptc511);
    779 	evcnt_detach(&stats->ptc1023);
    780 	evcnt_detach(&stats->ptc1522);
    781 
    782 	ixgbe_free_transmit_structures(adapter);
    783 	ixgbe_free_receive_structures(adapter);
    784 	free(adapter->mta, M_DEVBUF);
    785 
    786 	IXGBE_CORE_LOCK_DESTROY(adapter);
    787 	return (0);
    788 }
    789 
    790 /*********************************************************************
    791  *
    792  *  Shutdown entry point
    793  *
    794  **********************************************************************/
    795 
    796 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
    797 static int
    798 ixgbe_shutdown(device_t dev)
    799 {
    800 	struct adapter *adapter = device_private(dev);
    801 	int error = 0;
    802 
    803 	INIT_DEBUGOUT("ixgbe_shutdown: begin");
    804 
    805 	IXGBE_CORE_LOCK(adapter);
    806 	error = ixgbe_setup_low_power_mode(adapter);
    807 	IXGBE_CORE_UNLOCK(adapter);
    808 
    809 	return (error);
    810 }
    811 #endif
    812 
    813 /**
    814  * Methods for going from:
    815  * D0 -> D3: ixgbe_suspend
    816  * D3 -> D0: ixgbe_resume
    817  */
    818 static bool
    819 ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
    820 {
    821 	struct adapter *adapter = device_private(dev);
    822 	int error = 0;
    823 
    824 	INIT_DEBUGOUT("ixgbe_suspend: begin");
    825 
    826 	IXGBE_CORE_LOCK(adapter);
    827 
    828 	error = ixgbe_setup_low_power_mode(adapter);
    829 
    830 #if 0 /* XXX */
    831 	/* Save state and power down */
    832 	pci_save_state(dev);
    833 	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
    834 #endif
    835 
    836 	IXGBE_CORE_UNLOCK(adapter);
    837 
    838 	return (error);
    839 }
    840 
    841 static bool
    842 ixgbe_resume(device_t dev, const pmf_qual_t *qual)
    843 {
    844 	struct adapter *adapter = device_private(dev);
    845 	struct ifnet *ifp = adapter->ifp;
    846 	struct ixgbe_hw *hw = &adapter->hw;
    847 	u32 wus;
    848 
    849 	INIT_DEBUGOUT("ixgbe_resume: begin");
    850 
    851 	IXGBE_CORE_LOCK(adapter);
    852 
    853 #if 0 /* XXX */
    854 	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
    855 	pci_restore_state(dev);
    856 #endif
    857 
    858 	/* Read & clear WUS register */
    859 	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
    860 	if (wus)
    861 		device_printf(dev, "Woken up by (WUS): %#010x\n",
    862 		    IXGBE_READ_REG(hw, IXGBE_WUS));
    863 	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
    864 	/* And clear WUFC until next low-power transition */
    865 	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
    866 
    867 	/*
    868 	 * Required after D3->D0 transition;
     869 	 * will re-advertise all previously advertised speeds
    870 	 */
    871 	if (ifp->if_flags & IFF_UP)
    872 		ixgbe_init_locked(adapter);
    873 
    874 	IXGBE_CORE_UNLOCK(adapter);
    875 
    876 	INIT_DEBUGOUT("ixgbe_resume: end");
    877 	return true;
    878 }
    879 
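         /*
          * if_flags change callback (presumably registered through
          * ether_set_ifflags_cb(9)): return ENETRESET when the change
          * requires a full reinit, otherwise handle promiscuous/allmulti
          * changes in place.
          */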
    880 static int
    881 ixgbe_ifflags_cb(struct ethercom *ec)
    882 {
    883 	struct ifnet *ifp = &ec->ec_if;
    884 	struct adapter *adapter = ifp->if_softc;
    885 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
    886 
    887 	IXGBE_CORE_LOCK(adapter);
    888 
    889 	if (change != 0)
    890 		adapter->if_flags = ifp->if_flags;
    891 
    892 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
    893 		rc = ENETRESET;
    894 	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
    895 		ixgbe_set_promisc(adapter);
    896 
    897 	/* Set up VLAN support and filter */
    898 	ixgbe_setup_vlan_hw_support(adapter);
    899 
    900 	IXGBE_CORE_UNLOCK(adapter);
    901 
    902 	return rc;
    903 }
    904 
    905 /*********************************************************************
    906  *  Ioctl entry point
    907  *
    908  *  ixgbe_ioctl is called when the user wants to configure the
    909  *  interface.
    910  *
    911  *  return 0 on success, positive on failure
    912  **********************************************************************/
    913 
    914 static int
    915 ixgbe_ioctl(struct ifnet * ifp, u_long command, void *data)
    916 {
    917 	struct adapter	*adapter = ifp->if_softc;
    918 	struct ifcapreq *ifcr = data;
    919 	struct ifreq	*ifr = data;
    920 	int             error = 0;
    921 	int l4csum_en;
    922 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
    923 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
    924 
    925 	switch (command) {
    926 	case SIOCSIFFLAGS:
    927 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    928 		break;
    929 	case SIOCADDMULTI:
    930 	case SIOCDELMULTI:
    931 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    932 		break;
    933 	case SIOCSIFMEDIA:
    934 	case SIOCGIFMEDIA:
    935 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    936 		break;
    937 	case SIOCSIFCAP:
    938 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    939 		break;
    940 	case SIOCSIFMTU:
    941 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    942 		break;
    943 	default:
    944 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
    945 		break;
    946 	}
    947 
    948 	switch (command) {
    949 	case SIOCSIFMEDIA:
    950 	case SIOCGIFMEDIA:
    951 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
    952 	case SIOCGI2C:
    953 	{
    954 		struct ixgbe_hw *hw = &adapter->hw;
    955 		struct ixgbe_i2c_req	i2c;
    956 		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
    957 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
    958 		if (error != 0)
    959 			break;
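         		/*
         		 * Only the standard SFP module addresses are accepted:
         		 * 0xA0 (serial ID EEPROM) and 0xA2 (diagnostics, per SFF-8472).
         		 */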
    960 		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
    961 			error = EINVAL;
    962 			break;
    963 		}
    964 		if (i2c.len > sizeof(i2c.data)) {
    965 			error = EINVAL;
    966 			break;
    967 		}
    968 
    969 		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
    970 		    i2c.dev_addr, i2c.data);
    971 		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
    972 		break;
    973 	}
    974 	case SIOCSIFCAP:
    975 		/* Layer-4 Rx checksum offload has to be turned on and
    976 		 * off as a unit.
    977 		 */
    978 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    979 		if (l4csum_en != l4csum && l4csum_en != 0)
    980 			return EINVAL;
    981 		/*FALLTHROUGH*/
    982 	case SIOCADDMULTI:
    983 	case SIOCDELMULTI:
    984 	case SIOCSIFFLAGS:
    985 	case SIOCSIFMTU:
    986 	default:
    987 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    988 			return error;
    989 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    990 			;
    991 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    992 			IXGBE_CORE_LOCK(adapter);
    993 			ixgbe_init_locked(adapter);
    994 			IXGBE_CORE_UNLOCK(adapter);
    995 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    996 			/*
    997 			 * Multicast list has changed; set the hardware filter
    998 			 * accordingly.
    999 			 */
   1000 			IXGBE_CORE_LOCK(adapter);
   1001 			ixgbe_disable_intr(adapter);
   1002 			ixgbe_set_multi(adapter);
   1003 			ixgbe_enable_intr(adapter);
   1004 			IXGBE_CORE_UNLOCK(adapter);
   1005 		}
   1006 		return 0;
   1007 	}
   1008 
   1009 	return error;
   1010 }
   1011 
   1012 /*********************************************************************
   1013  *  Init entry point
   1014  *
   1015  *  This routine is used in two ways. It is used by the stack as
   1016  *  init entry point in network interface structure. It is also used
   1017  *  by the driver as a hw/sw initialization routine to get to a
   1018  *  consistent state.
   1019  *
   1020  *  return 0 on success, positive on failure
   1021  **********************************************************************/
   1022 #define IXGBE_MHADD_MFS_SHIFT 16
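         /* The maximum frame size (MFS) field lives in the upper 16 bits of MHADD. */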
   1023 
   1024 static void
   1025 ixgbe_init_locked(struct adapter *adapter)
   1026 {
   1027 	struct ifnet   *ifp = adapter->ifp;
   1028 	device_t 	dev = adapter->dev;
   1029 	struct ixgbe_hw *hw = &adapter->hw;
   1030 	u32		k, txdctl, mhadd, gpie;
   1031 	u32		rxdctl, rxctrl;
   1032 
   1033 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
   1034 
   1035 	KASSERT(mutex_owned(&adapter->core_mtx));
   1036 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
   1037 	hw->adapter_stopped = FALSE;
   1038 	ixgbe_stop_adapter(hw);
   1039         callout_stop(&adapter->timer);
   1040 
   1041 	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
   1042 	adapter->max_frame_size =
   1043 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   1044 
   1045         /* reprogram the RAR[0] in case user changed it. */
   1046         ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   1047 
   1048 	/* Get the latest mac address, User can use a LAA */
   1049 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
   1050 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1051 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
   1052 	hw->addr_ctrl.rar_used_count = 1;
   1053 
   1054 	/* Prepare transmit descriptors and buffers */
   1055 	if (ixgbe_setup_transmit_structures(adapter)) {
   1056 		device_printf(dev, "Could not setup transmit structures\n");
   1057 		ixgbe_stop(adapter);
   1058 		return;
   1059 	}
   1060 
   1061 	ixgbe_init_hw(hw);
   1062 	ixgbe_initialize_transmit_units(adapter);
   1063 
   1064 	/* Setup Multicast table */
   1065 	ixgbe_set_multi(adapter);
   1066 
   1067 	/*
   1068 	** Determine the correct mbuf pool
   1069 	** for doing jumbo frames
   1070 	*/
   1071 	if (adapter->max_frame_size <= 2048)
   1072 		adapter->rx_mbuf_sz = MCLBYTES;
   1073 	else if (adapter->max_frame_size <= 4096)
   1074 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
   1075 	else if (adapter->max_frame_size <= 9216)
   1076 		adapter->rx_mbuf_sz = MJUM9BYTES;
   1077 	else
   1078 		adapter->rx_mbuf_sz = MJUM16BYTES;
   1079 
   1080 	/* Prepare receive descriptors and buffers */
   1081 	if (ixgbe_setup_receive_structures(adapter)) {
   1082 		device_printf(dev, "Could not setup receive structures\n");
   1083 		ixgbe_stop(adapter);
   1084 		return;
   1085 	}
   1086 
   1087 	/* Configure RX settings */
   1088 	ixgbe_initialize_receive_units(adapter);
   1089 
   1090 	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
   1091 
   1092 	/* Enable Fan Failure Interrupt */
   1093 	gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
   1094 
   1095 	/* Add for Module detection */
   1096 	if (hw->mac.type == ixgbe_mac_82599EB)
   1097 		gpie |= IXGBE_SDP2_GPIEN;
   1098 
   1099 	/*
   1100 	 * Thermal Failure Detection (X540)
   1101 	 * Link Detection (X552)
   1102 	 */
   1103 	if (hw->mac.type == ixgbe_mac_X540 ||
   1104 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   1105 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   1106 		gpie |= IXGBE_SDP0_GPIEN_X540;
   1107 
   1108 	if (adapter->msix > 1) {
   1109 		/* Enable Enhanced MSIX mode */
   1110 		gpie |= IXGBE_GPIE_MSIX_MODE;
   1111 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
   1112 		    IXGBE_GPIE_OCD;
   1113 	}
   1114 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
   1115 
   1116 	/* Set MTU size */
   1117 	if (ifp->if_mtu > ETHERMTU) {
   1118 		/* aka IXGBE_MAXFRS on 82599 and newer */
   1119 		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
   1120 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
   1121 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
   1122 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
   1123 	}
   1124 
   1125 	/* Now enable all the queues */
   1126 	for (int i = 0; i < adapter->num_queues; i++) {
   1127 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
   1128 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1129 		/* Set WTHRESH to 8, burst writeback */
   1130 		txdctl |= (8 << 16);
   1131 		/*
   1132 		 * When the internal queue falls below PTHRESH (32),
   1133 		 * start prefetching as long as there are at least
   1134 		 * HTHRESH (1) buffers ready. The values are taken
   1135 		 * from the Intel linux driver 3.8.21.
   1136 		 * Prefetching enables tx line rate even with 1 queue.
   1137 		 */
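         		/*
         		 * TXDCTL field layout on 82599-class MACs: PTHRESH in
         		 * bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16.
         		 */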
   1138 		txdctl |= (32 << 0) | (1 << 8);
   1139 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
   1140 	}
   1141 
   1142 	for (int i = 0; i < adapter->num_queues; i++) {
   1143 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
   1144 		if (hw->mac.type == ixgbe_mac_82598EB) {
   1145 			/*
    1146 			** PTHRESH = 32 (0x20)
   1147 			** HTHRESH = 4
   1148 			** WTHRESH = 8
   1149 			*/
   1150 			rxdctl &= ~0x3FFFFF;
   1151 			rxdctl |= 0x080420;
   1152 		}
   1153 		rxdctl |= IXGBE_RXDCTL_ENABLE;
   1154 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
   1155 		/* XXX I don't trust this loop, and I don't trust the
   1156 		 * XXX memory barrier.  What is this meant to do? --dyoung
   1157 		 */
   1158 		for (k = 0; k < 10; k++) {
   1159 			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
   1160 			    IXGBE_RXDCTL_ENABLE)
   1161 				break;
   1162 			else
   1163 				msec_delay(1);
   1164 		}
   1165 		wmb();
   1166 #ifdef DEV_NETMAP
   1167 		/*
   1168 		 * In netmap mode, we must preserve the buffers made
   1169 		 * available to userspace before the if_init()
   1170 		 * (this is true by default on the TX side, because
   1171 		 * init makes all buffers available to userspace).
   1172 		 *
   1173 		 * netmap_reset() and the device specific routines
   1174 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1175 		 * buffers at the end of the NIC ring, so here we
   1176 		 * must set the RDT (tail) register to make sure
   1177 		 * they are not overwritten.
   1178 		 *
   1179 		 * In this driver the NIC ring starts at RDH = 0,
   1180 		 * RDT points to the last slot available for reception (?),
   1181 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1182 		 */
   1183 		if (ifp->if_capenable & IFCAP_NETMAP) {
   1184 			struct netmap_adapter *na = NA(adapter->ifp);
   1185 			struct netmap_kring *kring = &na->rx_rings[i];
   1186 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1187 
   1188 			IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
   1189 		} else
   1190 #endif /* DEV_NETMAP */
   1191 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
   1192 	}
   1193 
   1194 	/* Enable Receive engine */
   1195 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
   1196 	if (hw->mac.type == ixgbe_mac_82598EB)
   1197 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
   1198 	rxctrl |= IXGBE_RXCTRL_RXEN;
   1199 	ixgbe_enable_rx_dma(hw, rxctrl);
   1200 
   1201 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   1202 
   1203 	/* Set up MSI/X routing */
   1204 	if (ixgbe_enable_msix)  {
   1205 		ixgbe_configure_ivars(adapter);
   1206 		/* Set up auto-mask */
   1207 		if (hw->mac.type == ixgbe_mac_82598EB)
   1208 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   1209 		else {
   1210 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
   1211 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
   1212 		}
   1213 	} else {  /* Simple settings for Legacy/MSI */
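                 /*
                  * Map both RX (last argument 0) and TX (last argument 1)
                  * of queue 0 onto vector 0.
                  */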
   1214                 ixgbe_set_ivar(adapter, 0, 0, 0);
   1215                 ixgbe_set_ivar(adapter, 0, 0, 1);
   1216 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
   1217 	}
   1218 
   1219 #ifdef IXGBE_FDIR
   1220 	/* Init Flow director */
   1221 	if (hw->mac.type != ixgbe_mac_82598EB) {
   1222 		u32 hdrm = 32 << fdir_pballoc;
   1223 
   1224 		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
   1225 		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
   1226 	}
   1227 #endif
   1228 
   1229 	/*
   1230 	** Check on any SFP devices that
   1231 	** need to be kick-started
   1232 	*/
   1233 	if (hw->phy.type == ixgbe_phy_none) {
   1234 		int err = hw->phy.ops.identify(hw);
   1235 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   1236                 	device_printf(dev,
   1237 			    "Unsupported SFP+ module type was detected.\n");
   1238 			return;
   1239         	}
   1240 	}
   1241 
   1242 	/* Set moderation on the Link interrupt */
   1243 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
   1244 
   1245 	/* Configure Energy Efficient Ethernet for supported devices */
   1246 	if (adapter->eee_support)
   1247 		ixgbe_setup_eee(hw, adapter->eee_enabled);
   1248 
   1249 	/* Config/Enable Link */
   1250 	ixgbe_config_link(adapter);
   1251 
   1252 	/* Hardware Packet Buffer & Flow Control setup */
   1253 	ixgbe_config_delay_values(adapter);
   1254 
   1255 	/* Initialize the FC settings */
   1256 	ixgbe_start_hw(hw);
   1257 
   1258 	/* Set up VLAN support and filter */
   1259 	ixgbe_setup_vlan_hw_support(adapter);
   1260 
   1261 	/* Setup DMA Coalescing */
   1262 	ixgbe_config_dmac(adapter);
   1263 
   1264 	/* And now turn on interrupts */
   1265 	ixgbe_enable_intr(adapter);
   1266 
   1267 	/* Now inform the stack we're ready */
   1268 	ifp->if_flags |= IFF_RUNNING;
   1269 
   1270 	return;
   1271 }
   1272 
   1273 static int
   1274 ixgbe_init(struct ifnet *ifp)
   1275 {
   1276 	struct adapter *adapter = ifp->if_softc;
   1277 
   1278 	IXGBE_CORE_LOCK(adapter);
   1279 	ixgbe_init_locked(adapter);
   1280 	IXGBE_CORE_UNLOCK(adapter);
   1281 	return 0;	/* XXX ixgbe_init_locked cannot fail?  really? */
   1282 }
   1283 
   1284 static void
   1285 ixgbe_config_delay_values(struct adapter *adapter)
   1286 {
   1287 	struct ixgbe_hw *hw = &adapter->hw;
   1288 	u32 rxpb, frame, size, tmp;
   1289 
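         	/*
         	 * The high-water mark is the RX packet buffer size (converted
         	 * to KB) minus the delay value needed to absorb one max-sized
         	 * frame; the low-water mark is derived from the frame size
         	 * alone.
         	 */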
   1290 	frame = adapter->max_frame_size;
   1291 
   1292 	/* Calculate High Water */
   1293 	switch (hw->mac.type) {
   1294 	case ixgbe_mac_X540:
   1295 	case ixgbe_mac_X550:
   1296 	case ixgbe_mac_X550EM_x:
   1297 		tmp = IXGBE_DV_X540(frame, frame);
   1298 		break;
   1299 	default:
   1300 		tmp = IXGBE_DV(frame, frame);
   1301 		break;
   1302 	}
   1303 	size = IXGBE_BT2KB(tmp);
   1304 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
   1305 	hw->fc.high_water[0] = rxpb - size;
   1306 
   1307 	/* Now calculate Low Water */
   1308 	switch (hw->mac.type) {
   1309 	case ixgbe_mac_X540:
   1310 	case ixgbe_mac_X550:
   1311 	case ixgbe_mac_X550EM_x:
   1312 		tmp = IXGBE_LOW_DV_X540(frame);
   1313 		break;
   1314 	default:
   1315 		tmp = IXGBE_LOW_DV(frame);
   1316 		break;
   1317 	}
   1318 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
   1319 
   1320 	hw->fc.requested_mode = adapter->fc;
   1321 	hw->fc.pause_time = IXGBE_FC_PAUSE;
   1322 	hw->fc.send_xon = TRUE;
   1323 }
   1324 
   1325 /*
   1326 **
   1327 ** MSIX Interrupt Handlers and Tasklets
   1328 **
   1329 */
   1330 
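         /*
         ** The 82598 keeps all per-queue interrupt bits in a single EIMS
         ** register; later MACs split the 64-bit queue mask across
         ** EIMS_EX(0)/EIMS_EX(1), so it is written in two halves.
         */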
   1331 static inline void
   1332 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
   1333 {
   1334 	struct ixgbe_hw *hw = &adapter->hw;
   1335 	u64	queue = (u64)(1ULL << vector);
   1336 	u32	mask;
   1337 
   1338 	if (hw->mac.type == ixgbe_mac_82598EB) {
   1339                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1340                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   1341 	} else {
   1342                 mask = (queue & 0xFFFFFFFF);
   1343                 if (mask)
   1344                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
   1345                 mask = (queue >> 32);
   1346                 if (mask)
   1347                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
   1348 	}
   1349 }
   1350 
   1351 __unused static inline void
   1352 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
   1353 {
   1354 	struct ixgbe_hw *hw = &adapter->hw;
   1355 	u64	queue = (u64)(1ULL << vector);
   1356 	u32	mask;
   1357 
   1358 	if (hw->mac.type == ixgbe_mac_82598EB) {
   1359                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
   1360                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
   1361 	} else {
   1362                 mask = (queue & 0xFFFFFFFF);
   1363                 if (mask)
   1364                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
   1365                 mask = (queue >> 32);
   1366                 if (mask)
   1367                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
   1368 	}
   1369 }
   1370 
   1371 static void
   1372 ixgbe_handle_que(void *context)
   1373 {
   1374 	struct ix_queue *que = context;
   1375 	struct adapter  *adapter = que->adapter;
   1376 	struct tx_ring  *txr = que->txr;
   1377 	struct ifnet    *ifp = adapter->ifp;
   1378 
   1379 	adapter->handleq.ev_count++;
   1380 
   1381 	if (ifp->if_flags & IFF_RUNNING) {
   1382 		ixgbe_rxeof(que);
   1383 		IXGBE_TX_LOCK(txr);
   1384 		ixgbe_txeof(txr);
   1385 #ifndef IXGBE_LEGACY_TX
   1386 		if (!drbr_empty(ifp, txr->br))
   1387 			ixgbe_mq_start_locked(ifp, txr);
   1388 #else
   1389 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1390 			ixgbe_start_locked(txr, ifp);
   1391 #endif
   1392 		IXGBE_TX_UNLOCK(txr);
   1393 	}
   1394 
   1395 	/* Reenable this interrupt */
   1396 	if (que->res != NULL)
   1397 		ixgbe_enable_queue(adapter, que->msix);
   1398 	else
   1399 		ixgbe_enable_intr(adapter);
   1400 	return;
   1401 }
   1402 
   1403 
   1404 /*********************************************************************
   1405  *
   1406  *  Legacy Interrupt Service routine
   1407  *
   1408  **********************************************************************/
   1409 
   1410 static int
   1411 ixgbe_legacy_irq(void *arg)
   1412 {
   1413 	struct ix_queue *que = arg;
   1414 	struct adapter	*adapter = que->adapter;
   1415 	struct ixgbe_hw	*hw = &adapter->hw;
   1416 	struct ifnet    *ifp = adapter->ifp;
   1417 	struct 		tx_ring *txr = adapter->tx_rings;
   1418 	bool		more = false;
   1419 	u32       	reg_eicr;
   1420 
   1421 
   1422 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
   1423 
   1424 	adapter->stats.pf.legint.ev_count++;
   1425 	++que->irqs.ev_count;
   1426 	if (reg_eicr == 0) {
   1427 		adapter->stats.pf.intzero.ev_count++;
   1428 		if ((ifp->if_flags & IFF_UP) != 0)
   1429 			ixgbe_enable_intr(adapter);
   1430 		return 0;
   1431 	}
   1432 
   1433 	if ((ifp->if_flags & IFF_RUNNING) != 0) {
   1434 #ifdef __NetBSD__
   1435 		/* Don't run ixgbe_rxeof in interrupt context */
   1436 		more = true;
   1437 #else
   1438 		more = ixgbe_rxeof(que);
   1439 #endif
   1440 
   1441 		IXGBE_TX_LOCK(txr);
   1442 		ixgbe_txeof(txr);
   1443 #ifdef IXGBE_LEGACY_TX
   1444 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   1445 			ixgbe_start_locked(txr, ifp);
   1446 #else
   1447 		if (!drbr_empty(ifp, txr->br))
   1448 			ixgbe_mq_start_locked(ifp, txr);
   1449 #endif
   1450 		IXGBE_TX_UNLOCK(txr);
   1451 	}
   1452 
   1453 	/* Check for fan failure */
   1454 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
   1455 	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
   1456                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1457 		    "REPLACE IMMEDIATELY!!\n");
   1458 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   1459 	}
   1460 
   1461 	/* Link status change */
   1462 	if (reg_eicr & IXGBE_EICR_LSC)
   1463 		softint_schedule(adapter->link_si);
   1464 
   1465 	/* External PHY interrupt */
   1466 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   1467 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
   1468 		softint_schedule(adapter->phy_si);
   1469 
   1470 	if (more)
   1471 #ifndef IXGBE_LEGACY_TX
   1472 		softint_schedule(txr->txq_si);
   1473 #else
   1474 		softint_schedule(que->que_si);
   1475 #endif
   1476 	else
   1477 		ixgbe_enable_intr(adapter);
   1478 	return 1;
   1479 }
   1480 
   1481 
   1482 /*********************************************************************
   1483  *
   1484  *  MSIX Queue Interrupt Service routine
   1485  *
   1486  **********************************************************************/
   1487 static int
   1488 ixgbe_msix_que(void *arg)
   1489 {
   1490 	struct ix_queue	*que = arg;
   1491 	struct adapter  *adapter = que->adapter;
   1492 	struct ifnet    *ifp = adapter->ifp;
   1493 	struct tx_ring	*txr = que->txr;
   1494 	struct rx_ring	*rxr = que->rxr;
   1495 	bool		more;
   1496 	u32		newitr = 0;
   1497 
   1498 	/* Protect against spurious interrupts */
   1499 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   1500 		return 0;
   1501 
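         	/*
         	 * Mask this queue's vector while we service it; it is
         	 * re-enabled below, or by the deferred handler if more
         	 * work remains.
         	 */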
   1502 	ixgbe_disable_queue(adapter, que->msix);
   1503 	++que->irqs.ev_count;
   1504 
   1505 #ifdef __NetBSD__
   1506 	/* Don't run ixgbe_rxeof in interrupt context */
   1507 	more = true;
   1508 #else
   1509 	more = ixgbe_rxeof(que);
   1510 #endif
   1511 
   1512 	IXGBE_TX_LOCK(txr);
   1513 	ixgbe_txeof(txr);
   1514 #ifdef IXGBE_LEGACY_TX
   1515 	if (!IFQ_IS_EMPTY(&adapter->ifp->if_snd))
   1516 		ixgbe_start_locked(txr, ifp);
   1517 #else
   1518 	if (!drbr_empty(ifp, txr->br))
   1519 		ixgbe_mq_start_locked(ifp, txr);
   1520 #endif
   1521 	IXGBE_TX_UNLOCK(txr);
   1522 
   1523 	/* Do AIM now? */
   1524 
   1525 	if (ixgbe_enable_aim == FALSE)
   1526 		goto no_calc;
    1527 	/*
    1528 	** Do Adaptive Interrupt Moderation:
    1529 	**  - Write out last calculated setting
    1530 	**  - Calculate based on average size over
    1531 	**    the last interval.
    1532 	*/
    1533 	if (que->eitr_setting)
    1534 		IXGBE_WRITE_REG(&adapter->hw,
    1535 		    IXGBE_EITR(que->msix), que->eitr_setting);
    1536 
    1537 	que->eitr_setting = 0;
    1538 
    1539 	/* Idle, do nothing */
    1540 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    1541 		goto no_calc;
    1542 
    1543 	if ((txr->bytes) && (txr->packets))
    1544 		newitr = txr->bytes / txr->packets;
    1545 	if ((rxr->bytes) && (rxr->packets))
    1546 		newitr = max(newitr,
    1547 		    (rxr->bytes / rxr->packets));
   1548 	newitr += 24; /* account for hardware frame, crc */
   1549 
   1550 	/* set an upper boundary */
   1551 	newitr = min(newitr, 3000);
   1552 
   1553 	/* Be nice to the mid range */
   1554 	if ((newitr > 300) && (newitr < 1200))
   1555 		newitr = (newitr / 3);
   1556 	else
   1557 		newitr = (newitr / 2);
   1558 
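         	/*
         	 * 82598 apparently wants the interval mirrored into the upper
         	 * half of EITR; newer MACs instead set CNT_WDIS so this write
         	 * does not reset the internal interval counter.
         	 */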
    1559 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
    1560 		newitr |= newitr << 16;
    1561 	else
    1562 		newitr |= IXGBE_EITR_CNT_WDIS;
    1563 
    1564 	/* save for next interrupt */
    1565 	que->eitr_setting = newitr;
    1566 
    1567 	/* Reset state */
    1568 	txr->bytes = 0;
    1569 	txr->packets = 0;
    1570 	rxr->bytes = 0;
    1571 	rxr->packets = 0;
   1572 
   1573 no_calc:
   1574 	if (more)
   1575 		softint_schedule(que->que_si);
   1576 	else
   1577 		ixgbe_enable_queue(adapter, que->msix);
   1578 	return 1;
   1579 }
   1580 
   1581 
   1582 static int
   1583 ixgbe_msix_link(void *arg)
   1584 {
   1585 	struct adapter	*adapter = arg;
   1586 	struct ixgbe_hw *hw = &adapter->hw;
   1587 	u32		reg_eicr, mod_mask;
   1588 
   1589 	++adapter->link_irq.ev_count;
   1590 
   1591 	/* First get the cause */
   1592 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
   1593 	/* Be sure the queue bits are not cleared */
   1594 	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
   1595 	/* Clear interrupt with write */
   1596 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
   1597 
   1598 	/* Link status change */
   1599 	if (reg_eicr & IXGBE_EICR_LSC)
   1600 		softint_schedule(adapter->link_si);
   1601 
   1602 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   1603 #ifdef IXGBE_FDIR
   1604 		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
   1605 			/* This is probably overkill :) */
   1606 			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
   1607 				return 1;
    1608 			/* Disable the interrupt */
   1609 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
   1610 			softint_schedule(adapter->fdir_si);
   1611 		} else
   1612 #endif
   1613 		if (reg_eicr & IXGBE_EICR_ECC) {
    1614 			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
   1615 			    "Please Reboot!!\n");
   1616 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
   1617 		}
   1618 
   1619 		/* Check for over temp condition */
   1620 		if (reg_eicr & IXGBE_EICR_TS) {
   1621 			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
   1622 			    "PHY IS SHUT DOWN!!\n");
   1623 			device_printf(adapter->dev, "System shutdown required!\n");
   1624 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
   1625 		}
   1626 	}
   1627 
   1628 	/* Pluggable optics-related interrupt */
   1629 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
   1630 		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
   1631 	else
   1632 		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
   1633 
   1634 	if (ixgbe_is_sfp(hw)) {
   1635 		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
   1636 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
   1637 			softint_schedule(adapter->msf_si);
   1638 		} else if (reg_eicr & mod_mask) {
   1639 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
   1640 			softint_schedule(adapter->mod_si);
   1641 		}
   1642 	}
   1643 
   1644 	/* Check for fan failure */
   1645 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
   1646 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
   1647 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
   1648                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
   1649 		    "REPLACE IMMEDIATELY!!\n");
   1650 	}
   1651 
   1652 	/* External PHY interrupt */
   1653 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
   1654 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
   1655 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
   1656 		softint_schedule(adapter->phy_si);
    1657 	}
   1658 
   1659 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
   1660 	return 1;
   1661 }
   1662 
   1663 /*********************************************************************
   1664  *
   1665  *  Media Ioctl callback
   1666  *
   1667  *  This routine is called whenever the user queries the status of
   1668  *  the interface using ifconfig.
   1669  *
   1670  **********************************************************************/
   1671 static void
   1672 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
   1673 {
   1674 	struct adapter *adapter = ifp->if_softc;
   1675 	struct ixgbe_hw *hw = &adapter->hw;
   1676 	int layer;
   1677 
   1678 	INIT_DEBUGOUT("ixgbe_media_status: begin");
   1679 	IXGBE_CORE_LOCK(adapter);
   1680 	ixgbe_update_link_status(adapter);
   1681 
   1682 	ifmr->ifm_status = IFM_AVALID;
   1683 	ifmr->ifm_active = IFM_ETHER;
   1684 
   1685 	if (!adapter->link_active) {
   1686 		IXGBE_CORE_UNLOCK(adapter);
   1687 		return;
   1688 	}
   1689 
   1690 	ifmr->ifm_status |= IFM_ACTIVE;
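         	/*
         	 * Map the supported physical layer(s) plus the negotiated
         	 * link speed onto an ifmedia subtype.
         	 */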
   1691 	layer = ixgbe_get_supported_physical_layer(hw);
   1692 
   1693 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
   1694 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
   1695 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
   1696 		switch (adapter->link_speed) {
   1697 		case IXGBE_LINK_SPEED_10GB_FULL:
   1698 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1699 			break;
   1700 		case IXGBE_LINK_SPEED_1GB_FULL:
   1701 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1702 			break;
   1703 		case IXGBE_LINK_SPEED_100_FULL:
   1704 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1705 			break;
   1706 		}
   1707 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   1708 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   1709 		switch (adapter->link_speed) {
   1710 		case IXGBE_LINK_SPEED_10GB_FULL:
   1711 			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
   1712 			break;
   1713 		}
   1714 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   1715 		switch (adapter->link_speed) {
   1716 		case IXGBE_LINK_SPEED_10GB_FULL:
   1717 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
   1718 			break;
   1719 		case IXGBE_LINK_SPEED_1GB_FULL:
   1720 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   1721 			break;
   1722 		}
   1723 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
   1724 		switch (adapter->link_speed) {
   1725 		case IXGBE_LINK_SPEED_10GB_FULL:
   1726 			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
   1727 			break;
   1728 		case IXGBE_LINK_SPEED_1GB_FULL:
   1729 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
   1730 			break;
   1731 		}
   1732 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
   1733 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   1734 		switch (adapter->link_speed) {
   1735 		case IXGBE_LINK_SPEED_10GB_FULL:
   1736 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   1737 			break;
   1738 		case IXGBE_LINK_SPEED_1GB_FULL:
   1739 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
   1740 			break;
   1741 		}
   1742 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   1743 		switch (adapter->link_speed) {
   1744 		case IXGBE_LINK_SPEED_10GB_FULL:
   1745 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   1746 			break;
   1747 		}
   1748 	/*
   1749 	** XXX: These need to use the proper media types once
   1750 	** they're added.
   1751 	*/
   1752 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
   1753 		switch (adapter->link_speed) {
   1754 		case IXGBE_LINK_SPEED_10GB_FULL:
   1755 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
   1756 			break;
   1757 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   1758 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
   1759 			break;
   1760 		case IXGBE_LINK_SPEED_1GB_FULL:
   1761 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
   1762 			break;
   1763 		}
   1764 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
   1765 	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
   1766 		switch (adapter->link_speed) {
   1767 		case IXGBE_LINK_SPEED_10GB_FULL:
   1768 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
   1769 			break;
   1770 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   1771 			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
   1772 			break;
   1773 		case IXGBE_LINK_SPEED_1GB_FULL:
   1774 			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
   1775 			break;
   1776 		}
   1777 
   1778 	/* If nothing is recognized... */
   1779 #if 0
   1780 	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
   1781 		ifmr->ifm_active |= IFM_UNKNOWN;
   1782 #endif
   1783 
   1784 	/* Display current flow control setting used on link */
   1785 	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
   1786 	    hw->fc.current_mode == ixgbe_fc_full)
   1787 		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
   1788 	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
   1789 	    hw->fc.current_mode == ixgbe_fc_full)
   1790 		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
   1791 
   1792 	IXGBE_CORE_UNLOCK(adapter);
   1793 
   1794 	return;
   1795 }
   1796 
   1797 /*********************************************************************
   1798  *
   1799  *  Media Ioctl callback
   1800  *
   1801  *  This routine is called when the user changes speed/duplex using
    1802  *  media/mediaopt option with ifconfig.
   1803  *
   1804  **********************************************************************/
   1805 static int
   1806 ixgbe_media_change(struct ifnet * ifp)
   1807 {
   1808 	struct adapter *adapter = ifp->if_softc;
   1809 	struct ifmedia *ifm = &adapter->media;
   1810 	struct ixgbe_hw *hw = &adapter->hw;
   1811 	ixgbe_link_speed speed = 0;
   1812 
   1813 	INIT_DEBUGOUT("ixgbe_media_change: begin");
   1814 
   1815 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1816 		return (EINVAL);
   1817 
   1818 	if (hw->phy.media_type == ixgbe_media_type_backplane)
   1819 		return (EPERM);
   1820 
   1821 	/*
   1822 	** We don't actually need to check against the supported
   1823 	** media types of the adapter; ifmedia will take care of
   1824 	** that for us.
   1825 	*/
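         	/*
         	 * Note: the case fall-throughs below are intentional; each
         	 * subtype accumulates every lower speed it may also link at.
         	 */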
   1826 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1827 		case IFM_AUTO:
   1828 		case IFM_10G_T:
   1829 			speed |= IXGBE_LINK_SPEED_100_FULL;
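         			/* FALLTHROUGH */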
   1830 		case IFM_10G_LRM:
   1831 		case IFM_10G_SR: /* KR, too */
   1832 		case IFM_10G_LR:
   1833 		case IFM_10G_CX4: /* KX4 */
   1834 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
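         			/* FALLTHROUGH */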
   1835 		case IFM_10G_TWINAX:
   1836 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
   1837 			break;
   1838 		case IFM_1000_T:
   1839 			speed |= IXGBE_LINK_SPEED_100_FULL;
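         			/* FALLTHROUGH */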
   1840 		case IFM_1000_LX:
   1841 		case IFM_1000_SX:
   1842 		case IFM_1000_CX: /* KX */
   1843 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
   1844 			break;
   1845 		case IFM_100_TX:
   1846 			speed |= IXGBE_LINK_SPEED_100_FULL;
   1847 			break;
   1848 		default:
   1849 			goto invalid;
   1850 	}
   1851 
   1852 	hw->mac.autotry_restart = TRUE;
   1853 	hw->mac.ops.setup_link(hw, speed, TRUE);
   1854 	adapter->advertise =
   1855 		((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
   1856 		((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
   1857 		((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
   1858 
   1859 	return (0);
   1860 
   1861 invalid:
   1862 	device_printf(adapter->dev, "Invalid media type!\n");
   1863 	return (EINVAL);
   1864 }
   1865 
   1866 static void
   1867 ixgbe_set_promisc(struct adapter *adapter)
   1868 {
   1869 	struct ether_multi *enm;
   1870 	struct ether_multistep step;
   1871 	u_int32_t       reg_rctl;
   1872 	struct ethercom *ec = &adapter->osdep.ec;
   1873 	struct ifnet   *ifp = adapter->ifp;
   1874 	int		mcnt = 0;
   1875 
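         	/*
         	 * Clear unicast promiscuous here; multicast promiscuous (MPE)
         	 * is cleared below only if the whole multicast list fits in
         	 * the hardware filter.
         	 */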
   1876 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1877 	reg_rctl &= (~IXGBE_FCTRL_UPE);
   1878 	if (ifp->if_flags & IFF_ALLMULTI)
   1879 		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
   1880 	else {
   1881 		ETHER_FIRST_MULTI(step, ec, enm);
   1882 		while (enm != NULL) {
   1883 			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
   1884 				break;
   1885 			mcnt++;
   1886 			ETHER_NEXT_MULTI(step, enm);
   1887 		}
   1888 	}
   1889 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
   1890 		reg_rctl &= (~IXGBE_FCTRL_MPE);
   1891 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1892 
   1893 	if (ifp->if_flags & IFF_PROMISC) {
   1894 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1895 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1896 	} else if (ifp->if_flags & IFF_ALLMULTI) {
   1897 		reg_rctl |= IXGBE_FCTRL_MPE;
   1898 		reg_rctl &= ~IXGBE_FCTRL_UPE;
   1899 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
   1900 	}
   1901 	return;
   1902 }
   1903 
   1904 
   1905 /*********************************************************************
   1906  *  Multicast Update
   1907  *
   1908  *  This routine is called whenever multicast address list is updated.
   1909  *
   1910  **********************************************************************/
   1911 #define IXGBE_RAR_ENTRIES 16
   1912 
   1913 static void
   1914 ixgbe_set_multi(struct adapter *adapter)
   1915 {
   1916 	struct ether_multi *enm;
   1917 	struct ether_multistep step;
   1918 	u32	fctrl;
   1919 	u8	*mta;
   1920 	u8	*update_ptr;
   1921 	int	mcnt = 0;
   1922 	struct ethercom *ec = &adapter->osdep.ec;
   1923 	struct ifnet   *ifp = adapter->ifp;
   1924 
   1925 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
   1926 
   1927 	mta = adapter->mta;
   1928 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
   1929 	    MAX_NUM_MULTICAST_ADDRESSES);
   1930 
   1931 	ifp->if_flags &= ~IFF_ALLMULTI;
   1932 	ETHER_FIRST_MULTI(step, ec, enm);
   1933 	while (enm != NULL) {
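         		/*
         		 * A range of addresses (addrlo != addrhi) cannot be
         		 * filtered exactly, so treat it like an overflow and
         		 * fall back to ALLMULTI.
         		 */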
   1934 		if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
   1935 		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
   1936 			ETHER_ADDR_LEN) != 0)) {
   1937 			ifp->if_flags |= IFF_ALLMULTI;
   1938 			break;
   1939 		}
   1940 		bcopy(enm->enm_addrlo,
   1941 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1942 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1943 		mcnt++;
   1944 		ETHER_NEXT_MULTI(step, enm);
   1945 	}
   1946 
   1947 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
   1948 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1949 	if (ifp->if_flags & IFF_PROMISC)
   1950 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
   1951 	else if (ifp->if_flags & IFF_ALLMULTI) {
   1952 		fctrl |= IXGBE_FCTRL_MPE;
   1953 	}
   1954 
   1955 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
   1956 
   1957 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
   1958 		update_ptr = mta;
   1959 		ixgbe_update_mc_addr_list(&adapter->hw,
   1960 		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
   1961 	}
   1962 
   1963 	return;
   1964 }
   1965 
   1966 /*
    1967  * This is an iterator function needed by the multicast
    1968  * shared code. It simply feeds the shared code routine the
    1969  * addresses from the array built in ixgbe_set_multi(), one by one.
   1970  */
   1971 static u8 *
   1972 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1973 {
   1974 	u8 *addr = *update_ptr;
   1975 	u8 *newptr;
   1976 	*vmdq = 0;
   1977 
   1978 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1979 	*update_ptr = newptr;
   1980 	return addr;
   1981 }
   1982 
   1983 
   1984 /*********************************************************************
   1985  *  Timer routine
   1986  *
    1987  *  This routine checks for link status, updates statistics,
   1988  *  and runs the watchdog check.
   1989  *
   1990  **********************************************************************/
   1991 
   1992 static void
   1993 ixgbe_local_timer1(void *arg)
   1994 {
   1995 	struct adapter	*adapter = arg;
   1996 	device_t	dev = adapter->dev;
   1997 	struct ix_queue *que = adapter->queues;
   1998 	u64		queues = 0;
   1999 	int		hung = 0;
   2000 
   2001 	KASSERT(mutex_owned(&adapter->core_mtx));
   2002 
   2003 	/* Check for pluggable optics */
   2004 	if (adapter->sfp_probe)
   2005 		if (!ixgbe_sfp_probe(adapter))
   2006 			goto out; /* Nothing to do */
   2007 
   2008 	ixgbe_update_link_status(adapter);
   2009 	ixgbe_update_stats_counters(adapter);
   2010 
    2011 	/*
    2012 	** Check the status of the TX queues:
    2013 	**	- mark hung queues so we don't schedule on them
    2014 	**	- watchdog only if all queues show hung
    2015 	*/
   2016 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2017 		/* Keep track of queues with work for soft irq */
   2018 		if (que->txr->busy)
   2019 			queues |= ((u64)1 << que->me);
    2020 		/*
    2021 		** Each time txeof runs without cleaning while there
    2022 		** are uncleaned descriptors, it increments busy. If
    2023 		** it reaches the MAX we declare the queue hung.
    2024 		*/
   2025 		if (que->busy == IXGBE_QUEUE_HUNG) {
   2026 			++hung;
   2027 			/* Mark the queue as inactive */
   2028 			adapter->active_queues &= ~((u64)1 << que->me);
   2029 			continue;
   2030 		} else {
   2031 			/* Check if we've come back from hung */
   2032 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   2033                                 adapter->active_queues |= ((u64)1 << que->me);
   2034 		}
   2035 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   2036 			device_printf(dev,"Warning queue %d "
   2037 			    "appears to be hung!\n", i);
   2038 			que->txr->busy = IXGBE_QUEUE_HUNG;
   2039 			++hung;
   2040 		}
   2041 
   2042 	}
    2043 	/* Only truly watchdog if all queues show hung */
   2044 	if (hung == adapter->num_queues)
   2045 		goto watchdog;
   2046 	else if (queues != 0) { /* Force an IRQ on queues with work */
   2047 		ixgbe_rearm_queues(adapter, queues);
   2048 	}
   2049 
   2050 out:
   2051 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
   2052 	return;
   2053 
   2054 watchdog:
   2055 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   2056 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   2057 	adapter->watchdog_events.ev_count++;
   2058 	ixgbe_init_locked(adapter);
   2059 }
   2060 
   2061 static void
   2062 ixgbe_local_timer(void *arg)
   2063 {
   2064 	struct adapter *adapter = arg;
   2065 
   2066 	IXGBE_CORE_LOCK(adapter);
   2067 	ixgbe_local_timer1(adapter);
   2068 	IXGBE_CORE_UNLOCK(adapter);
   2069 }
   2070 
   2071 /*
    2072 ** Note: this routine updates the OS on the link state;
   2073 **	the real check of the hardware only happens with
   2074 **	a link interrupt.
   2075 */
   2076 static void
   2077 ixgbe_update_link_status(struct adapter *adapter)
   2078 {
   2079 	struct ifnet	*ifp = adapter->ifp;
   2080 	device_t dev = adapter->dev;
   2081 
    2082 	if (adapter->link_up) {
   2083 		if (adapter->link_active == FALSE) {
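         			/* link_speed 128 is IXGBE_LINK_SPEED_10GB_FULL (0x80) */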
   2084 			if (bootverbose)
   2085 				device_printf(dev,"Link is up %d Gbps %s \n",
   2086 				    ((adapter->link_speed == 128)? 10:1),
   2087 				    "Full Duplex");
   2088 			adapter->link_active = TRUE;
   2089 			/* Update any Flow Control changes */
   2090 			ixgbe_fc_enable(&adapter->hw);
   2091 			/* Update DMA coalescing config */
   2092 			ixgbe_config_dmac(adapter);
   2093 			if_link_state_change(ifp, LINK_STATE_UP);
   2094 		}
   2095 	} else { /* Link down */
   2096 		if (adapter->link_active == TRUE) {
   2097 			if (bootverbose)
   2098 				device_printf(dev,"Link is Down\n");
   2099 			if_link_state_change(ifp, LINK_STATE_DOWN);
   2100 			adapter->link_active = FALSE;
   2101 		}
   2102 	}
   2103 
   2104 	return;
   2105 }
   2106 
   2107 
   2108 static void
   2109 ixgbe_ifstop(struct ifnet *ifp, int disable)
   2110 {
   2111 	struct adapter *adapter = ifp->if_softc;
   2112 
   2113 	IXGBE_CORE_LOCK(adapter);
   2114 	ixgbe_stop(adapter);
   2115 	IXGBE_CORE_UNLOCK(adapter);
   2116 }
   2117 
   2118 /*********************************************************************
   2119  *
   2120  *  This routine disables all traffic on the adapter by issuing a
    2121  *  global reset on the MAC and deallocating TX/RX buffers.
   2122  *
   2123  **********************************************************************/
   2124 
   2125 static void
   2126 ixgbe_stop(void *arg)
   2127 {
   2128 	struct ifnet   *ifp;
   2129 	struct adapter *adapter = arg;
   2130 	struct ixgbe_hw *hw = &adapter->hw;
   2131 	ifp = adapter->ifp;
   2132 
   2133 	KASSERT(mutex_owned(&adapter->core_mtx));
   2134 
   2135 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
   2136 	ixgbe_disable_intr(adapter);
   2137 	callout_stop(&adapter->timer);
   2138 
   2139 	/* Let the stack know...*/
   2140 	ifp->if_flags &= ~IFF_RUNNING;
   2141 
   2142 	ixgbe_reset_hw(hw);
   2143 	hw->adapter_stopped = FALSE;
   2144 	ixgbe_stop_adapter(hw);
   2145 	if (hw->mac.type == ixgbe_mac_82599EB)
   2146 		ixgbe_stop_mac_link_on_d3_82599(hw);
   2147 	/* Turn off the laser - noop with no optics */
   2148 	ixgbe_disable_tx_laser(hw);
   2149 
   2150 	/* Update the stack */
   2151 	adapter->link_up = FALSE;
   2152 	ixgbe_update_link_status(adapter);
   2153 
   2154 	/* reprogram the RAR[0] in case user changed it. */
   2155 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
   2156 
   2157 	return;
   2158 }
   2159 
   2160 
   2161 /*********************************************************************
   2162  *
   2163  *  Determine hardware revision.
   2164  *
   2165  **********************************************************************/
   2166 static void
   2167 ixgbe_identify_hardware(struct adapter *adapter)
   2168 {
   2169 	pcitag_t tag;
   2170 	pci_chipset_tag_t pc;
   2171 	pcireg_t subid, id;
   2172 	struct ixgbe_hw *hw = &adapter->hw;
   2173 
   2174 	pc = adapter->osdep.pc;
   2175 	tag = adapter->osdep.tag;
   2176 
   2177 	id = pci_conf_read(pc, tag, PCI_ID_REG);
   2178 	subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
   2179 
   2180 	/* Save off the information about this board */
   2181 	hw->vendor_id = PCI_VENDOR(id);
   2182 	hw->device_id = PCI_PRODUCT(id);
   2183 	hw->revision_id =
   2184 	    PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG));
   2185 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
   2186 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
   2187 
   2188 	/*
   2189 	** Make sure BUSMASTER is set
   2190 	*/
   2191 	ixgbe_pci_enable_busmaster(pc, tag);
   2192 
   2193 	/* We need this here to set the num_segs below */
   2194 	ixgbe_set_mac_type(hw);
   2195 
   2196 	/* Pick up the 82599 settings */
   2197 	if (hw->mac.type != ixgbe_mac_82598EB) {
   2198 		hw->phy.smart_speed = ixgbe_smart_speed;
   2199 		adapter->num_segs = IXGBE_82599_SCATTER;
   2200 	} else
   2201 		adapter->num_segs = IXGBE_82598_SCATTER;
   2202 
   2203 	return;
   2204 }
   2205 
   2206 /*********************************************************************
   2207  *
   2208  *  Determine optic type
   2209  *
   2210  **********************************************************************/
   2211 static void
   2212 ixgbe_setup_optics(struct adapter *adapter)
   2213 {
   2214 	struct ixgbe_hw *hw = &adapter->hw;
   2215 	int		layer;
   2216 
   2217 	layer = ixgbe_get_supported_physical_layer(hw);
   2218 
   2219 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
   2220 		adapter->optics = IFM_10G_T;
   2221 		return;
   2222 	}
   2223 
   2224 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
   2225 		adapter->optics = IFM_1000_T;
   2226 		return;
   2227 	}
   2228 
   2229 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
   2230 		adapter->optics = IFM_1000_SX;
   2231 		return;
   2232 	}
   2233 
   2234 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
   2235 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
   2236 		adapter->optics = IFM_10G_LR;
   2237 		return;
   2238 	}
   2239 
   2240 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
   2241 		adapter->optics = IFM_10G_SR;
   2242 		return;
   2243 	}
   2244 
   2245 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
   2246 		adapter->optics = IFM_10G_TWINAX;
   2247 		return;
   2248 	}
   2249 
   2250 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
   2251 	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
   2252 		adapter->optics = IFM_10G_CX4;
   2253 		return;
   2254 	}
   2255 
   2256 	/* If we get here just set the default */
   2257 	adapter->optics = IFM_ETHER | IFM_AUTO;
   2258 	return;
   2259 }
   2260 
   2261 /*********************************************************************
   2262  *
   2263  *  Setup the Legacy or MSI Interrupt handler
   2264  *
   2265  **********************************************************************/
   2266 static int
   2267 ixgbe_allocate_legacy(struct adapter *adapter,
   2268     const struct pci_attach_args *pa)
   2269 {
   2270 	device_t	dev = adapter->dev;
   2271 	struct		ix_queue *que = adapter->queues;
   2272 #ifndef IXGBE_LEGACY_TX
   2273 	struct tx_ring		*txr = adapter->tx_rings;
   2274 #endif
   2275 	int		counts[PCI_INTR_TYPE_SIZE];
   2276 	pci_intr_type_t intr_type, max_type;
   2277 	char intrbuf[PCI_INTRSTR_LEN];
   2278 	const char	*intrstr = NULL;
   2279 
   2280 	/* Allocation settings */
   2281 	max_type = PCI_INTR_TYPE_MSI;
   2282 	counts[PCI_INTR_TYPE_MSIX] = 0;
   2283 	counts[PCI_INTR_TYPE_MSI] = 1;
   2284 	counts[PCI_INTR_TYPE_INTX] = 1;
   2285 
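         	/*
         	 * Try MSI first; if the handler cannot be established, release
         	 * the interrupt and retry with INTx.
         	 */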
   2286 alloc_retry:
   2287 	if (pci_intr_alloc(pa, &adapter->osdep.intrs, counts, max_type) != 0) {
   2288 		aprint_error_dev(dev, "couldn't alloc interrupt\n");
   2289 		return ENXIO;
   2290 	}
   2291 	adapter->osdep.nintrs = 1;
   2292 	intrstr = pci_intr_string(adapter->osdep.pc, adapter->osdep.intrs[0],
   2293 	    intrbuf, sizeof(intrbuf));
   2294 	adapter->osdep.ihs[0] = pci_intr_establish_xname(adapter->osdep.pc,
   2295 	    adapter->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
   2296 	    device_xname(dev));
   2297 	if (adapter->osdep.ihs[0] == NULL) {
   2298 		intr_type = pci_intr_type(adapter->osdep.pc,
   2299 		    adapter->osdep.intrs[0]);
   2300 		aprint_error_dev(dev,"unable to establish %s\n",
   2301 		    (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   2302 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   2303 		switch (intr_type) {
   2304 		case PCI_INTR_TYPE_MSI:
   2305 			/* The next try is for INTx: Disable MSI */
   2306 			max_type = PCI_INTR_TYPE_INTX;
   2307 			counts[PCI_INTR_TYPE_INTX] = 1;
   2308 			goto alloc_retry;
   2309 		case PCI_INTR_TYPE_INTX:
   2310 		default:
   2311 			/* See below */
   2312 			break;
   2313 		}
   2314 	}
   2315 	if (adapter->osdep.ihs[0] == NULL) {
   2316 		aprint_error_dev(dev,
   2317 		    "couldn't establish interrupt%s%s\n",
   2318 		    intrstr ? " at " : "", intrstr ? intrstr : "");
   2319 		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1);
   2320 		return ENXIO;
   2321 	}
   2322 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
   2323 	/*
   2324 	 * Try allocating a fast interrupt and the associated deferred
   2325 	 * processing contexts.
   2326 	 */
   2327 #ifndef IXGBE_LEGACY_TX
   2328 	txr->txq_si = softint_establish(SOFTINT_NET, ixgbe_deferred_mq_start,
   2329 	    txr);
   2330 #endif
   2331 	que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que, que);
   2332 
   2333 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2334 	adapter->link_si =
   2335 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2336 	adapter->mod_si =
   2337 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2338 	adapter->msf_si =
   2339 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2340 	adapter->phy_si =
   2341 	    softint_establish(SOFTINT_NET, ixgbe_handle_phy, adapter);
   2342 
   2343 #ifdef IXGBE_FDIR
   2344 	adapter->fdir_si =
   2345 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2346 #endif
   2347 	if (que->que_si == NULL ||
   2348 	    adapter->link_si == NULL ||
   2349 	    adapter->mod_si == NULL ||
   2350 #ifdef IXGBE_FDIR
   2351 	    adapter->fdir_si == NULL ||
   2352 #endif
   2353 	    adapter->msf_si == NULL) {
   2354 		aprint_error_dev(dev,
   2355 		    "could not establish software interrupts\n");
   2356 		return ENXIO;
   2357 	}
   2358 
   2359 	/* For simplicity in the handlers */
   2360 	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
   2361 
   2362 	return (0);
   2363 }
   2364 
   2365 
   2366 /*********************************************************************
   2367  *
   2368  *  Setup MSIX Interrupt resources and handlers
   2369  *
   2370  **********************************************************************/
   2371 static int
   2372 ixgbe_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2373 {
   2374 	device_t        dev = adapter->dev;
   2375 	struct 		ix_queue *que = adapter->queues;
   2376 	struct  	tx_ring *txr = adapter->tx_rings;
   2377 	pci_chipset_tag_t pc;
   2378 	char		intrbuf[PCI_INTRSTR_LEN];
   2379 	char		intr_xname[32];
   2380 	const char	*intrstr = NULL;
   2381 	int 		error, vector = 0;
   2382 	int		cpu_id = 0;
   2383 	kcpuset_t	*affinity;
   2384 #ifdef	RSS
   2385 	cpuset_t	cpu_mask;
   2386 #endif
   2387 
   2388 	pc = adapter->osdep.pc;
   2389 #ifdef	RSS
   2390 	/*
   2391 	 * If we're doing RSS, the number of queues needs to
   2392 	 * match the number of RSS buckets that are configured.
   2393 	 *
   2394 	 * + If there's more queues than RSS buckets, we'll end
   2395 	 *   up with queues that get no traffic.
   2396 	 *
   2397 	 * + If there's more RSS buckets than queues, we'll end
   2398 	 *   up having multiple RSS buckets map to the same queue,
   2399 	 *   so there'll be some contention.
   2400 	 */
   2401 	if (adapter->num_queues != rss_getnumbuckets()) {
   2402 		device_printf(dev,
   2403 		    "%s: number of queues (%d) != number of RSS buckets (%d)"
   2404 		    "; performance will be impacted.\n",
   2405 		    __func__,
   2406 		    adapter->num_queues,
   2407 		    rss_getnumbuckets());
   2408 	}
   2409 #endif
   2410 
   2411 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2412 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2413 	    adapter->osdep.nintrs) != 0) {
   2414 		aprint_error_dev(dev,
   2415 		    "failed to allocate MSI-X interrupt\n");
   2416 		return (ENXIO);
   2417 	}
   2418 
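         	/*
         	 * Vectors 0 .. num_queues-1 service the TX/RX queue pairs;
         	 * the final vector (set up further below) handles link and
         	 * other causes.
         	 */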
   2419 	kcpuset_create(&affinity, false);
   2420 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2421 		snprintf(intr_xname, sizeof(intr_xname), "%s TX/RX",
   2422 		    device_xname(dev));
   2423 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2424 		    sizeof(intrbuf));
   2425 #ifdef IXG_MPSAFE
    2426 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2427 		    true);
   2428 #endif
   2429 		/* Set the handler function */
   2430 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2431 		    adapter->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
   2432 		    intr_xname);
   2433 		if (que->res == NULL) {
   2434 			pci_intr_release(pc, adapter->osdep.intrs,
   2435 			    adapter->osdep.nintrs);
   2436 			aprint_error_dev(dev,
   2437 			    "Failed to register QUE handler\n");
   2438 			kcpuset_destroy(affinity);
   2439 			return ENXIO;
   2440 		}
   2441 		que->msix = vector;
   2442 		adapter->active_queues |= (u64)(1 << que->msix);
   2443 #ifdef	RSS
   2444 		/*
   2445 		 * The queue ID is used as the RSS layer bucket ID.
   2446 		 * We look up the queue ID -> RSS CPU ID and select
   2447 		 * that.
   2448 		 */
   2449 		cpu_id = rss_getcpu(i % rss_getnumbuckets());
   2450 #else
   2451 		/*
   2452 		 * Bind the msix vector, and thus the
   2453 		 * rings to the corresponding cpu.
   2454 		 *
   2455 		 * This just happens to match the default RSS round-robin
   2456 		 * bucket -> queue -> CPU allocation.
   2457 		 */
   2458 		if (adapter->num_queues > 1)
   2459 			cpu_id = i;
   2460 #endif
   2461 		/* Round-robin affinity */
   2462 		kcpuset_zero(affinity);
   2463 		kcpuset_set(affinity, cpu_id % ncpu);
   2464 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2465 		    NULL);
   2466 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2467 		    intrstr);
   2468 		if (error == 0) {
   2469 #ifdef	RSS
    2470 			aprint_normal(
   2471 			    ", bound RSS bucket %d to CPU %d\n",
   2472 			    i, cpu_id);
   2473 #else
   2474 			aprint_verbose(
   2475 			    ", bound queue %d to cpu %d\n",
   2476 			    i, cpu_id);
   2477 #endif
   2478 		} else
   2479 			aprint_normal("\n");
   2480 #ifndef IXGBE_LEGACY_TX
   2481 		txr->txq_si = softint_establish(SOFTINT_NET,
   2482 		    ixgbe_deferred_mq_start, txr);
   2483 #endif
   2484 		que->que_si = softint_establish(SOFTINT_NET, ixgbe_handle_que,
   2485 		    que);
   2486 		if (que->que_si == NULL) {
   2487 			aprint_error_dev(dev,
   2488 			    "could not establish software interrupt\n");
   2489 		}
   2490 	}
   2491 
   2492 	/* and Link */
   2493 	cpu_id++;
   2494 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2495 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2496 	    sizeof(intrbuf));
   2497 #ifdef IXG_MPSAFE
   2498 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2499 	    true);
   2500 #endif
   2501 	/* Set the link handler function */
   2502 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2503 	    adapter->osdep.intrs[vector], IPL_NET, ixgbe_msix_link, adapter,
   2504 	    intr_xname);
   2505 	if (adapter->osdep.ihs[vector] == NULL) {
   2506 		adapter->res = NULL;
   2507 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2508 		kcpuset_destroy(affinity);
   2509 		return (ENXIO);
   2510 	}
   2511 	/* Round-robin affinity */
   2512 	kcpuset_zero(affinity);
   2513 	kcpuset_set(affinity, cpu_id % ncpu);
    2514 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   2515 
   2516 	aprint_normal_dev(dev,
   2517 	    "for link, interrupting at %s", intrstr);
   2518 	if (error == 0)
   2519 		aprint_normal(", affinity to cpu %d\n", cpu_id);
   2520 	else
   2521 		aprint_normal("\n");
   2522 
   2523 	adapter->vector = vector;
   2524 	/* Tasklets for Link, SFP and Multispeed Fiber */
   2525 	adapter->link_si =
   2526 	    softint_establish(SOFTINT_NET, ixgbe_handle_link, adapter);
   2527 	adapter->mod_si =
   2528 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
   2529 	adapter->msf_si =
   2530 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
   2531 	adapter->phy_si =
   2532 	    softint_establish(SOFTINT_NET, ixgbe_handle_phy, adapter);
   2533 #ifdef IXGBE_FDIR
   2534 	adapter->fdir_si =
   2535 	    softint_establish(SOFTINT_NET, ixgbe_reinit_fdir, adapter);
   2536 #endif
   2537 
   2538 	kcpuset_destroy(affinity);
   2539 	return (0);
   2540 }
   2541 
   2542 /*
   2543  * Setup Either MSI/X or MSI
   2544  */
   2545 static int
   2546 ixgbe_setup_msix(struct adapter *adapter)
   2547 {
   2548 	device_t dev = adapter->dev;
   2549 	int want, queues, msgs;
   2550 
    2551 	/* Override by tunable */
   2552 	if (ixgbe_enable_msix == 0)
   2553 		goto msi;
   2554 
   2555 	/* First try MSI/X */
   2556 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2557 	if (msgs < IXG_MSIX_NINTR)
   2558 		goto msi;
   2559 
   2560 	adapter->msix_mem = (void *)1; /* XXX */
   2561 
   2562 	/* Figure out a reasonable auto config value */
   2563 	queues = (ncpu > (msgs-1)) ? (msgs-1) : ncpu;
   2564 
   2565 #ifdef	RSS
   2566 	/* If we're doing RSS, clamp at the number of RSS buckets */
   2567 	if (queues > rss_getnumbuckets())
   2568 		queues = rss_getnumbuckets();
   2569 #endif
   2570 
   2571 	if (ixgbe_num_queues != 0)
   2572 		queues = ixgbe_num_queues;
   2573 
   2574 	/* reflect correct sysctl value */
   2575 	ixgbe_num_queues = queues;
   2576 
   2577 	/*
   2578 	** Want one vector (RX/TX pair) per queue
    2579 	** plus an additional one for Link.
   2580 	*/
   2581 	want = queues + 1;
   2582 	if (msgs >= want)
   2583 		msgs = want;
   2584 	else {
    2585 		aprint_error_dev(dev,
   2586 		    "MSIX Configuration Problem, "
   2587 		    "%d vectors but %d queues wanted!\n",
   2588 		    msgs, want);
   2589 		goto msi;
   2590 	}
   2591 	device_printf(dev,
   2592 	    "Using MSIX interrupts with %d vectors\n", msgs);
   2593 	adapter->num_queues = queues;
   2594 	return (msgs);
   2595 
   2596 	/*
   2597 	** If MSIX alloc failed or provided us with
   2598 	** less than needed, free and fall through to MSI
   2599 	*/
   2600 msi:
    2601 	msgs = pci_msi_count(adapter->osdep.pc, adapter->osdep.tag);
    2602 	adapter->msix_mem = NULL; /* XXX */
    2603 	msgs = 1;
    2604 	aprint_normal_dev(dev, "Using an MSI interrupt\n");
   2605 	return (msgs);
   2606 }
   2607 
   2608 
   2609 static int
   2610 ixgbe_allocate_pci_resources(struct adapter *adapter,
   2611     const struct pci_attach_args *pa)
   2612 {
   2613 	pcireg_t	memtype;
   2614 	device_t        dev = adapter->dev;
   2615 	bus_addr_t addr;
   2616 	int flags;
   2617 
   2618 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   2619 	switch (memtype) {
   2620 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   2621 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   2622 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   2623 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   2624 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   2625 			goto map_err;
   2626 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   2627 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   2628 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   2629 		}
   2630 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   2631 		     adapter->osdep.mem_size, flags,
   2632 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   2633 map_err:
   2634 			adapter->osdep.mem_size = 0;
   2635 			aprint_error_dev(dev, "unable to map BAR0\n");
   2636 			return ENXIO;
   2637 		}
   2638 		break;
   2639 	default:
   2640 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   2641 		return ENXIO;
   2642 	}
   2643 
   2644 	/* Legacy defaults */
   2645 	adapter->num_queues = 1;
   2646 	adapter->hw.back = &adapter->osdep;
   2647 
    2648 	/*
    2649 	** Now set up MSI or MSI/X; this should
    2650 	** return the number of supported
    2651 	** vectors. (Will be 1 for MSI.)
    2652 	*/
   2653 	adapter->msix = ixgbe_setup_msix(adapter);
   2654 	return (0);
   2655 }
   2656 
   2657 static void
   2658 ixgbe_free_pci_resources(struct adapter * adapter)
   2659 {
   2660 	struct 		ix_queue *que = adapter->queues;
   2661 	int		rid;
   2662 
   2663 	/*
   2664 	**  Release all msix queue resources:
   2665 	*/
   2666 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2667 		if (que->res != NULL)
   2668 			pci_intr_disestablish(adapter->osdep.pc,
   2669 			    adapter->osdep.ihs[i]);
   2670 	}
   2671 
   2672 
   2673 	/* Clean the Legacy or Link interrupt last */
   2674 	if (adapter->vector) /* we are doing MSIX */
   2675 		rid = adapter->vector;
   2676 	else
   2677 		rid = 0;
   2678 
   2679 	if (adapter->osdep.ihs[rid] != NULL) {
   2680 		pci_intr_disestablish(adapter->osdep.pc,
   2681 		    adapter->osdep.ihs[rid]);
   2682 		adapter->osdep.ihs[rid] = NULL;
   2683 	}
   2684 
   2685 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   2686 	    adapter->osdep.nintrs);
   2687 
   2688 	if (adapter->osdep.mem_size != 0) {
   2689 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   2690 		    adapter->osdep.mem_bus_space_handle,
   2691 		    adapter->osdep.mem_size);
   2692 	}
   2693 
   2694 	return;
   2695 }
   2696 
   2697 /*********************************************************************
   2698  *
   2699  *  Setup networking device structure and register an interface.
   2700  *
   2701  **********************************************************************/
   2702 static int
   2703 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
   2704 {
   2705 	struct ethercom *ec = &adapter->osdep.ec;
   2706 	struct ifnet   *ifp;
   2707 
   2708 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
   2709 
   2710 	ifp = adapter->ifp = &ec->ec_if;
   2711 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   2712 	ifp->if_baudrate = IF_Gbps(10);
   2713 	ifp->if_init = ixgbe_init;
   2714 	ifp->if_stop = ixgbe_ifstop;
   2715 	ifp->if_softc = adapter;
   2716 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2717 	ifp->if_ioctl = ixgbe_ioctl;
   2718 #if __FreeBSD_version >= 1100045
   2719 	/* TSO parameters */
   2720 	ifp->if_hw_tsomax = 65518;
   2721 	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
   2722 	ifp->if_hw_tsomaxsegsize = 2048;
   2723 #endif
   2724 #ifndef IXGBE_LEGACY_TX
   2725 	ifp->if_transmit = ixgbe_mq_start;
   2726 	ifp->if_qflush = ixgbe_qflush;
   2727 #else
   2728 	ifp->if_start = ixgbe_start;
   2729 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   2730 #if 0
   2731 	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
   2732 #endif
   2733 	IFQ_SET_READY(&ifp->if_snd);
   2734 #endif
   2735 
   2736 	if_initialize(ifp);
   2737 	ether_ifattach(ifp, adapter->hw.mac.addr);
   2738 	if_register(ifp);
   2739 	ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
   2740 
   2741 	adapter->max_frame_size =
   2742 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
   2743 
   2744 	/*
   2745 	 * Tell the upper layer(s) we support long frames.
   2746 	 */
   2747 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   2748 
   2749 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4 | IFCAP_TSOv6;
   2750 	ifp->if_capenable = 0;
   2751 
   2752 	ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM;
   2753 	ec->ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2754 	ifp->if_capabilities |= IFCAP_LRO;
   2755 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   2756 	    		    | ETHERCAP_VLAN_MTU;
   2757 	ec->ec_capenable = ec->ec_capabilities;
   2758 
   2759 	/*
    2760 	** Don't turn this on by default: if vlans are
    2761 	** created on another pseudo device (e.g. lagg),
    2762 	** vlan events are not passed through, breaking
    2763 	** operation; with HW FILTER off it works. If
    2764 	** you use vlans directly on the ixgbe driver you can
    2765 	** enable this and get full hardware tag filtering.
   2766 	*/
   2767 	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
   2768 
   2769 	/*
   2770 	 * Specify the media types supported by this adapter and register
   2771 	 * callbacks to update media and link information
   2772 	 */
   2773 	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
   2774 		    ixgbe_media_status);
   2775 
   2776 	ixgbe_add_media_types(adapter);
   2777 
   2778 	/* Autoselect media by default */
   2779 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   2780 
   2781 	return (0);
   2782 }
   2783 
   2784 static void
   2785 ixgbe_add_media_types(struct adapter *adapter)
   2786 {
   2787 	struct ixgbe_hw *hw = &adapter->hw;
   2788 	device_t dev = adapter->dev;
   2789 	int layer;
   2790 
   2791 	layer = ixgbe_get_supported_physical_layer(hw);
   2792 
   2793 	/* Media types with matching NetBSD media defines */
   2794 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
   2795 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
   2796 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
   2797 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
   2798 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
   2799 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
   2800 
   2801 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
   2802 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
   2803 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
   2804 
   2805 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
   2806 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
   2807 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
   2808 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
   2809 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
   2810 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
   2811 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
   2812 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
   2813 
   2814 	/*
   2815 	** Other (no matching NetBSD media type):
    2816 	** To work around this, we'll assign these completely
   2817 	** inappropriate media types.
   2818 	*/
   2819 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
   2820 		device_printf(dev, "Media supported: 10GbaseKR\n");
   2821 		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
   2822 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
   2823 	}
   2824 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
   2825 		device_printf(dev, "Media supported: 10GbaseKX4\n");
   2826 		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
   2827 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
   2828 	}
   2829 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
   2830 		device_printf(dev, "Media supported: 1000baseKX\n");
   2831 		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
   2832 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
   2833 	}
   2834 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
   2835 		/* Someday, someone will care about you... */
   2836 		device_printf(dev, "Media supported: 1000baseBX\n");
   2837 	}
   2838 
   2839 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
   2840 		ifmedia_add(&adapter->media,
   2841 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
   2842 		ifmedia_add(&adapter->media,
   2843 		    IFM_ETHER | IFM_1000_T, 0, NULL);
   2844 	}
   2845 
   2846 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   2847 }
   2848 
   2849 static void
   2850 ixgbe_config_link(struct adapter *adapter)
   2851 {
   2852 	struct ixgbe_hw *hw = &adapter->hw;
   2853 	u32	autoneg, err = 0;
   2854 	bool	sfp, negotiate;
   2855 
   2856 	sfp = ixgbe_is_sfp(hw);
   2857 
   2858 	if (sfp) {
   2859 		void *ip;
   2860 
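         		/*
         		 * For SFP/SFP+ ports, defer link bring-up to the module
         		 * (mod) or multispeed-fiber (msf) softint handlers.
         		 */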
   2861 		if (hw->phy.multispeed_fiber) {
   2862 			hw->mac.ops.setup_sfp(hw);
   2863 			ixgbe_enable_tx_laser(hw);
   2864 			ip = adapter->msf_si;
   2865 		} else {
   2866 			ip = adapter->mod_si;
   2867 		}
   2868 
   2869 		kpreempt_disable();
   2870 		softint_schedule(ip);
   2871 		kpreempt_enable();
   2872 	} else {
   2873 		if (hw->mac.ops.check_link)
   2874 			err = ixgbe_check_link(hw, &adapter->link_speed,
   2875 			    &adapter->link_up, FALSE);
   2876 		if (err)
   2877 			goto out;
   2878 		autoneg = hw->phy.autoneg_advertised;
   2879 		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   2880                 	err  = hw->mac.ops.get_link_capabilities(hw,
   2881 			    &autoneg, &negotiate);
   2882 		else
   2883 			negotiate = 0;
   2884 		if (err)
   2885 			goto out;
   2886 		if (hw->mac.ops.setup_link)
   2887                 	err = hw->mac.ops.setup_link(hw,
   2888 			    autoneg, adapter->link_up);
   2889 	}
   2890 out:
   2891 	return;
   2892 }
   2893 
   2894 
   2895 /*********************************************************************
   2896  *
   2897  *  Enable transmit units.
   2898  *
   2899  **********************************************************************/
   2900 static void
   2901 ixgbe_initialize_transmit_units(struct adapter *adapter)
   2902 {
   2903 	struct tx_ring	*txr = adapter->tx_rings;
   2904 	struct ixgbe_hw	*hw = &adapter->hw;
   2905 
   2906 	/* Setup the Base and Length of the Tx Descriptor Ring */
   2907 
   2908 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   2909 		u64	tdba = txr->txdma.dma_paddr;
   2910 		u32	txctrl = 0;
   2911 
   2912 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
   2913 		       (tdba & 0x00000000ffffffffULL));
   2914 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
   2915 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
   2916 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
   2917 
   2918 		/* Setup the HW Tx Head and Tail descriptor pointers */
   2919 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
   2920 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
   2921 
   2922 		/* Cache the tail address */
   2923 		txr->tail = IXGBE_TDT(txr->me);
   2924 
   2925 		/* Set the processing limit */
   2926 		txr->process_limit = ixgbe_tx_process_limit;
   2927 
   2928 		/* Disable Head Writeback */
   2929 		switch (hw->mac.type) {
   2930 		case ixgbe_mac_82598EB:
   2931 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
   2932 			break;
   2933 		case ixgbe_mac_82599EB:
   2934 		case ixgbe_mac_X540:
   2935 		default:
   2936 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
   2937 			break;
   2938                 }
   2939 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   2940 		switch (hw->mac.type) {
   2941 		case ixgbe_mac_82598EB:
   2942 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
   2943 			break;
   2944 		case ixgbe_mac_82599EB:
   2945 		case ixgbe_mac_X540:
   2946 		default:
   2947 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
   2948 			break;
   2949 		}
   2950 
   2951 	}
   2952 
   2953 	if (hw->mac.type != ixgbe_mac_82598EB) {
   2954 		u32 dmatxctl, rttdcs;
   2955 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
   2956 		dmatxctl |= IXGBE_DMATXCTL_TE;
   2957 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
   2958 		/* Disable arbiter to set MTQC */
   2959 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
   2960 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
   2961 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   2962 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
   2963 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
   2964 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
   2965 	}
   2966 
   2967 	return;
   2968 }
   2969 
   2970 static void
   2971 ixgbe_initialise_rss_mapping(struct adapter *adapter)
   2972 {
   2973 	struct ixgbe_hw	*hw = &adapter->hw;
   2974 	uint32_t reta;
   2975 	int i, j, queue_id, table_size;
   2976 	int index_mult;
   2977 	uint32_t rss_key[10];
   2978 	uint32_t mrqc;
   2979 #ifdef	RSS
   2980 	uint32_t rss_hash_config;
   2981 #endif
   2982 
   2983 	/* Setup RSS */
   2984 	reta = 0;
   2985 
   2986 #ifdef	RSS
   2987 	/* Fetch the configured RSS key */
   2988 	rss_getkey((uint8_t *) &rss_key);
   2989 #else
   2990 	/* set up random bits */
   2991 	cprng_fast(&rss_key, sizeof(rss_key));
   2992 #endif
   2993 
   2994 	/* Set multiplier for RETA setup and table size based on MAC */
   2995 	index_mult = 0x1;
   2996 	table_size = 128;
   2997 	switch (adapter->hw.mac.type) {
   2998 	case ixgbe_mac_82598EB:
   2999 		index_mult = 0x11;
   3000 		break;
   3001 	case ixgbe_mac_X550:
   3002 	case ixgbe_mac_X550EM_x:
   3003 		table_size = 512;
   3004 		break;
   3005 	default:
   3006 		break;
   3007 	}
   3008 
   3009 	/* Set up the redirection table */
   3010 	for (i = 0, j = 0; i < table_size; i++, j++) {
   3011 		if (j == adapter->num_queues) j = 0;
   3012 #ifdef	RSS
   3013 		/*
   3014 		 * Fetch the RSS bucket id for the given indirection entry.
   3015 		 * Cap it at the number of configured buckets (which is
   3016 		 * num_queues.)
   3017 		 */
   3018 		queue_id = rss_get_indirection_to_bucket(i);
   3019 		queue_id = queue_id % adapter->num_queues;
   3020 #else
   3021 		queue_id = (j * index_mult);
   3022 #endif
   3023 		/*
   3024 		 * The low 8 bits are for hash value (n+0);
   3025 		 * The next 8 bits are for hash value (n+1), etc.
   3026 		 */
   3027 		reta = reta >> 8;
   3028 		reta = reta | ( ((uint32_t) queue_id) << 24);
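         		/*
         		 * Every fourth entry completes a 32-bit RETA register;
         		 * entries beyond 128 go to the extended table (ERETA)
         		 * on X550-class MACs.
         		 */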
   3029 		if ((i & 3) == 3) {
   3030 			if (i < 128)
   3031 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
   3032 			else
   3033 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
   3034 			reta = 0;
   3035 		}
   3036 	}
   3037 
   3038 	/* Now fill our hash function seeds */
   3039 	for (i = 0; i < 10; i++)
   3040 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
   3041 
   3042 	/* Perform hash on these packet types */
   3043 #ifdef	RSS
   3044 	mrqc = IXGBE_MRQC_RSSEN;
   3045 	rss_hash_config = rss_gethashconfig();
   3046 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   3047 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   3048 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   3049 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   3050 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   3051 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   3052 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   3053 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   3054 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   3055 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
   3056 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   3057 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
   3058 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   3059 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   3060 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   3061 		device_printf(adapter->dev,
   3062 		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
   3063 		    "but not supported\n", __func__);
   3064 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   3065 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   3066 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   3067 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
   3068 #else
   3069 	/*
   3070 	 * Disable UDP - IP fragments aren't currently being handled
   3071 	 * and so we end up with a mix of 2-tuple and 4-tuple
   3072 	 * traffic.
   3073 	 */
   3074 	mrqc = IXGBE_MRQC_RSSEN
   3075 	     | IXGBE_MRQC_RSS_FIELD_IPV4
   3076 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
   3077 #if 0
   3078 	     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
   3079 #endif
   3080 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
   3081 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
   3082 	     | IXGBE_MRQC_RSS_FIELD_IPV6
   3083 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
   3084 #if 0
   3085 	     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
   3086 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
   3087 #endif
   3088 	;
   3089 #endif /* RSS */
   3090 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
   3091 }
   3092 
   3093 
   3094 /*********************************************************************
   3095  *
   3096  *  Setup receive registers and features.
   3097  *
   3098  **********************************************************************/
   3099 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
   3100 
   3101 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
   3102 
   3103 static void
   3104 ixgbe_initialize_receive_units(struct adapter *adapter)
   3105 {
   3106 	int i;
   3107 	struct	rx_ring	*rxr = adapter->rx_rings;
   3108 	struct ixgbe_hw	*hw = &adapter->hw;
   3109 	struct ifnet   *ifp = adapter->ifp;
   3110 	u32		bufsz, fctrl, srrctl, rxcsum;
   3111 	u32		hlreg;
   3112 
   3113 
   3114 	/*
   3115 	 * Make sure receives are disabled while
   3116 	 * setting up the descriptor ring
   3117 	 */
   3118 	ixgbe_disable_rx(hw);
   3119 
   3120 	/* Enable broadcasts */
   3121 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
   3122 	fctrl |= IXGBE_FCTRL_BAM;
   3123 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   3124 		fctrl |= IXGBE_FCTRL_DPF;
   3125 		fctrl |= IXGBE_FCTRL_PMCF;
   3126 	}
   3127 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
   3128 
   3129 	/* Set for Jumbo Frames? */
   3130 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
   3131 	if (ifp->if_mtu > ETHERMTU)
   3132 		hlreg |= IXGBE_HLREG0_JUMBOEN;
   3133 	else
   3134 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
   3135 #ifdef DEV_NETMAP
   3136 	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
    3137 	if ((ifp->if_capenable & IFCAP_NETMAP) && !ix_crcstrip)
   3138 		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
   3139 	else
   3140 		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
   3141 #endif /* DEV_NETMAP */
   3142 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
   3143 
   3144 	bufsz = (adapter->rx_mbuf_sz +
   3145 	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   3146 
   3147 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
   3148 		u64 rdba = rxr->rxdma.dma_paddr;
   3149 
   3150 		/* Setup the Base and Length of the Rx Descriptor Ring */
   3151 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
   3152 			       (rdba & 0x00000000ffffffffULL));
   3153 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
   3154 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
   3155 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   3156 
   3157 		/* Set up the SRRCTL register */
   3158 		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
   3159 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   3160 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   3161 		srrctl |= bufsz;
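         		/* Advanced one-buffer descriptor format (no header split). */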
   3162 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   3163 
   3164 		/*
   3165 		 * Set DROP_EN iff we have no flow control and >1 queue.
   3166 		 * Note that srrctl was cleared shortly before during reset,
   3167 		 * so we do not need to clear the bit, but do it just in case
   3168 		 * this code is moved elsewhere.
   3169 		 */
   3170 		if (adapter->num_queues > 1 &&
   3171 		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
   3172 			srrctl |= IXGBE_SRRCTL_DROP_EN;
   3173 		} else {
   3174 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
   3175 		}
   3176 
   3177 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
   3178 
   3179 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
   3180 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
   3181 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
   3182 
   3183 		/* Set the processing limit */
   3184 		rxr->process_limit = ixgbe_rx_process_limit;
   3185 
   3186 		/* Set the driver rx tail address */
    3187 		rxr->tail = IXGBE_RDT(rxr->me);
   3188 	}
   3189 
   3190 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
   3191 		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
   3192 			      IXGBE_PSRTYPE_UDPHDR |
   3193 			      IXGBE_PSRTYPE_IPV4HDR |
   3194 			      IXGBE_PSRTYPE_IPV6HDR;
   3195 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
   3196 	}
   3197 
   3198 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   3199 
   3200 	ixgbe_initialise_rss_mapping(adapter);
   3201 
   3202 	if (adapter->num_queues > 1) {
   3203 		/* RSS and RX IPP Checksum are mutually exclusive */
   3204 		rxcsum |= IXGBE_RXCSUM_PCSD;
   3205 	}
   3206 
   3207 	if (ifp->if_capenable & IFCAP_RXCSUM)
   3208 		rxcsum |= IXGBE_RXCSUM_PCSD;
   3209 
   3210 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   3211 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   3212 
   3213 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   3214 
   3215 	return;
   3216 }
   3217 
   3218 
   3219 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
    3220 /*
    3221 ** This routine is run via a vlan config EVENT;
    3222 ** it enables us to use the HW Filter table since
    3223 ** we can get the vlan id. This just creates the
    3224 ** entry in the soft version of the VFTA; init will
    3225 ** repopulate the real table.
    3226 */
   3227 static void
   3228 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3229 {
   3230 	struct adapter	*adapter = ifp->if_softc;
   3231 	u16		index, bit;
   3232 
   3233 	if (ifp->if_softc !=  arg)   /* Not our event */
   3234 		return;
   3235 
   3236 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3237 		return;
   3238 
   3239 	IXGBE_CORE_LOCK(adapter);
   3240 	index = (vtag >> 5) & 0x7F;
   3241 	bit = vtag & 0x1F;
   3242 	adapter->shadow_vfta[index] |= (1 << bit);
   3243 	ixgbe_setup_vlan_hw_support(adapter);
   3244 	IXGBE_CORE_UNLOCK(adapter);
   3245 }
   3246 
    3247 /*
    3248 ** This routine is run via a vlan
    3249 ** unconfig EVENT; it removes our entry
    3250 ** from the soft VFTA.
    3251 */
   3252 static void
   3253 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   3254 {
   3255 	struct adapter	*adapter = ifp->if_softc;
   3256 	u16		index, bit;
   3257 
   3258 	if (ifp->if_softc !=  arg)
   3259 		return;
   3260 
   3261 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
   3262 		return;
   3263 
   3264 	IXGBE_CORE_LOCK(adapter);
   3265 	index = (vtag >> 5) & 0x7F;
   3266 	bit = vtag & 0x1F;
   3267 	adapter->shadow_vfta[index] &= ~(1 << bit);
   3268 	/* Re-init to load the changes */
   3269 	ixgbe_setup_vlan_hw_support(adapter);
   3270 	IXGBE_CORE_UNLOCK(adapter);
   3271 }
   3272 #endif
   3273 
   3274 static void
   3275 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
   3276 {
   3277 	struct ethercom *ec = &adapter->osdep.ec;
   3278 	struct ixgbe_hw *hw = &adapter->hw;
   3279 	struct rx_ring	*rxr;
   3280 	u32		ctrl;
   3281 
   3282 
    3283 	/*
    3284 	** We get here through init_locked, meaning
    3285 	** a soft reset; this has already cleared
    3286 	** the VFTA and other state, so if no vlans
    3287 	** have been registered, do nothing.
    3288 	*/
   3289 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   3290 		return;
   3291 
   3292 	/* Setup the queues for vlans */
   3293 	for (int i = 0; i < adapter->num_queues; i++) {
   3294 		rxr = &adapter->rx_rings[i];
   3295 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
   3296 		if (hw->mac.type != ixgbe_mac_82598EB) {
   3297 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
   3298 			ctrl |= IXGBE_RXDCTL_VME;
   3299 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
   3300 		}
   3301 		rxr->vtag_strip = TRUE;
   3302 	}
   3303 
   3304 	if ((ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) == 0)
   3305 		return;
   3306 	/*
    3307 	** A soft reset zeroes out the VFTA, so
   3308 	** we need to repopulate it now.
   3309 	*/
   3310 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
   3311 		if (adapter->shadow_vfta[i] != 0)
   3312 			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
   3313 			    adapter->shadow_vfta[i]);
   3314 
   3315 	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
   3316 	/* Enable the Filter Table if enabled */
   3317 	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) {
   3318 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
   3319 		ctrl |= IXGBE_VLNCTRL_VFE;
   3320 	}
   3321 	if (hw->mac.type == ixgbe_mac_82598EB)
   3322 		ctrl |= IXGBE_VLNCTRL_VME;
   3323 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
   3324 }
   3325 
   3326 static void
   3327 ixgbe_enable_intr(struct adapter *adapter)
   3328 {
   3329 	struct ixgbe_hw	*hw = &adapter->hw;
   3330 	struct ix_queue	*que = adapter->queues;
   3331 	u32		mask, fwsm;
   3332 
   3333 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   3334 	/* Enable Fan Failure detection */
   3335 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
    3336 		mask |= IXGBE_EIMS_GPI_SDP1;
   3337 
   3338 	switch (adapter->hw.mac.type) {
   3339 		case ixgbe_mac_82599EB:
   3340 			mask |= IXGBE_EIMS_ECC;
   3341 			/* Temperature sensor on some adapters */
   3342 			mask |= IXGBE_EIMS_GPI_SDP0;
   3343 			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
   3344 			mask |= IXGBE_EIMS_GPI_SDP1;
   3345 			mask |= IXGBE_EIMS_GPI_SDP2;
   3346 #ifdef IXGBE_FDIR
   3347 			mask |= IXGBE_EIMS_FLOW_DIR;
   3348 #endif
   3349 			break;
   3350 		case ixgbe_mac_X540:
   3351 			/* Detect if Thermal Sensor is enabled */
   3352 			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
   3353 			if (fwsm & IXGBE_FWSM_TS_ENABLED)
   3354 				mask |= IXGBE_EIMS_TS;
   3355 			mask |= IXGBE_EIMS_ECC;
   3356 #ifdef IXGBE_FDIR
   3357 			mask |= IXGBE_EIMS_FLOW_DIR;
   3358 #endif
   3359 			break;
   3360 		case ixgbe_mac_X550:
   3361 		case ixgbe_mac_X550EM_x:
   3362 			/* MAC thermal sensor is automatically enabled */
   3363 			mask |= IXGBE_EIMS_TS;
   3364 			/* Some devices use SDP0 for important information */
   3365 			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
   3366 			    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
   3367 				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
   3368 			mask |= IXGBE_EIMS_ECC;
   3369 #ifdef IXGBE_FDIR
   3370 			mask |= IXGBE_EIMS_FLOW_DIR;
   3371 #endif
   3372 		/* falls through */
   3373 		default:
   3374 			break;
   3375 	}
   3376 
   3377 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
   3378 
   3379 	/* With MSI-X we use auto clear */
   3380 	if (adapter->msix_mem) {
   3381 		mask = IXGBE_EIMS_ENABLE_MASK;
   3382 		/* Don't autoclear Link */
   3383 		mask &= ~IXGBE_EIMS_OTHER;
   3384 		mask &= ~IXGBE_EIMS_LSC;
   3385 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
   3386 	}
   3387 
    3388 	/*
    3389 	** Now enable all queues; this is done separately to
    3390 	** allow handling of the extended (beyond 32) MSIX
    3391 	** vectors that can be used by the 82599.
    3392 	*/
    3393 	for (int i = 0; i < adapter->num_queues; i++, que++)
    3394 		ixgbe_enable_queue(adapter, que->msix);
   3395 
   3396 	IXGBE_WRITE_FLUSH(hw);
   3397 
   3398 	return;
   3399 }
   3400 
   3401 static void
   3402 ixgbe_disable_intr(struct adapter *adapter)
   3403 {
   3404 	if (adapter->msix_mem)
   3405 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
   3406 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
   3407 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
   3408 	} else {
   3409 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
   3410 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
   3411 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
   3412 	}
   3413 	IXGBE_WRITE_FLUSH(&adapter->hw);
   3414 	return;
   3415 }
   3416 
   3417 /*
   3418 ** Get the width and transaction speed of
   3419 ** the slot this adapter is plugged into.
   3420 */
   3421 static void
   3422 ixgbe_get_slot_info(struct ixgbe_hw *hw)
   3423 {
   3424 	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
   3425 	struct ixgbe_mac_info	*mac = &hw->mac;
   3426 	u16			link;
   3427 
   3428 	/* For most devices simply call the shared code routine */
   3429 	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
   3430 		ixgbe_get_bus_info(hw);
   3431 		/* These devices don't use PCI-E */
   3432 		switch (hw->mac.type) {
   3433 		case ixgbe_mac_X550EM_x:
   3434 			return;
   3435 		default:
   3436 			goto display;
   3437 		}
   3438 	}
   3439 
   3440 	/*
   3441 	** For the Quad port adapter we need to parse back
   3442 	** up the PCI tree to find the speed of the expansion
   3443 	** slot into which this adapter is plugged. A bit more work.
   3444 	*/
   3445 	dev = device_parent(device_parent(dev));
   3446 #ifdef IXGBE_DEBUG
   3447 	device_printf(dev, "parent pcib = %x,%x,%x\n",
   3448 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
   3449 #endif
   3450 	dev = device_parent(device_parent(dev));
   3451 #ifdef IXGBE_DEBUG
   3452 	device_printf(dev, "slot pcib = %x,%x,%x\n",
   3453 	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
   3454 #endif
   3455 	/* Now get the PCI Express Capabilities offset */
   3456 	/* ...and read the Link Status Register */
   3457 	link = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
   3458 	switch (link & IXGBE_PCI_LINK_WIDTH) {
   3459 	case IXGBE_PCI_LINK_WIDTH_1:
   3460 		hw->bus.width = ixgbe_bus_width_pcie_x1;
   3461 		break;
   3462 	case IXGBE_PCI_LINK_WIDTH_2:
   3463 		hw->bus.width = ixgbe_bus_width_pcie_x2;
   3464 		break;
   3465 	case IXGBE_PCI_LINK_WIDTH_4:
   3466 		hw->bus.width = ixgbe_bus_width_pcie_x4;
   3467 		break;
   3468 	case IXGBE_PCI_LINK_WIDTH_8:
   3469 		hw->bus.width = ixgbe_bus_width_pcie_x8;
   3470 		break;
   3471 	default:
   3472 		hw->bus.width = ixgbe_bus_width_unknown;
   3473 		break;
   3474 	}
   3475 
   3476 	switch (link & IXGBE_PCI_LINK_SPEED) {
   3477 	case IXGBE_PCI_LINK_SPEED_2500:
   3478 		hw->bus.speed = ixgbe_bus_speed_2500;
   3479 		break;
   3480 	case IXGBE_PCI_LINK_SPEED_5000:
   3481 		hw->bus.speed = ixgbe_bus_speed_5000;
   3482 		break;
   3483 	case IXGBE_PCI_LINK_SPEED_8000:
   3484 		hw->bus.speed = ixgbe_bus_speed_8000;
   3485 		break;
   3486 	default:
   3487 		hw->bus.speed = ixgbe_bus_speed_unknown;
   3488 		break;
   3489 	}
   3490 
   3491 	mac->ops.set_lan_id(hw);
   3492 
   3493 display:
    3494 	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
    3495 	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
    3496 	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
    3497 	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
    3498 	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
    3499 	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
    3500 	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
    3501 	    ("Unknown"));
   3502 
   3503 	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   3504 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
   3505 	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
   3506 		device_printf(dev, "PCI-Express bandwidth available"
   3507 		    " for this card\n     is not sufficient for"
   3508 		    " optimal performance.\n");
   3509 		device_printf(dev, "For optimal performance a x8 "
   3510 		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
    3511 	}
   3512 	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
   3513 	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
   3514 	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
   3515 		device_printf(dev, "PCI-Express bandwidth available"
   3516 		    " for this card\n     is not sufficient for"
   3517 		    " optimal performance.\n");
   3518 		device_printf(dev, "For optimal performance a x8 "
   3519 		    "PCIE Gen3 slot is required.\n");
    3520 	}
   3521 
   3522 	return;
   3523 }
   3524 
   3525 
   3526 /*
   3527 ** Setup the correct IVAR register for a particular MSIX interrupt
   3528 **   (yes this is all very magic and confusing :)
   3529 **  - entry is the register array entry
   3530 **  - vector is the MSIX vector for this queue
   3531 **  - type is RX/TX/MISC
   3532 */
   3533 static void
   3534 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   3535 {
   3536 	struct ixgbe_hw *hw = &adapter->hw;
   3537 	u32 ivar, index;
   3538 
   3539 	vector |= IXGBE_IVAR_ALLOC_VAL;
   3540 
   3541 	switch (hw->mac.type) {
   3542 
   3543 	case ixgbe_mac_82598EB:
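         		/*
         		 * 82598: each IVAR register holds four 8-bit entries;
         		 * RX causes use entries 0-63 and TX causes 64-127,
         		 * hence the (type * 64) offset below.
         		 */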
   3544 		if (type == -1)
   3545 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
   3546 		else
   3547 			entry += (type * 64);
   3548 		index = (entry >> 2) & 0x1F;
   3549 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
   3550 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
   3551 		ivar |= (vector << (8 * (entry & 0x3)));
   3552 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
   3553 		break;
   3554 
   3555 	case ixgbe_mac_82599EB:
   3556 	case ixgbe_mac_X540:
   3557 	case ixgbe_mac_X550:
   3558 	case ixgbe_mac_X550EM_x:
   3559 		if (type == -1) { /* MISC IVAR */
   3560 			index = (entry & 1) * 8;
   3561 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
   3562 			ivar &= ~(0xFF << index);
   3563 			ivar |= (vector << index);
   3564 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
   3565 		} else {	/* RX/TX IVARS */
   3566 			index = (16 * (entry & 1)) + (8 * type);
   3567 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
   3568 			ivar &= ~(0xFF << index);
   3569 			ivar |= (vector << index);
   3570 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
    3571 		}
         		break;
   3572 
   3573 	default:
   3574 		break;
   3575 	}
   3576 }
   3577 
   3578 static void
   3579 ixgbe_configure_ivars(struct adapter *adapter)
   3580 {
   3581 	struct  ix_queue *que = adapter->queues;
   3582 	u32 newitr;
   3583 
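         	/*
         	 * The ITR interval lives in bits [11:3] of EITR; the encoding
         	 * (4000000 / rate) & 0x0FF8 caps interrupts near "rate" per
         	 * second (assuming the hardware's ~2 usec granularity).
         	 */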
   3584 	if (ixgbe_max_interrupt_rate > 0)
   3585 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
   3586 	else {
   3587 		/*
   3588 		** Disable DMA coalescing if interrupt moderation is
   3589 		** disabled.
   3590 		*/
   3591 		adapter->dmac = 0;
   3592 		newitr = 0;
   3593 	}
   3594 
    3595 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    3596 		/* First the RX queue entry */
    3597 		ixgbe_set_ivar(adapter, i, que->msix, 0);
    3598 		/* ... and the TX */
    3599 		ixgbe_set_ivar(adapter, i, que->msix, 1);
    3600 		/* Set an Initial EITR value */
    3601 		IXGBE_WRITE_REG(&adapter->hw,
    3602 		    IXGBE_EITR(que->msix), newitr);
    3603 	}
    3604 
    3605 	/* For the Link interrupt */
    3606 	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
   3607 }
   3608 
   3609 /*
   3610 ** ixgbe_sfp_probe - called in the local timer to
    3611 ** determine whether a port has had optics inserted.
    3612 */
    3613 static bool
         ixgbe_sfp_probe(struct adapter *adapter)
   3614 {
   3615 	struct ixgbe_hw	*hw = &adapter->hw;
   3616 	device_t	dev = adapter->dev;
   3617 	bool		result = FALSE;
   3618 
   3619 	if ((hw->phy.type == ixgbe_phy_nl) &&
   3620 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
   3621 		s32 ret = hw->phy.ops.identify_sfp(hw);
   3622 		if (ret)
   3623                         goto out;
   3624 		ret = hw->phy.ops.reset(hw);
   3625 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
    3626 			device_printf(dev, "Unsupported SFP+ module detected!\n");
   3627 			device_printf(dev, "Reload driver with supported module.\n");
   3628 			adapter->sfp_probe = FALSE;
   3629                         goto out;
   3630 		} else
    3631 			device_printf(dev, "SFP+ module detected!\n");
   3632 		/* We now have supported optics */
   3633 		adapter->sfp_probe = FALSE;
   3634 		/* Set the optics type so system reports correctly */
   3635 		ixgbe_setup_optics(adapter);
   3636 		result = TRUE;
   3637 	}
   3638 out:
   3639 	return (result);
   3640 }
   3641 
   3642 /*
   3643 ** Tasklet handler for MSIX Link interrupts
    3644 **  - run outside the interrupt handler since it might sleep
   3645 */
   3646 static void
   3647 ixgbe_handle_link(void *context)
   3648 {
   3649 	struct adapter  *adapter = context;
   3650 
   3651 	ixgbe_check_link(&adapter->hw,
   3652 	    &adapter->link_speed, &adapter->link_up, 0);
   3653 	ixgbe_update_link_status(adapter);
   3654 }
   3655 
   3656 /*
   3657 ** Tasklet for handling SFP module interrupts
   3658 */
   3659 static void
   3660 ixgbe_handle_mod(void *context)
   3661 {
   3662 	struct adapter  *adapter = context;
   3663 	struct ixgbe_hw *hw = &adapter->hw;
   3664 	device_t	dev = adapter->dev;
   3665 	u32 err;
   3666 
   3667 	err = hw->phy.ops.identify_sfp(hw);
   3668 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   3669 		device_printf(dev,
   3670 		    "Unsupported SFP+ module type was detected.\n");
   3671 		return;
   3672 	}
   3673 	err = hw->mac.ops.setup_sfp(hw);
   3674 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
   3675 		device_printf(dev,
   3676 		    "Setup failure - unsupported SFP+ module type.\n");
   3677 		return;
   3678 	}
   3679 	softint_schedule(adapter->msf_si);
   3680 	return;
   3681 }
   3682 
   3683 
   3684 /*
   3685 ** Tasklet for handling MSF (multispeed fiber) interrupts
   3686 */
   3687 static void
   3688 ixgbe_handle_msf(void *context)
   3689 {
   3690 	struct adapter  *adapter = context;
   3691 	struct ixgbe_hw *hw = &adapter->hw;
   3692 	u32 autoneg;
   3693 	bool negotiate;
   3694 	int err;
   3695 
   3696 	err = hw->phy.ops.identify_sfp(hw);
   3697 	if (!err) {
   3698 		ixgbe_setup_optics(adapter);
    3699 		INIT_DEBUGOUT1("ixgbe_handle_msf: flags: %X\n", adapter->optics);
   3700 	}
   3701 
   3702 	autoneg = hw->phy.autoneg_advertised;
   3703 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
   3704 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
   3705 	else
   3706 		negotiate = 0;
   3707 	if (hw->mac.ops.setup_link)
   3708 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
   3709 
   3710 	ifmedia_removeall(&adapter->media);
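         	/* The supported media set can change with a new module; rebuild it. */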
   3711 	ixgbe_add_media_types(adapter);
   3712 	return;
   3713 }
   3714 
   3715 /*
   3716 ** Tasklet for handling interrupts from an external PHY
   3717 */
   3718 static void
   3719 ixgbe_handle_phy(void *context)
   3720 {
   3721 	struct adapter  *adapter = context;
   3722 	struct ixgbe_hw *hw = &adapter->hw;
   3723 	int error;
   3724 
   3725 	error = hw->phy.ops.handle_lasi(hw);
   3726 	if (error == IXGBE_ERR_OVERTEMP)
   3727 		device_printf(adapter->dev,
   3728 		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
   3729 		    " PHY will downshift to lower power state!\n");
   3730 	else if (error)
   3731 		device_printf(adapter->dev,
   3732 		    "Error handling LASI interrupt: %d\n",
   3733 		    error);
   3734 	return;
   3735 }
   3736 
   3737 #ifdef IXGBE_FDIR
   3738 /*
   3739 ** Tasklet for reinitializing the Flow Director filter table
   3740 */
   3741 static void
   3742 ixgbe_reinit_fdir(void *context)
   3743 {
   3744 	struct adapter  *adapter = context;
   3745 	struct ifnet   *ifp = adapter->ifp;
   3746 
   3747 	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
   3748 		return;
   3749 	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
   3750 	adapter->fdir_reinit = 0;
   3751 	/* re-enable flow director interrupts */
   3752 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
   3753 	/* Restart the interface */
   3754 	ifp->if_flags |= IFF_RUNNING;
   3755 	return;
   3756 }
   3757 #endif
   3758 
   3759 /*********************************************************************
   3760  *
   3761  *  Configure DMA Coalescing
   3762  *
   3763  **********************************************************************/
   3764 static void
   3765 ixgbe_config_dmac(struct adapter *adapter)
   3766 {
   3767 	struct ixgbe_hw *hw = &adapter->hw;
   3768 	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
   3769 
   3770 	if (hw->mac.type < ixgbe_mac_X550 ||
   3771 	    !hw->mac.ops.dmac_config)
   3772 		return;
   3773 
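         	/*
         	 * Reprogram DMA coalescing only when the watchdog timer or the
         	 * link speed has changed (the XORs are cheap inequality tests).
         	 */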
   3774 	if (dcfg->watchdog_timer ^ adapter->dmac ||
   3775 	    dcfg->link_speed ^ adapter->link_speed) {
   3776 		dcfg->watchdog_timer = adapter->dmac;
   3777 		dcfg->fcoe_en = false;
   3778 		dcfg->link_speed = adapter->link_speed;
   3779 		dcfg->num_tcs = 1;
   3780 
   3781 		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
   3782 		    dcfg->watchdog_timer, dcfg->link_speed);
   3783 
   3784 		hw->mac.ops.dmac_config(hw);
   3785 	}
   3786 }
   3787 
   3788 /*
   3789  * Checks whether the adapter supports Energy Efficient Ethernet
   3790  * or not, based on device ID.
   3791  */
   3792 static void
   3793 ixgbe_check_eee_support(struct adapter *adapter)
   3794 {
   3795 	struct ixgbe_hw *hw = &adapter->hw;
   3796 
   3797 	adapter->eee_support = adapter->eee_enabled =
   3798 	    (hw->device_id == IXGBE_DEV_ID_X550T ||
   3799 	        hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
   3800 }
   3801 
   3802 /*
   3803  * Checks whether the adapter's ports are capable of
   3804  * Wake On LAN by reading the adapter's NVM.
   3805  *
   3806  * Sets each port's hw->wol_enabled value depending
   3807  * on the value read here.
   3808  */
   3809 static void
   3810 ixgbe_check_wol_support(struct adapter *adapter)
   3811 {
   3812 	struct ixgbe_hw *hw = &adapter->hw;
   3813 	u16 dev_caps = 0;
   3814 
   3815 	/* Find out WoL support for port */
   3816 	adapter->wol_support = hw->wol_enabled = 0;
   3817 	ixgbe_get_device_caps(hw, &dev_caps);
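         	/*
         	 * WOL_PORT0_1 advertises WoL on both ports; WOL_PORT0 only on
         	 * function 0, which is why bus.func is checked below.
         	 */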
   3818 	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
   3819 	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
   3820 	        hw->bus.func == 0))
   3821 	    adapter->wol_support = hw->wol_enabled = 1;
   3822 
   3823 	/* Save initial wake up filter configuration */
   3824 	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
   3825 
   3826 	return;
   3827 }
   3828 
   3829 /*
   3830  * Prepare the adapter/port for LPLU and/or WoL
   3831  */
   3832 static int
   3833 ixgbe_setup_low_power_mode(struct adapter *adapter)
   3834 {
   3835 	struct ixgbe_hw *hw = &adapter->hw;
   3836 	device_t dev = adapter->dev;
   3837 	s32 error = 0;
   3838 
   3839 	KASSERT(mutex_owned(&adapter->core_mtx));
   3840 
   3841 	/* Limit power management flow to X550EM baseT */
   3842 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
   3843 	    && hw->phy.ops.enter_lplu) {
   3844 		/* Turn off support for APM wakeup. (Using ACPI instead) */
   3845 		IXGBE_WRITE_REG(hw, IXGBE_GRC,
   3846 		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
   3847 
   3848 		/*
   3849 		 * Clear Wake Up Status register to prevent any previous wakeup
   3850 		 * events from waking us up immediately after we suspend.
   3851 		 */
   3852 		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
   3853 
   3854 		/*
   3855 		 * Program the Wakeup Filter Control register with user filter
   3856 		 * settings
   3857 		 */
   3858 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
   3859 
   3860 		/* Enable wakeups and power management in Wakeup Control */
   3861 		IXGBE_WRITE_REG(hw, IXGBE_WUC,
   3862 		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
   3863 
   3864 		/* X550EM baseT adapters need a special LPLU flow */
   3865 		hw->phy.reset_disable = true;
   3866 		ixgbe_stop(adapter);
   3867 		error = hw->phy.ops.enter_lplu(hw);
   3868 		if (error)
   3869 			device_printf(dev,
   3870 			    "Error entering LPLU: %d\n", error);
   3871 		hw->phy.reset_disable = false;
   3872 	} else {
   3873 		/* Just stop for other adapters */
   3874 		ixgbe_stop(adapter);
   3875 	}
   3876 
   3877 	return error;
   3878 }
   3879 
   3880 /**********************************************************************
   3881  *
   3882  *  Update the board statistics counters.
   3883  *
   3884  **********************************************************************/
   3885 static void
   3886 ixgbe_update_stats_counters(struct adapter *adapter)
   3887 {
   3888 	struct ifnet   *ifp = adapter->ifp;
   3889 	struct ixgbe_hw *hw = &adapter->hw;
   3890 	u32 missed_rx = 0, bprc, lxon, lxoff, total;
   3891 	u64 total_missed_rx = 0;
   3892 	uint64_t crcerrs, rlec;
   3893 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   3894 
   3895 	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
   3896 	stats->crcerrs.ev_count += crcerrs;
   3897 	stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
   3898 	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
   3899 	stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
   3900 
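         	/*
         	 * The hardware provides more per-queue counter registers than
         	 * queues in use; fold them onto the active queues (modulo
         	 * num_queues) so nothing is lost.
         	 */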
   3901 	for (int i = 0; i < __arraycount(stats->qprc); i++) {
   3902 		int j = i % adapter->num_queues;
   3903 		stats->qprc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
   3904 		stats->qptc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
   3905 		stats->qprdc[j].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
   3906 	}
   3907 	stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
   3908 	stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
   3909 	rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
   3910 	stats->rlec.ev_count += rlec;
   3911 
   3912 	/* Hardware workaround, gprc counts missed packets */
   3913 	stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
   3914 
   3915 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
   3916 	stats->lxontxc.ev_count += lxon;
   3917 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
   3918 	stats->lxofftxc.ev_count += lxoff;
   3919 	total = lxon + lxoff;
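         	/*
         	 * The hardware folds XON/XOFF pause frames into its good
         	 * transmit counters; the adjustments below subtract them so
         	 * the statistics reflect data traffic only.
         	 */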
   3920 
   3921 	if (hw->mac.type != ixgbe_mac_82598EB) {
   3922 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
   3923 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
   3924 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
   3925 		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
   3926 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
   3927 		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
   3928 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
   3929 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
   3930 	} else {
   3931 		stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
   3932 		stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
   3933 		/* 82598 only has a counter in the high register */
   3934 		stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
   3935 		stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
   3936 		stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
   3937 	}
   3938 
   3939 	/*
   3940 	 * Workaround: mprc hardware is incorrectly counting
   3941 	 * broadcasts, so for now we subtract those.
   3942 	 */
   3943 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
   3944 	stats->bprc.ev_count += bprc;
   3945 	stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
   3946 
   3947 	stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
   3948 	stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
   3949 	stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
   3950 	stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
   3951 	stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
   3952 	stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
   3953 
   3954 	stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
   3955 	stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
   3956 	stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
   3957 
   3958 	stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
   3959 	stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
   3960 	stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
   3961 	stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
   3962 	stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
   3963 	stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
   3964 	stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
   3965 	stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
   3966 	stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
   3967 	stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
   3968 	stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
   3969 	stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
   3970 	stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
   3971 	stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
   3972 	stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
   3973 	stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
   3974 	stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
   3975 	stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
   3976 	/* Only read FCOE on 82599 */
   3977 	if (hw->mac.type != ixgbe_mac_82598EB) {
   3978 		stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
   3979 		stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
   3980 		stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
   3981 		stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
   3982 		stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
   3983 	}
   3984 
   3985 	/* Fill out the OS statistics structure */
   3986 	/*
   3987 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   3988 	 * adapter->stats counters. It's required to make ifconfig -z
   3989 	 * (SOICZIFDATA) work.
   3990 	 */
   3991 	ifp->if_collisions = 0;
   3992 
   3993 	/* Rx Errors */
   3994 	ifp->if_iqdrops += total_missed_rx;
   3995 	ifp->if_ierrors += crcerrs + rlec;
   3996 }
   3997 
   3998 /** ixgbe_sysctl_tdh_handler - Handler function
   3999  *  Retrieves the TDH value from the hardware
   4000  */
   4001 static int
   4002 ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
   4003 {
   4004 	struct sysctlnode node = *rnode;
   4005 	uint32_t val;
   4006 	struct tx_ring *txr;
   4007 
   4008 	txr = (struct tx_ring *)node.sysctl_data;
   4009 	if (txr == NULL)
   4010 		return 0;
   4011 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
   4012 	node.sysctl_data = &val;
   4013 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   4014 }
   4015 
   4016 /** ixgbe_sysctl_tdt_handler - Handler function
   4017  *  Retrieves the TDT value from the hardware
   4018  */
   4019 static int
   4020 ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
   4021 {
   4022 	struct sysctlnode node = *rnode;
   4023 	uint32_t val;
   4024 	struct tx_ring *txr;
   4025 
   4026 	txr = (struct tx_ring *)node.sysctl_data;
   4027 	if (txr == NULL)
   4028 		return 0;
   4029 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
   4030 	node.sysctl_data = &val;
   4031 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   4032 }
   4033 
   4034 /** ixgbe_sysctl_rdh_handler - Handler function
   4035  *  Retrieves the RDH value from the hardware
   4036  */
   4037 static int
   4038 ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
   4039 {
   4040 	struct sysctlnode node = *rnode;
   4041 	uint32_t val;
   4042 	struct rx_ring *rxr;
   4043 
   4044 	rxr = (struct rx_ring *)node.sysctl_data;
   4045 	if (rxr == NULL)
   4046 		return 0;
   4047 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
   4048 	node.sysctl_data = &val;
   4049 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   4050 }
   4051 
   4052 /** ixgbe_sysctl_rdt_handler - Handler function
   4053  *  Retrieves the RDT value from the hardware
   4054  */
   4055 static int
   4056 ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
   4057 {
   4058 	struct sysctlnode node = *rnode;
   4059 	uint32_t val;
   4060 	struct rx_ring *rxr;
   4061 
   4062 	rxr = (struct rx_ring *)node.sysctl_data;
   4063 	if (rxr == NULL)
   4064 		return 0;
   4065 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
   4066 	node.sysctl_data = &val;
   4067 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   4068 }
   4069 
   4070 static int
   4071 ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   4072 {
   4073 	struct sysctlnode node = *rnode;
   4074 	struct ix_queue *que;
   4075 	uint32_t reg, usec, rate;
   4076 	int error;
   4077 
   4078 	que = (struct ix_queue *)node.sysctl_data;
   4079 	if (que == NULL)
   4080 		return 0;
   4081 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
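         	/* The interval field (bits 11:3) is in ~2 usec units, hence 500000/usec below. */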
   4082 	usec = ((reg & 0x0FF8) >> 3);
   4083 	if (usec > 0)
   4084 		rate = 500000 / usec;
   4085 	else
   4086 		rate = 0;
   4087 	node.sysctl_data = &rate;
   4088 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4089 	if (error)
   4090 		return error;
   4091 	reg &= ~0xfff; /* default, no limitation */
   4092 	ixgbe_max_interrupt_rate = 0;
   4093 	if (rate > 0 && rate < 500000) {
   4094 		if (rate < 1000)
   4095 			rate = 1000;
   4096 		ixgbe_max_interrupt_rate = rate;
    4097 		reg |= ((4000000 / rate) & 0x0FF8);
   4098 	}
   4099 	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
   4100 	return 0;
   4101 }
   4102 
   4103 const struct sysctlnode *
   4104 ixgbe_sysctl_instance(struct adapter *adapter)
   4105 {
   4106 	const char *dvname;
   4107 	struct sysctllog **log;
   4108 	int rc;
   4109 	const struct sysctlnode *rnode;
   4110 
   4111 	log = &adapter->sysctllog;
   4112 	dvname = device_xname(adapter->dev);
   4113 
   4114 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   4115 	    0, CTLTYPE_NODE, dvname,
   4116 	    SYSCTL_DESCR("ixgbe information and settings"),
   4117 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   4118 		goto err;
   4119 
   4120 	return rnode;
   4121 err:
   4122 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   4123 	return NULL;
   4124 }
   4125 
   4126 static void
   4127 ixgbe_add_device_sysctls(struct adapter *adapter)
   4128 {
   4129 	device_t dev = adapter->dev;
   4130 	struct ixgbe_hw *hw = &adapter->hw;
   4131 	struct sysctllog **log;
   4132 	const struct sysctlnode *rnode, *cnode;
   4133 
   4134 	log = &adapter->sysctllog;
   4135 
   4136 	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4137 		aprint_error_dev(dev, "could not create sysctl root\n");
   4138 		return;
   4139 	}
   4140 
   4141 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4142 	    CTLFLAG_READONLY, CTLTYPE_INT,
   4143 	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
   4144 	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
   4145 		aprint_error_dev(dev, "could not create sysctl\n");
   4146 
   4147 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4148 	    CTLFLAG_READONLY, CTLTYPE_INT,
   4149 	    "num_queues", SYSCTL_DESCR("Number of queues"),
   4150 	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
   4151 		aprint_error_dev(dev, "could not create sysctl\n");
   4152 
   4153 	/* Sysctls for all devices */
   4154 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4155 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4156 	    "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
   4157 	    ixgbe_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4158 		aprint_error_dev(dev, "could not create sysctl\n");
   4159 
   4160 	/* XXX This is an *instance* sysctl controlling a *global* variable.
   4161 	 * XXX It's that way in the FreeBSD driver that this derives from.
   4162 	 */
   4163 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4164 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4165 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   4166 	    NULL, 0, &ixgbe_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   4167 		aprint_error_dev(dev, "could not create sysctl\n");
   4168 
   4169 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4170 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4171 	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
   4172 	    ixgbe_set_advertise, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4173 		aprint_error_dev(dev, "could not create sysctl\n");
   4174 
   4175 	if (sysctl_createv(log, 0, &rnode, &cnode,
   4176 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   4177 	    "ts", SYSCTL_DESCR("Thermal Test"),
   4178 	    ixgbe_sysctl_thermal_test, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4179 		aprint_error_dev(dev, "could not create sysctl\n");
   4180 
   4181 	/* for X550 devices */
   4182 	if (hw->mac.type >= ixgbe_mac_X550)
   4183 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4184 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   4185 		    "dmac", SYSCTL_DESCR("DMA Coalesce"),
   4186 		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4187 			aprint_error_dev(dev, "could not create sysctl\n");
   4188 
   4189 	/* for X550T and X550EM backplane devices */
   4190 	if (hw->device_id == IXGBE_DEV_ID_X550T ||
   4191 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
   4192 		const struct sysctlnode *eee_node;
   4193 
   4194 		if (sysctl_createv(log, 0, &rnode, &eee_node,
   4195 		    0, CTLTYPE_NODE,
   4196 		    "eee", SYSCTL_DESCR("Energy Efficient Ethernet sysctls"),
   4197 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   4198 			aprint_error_dev(dev, "could not create sysctl\n");
   4199 			return;
   4200 		}
   4201 
   4202 		if (sysctl_createv(log, 0, &eee_node, &cnode,
   4203 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   4204 		    "enable", SYSCTL_DESCR("Enable or Disable EEE"),
   4205 		    ixgbe_sysctl_eee_enable, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4206 			aprint_error_dev(dev, "could not create sysctl\n");
   4207 
   4208 		if (sysctl_createv(log, 0, &eee_node, &cnode,
   4209 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   4210 		    "negotiated", SYSCTL_DESCR("EEE negotiated on link"),
   4211 		    ixgbe_sysctl_eee_negotiated, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4212 			aprint_error_dev(dev, "could not create sysctl\n");
   4213 
   4214 		if (sysctl_createv(log, 0, &eee_node, &cnode,
   4215 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   4216 		    "tx_lpi_status", SYSCTL_DESCR("Whether or not TX link is in LPI state"),
   4217 		    ixgbe_sysctl_eee_tx_lpi_status, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4218 			aprint_error_dev(dev, "could not create sysctl\n");
   4219 
   4220 		if (sysctl_createv(log, 0, &eee_node, &cnode,
   4221 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
   4222 		    "rx_lpi_status", SYSCTL_DESCR("Whether or not RX link is in LPI state"),
   4223 		    ixgbe_sysctl_eee_rx_lpi_status, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4224 			aprint_error_dev(dev, "could not create sysctl\n");
   4225 
   4226 	}
   4227 
   4228 	/* for certain 10GBaseT devices */
   4229 	if (hw->device_id == IXGBE_DEV_ID_X550T ||
   4230 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   4231 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4232 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   4233 		    "wol_enable", SYSCTL_DESCR("Enable/Disable Wake on LAN"),
   4234 		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4235 			aprint_error_dev(dev, "could not create sysctl\n");
   4236 
   4237 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4238 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   4239 		    "wufc", SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
   4240 		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4241 			aprint_error_dev(dev, "could not create sysctl\n");
   4242 	}
   4243 
   4244 	/* for X550EM 10GBaseT devices */
   4245 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
   4246 		const struct sysctlnode *phy_node;
   4247 
   4248 		if (sysctl_createv(log, 0, &rnode, &phy_node,
   4249 		    0, CTLTYPE_NODE,
   4250 		    "phy", SYSCTL_DESCR("External PHY sysctls"),
   4251 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
   4252 			aprint_error_dev(dev, "could not create sysctl\n");
   4253 			return;
   4254 		}
   4255 
   4256 		if (sysctl_createv(log, 0, &phy_node, &cnode,
   4257 		    CTLFLAG_READONLY, CTLTYPE_INT,
   4258 		    "temp", SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
   4259 		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4260 			aprint_error_dev(dev, "could not create sysctl\n");
   4261 
   4262 		if (sysctl_createv(log, 0, &phy_node, &cnode,
   4263 		    CTLFLAG_READONLY, CTLTYPE_INT,
   4264 		    "overtemp_occurred", SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
   4265 		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   4266 			aprint_error_dev(dev, "could not create sysctl\n");
   4267 	}
   4268 }
   4269 
   4270 /*
   4271  * Add sysctl variables, one per statistic, to the system.
   4272  */
   4273 static void
   4274 ixgbe_add_hw_stats(struct adapter *adapter)
   4275 {
   4276 	device_t dev = adapter->dev;
   4277 	const struct sysctlnode *rnode, *cnode;
   4278 	struct sysctllog **log = &adapter->sysctllog;
   4279 	struct tx_ring *txr = adapter->tx_rings;
   4280 	struct rx_ring *rxr = adapter->rx_rings;
   4281 	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
   4282 
   4283 	/* Driver Statistics */
   4284 #if 0
   4285 	/* These counters are not updated by the software */
   4286 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
   4287 			CTLFLAG_RD, &adapter->dropped_pkts,
   4288 			"Driver dropped packets");
   4289 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_header_failed",
   4290 			CTLFLAG_RD, &adapter->mbuf_header_failed,
   4291 			"???");
   4292 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_packet_failed",
   4293 			CTLFLAG_RD, &adapter->mbuf_packet_failed,
   4294 			"???");
   4295 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_map_avail",
   4296 			CTLFLAG_RD, &adapter->no_tx_map_avail,
   4297 			"???");
   4298 #endif
   4299 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   4300 	    NULL, device_xname(dev), "Handled queue in softint");
   4301 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   4302 	    NULL, device_xname(dev), "Requeued in softint");
   4303 	evcnt_attach_dynamic(&adapter->morerx, EVCNT_TYPE_MISC,
   4304 	    NULL, device_xname(dev), "Interrupt handler more rx");
   4305 	evcnt_attach_dynamic(&adapter->moretx, EVCNT_TYPE_MISC,
   4306 	    NULL, device_xname(dev), "Interrupt handler more tx");
   4307 	evcnt_attach_dynamic(&adapter->txloops, EVCNT_TYPE_MISC,
   4308 	    NULL, device_xname(dev), "Interrupt handler tx loops");
   4309 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   4310 	    NULL, device_xname(dev), "Driver tx dma soft fail EFBIG");
   4311 	evcnt_attach_dynamic(&adapter->m_defrag_failed, EVCNT_TYPE_MISC,
   4312 	    NULL, device_xname(dev), "m_defrag() failed");
   4313 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   4314 	    NULL, device_xname(dev), "Driver tx dma hard fail EFBIG");
   4315 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   4316 	    NULL, device_xname(dev), "Driver tx dma hard fail EINVAL");
   4317 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   4318 	    NULL, device_xname(dev), "Driver tx dma hard fail other");
   4319 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   4320 	    NULL, device_xname(dev), "Driver tx dma soft fail EAGAIN");
   4321 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   4322 	    NULL, device_xname(dev), "Driver tx dma soft fail ENOMEM");
   4323 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   4324 	    NULL, device_xname(dev), "Watchdog timeouts");
   4325 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   4326 	    NULL, device_xname(dev), "TSO errors");
   4327 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_MISC,
   4328 	    NULL, device_xname(dev), "Link MSIX IRQ Handled");
   4329 
   4330 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   4331 		snprintf(adapter->queues[i].evnamebuf,
   4332 		    sizeof(adapter->queues[i].evnamebuf), "%s queue%d",
   4333 		    device_xname(dev), i);
   4334 		snprintf(adapter->queues[i].namebuf,
   4335 		    sizeof(adapter->queues[i].namebuf), "queue%d", i);
   4336 
   4337 		if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
   4338 			aprint_error_dev(dev, "could not create sysctl root\n");
   4339 			break;
   4340 		}
   4341 
   4342 		if (sysctl_createv(log, 0, &rnode, &rnode,
   4343 		    0, CTLTYPE_NODE,
   4344 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   4345 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   4346 			break;
   4347 
   4348 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4349 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   4350 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   4351 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   4352 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   4353 			break;
   4354 
   4355 #if 0 /* XXX msaitoh */
   4356 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4357 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   4358 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   4359 			NULL, 0, &(adapter->queues[i].irqs),
   4360 		    0, CTL_CREATE, CTL_EOL) != 0)
   4361 			break;
   4362 #endif
   4363 
   4364 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4365 		    CTLFLAG_READONLY, CTLTYPE_INT,
   4366 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   4367 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   4368 		    0, CTL_CREATE, CTL_EOL) != 0)
   4369 			break;
   4370 
   4371 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4372 		    CTLFLAG_READONLY, CTLTYPE_INT,
   4373 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   4374 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   4375 		    0, CTL_CREATE, CTL_EOL) != 0)
   4376 			break;
   4377 
   4378 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   4379 		    NULL, device_xname(dev), "TSO");
   4380 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   4381 		    NULL, adapter->queues[i].evnamebuf,
   4382 		    "Queue No Descriptor Available");
   4383 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   4384 		    NULL, adapter->queues[i].evnamebuf,
   4385 		    "Queue Packets Transmitted");
   4386 #ifndef IXGBE_LEGACY_TX
   4387 		evcnt_attach_dynamic(&txr->br->br_drops, EVCNT_TYPE_MISC,
   4388 		    NULL, adapter->queues[i].evnamebuf,
   4389 		    "Packets dropped in buf_ring");
   4390 #endif
   4391 
   4392 #ifdef LRO
   4393 		struct lro_ctrl *lro = &rxr->lro;
   4394 #endif /* LRO */
   4395 
   4396 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4397 		    CTLFLAG_READONLY,
   4398 		    CTLTYPE_INT,
   4399 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   4400 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   4401 		    CTL_CREATE, CTL_EOL) != 0)
   4402 			break;
   4403 
   4404 		if (sysctl_createv(log, 0, &rnode, &cnode,
   4405 		    CTLFLAG_READONLY,
   4406 		    CTLTYPE_INT,
   4407 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   4408 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   4409 		    CTL_CREATE, CTL_EOL) != 0)
   4410 			break;
   4411 
   4412 		if (i < __arraycount(stats->mpc)) {
   4413 			evcnt_attach_dynamic(&stats->mpc[i],
   4414 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4415 			    "Missed Packet Count");
   4416 		}
   4417 		if (i < __arraycount(stats->pxontxc)) {
   4418 			evcnt_attach_dynamic(&stats->pxontxc[i],
   4419 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4420 			    "pxontxc");
   4421 			evcnt_attach_dynamic(&stats->pxonrxc[i],
   4422 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4423 			    "pxonrxc");
   4424 			evcnt_attach_dynamic(&stats->pxofftxc[i],
   4425 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4426 			    "pxofftxc");
   4427 			evcnt_attach_dynamic(&stats->pxoffrxc[i],
   4428 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4429 			    "pxoffrxc");
   4430 			evcnt_attach_dynamic(&stats->pxon2offc[i],
   4431 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4432 			    "pxon2offc");
   4433 		}
   4434 		if (i < __arraycount(stats->qprc)) {
   4435 			evcnt_attach_dynamic(&stats->qprc[i],
   4436 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4437 			    "qprc");
   4438 			evcnt_attach_dynamic(&stats->qptc[i],
   4439 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4440 			    "qptc");
   4441 			evcnt_attach_dynamic(&stats->qbrc[i],
   4442 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4443 			    "qbrc");
   4444 			evcnt_attach_dynamic(&stats->qbtc[i],
   4445 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4446 			    "qbtc");
   4447 			evcnt_attach_dynamic(&stats->qprdc[i],
   4448 			    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   4449 			    "qprdc");
   4450 		}
   4451 
   4452 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   4453 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   4454 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   4455 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   4456 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   4457 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   4458 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   4459 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   4460 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   4461 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   4462 		evcnt_attach_dynamic(&rxr->rx_irq, EVCNT_TYPE_MISC,
   4463 		    NULL, adapter->queues[i].evnamebuf, "Rx interrupts");
   4464 #ifdef LRO
   4465 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   4466 				CTLFLAG_RD, &lro->lro_queued, 0,
   4467 				"LRO Queued");
   4468 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   4469 				CTLFLAG_RD, &lro->lro_flushed, 0,
   4470 				"LRO Flushed");
   4471 #endif /* LRO */
   4472 	}
   4473 
    4474 	/* MAC stats get their own sub node */
   4475 
   4476 
   4477 	snprintf(stats->namebuf,
   4478 	    sizeof(stats->namebuf), "%s MAC Statistics", device_xname(dev));
   4479 
   4480 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   4481 	    stats->namebuf, "rx csum offload - IP");
   4482 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   4483 	    stats->namebuf, "rx csum offload - L4");
   4484 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   4485 	    stats->namebuf, "rx csum offload - IP bad");
   4486 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   4487 	    stats->namebuf, "rx csum offload - L4 bad");
   4488 	evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
   4489 	    stats->namebuf, "Interrupt conditions zero");
   4490 	evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
   4491 	    stats->namebuf, "Legacy interrupts");
   4492 	evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
   4493 	    stats->namebuf, "CRC Errors");
   4494 	evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
   4495 	    stats->namebuf, "Illegal Byte Errors");
   4496 	evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
   4497 	    stats->namebuf, "Byte Errors");
   4498 	evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
   4499 	    stats->namebuf, "MAC Short Packets Discarded");
   4500 	evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
   4501 	    stats->namebuf, "MAC Local Faults");
   4502 	evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
   4503 	    stats->namebuf, "MAC Remote Faults");
   4504 	evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
   4505 	    stats->namebuf, "Receive Length Errors");
   4506 	evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
   4507 	    stats->namebuf, "Link XON Transmitted");
   4508 	evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
   4509 	    stats->namebuf, "Link XON Received");
   4510 	evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
   4511 	    stats->namebuf, "Link XOFF Transmitted");
   4512 	evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
   4513 	    stats->namebuf, "Link XOFF Received");
   4514 
   4515 	/* Packet Reception Stats */
   4516 	evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
   4517 	    stats->namebuf, "Total Octets Received");
   4518 	evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
   4519 	    stats->namebuf, "Good Octets Received");
   4520 	evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
   4521 	    stats->namebuf, "Total Packets Received");
   4522 	evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
   4523 	    stats->namebuf, "Good Packets Received");
   4524 	evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
   4525 	    stats->namebuf, "Multicast Packets Received");
   4526 	evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
   4527 	    stats->namebuf, "Broadcast Packets Received");
   4528 	evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
   4529 	    stats->namebuf, "64 byte frames received ");
   4530 	evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
   4531 	    stats->namebuf, "65-127 byte frames received");
   4532 	evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
   4533 	    stats->namebuf, "128-255 byte frames received");
   4534 	evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
   4535 	    stats->namebuf, "256-511 byte frames received");
   4536 	evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
   4537 	    stats->namebuf, "512-1023 byte frames received");
   4538 	evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
   4539 	    stats->namebuf, "1023-1522 byte frames received");
   4540 	evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
   4541 	    stats->namebuf, "Receive Undersized");
   4542 	evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
    4543 	    stats->namebuf, "Fragmented Packets Received");
   4544 	evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
   4545 	    stats->namebuf, "Oversized Packets Received");
   4546 	evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
   4547 	    stats->namebuf, "Received Jabber");
   4548 	evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
   4549 	    stats->namebuf, "Management Packets Received");
   4550 	evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
   4551 	    stats->namebuf, "Checksum Errors");
   4552 
   4553 	/* Packet Transmission Stats */
   4554 	evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
   4555 	    stats->namebuf, "Good Octets Transmitted");
   4556 	evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
   4557 	    stats->namebuf, "Total Packets Transmitted");
   4558 	evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
   4559 	    stats->namebuf, "Good Packets Transmitted");
   4560 	evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
   4561 	    stats->namebuf, "Broadcast Packets Transmitted");
   4562 	evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
   4563 	    stats->namebuf, "Multicast Packets Transmitted");
   4564 	evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
   4565 	    stats->namebuf, "Management Packets Transmitted");
   4566 	evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
    4567 	    stats->namebuf, "64 byte frames transmitted");
   4568 	evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
   4569 	    stats->namebuf, "65-127 byte frames transmitted");
   4570 	evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
   4571 	    stats->namebuf, "128-255 byte frames transmitted");
   4572 	evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
   4573 	    stats->namebuf, "256-511 byte frames transmitted");
   4574 	evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
   4575 	    stats->namebuf, "512-1023 byte frames transmitted");
   4576 	evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
   4577 	    stats->namebuf, "1024-1522 byte frames transmitted");
   4578 }
   4579 
   4580 /*
   4581 ** Set flow control using sysctl:
   4582 ** Flow control values:
   4583 ** 	0 - off
   4584 **	1 - rx pause
   4585 **	2 - tx pause
   4586 **	3 - full
   4587 */
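         /*
         ** Example invocation (sketch only; the sysctl node path is an
         ** assumption and depends on how the driver attached its nodes,
         ** e.g. for an ixg0 interface):
         **	sysctl -w hw.ixg0.fc=3		# request full flow control
         */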
   4588 static int
   4589 ixgbe_set_flowcntl(SYSCTLFN_ARGS)
   4590 {
   4591 	struct sysctlnode node = *rnode;
   4592 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4593 	int error, last;
   4594 
   4595 	node.sysctl_data = &adapter->fc;
   4596 	last = adapter->fc;
   4597 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4598 	if (error != 0 || newp == NULL)
   4599 		return error;
   4600 
   4601 	/* Don't bother if it's not changed */
   4602 	if (adapter->fc == last)
   4603 		return (0);
   4604 
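         	/*
         	 * Per-queue RX drop is only used when flow control is off,
         	 * so the two settings are toggled together below
         	 * (see ixgbe_enable_rx_drop).
         	 */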
   4605 	switch (adapter->fc) {
   4606 		case ixgbe_fc_rx_pause:
   4607 		case ixgbe_fc_tx_pause:
   4608 		case ixgbe_fc_full:
   4609 			adapter->hw.fc.requested_mode = adapter->fc;
   4610 			if (adapter->num_queues > 1)
   4611 				ixgbe_disable_rx_drop(adapter);
   4612 			break;
   4613 		case ixgbe_fc_none:
   4614 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
   4615 			if (adapter->num_queues > 1)
   4616 				ixgbe_enable_rx_drop(adapter);
   4617 			break;
   4618 		default:
   4619 			adapter->fc = last;
   4620 			return (EINVAL);
   4621 	}
   4622 	/* Don't autoneg if forcing a value */
   4623 	adapter->hw.fc.disable_fc_autoneg = TRUE;
   4624 	ixgbe_fc_enable(&adapter->hw);
   4625 	return 0;
   4626 }
   4627 
   4628 /*
   4629 ** Control advertised link speed:
   4630 **	Flags:
   4631 **	0x1 - advertise 100 Mb
   4632 **	0x2 - advertise 1G
   4633 **	0x4 - advertise 10G
   4634 */
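         /*
         ** Example (sketch only; the sysctl node path is an assumption,
         ** e.g. for an ixg0 interface):
         **	sysctl -w hw.ixg0.advertise_speed=0x6	# advertise 1G and 10G
         */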
   4635 static int
   4636 ixgbe_set_advertise(SYSCTLFN_ARGS)
   4637 {
   4638 	struct sysctlnode	node = *rnode;
   4639 	int			old, error = 0, requested;
   4640 	struct adapter		*adapter = (struct adapter *)node.sysctl_data;
   4641 	device_t		dev;
   4642 	struct ixgbe_hw		*hw;
   4643 	ixgbe_link_speed	speed = 0;
   4644 
   4645 	dev = adapter->dev;
   4646 	hw = &adapter->hw;
   4647 
   4648 	old = requested = adapter->advertise;
   4649 	node.sysctl_data = &requested;
   4650 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4651 	if (error != 0 || newp == NULL)
   4652 		return error;
   4653 
   4654 	/* Checks to validate new value */
   4655 	if (requested == old) /* no change */
   4656 		return (0);
   4657 
   4658 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
   4659 	    (hw->phy.multispeed_fiber))) {
   4660 		device_printf(dev,
   4661 		    "Advertised speed can only be set on copper or "
   4662 		    "multispeed fiber media types.\n");
   4663 		return (EINVAL);
   4664 	}
   4665 
   4666 	if (requested < 0x1 || requested > 0x7) {
   4667 		device_printf(dev,
   4668 		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
   4669 		return (EINVAL);
   4670 	}
   4671 
   4672 	if ((requested & 0x1)
   4673 	    && (hw->mac.type != ixgbe_mac_X540)
   4674 	    && (hw->mac.type != ixgbe_mac_X550)) {
   4675 		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
   4676 		return (EINVAL);
   4677 	}
   4678 
   4681 	/* Set new value and report new advertised mode */
   4682 	if (requested & 0x1)
   4683 		speed |= IXGBE_LINK_SPEED_100_FULL;
   4684 	if (requested & 0x2)
   4685 		speed |= IXGBE_LINK_SPEED_1GB_FULL;
   4686 	if (requested & 0x4)
   4687 		speed |= IXGBE_LINK_SPEED_10GB_FULL;
   4688 
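         	/* Restart the link so the new advertised speeds take effect */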
   4689 	hw->mac.autotry_restart = TRUE;
   4690 	hw->mac.ops.setup_link(hw, speed, TRUE);
   4691 	adapter->advertise = requested;
   4692 
   4693 	return 0;
   4694 }
   4695 
   4696 /*
   4697  * The following two sysctls are for X550 BaseT devices;
   4698  * they deal with the external PHY used in them.
   4699  */
   4700 static int
   4701 ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
   4702 {
   4703 	struct sysctlnode node = *rnode;
   4704 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   4705 	struct ixgbe_hw *hw = &adapter->hw;
    4706 	int val, error;
    4707 	u16 reg;
   4709 
   4710 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   4711 		device_printf(adapter->dev,
   4712 		    "Device has no supported external thermal sensor.\n");
   4713 		return (ENODEV);
   4714 	}
   4715 
   4716 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
   4717 				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
   4718 				      &reg)) {
   4719 		device_printf(adapter->dev,
   4720 		    "Error reading from PHY's current temperature register\n");
   4721 		return (EAGAIN);
   4722 	}
   4723 
   4724 	node.sysctl_data = &val;
   4725 
   4726 	/* Shift temp for output */
   4727 	val = reg >> 8;
   4728 
   4729 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4730 	if ((error) || (newp == NULL))
   4731 		return (error);
   4732 
   4733 	return (0);
   4734 }
   4735 
   4736 /*
   4737  * Reports whether the current PHY temperature is over
   4738  * the overtemp threshold.
   4739  *  - This is reported directly from the PHY
   4740  */
   4741 static int
   4742 ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
   4743 {
   4744 	struct sysctlnode node = *rnode;
   4745 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   4746 	struct ixgbe_hw *hw = &adapter->hw;
   4747 	int val, error;
   4748 	u16 reg;
   4749 
   4750 	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
   4751 		device_printf(adapter->dev,
   4752 		    "Device has no supported external thermal sensor.\n");
   4753 		return (ENODEV);
   4754 	}
   4755 
   4756 	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
   4757 				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
   4758 				      &reg)) {
   4759 		device_printf(adapter->dev,
   4760 		    "Error reading from PHY's temperature status register\n");
   4761 		return (EAGAIN);
   4762 	}
   4763 
   4764 	node.sysctl_data = &val;
   4765 
   4766 	/* Get occurrence bit */
   4767 	val = !!(reg & 0x4000);
   4768 
   4769 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4770 	if ((error) || (newp == NULL))
   4771 		return (error);
   4772 
   4773 	return (0);
   4774 }
   4775 
   4776 /*
   4777 ** Thermal Shutdown Trigger (internal MAC)
   4778 **   - Set this to 1 to cause an overtemp event to occur
   4779 */
   4780 static int
   4781 ixgbe_sysctl_thermal_test(SYSCTLFN_ARGS)
   4782 {
   4783 	struct sysctlnode node = *rnode;
   4784 	struct adapter	*adapter = (struct adapter *)node.sysctl_data;
   4785 	int		error, fire = 0;
   4786 	struct ixgbe_hw *hw;
   4787 
   4788 	hw = &adapter->hw;
   4789 
   4790 	node.sysctl_data = &fire;
   4791 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4792 	if ((error) || (newp == NULL))
   4793 		return (error);
   4794 
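         	/*
         	 * Setting the thermal sensor bit in EICS raises that interrupt
         	 * cause, which exercises the driver's overtemp handling path.
         	 */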
   4795 	if (fire) {
   4796 		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
   4797 		reg |= IXGBE_EICR_TS;
   4798 		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
   4799 	}
   4800 
   4801 	return (0);
   4802 }
   4803 
   4804 /*
   4805 ** Manage DMA Coalescing.
   4806 ** Control values:
   4807 ** 	0/1 - off / on (use default value of 1000)
   4808 **
   4809 **	Legal timer values are:
   4810 **	50,100,250,500,1000,2000,5000,10000
   4811 **
   4812 **	Turning off interrupt moderation will also turn this off.
   4813 */
   4814 static int
   4815 ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
   4816 {
   4817 	struct sysctlnode node = *rnode;
   4818 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4819 	struct ixgbe_hw *hw = &adapter->hw;
   4820 	struct ifnet *ifp = adapter->ifp;
   4821 	int		error;
   4822 	u16		oldval;
   4823 	int		buffer;
   4824 
   4825 	oldval = adapter->dmac;
   4826 	buffer = oldval;
   4827 	node.sysctl_data = &buffer;
   4828 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4829 	if ((error) || (newp == NULL))
   4830 		return (error);
   4831 
   4832 	switch (hw->mac.type) {
   4833 	case ixgbe_mac_X550:
   4834 	case ixgbe_mac_X550EM_x:
   4835 		break;
   4836 	default:
   4837 		device_printf(adapter->dev,
   4838 		    "DMA Coalescing is only supported on X550 devices\n");
   4839 		return (ENODEV);
   4840 	}
    4841 
         	/* Take the value written via sysctl; the switch below validates it */
         	adapter->dmac = buffer;
    4842 	switch (adapter->dmac) {
   4843 	case 0:
   4844 		/* Disabled */
   4845 		break;
   4846 	case 1: /* Enable and use default */
   4847 		adapter->dmac = 1000;
   4848 		break;
   4849 	case 50:
   4850 	case 100:
   4851 	case 250:
   4852 	case 500:
   4853 	case 1000:
   4854 	case 2000:
   4855 	case 5000:
   4856 	case 10000:
   4857 		/* Legal values - allow */
   4858 		break;
   4859 	default:
   4860 		/* Do nothing, illegal value */
   4861 		adapter->dmac = oldval;
   4862 		return (EINVAL);
   4863 	}
   4864 
   4865 	/* Re-initialize hardware if it's already running */
   4866 	if (ifp->if_flags & IFF_RUNNING)
   4867 		ixgbe_init(ifp);
   4868 
   4869 	return (0);
   4870 }
   4871 
   4872 /*
   4873  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
   4874  * Values:
   4875  *	0 - disabled
   4876  *	1 - enabled
   4877  */
   4878 static int
   4879 ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
   4880 {
   4881 	struct sysctlnode node = *rnode;
   4882 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4883 	struct ixgbe_hw *hw = &adapter->hw;
   4884 	int new_wol_enabled;
   4885 	int error = 0;
   4886 
    4887 	new_wol_enabled = hw->wol_enabled;
         	node.sysctl_data = &new_wol_enabled;
    4888 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4889 	if ((error) || (newp == NULL))
   4890 		return (error);
   4891 	if (new_wol_enabled == hw->wol_enabled)
   4892 		return (0);
   4893 
   4894 	if (new_wol_enabled > 0 && !adapter->wol_support)
   4895 		return (ENODEV);
   4896 	else
   4897 		hw->wol_enabled = !!(new_wol_enabled);
   4898 
   4899 	return (0);
   4900 }
   4901 
   4902 /*
   4903  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
   4904  * if supported by the adapter.
   4905  * Values:
   4906  *	0 - disabled
   4907  *	1 - enabled
   4908  */
   4909 static int
   4910 ixgbe_sysctl_eee_enable(SYSCTLFN_ARGS)
   4911 {
   4912 	struct sysctlnode node = *rnode;
   4913 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4914 	struct ifnet *ifp = adapter->ifp;
   4915 	int new_eee_enabled, error = 0;
   4916 
   4917 	new_eee_enabled = adapter->eee_enabled;
   4918 	node.sysctl_data = &new_eee_enabled;
   4919 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   4920 	if ((error) || (newp == NULL))
   4921 		return (error);
   4922 	if (new_eee_enabled == adapter->eee_enabled)
   4923 		return (0);
   4924 
   4925 	if (new_eee_enabled > 0 && !adapter->eee_support)
   4926 		return (ENODEV);
   4927 	else
   4928 		adapter->eee_enabled = !!(new_eee_enabled);
   4929 
   4930 	/* Re-initialize hardware if it's already running */
   4931 	if (ifp->if_flags & IFF_RUNNING)
   4932 		ixgbe_init(ifp);
   4933 
   4934 	return (0);
   4935 }
   4936 
   4937 /*
   4938  * Read-only sysctl indicating whether EEE support was negotiated
   4939  * on the link.
   4940  */
   4941 static int
   4942 ixgbe_sysctl_eee_negotiated(SYSCTLFN_ARGS)
   4943 {
   4944 	struct sysctlnode node = *rnode;
   4945 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4946 	struct ixgbe_hw *hw = &adapter->hw;
   4947 	bool status;
   4948 
   4949 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
   4950 
   4951 	node.sysctl_data = &status;
   4952 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
   4953 }
   4954 
   4955 /*
   4956  * Read-only sysctl indicating whether RX Link is in LPI state.
   4957  */
   4958 static int
   4959 ixgbe_sysctl_eee_rx_lpi_status(SYSCTLFN_ARGS)
   4960 {
   4961 	struct sysctlnode node = *rnode;
   4962 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4963 	struct ixgbe_hw *hw = &adapter->hw;
   4964 	bool status;
   4965 
   4966 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
   4967 	    IXGBE_EEE_RX_LPI_STATUS);
   4968 
   4969 	node.sysctl_data = &status;
   4970 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
   4971 }
   4972 
   4973 /*
   4974  * Read-only sysctl indicating whether TX Link is in LPI state.
   4975  */
   4976 static int
   4977 ixgbe_sysctl_eee_tx_lpi_status(SYSCTLFN_ARGS)
   4978 {
   4979 	struct sysctlnode node = *rnode;
   4980 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   4981 	struct ixgbe_hw *hw = &adapter->hw;
   4982 	bool status;
   4983 
   4984 	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
   4985 	    IXGBE_EEE_TX_LPI_STATUS);
   4986 
   4987 	node.sysctl_data = &status;
   4988 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
   4989 }
   4990 
   4991 /*
   4992  * Sysctl to enable/disable the types of packets that the
   4993  * adapter will wake up on upon receipt.
   4994  * WUFC - Wake Up Filter Control
   4995  * Flags:
   4996  *	0x1  - Link Status Change
   4997  *	0x2  - Magic Packet
   4998  *	0x4  - Direct Exact
   4999  *	0x8  - Directed Multicast
   5000  *	0x10 - Broadcast
   5001  *	0x20 - ARP/IPv4 Request Packet
   5002  *	0x40 - Direct IPv4 Packet
   5003  *	0x80 - Direct IPv6 Packet
   5004  *
   5005  * Setting another flag will cause the sysctl to return an
   5006  * error.
   5007  */
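         /*
          * For example, writing 0x3 requests wakeup on a link-status change or
          * on a magic packet; any value with bits set above the low byte is
          * rejected.
          */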
   5008 static int
   5009 ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
   5010 {
   5011 	struct sysctlnode node = *rnode;
   5012 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   5013 	int error = 0;
   5014 	u32 new_wufc;
   5015 
    5016 	new_wufc = adapter->wufc;
         	node.sysctl_data = &new_wufc;
    5017 
    5018 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5019 	if ((error) || (newp == NULL))
   5020 		return (error);
   5021 	if (new_wufc == adapter->wufc)
   5022 		return (0);
   5023 
   5024 	if (new_wufc & 0xffffff00)
   5025 		return (EINVAL);
   5026 	else {
   5027 		new_wufc &= 0xff;
   5028 		new_wufc |= (0xffffff & adapter->wufc);
   5029 		adapter->wufc = new_wufc;
   5030 	}
   5031 
   5032 	return (0);
   5033 }
   5034 
   5035 /*
   5036 ** Enable the hardware to drop packets when the buffer is
    5037 ** full. This is useful with multiple queues, so that a single full
    5038 ** queue does not stall the entire RX engine. We only
   5039 ** enable this when Multiqueue AND when Flow Control is
   5040 ** disabled.
   5041 */
   5042 static void
   5043 ixgbe_enable_rx_drop(struct adapter *adapter)
   5044 {
    5045 	struct ixgbe_hw *hw = &adapter->hw;
   5046 
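         	/* Set the drop-enable bit in each queue's SRRCTL register */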
   5047 	for (int i = 0; i < adapter->num_queues; i++) {
    5048 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
    5049 		srrctl |= IXGBE_SRRCTL_DROP_EN;
    5050 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
   5051 	}
   5052 }
   5053 
   5054 static void
   5055 ixgbe_disable_rx_drop(struct adapter *adapter)
   5056 {
    5057 	struct ixgbe_hw *hw = &adapter->hw;
    5058 
    5059 	for (int i = 0; i < adapter->num_queues; i++) {
    5060 		u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
    5061 		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
    5062 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
   5063 	}
   5064 }
   5065 
   5066 static void
   5067 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
   5068 {
   5069 	u32 mask;
   5070 
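         	/*
         	 * 82598 has a single interrupt cause set register; newer MACs
         	 * spread the 64 possible queue bits across EICS_EX(0) and
         	 * EICS_EX(1).
         	 */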
   5071 	switch (adapter->hw.mac.type) {
   5072 	case ixgbe_mac_82598EB:
   5073 		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
   5074 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
   5075 		break;
   5076 	case ixgbe_mac_82599EB:
   5077 	case ixgbe_mac_X540:
   5078 	case ixgbe_mac_X550:
   5079 	case ixgbe_mac_X550EM_x:
   5080 		mask = (queues & 0xFFFFFFFF);
   5081 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
   5082 		mask = (queues >> 32);
   5083 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
   5084 		break;
   5085 	default:
   5086 		break;
   5087 	}
   5088 }
   5089