ixv.c revision 1.58
      1 /*$NetBSD: ixv.c,v 1.58 2017/08/30 08:49:18 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
     55  *   Used by probe to select the devices this driver attaches to
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
    148  * Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
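        /*
         * NetBSD autoconf(9) glue: CFATTACH_DECL3_NEW() above registers the
         * match/attach/detach entry points.  The FreeBSD driver_t/DRIVER_MODULE
         * glue is kept below under #if 0 for reference only.
         */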
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
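        /*
         * Note: the NetBSD port has no FreeBSD-style loader tunables, so the
         * TUNABLE_INT() macro below is defined away to a no-op; these defaults
         * can only be changed at compile time or, where nodes exist, via sysctl.
         */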
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
    171 #define	TUNABLE_INT(__x, __y)
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    173 
    174 /*
     175  * AIM: Adaptive Interrupt Moderation.
     176  * When enabled, the interrupt rate on each
     177  * vector is varied over time based on the
     178  * traffic seen on that interrupt vector.
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
    191 /*
     192  * Number of TX descriptors per ring;
     193  * set higher than RX as this seems to be
     194  * the better performing choice.
    195  */
    196 static int ixv_txd = DEFAULT_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = DEFAULT_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
    207 /*
     208  * Shadow VFTA table. This is needed because
     209  * the real VLAN filter table gets cleared during
    210  * a soft reset and we need to repopulate it.
    211  */
    212 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    213 
    214 #ifdef NET_MPSAFE
    215 #define IXGBE_MPSAFE		1
    216 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    217 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    218 #else
    219 #define IXGBE_CALLOUT_FLAGS	0
    220 #define IXGBE_SOFTINFT_FLAGS	0
    221 #endif
    222 
    223 #if 0
    224 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    225 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    226 #endif
    227 
    228 /************************************************************************
    229  * ixv_probe - Device identification routine
    230  *
     231  *   Determines if the driver should attach to this adapter
     232  *   based on its PCI vendor/device ID.
     233  *
     234  *   return 1 on a match, 0 otherwise
    235  ************************************************************************/
    236 static int
    237 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    238 {
    239 #ifdef __HAVE_PCI_MSI_MSIX
    240 	const struct pci_attach_args *pa = aux;
    241 
    242 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    243 #else
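        	/* The VF driver is MSI-X only; never match without MSI/MSI-X support. */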
    244 	return 0;
    245 #endif
    246 } /* ixv_probe */
    247 
    248 static ixgbe_vendor_info_t *
    249 ixv_lookup(const struct pci_attach_args *pa)
    250 {
    251 	ixgbe_vendor_info_t *ent;
    252 	pcireg_t subid;
    253 
    254 	INIT_DEBUGOUT("ixv_lookup: begin");
    255 
    256 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    257 		return NULL;
    258 
    259 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    260 
    261 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    262 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    263 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    264 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    265 		     (ent->subvendor_id == 0)) &&
    266 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    267 		     (ent->subdevice_id == 0))) {
    268 			return ent;
    269 		}
    270 	}
    271 
    272 	return NULL;
    273 }
    274 
    275 /************************************************************************
    276  * ixv_attach - Device initialization routine
    277  *
    278  *   Called when the driver is being loaded.
    279  *   Identifies the type of hardware, allocates all resources
    280  *   and initializes the hardware.
    281  *
     282  *   Returns nothing; on failure resources are freed and attach is aborted
    283  ************************************************************************/
    284 static void
    285 ixv_attach(device_t parent, device_t dev, void *aux)
    286 {
    287 	struct adapter *adapter;
    288 	struct ixgbe_hw *hw;
    289 	int             error = 0;
    290 	pcireg_t	id, subid;
    291 	ixgbe_vendor_info_t *ent;
    292 	const struct pci_attach_args *pa = aux;
    293 
    294 	INIT_DEBUGOUT("ixv_attach: begin");
    295 
    296 	/*
     297 	 * Make sure BUSMASTER is set; on a VM under
     298 	 * KVM it may not be, and that will break things.
    299 	 */
    300 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    301 
    302 	/* Allocate, clear, and link in our adapter structure */
    303 	adapter = device_private(dev);
    304 	adapter->dev = dev;
    305 	adapter->hw.back = adapter;
    306 	hw = &adapter->hw;
    307 
    308 	adapter->init_locked = ixv_init_locked;
    309 	adapter->stop_locked = ixv_stop;
    310 
    311 	adapter->osdep.pc = pa->pa_pc;
    312 	adapter->osdep.tag = pa->pa_tag;
    313 	if (pci_dma64_available(pa))
    314 		adapter->osdep.dmat = pa->pa_dmat64;
    315 	else
    316 		adapter->osdep.dmat = pa->pa_dmat;
    317 	adapter->osdep.attached = false;
    318 
    319 	ent = ixv_lookup(pa);
    320 
    321 	KASSERT(ent != NULL);
    322 
    323 	aprint_normal(": %s, Version - %s\n",
    324 	    ixv_strings[ent->index], ixv_driver_version);
    325 
    326 	/* Core Lock Init*/
    327 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    328 
    329 	/* Do base PCI setup - map BAR0 */
    330 	if (ixv_allocate_pci_resources(adapter, pa)) {
    331 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    332 		error = ENXIO;
    333 		goto err_out;
    334 	}
    335 
    336 	/* SYSCTL APIs */
    337 	ixv_add_device_sysctls(adapter);
    338 
    339 	/* Set up the timer callout */
    340 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    341 
    342 	/* Save off the information about this board */
    343 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    344 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    345 	hw->vendor_id = PCI_VENDOR(id);
    346 	hw->device_id = PCI_PRODUCT(id);
    347 	hw->revision_id =
    348 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    349 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    350 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    351 
    352 	/* A subset of set_mac_type */
    353 	switch (hw->device_id) {
    354 	case IXGBE_DEV_ID_82599_VF:
    355 		hw->mac.type = ixgbe_mac_82599_vf;
    356 		break;
    357 	case IXGBE_DEV_ID_X540_VF:
    358 		hw->mac.type = ixgbe_mac_X540_vf;
    359 		break;
    360 	case IXGBE_DEV_ID_X550_VF:
    361 		hw->mac.type = ixgbe_mac_X550_vf;
    362 		break;
    363 	case IXGBE_DEV_ID_X550EM_X_VF:
    364 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    365 		break;
    366 	case IXGBE_DEV_ID_X550EM_A_VF:
    367 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    368 		break;
    369 	default:
    370 		/* Shouldn't get here since probe succeeded */
    371 		aprint_error_dev(dev, "Unknown device ID!\n");
    372 		error = ENXIO;
    373 		goto err_out;
    374 		break;
    375 	}
    376 
    377 	ixv_init_device_features(adapter);
    378 
    379 	/* Initialize the shared code */
    380 	error = ixgbe_init_ops_vf(hw);
    381 	if (error) {
    382 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    383 		error = EIO;
    384 		goto err_out;
    385 	}
    386 
    387 	/* Setup the mailbox */
    388 	ixgbe_init_mbx_params_vf(hw);
    389 
    390 	/* Set the right number of segments */
    391 	adapter->num_segs = IXGBE_82599_SCATTER;
    392 
    393 	/* Reset mbox api to 1.0 */
    394 	error = hw->mac.ops.reset_hw(hw);
    395 	if (error == IXGBE_ERR_RESET_FAILED)
    396 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    397 	else if (error)
    398 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    399 		    error);
    400 	if (error) {
    401 		error = EIO;
    402 		goto err_out;
    403 	}
    404 
    405 	error = hw->mac.ops.init_hw(hw);
    406 	if (error) {
    407 		aprint_error_dev(dev, "...init_hw() failed!\n");
    408 		error = EIO;
    409 		goto err_out;
    410 	}
    411 
    412 	/* Negotiate mailbox API version */
    413 	error = ixv_negotiate_api(adapter);
    414 	if (error)
    415 		aprint_normal_dev(dev,
    416 		    "MBX API negotiation failed during attach!\n");
    417 
    418 	/* If no mac address was assigned, make a random one */
    419 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    420 		u8 addr[ETHER_ADDR_LEN];
    421 		uint64_t rndval = cprng_fast64();
    422 
    423 		memcpy(addr, &rndval, sizeof(addr));
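        		/* Clear the multicast bit and set the locally administered bit. */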
    424 		addr[0] &= 0xFE;
    425 		addr[0] |= 0x02;
    426 		bcopy(addr, hw->mac.addr, sizeof(addr));
    427 	}
    428 
    429 	/* Register for VLAN events */
    430 #if 0 /* XXX delete after write? */
    431 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    432 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    433 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    434 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    435 #endif
    436 
    437 	/* Sysctls for limiting the amount of work done in the taskqueues */
    438 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    439 	    "max number of rx packets to process",
    440 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    441 
    442 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    443 	    "max number of tx packets to process",
    444 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    445 
    446 	/* Do descriptor calc and sanity checks */
    447 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    448 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    449 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    450 		adapter->num_tx_desc = DEFAULT_TXD;
    451 	} else
    452 		adapter->num_tx_desc = ixv_txd;
    453 
    454 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    455 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    456 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    457 		adapter->num_rx_desc = DEFAULT_RXD;
    458 	} else
    459 		adapter->num_rx_desc = ixv_rxd;
    460 
    461 	/* Setup MSI-X */
    462 	error = ixv_configure_interrupts(adapter);
    463 	if (error)
    464 		goto err_out;
    465 
    466 	/* Allocate our TX/RX Queues */
    467 	if (ixgbe_allocate_queues(adapter)) {
    468 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    469 		error = ENOMEM;
    470 		goto err_out;
    471 	}
    472 
    473 	/* hw.ix defaults init */
    474 	adapter->enable_aim = ixv_enable_aim;
    475 
    476 	/* Setup OS specific network interface */
    477 	ixv_setup_interface(dev, adapter);
    478 
    479 	error = ixv_allocate_msix(adapter, pa);
    480 	if (error) {
    481 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    482 		goto err_late;
    483 	}
    484 
    485 	/* Do the stats setup */
    486 	ixv_save_stats(adapter);
    487 	ixv_init_stats(adapter);
    488 	ixv_add_stats_sysctls(adapter);
    489 
    490 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    491 		ixgbe_netmap_attach(adapter);
    492 
    493 	INIT_DEBUGOUT("ixv_attach: end");
    494 	adapter->osdep.attached = true;
    495 
    496 	return;
    497 
    498 err_late:
    499 	ixgbe_free_transmit_structures(adapter);
    500 	ixgbe_free_receive_structures(adapter);
    501 	free(adapter->queues, M_DEVBUF);
    502 err_out:
    503 	ixv_free_pci_resources(adapter);
    504 	IXGBE_CORE_LOCK_DESTROY(adapter);
    505 
    506 	return;
    507 } /* ixv_attach */
    508 
    509 /************************************************************************
    510  * ixv_detach - Device removal routine
    511  *
    512  *   Called when the driver is being removed.
    513  *   Stops the adapter and deallocates all the resources
    514  *   that were allocated for driver operation.
    515  *
    516  *   return 0 on success, positive on failure
    517  ************************************************************************/
    518 static int
    519 ixv_detach(device_t dev, int flags)
    520 {
    521 	struct adapter  *adapter = device_private(dev);
    522 	struct ix_queue *que = adapter->queues;
    523 	struct tx_ring *txr = adapter->tx_rings;
    524 	struct rx_ring *rxr = adapter->rx_rings;
    525 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    526 
    527 	INIT_DEBUGOUT("ixv_detach: begin");
    528 	if (adapter->osdep.attached == false)
    529 		return 0;
    530 
    531 	/* Stop the interface. Callouts are stopped in it. */
    532 	ixv_ifstop(adapter->ifp, 1);
    533 
    534 #if NVLAN > 0
    535 	/* Make sure VLANs are not using driver */
    536 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    537 		;	/* nothing to do: no VLANs */
    538 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    539 		vlan_ifdetach(adapter->ifp);
    540 	else {
    541 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    542 		return EBUSY;
    543 	}
    544 #endif
    545 
    546 	IXGBE_CORE_LOCK(adapter);
    547 	ixv_stop(adapter);
    548 	IXGBE_CORE_UNLOCK(adapter);
    549 
    550 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    551 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    552 			softint_disestablish(txr->txr_si);
    553 		softint_disestablish(que->que_si);
    554 	}
    555 
    556 	/* Drain the Mailbox(link) queue */
    557 	softint_disestablish(adapter->link_si);
    558 
    559 	/* Unregister VLAN events */
    560 #if 0 /* XXX msaitoh delete after write? */
    561 	if (adapter->vlan_attach != NULL)
    562 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    563 	if (adapter->vlan_detach != NULL)
    564 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    565 #endif
    566 
    567 	ether_ifdetach(adapter->ifp);
    568 	callout_halt(&adapter->timer, NULL);
    569 
    570 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    571 		netmap_detach(adapter->ifp);
    572 
    573 	ixv_free_pci_resources(adapter);
    574 #if 0 /* XXX the NetBSD port is probably missing something here */
    575 	bus_generic_detach(dev);
    576 #endif
    577 	if_detach(adapter->ifp);
    578 	if_percpuq_destroy(adapter->ipq);
    579 
    580 	sysctl_teardown(&adapter->sysctllog);
    581 	evcnt_detach(&adapter->handleq);
    582 	evcnt_detach(&adapter->req);
    583 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    584 	evcnt_detach(&adapter->mbuf_defrag_failed);
    585 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    586 	evcnt_detach(&adapter->einval_tx_dma_setup);
    587 	evcnt_detach(&adapter->other_tx_dma_setup);
    588 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    589 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    590 	evcnt_detach(&adapter->watchdog_events);
    591 	evcnt_detach(&adapter->tso_err);
    592 	evcnt_detach(&adapter->link_irq);
    593 
    594 	txr = adapter->tx_rings;
    595 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    596 		evcnt_detach(&adapter->queues[i].irqs);
    597 		evcnt_detach(&txr->no_desc_avail);
    598 		evcnt_detach(&txr->total_packets);
    599 		evcnt_detach(&txr->tso_tx);
    600 #ifndef IXGBE_LEGACY_TX
    601 		evcnt_detach(&txr->pcq_drops);
    602 #endif
    603 
    604 		evcnt_detach(&rxr->rx_packets);
    605 		evcnt_detach(&rxr->rx_bytes);
    606 		evcnt_detach(&rxr->rx_copies);
    607 		evcnt_detach(&rxr->no_jmbuf);
    608 		evcnt_detach(&rxr->rx_discarded);
    609 	}
    610 	evcnt_detach(&stats->ipcs);
    611 	evcnt_detach(&stats->l4cs);
    612 	evcnt_detach(&stats->ipcs_bad);
    613 	evcnt_detach(&stats->l4cs_bad);
    614 
    615 	/* Packet Reception Stats */
    616 	evcnt_detach(&stats->vfgorc);
    617 	evcnt_detach(&stats->vfgprc);
    618 	evcnt_detach(&stats->vfmprc);
    619 
    620 	/* Packet Transmission Stats */
    621 	evcnt_detach(&stats->vfgotc);
    622 	evcnt_detach(&stats->vfgptc);
    623 
    624 	ixgbe_free_transmit_structures(adapter);
    625 	ixgbe_free_receive_structures(adapter);
    626 	free(adapter->queues, M_DEVBUF);
    627 
    628 	IXGBE_CORE_LOCK_DESTROY(adapter);
    629 
    630 	return (0);
    631 } /* ixv_detach */
    632 
    633 /************************************************************************
    634  * ixv_init_locked - Init entry point
    635  *
     636  *   Used in two ways: it is called by the stack (via ixv_init())
     637  *   as the init entry point of the network interface structure,
     638  *   and it is used by the driver as a hw/sw initialization routine
     639  *   to get back to a consistent state.
     640  *
     641  *   Returns nothing; failures are reported on the console.
    642  ************************************************************************/
    643 static void
    644 ixv_init_locked(struct adapter *adapter)
    645 {
    646 	struct ifnet	*ifp = adapter->ifp;
    647 	device_t 	dev = adapter->dev;
    648 	struct ixgbe_hw *hw = &adapter->hw;
    649 	int             error = 0;
    650 
    651 	INIT_DEBUGOUT("ixv_init_locked: begin");
    652 	KASSERT(mutex_owned(&adapter->core_mtx));
    653 	hw->adapter_stopped = FALSE;
    654 	hw->mac.ops.stop_adapter(hw);
     655 	callout_stop(&adapter->timer);
    656 
    657 	/* reprogram the RAR[0] in case user changed it. */
    658 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    659 
    660 	/* Get the latest mac address, User can use a LAA */
    661 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    662 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    663 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    664 
    665 	/* Prepare transmit descriptors and buffers */
    666 	if (ixgbe_setup_transmit_structures(adapter)) {
    667 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    668 		ixv_stop(adapter);
    669 		return;
    670 	}
    671 
    672 	/* Reset VF and renegotiate mailbox API version */
    673 	hw->mac.ops.reset_hw(hw);
    674 	error = ixv_negotiate_api(adapter);
    675 	if (error)
    676 		device_printf(dev,
    677 		    "Mailbox API negotiation failed in init_locked!\n");
    678 
    679 	ixv_initialize_transmit_units(adapter);
    680 
    681 	/* Setup Multicast table */
    682 	ixv_set_multi(adapter);
    683 
    684 	/*
    685 	 * Determine the correct mbuf pool
    686 	 * for doing jumbo/headersplit
    687 	 */
    688 	if (ifp->if_mtu > ETHERMTU)
    689 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    690 	else
    691 		adapter->rx_mbuf_sz = MCLBYTES;
    692 
    693 	/* Prepare receive descriptors and buffers */
    694 	if (ixgbe_setup_receive_structures(adapter)) {
    695 		device_printf(dev, "Could not setup receive structures\n");
    696 		ixv_stop(adapter);
    697 		return;
    698 	}
    699 
    700 	/* Configure RX settings */
    701 	ixv_initialize_receive_units(adapter);
    702 
    703 #if 0 /* XXX isn't it required? -- msaitoh  */
    704 	/* Set the various hardware offload abilities */
    705 	ifp->if_hwassist = 0;
    706 	if (ifp->if_capenable & IFCAP_TSO4)
    707 		ifp->if_hwassist |= CSUM_TSO;
    708 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    709 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    710 #if __FreeBSD_version >= 800000
    711 		ifp->if_hwassist |= CSUM_SCTP;
    712 #endif
    713 	}
    714 #endif
    715 
    716 	/* Set up VLAN offload and filter */
    717 	ixv_setup_vlan_support(adapter);
    718 
    719 	/* Set up MSI-X routing */
    720 	ixv_configure_ivars(adapter);
    721 
    722 	/* Set up auto-mask */
    723 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    724 
    725 	/* Set moderation on the Link interrupt */
    726 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    727 
    728 	/* Stats init */
    729 	ixv_init_stats(adapter);
    730 
    731 	/* Config/Enable Link */
    732 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    733 	    FALSE);
    734 
    735 	/* Start watchdog */
    736 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    737 
    738 	/* And now turn on interrupts */
    739 	ixv_enable_intr(adapter);
    740 
    741 	/* Now inform the stack we're ready */
    742 	ifp->if_flags |= IFF_RUNNING;
    743 	ifp->if_flags &= ~IFF_OACTIVE;
    744 
    745 	return;
    746 } /* ixv_init_locked */
    747 
    748 /*
    749  * MSI-X Interrupt Handlers and Tasklets
    750  */
    751 
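        /*
         * The helpers below manipulate per-queue bits in the VF interrupt
         * registers: VTEIMS enables a vector, VTEIMC masks (disables) it, and
         * VTEICS forces (rearms) an interrupt for the given queues.
         */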
    752 static inline void
    753 ixv_enable_queue(struct adapter *adapter, u32 vector)
    754 {
    755 	struct ixgbe_hw *hw = &adapter->hw;
    756 	u32             queue = 1 << vector;
    757 	u32             mask;
    758 
    759 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    760 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    761 } /* ixv_enable_queue */
    762 
    763 static inline void
    764 ixv_disable_queue(struct adapter *adapter, u32 vector)
    765 {
    766 	struct ixgbe_hw *hw = &adapter->hw;
    767 	u64             queue = (u64)(1 << vector);
    768 	u32             mask;
    769 
    770 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    771 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    772 } /* ixv_disable_queue */
    773 
    774 static inline void
    775 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    776 {
    777 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    778 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    779 } /* ixv_rearm_queues */
    780 
    781 
    782 /************************************************************************
    783  * ixv_msix_que - MSI Queue Interrupt Service routine
    784  ************************************************************************/
    785 static int
    786 ixv_msix_que(void *arg)
    787 {
    788 	struct ix_queue	*que = arg;
    789 	struct adapter  *adapter = que->adapter;
    790 	struct ifnet    *ifp = adapter->ifp;
    791 	struct tx_ring	*txr = que->txr;
    792 	struct rx_ring	*rxr = que->rxr;
    793 	bool		more;
    794 	u32		newitr = 0;
    795 
    796 	ixv_disable_queue(adapter, que->msix);
    797 	++que->irqs.ev_count;
    798 
    799 #ifdef __NetBSD__
    800 	/* Don't run ixgbe_rxeof in interrupt context */
    801 	more = true;
    802 #else
    803 	more = ixgbe_rxeof(que);
    804 #endif
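        	/* RX cleanup is deferred to the per-queue softint (ixv_handle_que). */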
    805 
    806 	IXGBE_TX_LOCK(txr);
    807 	ixgbe_txeof(txr);
    808 	/*
    809 	 * Make certain that if the stack
    810 	 * has anything queued the task gets
    811 	 * scheduled to handle it.
    812 	 */
    813 	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    814 		if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
    815 			ixgbe_mq_start_locked(ifp, txr);
    816 		/* Only for queue 0 */
    817 		/* NetBSD still needs this for CBQ */
    818 		if ((&adapter->queues[0] == que)
    819 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
    820 			ixgbe_legacy_start_locked(ifp, txr);
    821 	IXGBE_TX_UNLOCK(txr);
    822 
    823 	/* Do AIM now? */
    824 
    825 	if (adapter->enable_aim == false)
    826 		goto no_calc;
    827 	/*
    828 	 * Do Adaptive Interrupt Moderation:
    829 	 *  - Write out last calculated setting
    830 	 *  - Calculate based on average size over
    831 	 *    the last interval.
    832 	 */
     833 	if (que->eitr_setting)
    834 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    835 		    que->eitr_setting);
    836 
    837 	que->eitr_setting = 0;
    838 
    839 	/* Idle, do nothing */
    840 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    841 		goto no_calc;
    842 
    843 	if ((txr->bytes) && (txr->packets))
    844 		newitr = txr->bytes/txr->packets;
    845 	if ((rxr->bytes) && (rxr->packets))
    846 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    847 	newitr += 24; /* account for hardware frame, crc */
    848 
    849 	/* set an upper boundary */
    850 	newitr = min(newitr, 3000);
    851 
    852 	/* Be nice to the mid range */
    853 	if ((newitr > 300) && (newitr < 1200))
    854 		newitr = (newitr / 3);
    855 	else
    856 		newitr = (newitr / 2);
    857 
    858 	newitr |= newitr << 16;
    859 
    860 	/* save for next interrupt */
    861 	que->eitr_setting = newitr;
    862 
    863 	/* Reset state */
    864 	txr->bytes = 0;
    865 	txr->packets = 0;
    866 	rxr->bytes = 0;
    867 	rxr->packets = 0;
    868 
    869 no_calc:
    870 	if (more)
    871 		softint_schedule(que->que_si);
    872 	else /* Re-enable this interrupt */
    873 		ixv_enable_queue(adapter, que->msix);
    874 
    875 	return 1;
    876 } /* ixv_msix_que */
    877 
    878 /************************************************************************
    879  * ixv_msix_mbx
    880  ************************************************************************/
    881 static int
    882 ixv_msix_mbx(void *arg)
    883 {
    884 	struct adapter	*adapter = arg;
    885 	struct ixgbe_hw *hw = &adapter->hw;
    886 	u32		reg;
    887 
    888 	++adapter->link_irq.ev_count;
    889 
    890 	/* First get the cause */
    891 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    892 	/* Clear interrupt with write */
    893 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    894 
    895 	/* Link status change */
    896 	if (reg & IXGBE_EICR_LSC)
    897 		softint_schedule(adapter->link_si);
    898 
    899 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    900 
    901 	return 1;
    902 } /* ixv_msix_mbx */
    903 
    904 /************************************************************************
    905  * ixv_media_status - Media Ioctl callback
    906  *
    907  *   Called whenever the user queries the status of
    908  *   the interface using ifconfig.
    909  ************************************************************************/
    910 static void
    911 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
    912 {
    913 	struct adapter *adapter = ifp->if_softc;
    914 
    915 	INIT_DEBUGOUT("ixv_media_status: begin");
    916 	IXGBE_CORE_LOCK(adapter);
    917 	ixv_update_link_status(adapter);
    918 
    919 	ifmr->ifm_status = IFM_AVALID;
    920 	ifmr->ifm_active = IFM_ETHER;
    921 
    922 	if (!adapter->link_active) {
    923 		ifmr->ifm_active |= IFM_NONE;
    924 		IXGBE_CORE_UNLOCK(adapter);
    925 		return;
    926 	}
    927 
    928 	ifmr->ifm_status |= IFM_ACTIVE;
    929 
    930 	switch (adapter->link_speed) {
    931 		case IXGBE_LINK_SPEED_10GB_FULL:
    932 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    933 			break;
    934 		case IXGBE_LINK_SPEED_1GB_FULL:
    935 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    936 			break;
    937 		case IXGBE_LINK_SPEED_100_FULL:
    938 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    939 			break;
    940 		case IXGBE_LINK_SPEED_10_FULL:
    941 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    942 			break;
    943 	}
    944 
    945 	IXGBE_CORE_UNLOCK(adapter);
    946 
    947 	return;
    948 } /* ixv_media_status */
    949 
    950 /************************************************************************
    951  * ixv_media_change - Media Ioctl callback
    952  *
    953  *   Called when the user changes speed/duplex using
    954  *   media/mediopt option with ifconfig.
    955  ************************************************************************/
    956 static int
    957 ixv_media_change(struct ifnet *ifp)
    958 {
    959 	struct adapter *adapter = ifp->if_softc;
    960 	struct ifmedia *ifm = &adapter->media;
    961 
    962 	INIT_DEBUGOUT("ixv_media_change: begin");
    963 
    964 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    965 		return (EINVAL);
    966 
    967 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    968 	case IFM_AUTO:
    969 		break;
    970 	default:
    971 		device_printf(adapter->dev, "Only auto media type\n");
    972 		return (EINVAL);
    973 	}
    974 
    975 	return (0);
    976 } /* ixv_media_change */
    977 
    978 
    979 /************************************************************************
    980  * ixv_negotiate_api
    981  *
    982  *   Negotiate the Mailbox API with the PF;
    983  *   start with the most featured API first.
    984  ************************************************************************/
    985 static int
    986 ixv_negotiate_api(struct adapter *adapter)
    987 {
    988 	struct ixgbe_hw *hw = &adapter->hw;
    989 	int             mbx_api[] = { ixgbe_mbox_api_11,
    990 	                              ixgbe_mbox_api_10,
    991 	                              ixgbe_mbox_api_unknown };
    992 	int             i = 0;
    993 
    994 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
    995 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
    996 			return (0);
    997 		i++;
    998 	}
    999 
   1000 	return (EINVAL);
   1001 } /* ixv_negotiate_api */
   1002 
   1003 
   1004 /************************************************************************
   1005  * ixv_set_multi - Multicast Update
   1006  *
   1007  *   Called whenever multicast address list is updated.
   1008  ************************************************************************/
   1009 static void
   1010 ixv_set_multi(struct adapter *adapter)
   1011 {
   1012 	struct ether_multi *enm;
   1013 	struct ether_multistep step;
   1014 	struct ethercom *ec = &adapter->osdep.ec;
   1015 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1016 	u8                 *update_ptr;
   1017 	int                mcnt = 0;
   1018 
   1019 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1020 
   1021 	ETHER_FIRST_MULTI(step, ec, enm);
   1022 	while (enm != NULL) {
   1023 		bcopy(enm->enm_addrlo,
   1024 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1025 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1026 		mcnt++;
   1027 		/* XXX This might be required --msaitoh */
   1028 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1029 			break;
   1030 		ETHER_NEXT_MULTI(step, enm);
   1031 	}
   1032 
   1033 	update_ptr = mta;
   1034 
   1035 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1036 	    ixv_mc_array_itr, TRUE);
   1037 
   1038 	return;
   1039 } /* ixv_set_multi */
   1040 
   1041 /************************************************************************
   1042  * ixv_mc_array_itr
   1043  *
   1044  *   An iterator function needed by the multicast shared code.
   1045  *   It feeds the shared code routine the addresses in the
   1046  *   array of ixv_set_multi() one by one.
   1047  ************************************************************************/
   1048 static u8 *
   1049 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1050 {
   1051 	u8 *addr = *update_ptr;
   1052 	u8 *newptr;
   1053 	*vmdq = 0;
   1054 
   1055 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1056 	*update_ptr = newptr;
   1057 
   1058 	return addr;
   1059 } /* ixv_mc_array_itr */
   1060 
   1061 /************************************************************************
   1062  * ixv_local_timer - Timer routine
   1063  *
   1064  *   Checks for link status, updates statistics,
   1065  *   and runs the watchdog check.
   1066  ************************************************************************/
   1067 static void
   1068 ixv_local_timer(void *arg)
   1069 {
   1070 	struct adapter *adapter = arg;
   1071 
   1072 	IXGBE_CORE_LOCK(adapter);
   1073 	ixv_local_timer_locked(adapter);
   1074 	IXGBE_CORE_UNLOCK(adapter);
   1075 }
   1076 
   1077 static void
   1078 ixv_local_timer_locked(void *arg)
   1079 {
   1080 	struct adapter	*adapter = arg;
   1081 	device_t	dev = adapter->dev;
   1082 	struct ix_queue	*que = adapter->queues;
   1083 	u64		queues = 0;
   1084 	int		hung = 0;
   1085 
   1086 	KASSERT(mutex_owned(&adapter->core_mtx));
   1087 
   1088 	ixv_check_link(adapter);
   1089 
   1090 	/* Stats Update */
   1091 	ixv_update_stats(adapter);
   1092 
   1093 	/*
   1094 	 * Check the TX queues status
   1095 	 *      - mark hung queues so we don't schedule on them
   1096 	 *      - watchdog only if all queues show hung
   1097 	 */
   1098 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1099 		/* Keep track of queues with work for soft irq */
   1100 		if (que->txr->busy)
   1101 			queues |= ((u64)1 << que->me);
   1102 		/*
    1103 		 * Each time txeof runs without cleaning while there
    1104 		 * are uncleaned descriptors, it increments busy. If
   1105 		 * we get to the MAX we declare it hung.
   1106 		 */
   1107 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1108 			++hung;
   1109 			/* Mark the queue as inactive */
   1110 			adapter->active_queues &= ~((u64)1 << que->me);
   1111 			continue;
   1112 		} else {
   1113 			/* Check if we've come back from hung */
   1114 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1115 				adapter->active_queues |= ((u64)1 << que->me);
   1116 		}
   1117 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1118 			device_printf(dev,
   1119 			    "Warning queue %d appears to be hung!\n", i);
   1120 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1121 			++hung;
   1122 		}
   1123 	}
   1124 
   1125 	/* Only truly watchdog if all queues show hung */
   1126 	if (hung == adapter->num_queues)
   1127 		goto watchdog;
   1128 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1129 		ixv_rearm_queues(adapter, queues);
   1130 	}
   1131 
   1132 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1133 
   1134 	return;
   1135 
   1136 watchdog:
   1137 
   1138 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1139 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1140 	adapter->watchdog_events.ev_count++;
   1141 	ixv_init_locked(adapter);
   1142 } /* ixv_local_timer */
   1143 
   1144 /************************************************************************
   1145  * ixv_update_link_status - Update OS on link state
   1146  *
   1147  * Note: Only updates the OS on the cached link state.
   1148  *       The real check of the hardware only happens with
   1149  *       a link interrupt.
   1150  ************************************************************************/
   1151 static void
   1152 ixv_update_link_status(struct adapter *adapter)
   1153 {
   1154 	struct ifnet *ifp = adapter->ifp;
   1155 	device_t     dev = adapter->dev;
   1156 
   1157 	if (adapter->link_up) {
   1158 		if (adapter->link_active == FALSE) {
   1159 			if (bootverbose) {
   1160 				const char *bpsmsg;
   1161 
   1162 				switch (adapter->link_speed) {
   1163 				case IXGBE_LINK_SPEED_10GB_FULL:
   1164 					bpsmsg = "10 Gbps";
   1165 					break;
   1166 				case IXGBE_LINK_SPEED_5GB_FULL:
   1167 					bpsmsg = "5 Gbps";
   1168 					break;
   1169 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1170 					bpsmsg = "2.5 Gbps";
   1171 					break;
   1172 				case IXGBE_LINK_SPEED_1GB_FULL:
   1173 					bpsmsg = "1 Gbps";
   1174 					break;
   1175 				case IXGBE_LINK_SPEED_100_FULL:
   1176 					bpsmsg = "100 Mbps";
   1177 					break;
   1178 				case IXGBE_LINK_SPEED_10_FULL:
   1179 					bpsmsg = "10 Mbps";
   1180 					break;
   1181 				default:
   1182 					bpsmsg = "unknown speed";
   1183 					break;
   1184 				}
    1185 				device_printf(dev, "Link is up %s Full Duplex\n",
    1186 				    bpsmsg);
   1187 			}
   1188 			adapter->link_active = TRUE;
   1189 			if_link_state_change(ifp, LINK_STATE_UP);
   1190 		}
   1191 	} else { /* Link down */
   1192 		if (adapter->link_active == TRUE) {
   1193 			if (bootverbose)
    1194 				device_printf(dev, "Link is Down\n");
   1195 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1196 			adapter->link_active = FALSE;
   1197 		}
   1198 	}
   1199 
   1200 	return;
   1201 } /* ixv_update_link_status */
   1202 
   1203 
   1204 /************************************************************************
   1205  * ixv_stop - Stop the hardware
   1206  *
    1207  *   Disables all traffic on the adapter by issuing a
    1208  *   global reset on the MAC; TX/RX buffers are freed in detach.
   1209  ************************************************************************/
   1210 static void
   1211 ixv_ifstop(struct ifnet *ifp, int disable)
   1212 {
   1213 	struct adapter *adapter = ifp->if_softc;
   1214 
   1215 	IXGBE_CORE_LOCK(adapter);
   1216 	ixv_stop(adapter);
   1217 	IXGBE_CORE_UNLOCK(adapter);
   1218 }
   1219 
   1220 static void
   1221 ixv_stop(void *arg)
   1222 {
   1223 	struct ifnet    *ifp;
   1224 	struct adapter  *adapter = arg;
   1225 	struct ixgbe_hw *hw = &adapter->hw;
   1226 
   1227 	ifp = adapter->ifp;
   1228 
   1229 	KASSERT(mutex_owned(&adapter->core_mtx));
   1230 
   1231 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1232 	ixv_disable_intr(adapter);
   1233 
   1234 	/* Tell the stack that the interface is no longer active */
   1235 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1236 
   1237 	hw->mac.ops.reset_hw(hw);
   1238 	adapter->hw.adapter_stopped = FALSE;
   1239 	hw->mac.ops.stop_adapter(hw);
   1240 	callout_stop(&adapter->timer);
   1241 
   1242 	/* reprogram the RAR[0] in case user changed it. */
   1243 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1244 
   1245 	return;
   1246 } /* ixv_stop */
   1247 
   1248 
   1249 /************************************************************************
   1250  * ixv_allocate_pci_resources
   1251  ************************************************************************/
   1252 static int
   1253 ixv_allocate_pci_resources(struct adapter *adapter,
   1254     const struct pci_attach_args *pa)
   1255 {
   1256 	pcireg_t	memtype;
   1257 	device_t        dev = adapter->dev;
   1258 	bus_addr_t addr;
   1259 	int flags;
   1260 
   1261 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1262 	switch (memtype) {
   1263 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1264 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1265 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1266 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1267 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1268 			goto map_err;
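        		/*
        		 * Map the register BAR non-prefetchable: register reads have
        		 * side effects and writes must not be combined.
        		 */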
   1269 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1270 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1271 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1272 		}
   1273 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1274 		     adapter->osdep.mem_size, flags,
   1275 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1276 map_err:
   1277 			adapter->osdep.mem_size = 0;
   1278 			aprint_error_dev(dev, "unable to map BAR0\n");
   1279 			return ENXIO;
   1280 		}
   1281 		break;
   1282 	default:
   1283 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1284 		return ENXIO;
   1285 	}
   1286 
   1287 	/* Pick up the tuneable queues */
   1288 	adapter->num_queues = ixv_num_queues;
   1289 
   1290 	return (0);
   1291 } /* ixv_allocate_pci_resources */
   1292 
   1293 /************************************************************************
   1294  * ixv_free_pci_resources
   1295  ************************************************************************/
   1296 static void
   1297 ixv_free_pci_resources(struct adapter * adapter)
   1298 {
   1299 	struct 		ix_queue *que = adapter->queues;
   1300 	int		rid;
   1301 
   1302 	/*
   1303 	 *  Release all msix queue resources:
   1304 	 */
   1305 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1306 		if (que->res != NULL)
   1307 			pci_intr_disestablish(adapter->osdep.pc,
   1308 			    adapter->osdep.ihs[i]);
   1309 	}
   1310 
   1311 
   1312 	/* Clean the Mailbox interrupt last */
   1313 	rid = adapter->vector;
   1314 
   1315 	if (adapter->osdep.ihs[rid] != NULL) {
   1316 		pci_intr_disestablish(adapter->osdep.pc,
   1317 		    adapter->osdep.ihs[rid]);
   1318 		adapter->osdep.ihs[rid] = NULL;
   1319 	}
   1320 
   1321 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1322 	    adapter->osdep.nintrs);
   1323 
   1324 	if (adapter->osdep.mem_size != 0) {
   1325 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1326 		    adapter->osdep.mem_bus_space_handle,
   1327 		    adapter->osdep.mem_size);
   1328 	}
   1329 
   1330 	return;
   1331 } /* ixv_free_pci_resources */
   1332 
   1333 /************************************************************************
   1334  * ixv_setup_interface
   1335  *
   1336  *   Setup networking device structure and register an interface.
   1337  ************************************************************************/
   1338 static void
   1339 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1340 {
   1341 	struct ethercom *ec = &adapter->osdep.ec;
   1342 	struct ifnet   *ifp;
   1343 
   1344 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1345 
   1346 	ifp = adapter->ifp = &ec->ec_if;
   1347 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1348 	ifp->if_baudrate = IF_Gbps(10);
   1349 	ifp->if_init = ixv_init;
   1350 	ifp->if_stop = ixv_ifstop;
   1351 	ifp->if_softc = adapter;
   1352 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1353 #ifdef IXGBE_MPSAFE
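        	/* Mark the transmit start path as MP-safe (no KERNEL_LOCK needed). */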
   1354 	ifp->if_extflags = IFEF_START_MPSAFE;
   1355 #endif
   1356 	ifp->if_ioctl = ixv_ioctl;
   1357 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1358 #if 0
   1359 		ixv_start_locked = ixgbe_legacy_start_locked;
   1360 #endif
   1361 	} else {
   1362 		ifp->if_transmit = ixgbe_mq_start;
   1363 #if 0
   1364 		ixv_start_locked = ixgbe_mq_start_locked;
   1365 #endif
   1366 	}
   1367 	ifp->if_start = ixgbe_legacy_start;
   1368 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1369 	IFQ_SET_READY(&ifp->if_snd);
   1370 
   1371 	if_initialize(ifp);
   1372 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1373 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1374 	/*
   1375 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1376 	 * used.
   1377 	 */
   1378 	if_register(ifp);
   1379 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1380 
   1381 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1382 
   1383 	/*
   1384 	 * Tell the upper layer(s) we support long frames.
   1385 	 */
   1386 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1387 
   1388 	/* Set capability flags */
   1389 	ifp->if_capabilities |= IFCAP_HWCSUM
   1390 	                     |  IFCAP_TSOv4
   1391 	                     |  IFCAP_TSOv6;
   1392 	ifp->if_capenable = 0;
   1393 
   1394 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1395 			    |  ETHERCAP_VLAN_HWCSUM
   1396 			    |  ETHERCAP_JUMBO_MTU
   1397 			    |  ETHERCAP_VLAN_MTU;
   1398 
   1399 	/* Enable the above capabilities by default */
   1400 	ec->ec_capenable = ec->ec_capabilities;
   1401 
   1402 	/* Don't enable LRO by default */
   1403 	ifp->if_capabilities |= IFCAP_LRO;
   1404 #if 0
   1405 	ifp->if_capenable = ifp->if_capabilities;
   1406 #endif
   1407 
   1408 	/*
   1409 	 * Specify the media types supported by this adapter and register
   1410 	 * callbacks to update media and link information
   1411 	 */
   1412 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1413 	    ixv_media_status);
   1414 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1415 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1416 
   1417 	return;
   1418 } /* ixv_setup_interface */
   1419 
   1420 
   1421 /************************************************************************
   1422  * ixv_initialize_transmit_units - Enable transmit unit.
   1423  ************************************************************************/
   1424 static void
   1425 ixv_initialize_transmit_units(struct adapter *adapter)
   1426 {
   1427 	struct tx_ring	*txr = adapter->tx_rings;
   1428 	struct ixgbe_hw	*hw = &adapter->hw;
   1429 
   1430 
   1431 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1432 		u64 tdba = txr->txdma.dma_paddr;
   1433 		u32 txctrl, txdctl;
   1434 
   1435 		/* Set WTHRESH to 8, burst writeback */
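        		/* (WTHRESH occupies bits 22:16 of TXDCTL, hence the shift below.) */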
   1436 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1437 		txdctl |= (8 << 16);
   1438 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1439 
   1440 		/* Set the HW Tx Head and Tail indices */
   1441 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1442 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1443 
   1444 		/* Set Tx Tail register */
   1445 		txr->tail = IXGBE_VFTDT(i);
   1446 
   1447 		/* Set Ring parameters */
   1448 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1449 		    (tdba & 0x00000000ffffffffULL));
   1450 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1451 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1452 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1453 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1454 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1455 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1456 
   1457 		/* Now enable */
   1458 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1459 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1460 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1461 	}
   1462 
   1463 	return;
   1464 } /* ixv_initialize_transmit_units */
   1465 
   1466 
   1467 /************************************************************************
   1468  * ixv_initialize_rss_mapping
   1469  ************************************************************************/
   1470 static void
   1471 ixv_initialize_rss_mapping(struct adapter *adapter)
   1472 {
   1473 	struct ixgbe_hw *hw = &adapter->hw;
   1474 	u32             reta = 0, mrqc, rss_key[10];
   1475 	int             queue_id;
   1476 	int             i, j;
   1477 	u32             rss_hash_config;
   1478 
   1479 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1480 		/* Fetch the configured RSS key */
   1481 		rss_getkey((uint8_t *)&rss_key);
   1482 	} else {
   1483 		/* set up random bits */
   1484 		cprng_fast(&rss_key, sizeof(rss_key));
   1485 	}
   1486 
   1487 	/* Now fill out hash function seeds */
   1488 	for (i = 0; i < 10; i++)
   1489 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1490 
   1491 	/* Set up the redirection table */
   1492 	for (i = 0, j = 0; i < 64; i++, j++) {
   1493 		if (j == adapter->num_queues)
   1494 			j = 0;
   1495 
   1496 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1497 			/*
   1498 			 * Fetch the RSS bucket id for the given indirection
   1499 			 * entry. Cap it at the number of configured buckets
   1500 			 * (which is num_queues.)
   1501 			 */
   1502 			queue_id = rss_get_indirection_to_bucket(i);
   1503 			queue_id = queue_id % adapter->num_queues;
   1504 		} else
   1505 			queue_id = j;
   1506 
   1507 		/*
   1508 		 * The low 8 bits are for hash value (n+0);
   1509 		 * The next 8 bits are for hash value (n+1), etc.
   1510 		 */
   1511 		reta >>= 8;
   1512 		reta |= ((uint32_t)queue_id) << 24;
   1513 		if ((i & 3) == 3) {
   1514 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1515 			reta = 0;
   1516 		}
   1517 	}
   1518 
   1519 	/* Perform hash on these packet types */
   1520 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1521 		rss_hash_config = rss_gethashconfig();
   1522 	else {
   1523 		/*
   1524 		 * Disable UDP - IP fragments aren't currently being handled
   1525 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1526 		 * traffic.
   1527 		 */
   1528 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1529 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1530 		                | RSS_HASHTYPE_RSS_IPV6
   1531 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1532 	}
   1533 
   1534 	mrqc = IXGBE_MRQC_RSSEN;
   1535 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1536 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1537 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1538 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1539 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1540 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1541 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1542 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1543 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1544 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1545 		    __func__);
   1546 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1547 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1548 		    __func__);
   1549 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1550 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1551 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1552 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1553 		    __func__);
   1554 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1555 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1556 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1557 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1558 		    __func__);
   1559 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1560 } /* ixv_initialize_rss_mapping */
   1561 
   1562 
   1563 /************************************************************************
   1564  * ixv_initialize_receive_units - Setup receive registers and features.
   1565  ************************************************************************/
   1566 static void
   1567 ixv_initialize_receive_units(struct adapter *adapter)
   1568 {
   1569 	struct	rx_ring	*rxr = adapter->rx_rings;
   1570 	struct ixgbe_hw	*hw = &adapter->hw;
   1571 	struct ifnet	*ifp = adapter->ifp;
   1572 	u32		bufsz, rxcsum, psrtype;
   1573 
   1574 	if (ifp->if_mtu > ETHERMTU)
   1575 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1576 	else
   1577 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
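        	/*
        	 * The SRRCTL.BSIZEPKT field is expressed in 1 KB units;
        	 * shifting by IXGBE_SRRCTL_BSIZEPKT_SHIFT turns the 2048/4096
        	 * byte values above into 2 KB/4 KB receive buffer sizes.
        	 */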
   1578 
   1579 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1580 	        | IXGBE_PSRTYPE_UDPHDR
   1581 	        | IXGBE_PSRTYPE_IPV4HDR
   1582 	        | IXGBE_PSRTYPE_IPV6HDR
   1583 	        | IXGBE_PSRTYPE_L2HDR;
   1584 
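        	/*
        	 * Bits 31:29 of PSRTYPE encode the number of RSS queues per
        	 * pool as a power of two; setting 1 << 29 below advertises
        	 * two queues when more than one is configured.
        	 */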
   1585 	if (adapter->num_queues > 1)
   1586 		psrtype |= 1 << 29;
   1587 
   1588 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1589 
   1590 	/* Tell PF our max_frame size */
   1591 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1592 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1593 	}
   1594 
   1595 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1596 		u64 rdba = rxr->rxdma.dma_paddr;
   1597 		u32 reg, rxdctl;
   1598 
   1599 		/* Disable the queue */
   1600 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1601 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1602 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
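        		/* Wait up to ~10ms for the queue to report disabled. */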
   1603 		for (int j = 0; j < 10; j++) {
   1604 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1605 			    IXGBE_RXDCTL_ENABLE)
   1606 				msec_delay(1);
   1607 			else
   1608 				break;
   1609 		}
   1610 		wmb();
   1611 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1612 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1613 		    (rdba & 0x00000000ffffffffULL));
   1614 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1615 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1616 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1617 
   1618 		/* Reset the ring indices */
   1619 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1620 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1621 
   1622 		/* Set up the SRRCTL register */
   1623 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1624 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1625 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1626 		reg |= bufsz;
   1627 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1628 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1629 
   1630 		/* Capture Rx Tail index */
   1631 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1632 
   1633 		/* Do the queue enabling last */
   1634 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1635 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1636 		for (int k = 0; k < 10; k++) {
   1637 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1638 			    IXGBE_RXDCTL_ENABLE)
   1639 				break;
   1640 			msec_delay(1);
   1641 		}
   1642 		wmb();
   1643 
   1644 		/* Set the Tail Pointer */
   1645 		/*
   1646 		 * In netmap mode, we must preserve the buffers made
   1647 		 * available to userspace before the if_init()
   1648 		 * (this is true by default on the TX side, because
   1649 		 * init makes all buffers available to userspace).
   1650 		 *
   1651 		 * netmap_reset() and the device specific routines
   1652 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1653 		 * buffers at the end of the NIC ring, so here we
   1654 		 * must set the RDT (tail) register to make sure
   1655 		 * they are not overwritten.
   1656 		 *
   1657 		 * In this driver the NIC ring starts at RDH = 0,
   1658 		 * RDT points to the last slot available for reception (?),
   1659 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1660 		 */
   1661 #ifdef DEV_NETMAP
   1662 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1663 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1664 			struct netmap_adapter *na = NA(adapter->ifp);
   1665 			struct netmap_kring *kring = &na->rx_rings[i];
   1666 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1667 
   1668 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1669 		} else
   1670 #endif /* DEV_NETMAP */
   1671 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1672 			    adapter->num_rx_desc - 1);
   1673 	}
   1674 
   1675 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1676 
   1677 	ixv_initialize_rss_mapping(adapter);
   1678 
   1679 	if (adapter->num_queues > 1) {
   1680 		/* RSS and RX IPP Checksum are mutually exclusive */
   1681 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1682 	}
   1683 
   1684 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1685 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1686 
   1687 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1688 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1689 
   1690 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1691 
   1692 	return;
   1693 } /* ixv_initialize_receive_units */
   1694 
   1695 /************************************************************************
   1696  * ixv_setup_vlan_support
   1697  ************************************************************************/
   1698 static void
   1699 ixv_setup_vlan_support(struct adapter *adapter)
   1700 {
   1701 	struct ixgbe_hw *hw = &adapter->hw;
   1702 	u32		ctrl, vid, vfta, retry;
   1703 
   1704 	/*
   1705 	 * We get here through init_locked, meaning
   1706 	 * a soft reset.  That reset has already cleared
   1707 	 * the VFTA and other state, so if no VLANs have
   1708 	 * been registered there is nothing to do.
   1709 	 */
   1710 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1711 		return;
   1712 
   1713 	/* Enable the queues */
   1714 	for (int i = 0; i < adapter->num_queues; i++) {
   1715 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1716 		ctrl |= IXGBE_RXDCTL_VME;
   1717 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1718 		/*
   1719 		 * Let Rx path know that it needs to store VLAN tag
   1720 		 * as part of extra mbuf info.
   1721 		 */
   1722 		adapter->rx_rings[i].vtag_strip = TRUE;
   1723 	}
   1724 
   1725 	/*
   1726 	 * A soft reset zeroes out the VFTA, so
   1727 	 * we need to repopulate it now.
   1728 	 */
   1729 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1730 		if (ixv_shadow_vfta[i] == 0)
   1731 			continue;
   1732 		vfta = ixv_shadow_vfta[i];
   1733 		/*
   1734 		 * Reconstruct the VLAN IDs from the
   1735 		 * bits set in each 32-bit entry of
   1736 		 * the shadow array.
   1737 		 */
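        		/*
        		 * For example (illustrative), bit 5 set in
        		 * ixv_shadow_vfta[2] corresponds to VLAN ID
        		 * (2 * 32) + 5 = 69.
        		 */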
   1738 		for (int j = 0; j < 32; j++) {
   1739 			retry = 0;
   1740 			if ((vfta & (1 << j)) == 0)
   1741 				continue;
   1742 			vid = (i * 32) + j;
   1743 			/* Call the shared code mailbox routine */
   1744 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1745 				if (++retry > 5)
   1746 					break;
   1747 			}
   1748 		}
   1749 	}
   1750 } /* ixv_setup_vlan_support */
   1751 
   1752 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1753 /************************************************************************
   1754  * ixv_register_vlan
   1755  *
   1756  *   Run via a vlan config EVENT; it lets us use the
   1757  *   HW filter table since we can get the VLAN ID.  This
   1758  *   only creates the entry in the soft version of the
   1759  *   VFTA; init will repopulate the real table.
   1760  ************************************************************************/
   1761 static void
   1762 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1763 {
   1764 	struct adapter	*adapter = ifp->if_softc;
   1765 	u16		index, bit;
   1766 
   1767 	if (ifp->if_softc != arg) /* Not our event */
   1768 		return;
   1769 
   1770 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1771 		return;
   1772 
   1773 	IXGBE_CORE_LOCK(adapter);
   1774 	index = (vtag >> 5) & 0x7F;
   1775 	bit = vtag & 0x1F;
   1776 	ixv_shadow_vfta[index] |= (1 << bit);
   1777 	/* Re-init to load the changes */
   1778 	ixv_init_locked(adapter);
   1779 	IXGBE_CORE_UNLOCK(adapter);
   1780 } /* ixv_register_vlan */
   1781 
   1782 /************************************************************************
   1783  * ixv_unregister_vlan
   1784  *
   1785  *   Run via a vlan unconfig EVENT; remove our entry
   1786  *   from the soft VFTA.
   1787  ************************************************************************/
   1788 static void
   1789 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1790 {
   1791 	struct adapter	*adapter = ifp->if_softc;
   1792 	u16		index, bit;
   1793 
   1794 	if (ifp->if_softc != arg)
   1795 		return;
   1796 
   1797 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1798 		return;
   1799 
   1800 	IXGBE_CORE_LOCK(adapter);
   1801 	index = (vtag >> 5) & 0x7F;
   1802 	bit = vtag & 0x1F;
   1803 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1804 	/* Re-init to load the changes */
   1805 	ixv_init_locked(adapter);
   1806 	IXGBE_CORE_UNLOCK(adapter);
   1807 } /* ixv_unregister_vlan */
   1808 #endif
   1809 
   1810 /************************************************************************
   1811  * ixv_enable_intr
   1812  ************************************************************************/
   1813 static void
   1814 ixv_enable_intr(struct adapter *adapter)
   1815 {
   1816 	struct ixgbe_hw *hw = &adapter->hw;
   1817 	struct ix_queue *que = adapter->queues;
   1818 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1819 
   1821 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1822 
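        	/*
        	 * VTEIAC controls which interrupt causes auto-clear when their
        	 * vector fires; enable it for the queue vectors only and leave
        	 * the mailbox (OTHER/LSC) causes to be cleared by software.
        	 */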
   1823 	mask = IXGBE_EIMS_ENABLE_MASK;
   1824 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1825 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1826 
   1827 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1828 		ixv_enable_queue(adapter, que->msix);
   1829 
   1830 	IXGBE_WRITE_FLUSH(hw);
   1831 
   1832 	return;
   1833 } /* ixv_enable_intr */
   1834 
   1835 /************************************************************************
   1836  * ixv_disable_intr
   1837  ************************************************************************/
   1838 static void
   1839 ixv_disable_intr(struct adapter *adapter)
   1840 {
   1841 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1842 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1843 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1844 
   1845 	return;
   1846 } /* ixv_disable_intr */
   1847 
   1848 /************************************************************************
   1849  * ixv_set_ivar
   1850  *
   1851  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1852  *    - entry is the register array entry
   1853  *    - vector is the MSI-X vector for this queue
   1854  *    - type is RX/TX/MISC
   1855  ************************************************************************/
   1856 static void
   1857 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1858 {
   1859 	struct ixgbe_hw *hw = &adapter->hw;
   1860 	u32             ivar, index;
   1861 
   1862 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1863 
   1864 	if (type == -1) { /* MISC IVAR */
   1865 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1866 		ivar &= ~0xFF;
   1867 		ivar |= vector;
   1868 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1869 	} else {          /* RX/TX IVARS */
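        		/*
        		 * Each VTIVAR register carries the vectors for two
        		 * queues: an even entry uses bits 7:0 (RX) and 15:8
        		 * (TX), an odd entry uses bits 23:16 (RX) and 31:24
        		 * (TX), hence index = 16 * (entry & 1) + 8 * type.
        		 */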
   1870 		index = (16 * (entry & 1)) + (8 * type);
   1871 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1872 		ivar &= ~(0xFF << index);
   1873 		ivar |= (vector << index);
   1874 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1875 	}
   1876 } /* ixv_set_ivar */
   1877 
   1878 /************************************************************************
   1879  * ixv_configure_ivars
   1880  ************************************************************************/
   1881 static void
   1882 ixv_configure_ivars(struct adapter *adapter)
   1883 {
   1884 	struct ix_queue *que = adapter->queues;
   1885 
   1886 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1887 		/* First the RX queue entry */
   1888 		ixv_set_ivar(adapter, i, que->msix, 0);
   1889 		/* ... and the TX */
   1890 		ixv_set_ivar(adapter, i, que->msix, 1);
   1891 		/* Set an initial value in EITR */
   1892 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1893 		    IXGBE_EITR_DEFAULT);
   1894 	}
   1895 
   1896 	/* For the mailbox interrupt */
   1897 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1898 } /* ixv_configure_ivars */
   1899 
   1900 
   1901 /************************************************************************
   1902  * ixv_save_stats
   1903  *
   1904  *   The VF stats registers never have a truly virgin
   1905  *   starting point, so this routine creates an
   1906  *   artificial one, effectively marking ground zero
   1907  *   at attach time.
   1908  ************************************************************************/
   1909 static void
   1910 ixv_save_stats(struct adapter *adapter)
   1911 {
   1912 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1913 
   1914 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1915 		stats->saved_reset_vfgprc +=
   1916 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1917 		stats->saved_reset_vfgptc +=
   1918 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1919 		stats->saved_reset_vfgorc +=
   1920 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1921 		stats->saved_reset_vfgotc +=
   1922 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1923 		stats->saved_reset_vfmprc +=
   1924 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1925 	}
   1926 } /* ixv_save_stats */
   1927 
   1928 /************************************************************************
   1929  * ixv_init_stats
   1930  ************************************************************************/
   1931 static void
   1932 ixv_init_stats(struct adapter *adapter)
   1933 {
   1934 	struct ixgbe_hw *hw = &adapter->hw;
   1935 
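        	/*
        	 * The octet counters (VFGORC/VFGOTC) are 36 bits wide, split
        	 * across _LSB/_MSB registers; merge them into a single value.
        	 */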
   1936 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1937 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1938 	adapter->stats.vf.last_vfgorc |=
   1939 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1940 
   1941 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1942 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1943 	adapter->stats.vf.last_vfgotc |=
   1944 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1945 
   1946 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1947 
   1948 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1949 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1950 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1951 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1952 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1953 } /* ixv_init_stats */
   1954 
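        /*
         * The VF hardware counters are narrow (32-bit packet counts, 36-bit
         * octet counts) and roll over.  The macros below detect a rollover by
         * comparing the current reading with the previous one and carry the
         * wrap into the upper bits of the 64-bit event counter.
         */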
   1955 #define UPDATE_STAT_32(reg, last, count)		\
   1956 {                                                       \
   1957 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   1958 	if (current < (last))				\
   1959 		count.ev_count += 0x100000000LL;	\
   1960 	(last) = current;				\
   1961 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   1962 	count.ev_count |= current;			\
   1963 }
   1964 
   1965 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   1966 {                                                       \
   1967 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   1968 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   1969 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   1970 	if (current < (last))				\
   1971 		count.ev_count += 0x1000000000LL;	\
   1972 	(last) = current;				\
   1973 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   1974 	count.ev_count |= current;			\
   1975 }
   1976 
   1977 /************************************************************************
   1978  * ixv_update_stats - Update the board statistics counters.
   1979  ************************************************************************/
   1980 void
   1981 ixv_update_stats(struct adapter *adapter)
   1982 {
   1983 	struct ixgbe_hw *hw = &adapter->hw;
   1984 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1985 
   1986 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   1987 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   1988 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   1989 	    stats->vfgorc);
   1990 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   1991 	    stats->vfgotc);
   1992 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   1993 
   1994 	/* Fill out the OS statistics structure */
   1995 	/*
   1996 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   1997 	 * adapter->stats counters. It's required to make ifconfig -z
   1998 	 * (SOICZIFDATA) work.
   1999 	 * (SIOCZIFDATA) work.
   2000 } /* ixv_update_stats */
   2001 
   2002 const struct sysctlnode *
   2003 ixv_sysctl_instance(struct adapter *adapter)
   2004 {
   2005 	const char *dvname;
   2006 	struct sysctllog **log;
   2007 	int rc;
   2008 	const struct sysctlnode *rnode;
   2009 
   2010 	log = &adapter->sysctllog;
   2011 	dvname = device_xname(adapter->dev);
   2012 
   2013 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2014 	    0, CTLTYPE_NODE, dvname,
   2015 	    SYSCTL_DESCR("ixv information and settings"),
   2016 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2017 		goto err;
   2018 
   2019 	return rnode;
   2020 err:
   2021 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2022 	return NULL;
   2023 }
   2024 
   2025 static void
   2026 ixv_add_device_sysctls(struct adapter *adapter)
   2027 {
   2028 	struct sysctllog **log;
   2029 	const struct sysctlnode *rnode, *cnode;
   2030 	device_t dev;
   2031 
   2032 	dev = adapter->dev;
   2033 	log = &adapter->sysctllog;
   2034 
   2035 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2036 		aprint_error_dev(dev, "could not create sysctl root\n");
   2037 		return;
   2038 	}
   2039 
   2040 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2041 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2042 	    "debug", SYSCTL_DESCR("Debug Info"),
   2043 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2044 		aprint_error_dev(dev, "could not create sysctl\n");
   2045 
   2046 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2047 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2048 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2049 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2050 		aprint_error_dev(dev, "could not create sysctl\n");
   2051 }
   2052 
   2053 /************************************************************************
   2054  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2055  ************************************************************************/
   2056 static void
   2057 ixv_add_stats_sysctls(struct adapter *adapter)
   2058 {
   2059 	device_t                dev = adapter->dev;
   2060 	struct tx_ring          *txr = adapter->tx_rings;
   2061 	struct rx_ring          *rxr = adapter->rx_rings;
   2062 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2063 	const struct sysctlnode *rnode;
   2064 	struct sysctllog **log = &adapter->sysctllog;
   2065 	const char *xname = device_xname(dev);
   2066 
   2067 	/* Driver Statistics */
   2068 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2069 	    NULL, xname, "Handled queue in softint");
   2070 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2071 	    NULL, xname, "Requeued in softint");
   2072 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2073 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2074 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2075 	    NULL, xname, "m_defrag() failed");
   2076 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2077 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2078 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2079 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2080 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2081 	    NULL, xname, "Driver tx dma hard fail other");
   2082 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2083 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2084 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2085 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2086 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2087 	    NULL, xname, "Watchdog timeouts");
   2088 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2089 	    NULL, xname, "TSO errors");
   2090 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2091 	    NULL, xname, "Link MSI-X IRQ Handled");
   2092 
   2093 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2094 		snprintf(adapter->queues[i].evnamebuf,
   2095 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2096 		    xname, i);
   2097 		snprintf(adapter->queues[i].namebuf,
   2098 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2099 
   2100 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2101 			aprint_error_dev(dev, "could not create sysctl root\n");
   2102 			break;
   2103 		}
   2104 
   2105 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2106 		    0, CTLTYPE_NODE,
   2107 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2108 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2109 			break;
   2110 
   2111 #if 0 /* not yet */
   2112 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2113 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2114 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2115 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2116 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2117 			break;
   2118 
   2119 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2120 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2121 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2122 			NULL, 0, &(adapter->queues[i].irqs),
   2123 		    0, CTL_CREATE, CTL_EOL) != 0)
   2124 			break;
   2125 
   2126 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2127 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2128 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2129 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2130 		    0, CTL_CREATE, CTL_EOL) != 0)
   2131 			break;
   2132 
   2133 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2134 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2135 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2136 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2137 		    0, CTL_CREATE, CTL_EOL) != 0)
   2138 			break;
   2139 #endif
   2140 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2141 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2142 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2143 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2144 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2145 		    NULL, adapter->queues[i].evnamebuf,
   2146 		    "Queue No Descriptor Available");
   2147 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2148 		    NULL, adapter->queues[i].evnamebuf,
   2149 		    "Queue Packets Transmitted");
   2150 #ifndef IXGBE_LEGACY_TX
   2151 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2152 		    NULL, adapter->queues[i].evnamebuf,
   2153 		    "Packets dropped in pcq");
   2154 #endif
   2155 
   2156 #ifdef LRO
   2157 		struct lro_ctrl *lro = &rxr->lro;
   2158 #endif /* LRO */
   2159 
   2160 #if 0 /* not yet */
   2161 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2162 		    CTLFLAG_READONLY,
   2163 		    CTLTYPE_INT,
   2164 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2165 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2166 		    CTL_CREATE, CTL_EOL) != 0)
   2167 			break;
   2168 
   2169 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2170 		    CTLFLAG_READONLY,
   2171 		    CTLTYPE_INT,
   2172 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2173 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2174 		    CTL_CREATE, CTL_EOL) != 0)
   2175 			break;
   2176 #endif
   2177 
   2178 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2179 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2180 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2181 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2182 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2183 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2184 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2185 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2186 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2187 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2188 #ifdef LRO
   2189 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2190 				CTLFLAG_RD, &lro->lro_queued, 0,
   2191 				"LRO Queued");
   2192 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2193 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2194 				"LRO Flushed");
   2195 #endif /* LRO */
   2196 	}
   2197 
   2198 	/* MAC stats get their own sub node */
   2199 
   2200 	snprintf(stats->namebuf,
   2201 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2202 
   2203 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2204 	    stats->namebuf, "rx csum offload - IP");
   2205 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2206 	    stats->namebuf, "rx csum offload - L4");
   2207 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2208 	    stats->namebuf, "rx csum offload - IP bad");
   2209 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2210 	    stats->namebuf, "rx csum offload - L4 bad");
   2211 
   2212 	/* Packet Reception Stats */
   2213 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2214 	    xname, "Good Packets Received");
   2215 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2216 	    xname, "Good Octets Received");
   2217 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2218 	    xname, "Multicast Packets Received");
   2219 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2220 	    xname, "Good Packets Transmitted");
   2221 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2222 	    xname, "Good Octets Transmitted");
   2223 } /* ixv_add_stats_sysctls */
   2224 
   2225 /************************************************************************
   2226  * ixv_set_sysctl_value
   2227  ************************************************************************/
   2228 static void
   2229 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2230 	const char *description, int *limit, int value)
   2231 {
   2232 	device_t dev = adapter->dev;
   2233 	struct sysctllog **log;
   2234 	const struct sysctlnode *rnode, *cnode;
   2235 
   2236 	log = &adapter->sysctllog;
   2237 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2238 		aprint_error_dev(dev, "could not create sysctl root\n");
   2239 		return;
   2240 	}
   2241 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2242 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2243 	    name, SYSCTL_DESCR(description),
   2244 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2245 		aprint_error_dev(dev, "could not create sysctl\n");
   2246 	*limit = value;
   2247 } /* ixv_set_sysctl_value */
   2248 
   2249 /************************************************************************
   2250  * ixv_print_debug_info
   2251  *
   2252  *   Called via the 'debug' sysctl (ixv_sysctl_debug) when it is set to 1.
   2253  *   Provides a way to take a look at important statistics
   2254  *   maintained by the driver and hardware.
   2255  ************************************************************************/
   2256 static void
   2257 ixv_print_debug_info(struct adapter *adapter)
   2258 {
   2259 	device_t        dev = adapter->dev;
   2260 	struct ixgbe_hw *hw = &adapter->hw;
   2261 	struct ix_queue *que = adapter->queues;
   2262 	struct rx_ring  *rxr;
   2263 	struct tx_ring  *txr;
   2264 #ifdef LRO
   2265 	struct lro_ctrl *lro;
   2266 #endif /* LRO */
   2267 
   2268 	device_printf(dev, "Error Byte Count = %u\n",
   2269 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2270 
   2271 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2272 		txr = que->txr;
   2273 		rxr = que->rxr;
   2274 #ifdef LRO
   2275 		lro = &rxr->lro;
   2276 #endif /* LRO */
   2277 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2278 		    que->msix, (long)que->irqs.ev_count);
   2279 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2280 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2281 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2282 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2283 #ifdef LRO
   2284 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2285 		    rxr->me, (long long)lro->lro_queued);
   2286 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2287 		    rxr->me, (long long)lro->lro_flushed);
   2288 #endif /* LRO */
   2289 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2290 		    txr->me, (long)txr->total_packets.ev_count);
   2291 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2292 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2293 	}
   2294 
   2295 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2296 	    (long)adapter->link_irq.ev_count);
   2297 } /* ixv_print_debug_info */
   2298 
   2299 /************************************************************************
   2300  * ixv_sysctl_debug
   2301  ************************************************************************/
   2302 static int
   2303 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2304 {
   2305 	struct sysctlnode node;
   2306 	struct adapter *adapter;
   2307 	int            error, result;
   2308 
   2309 	node = *rnode;
   2310 	node.sysctl_data = &result;
   2311 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2312 
   2313 	if (error || newp == NULL)
   2314 		return error;
   2315 
   2316 	if (result == 1) {
   2317 		adapter = (struct adapter *)rnode->sysctl_data;
   2318 		ixv_print_debug_info(adapter);
   2319 	}
   2320 
   2321 	return 0;
   2322 } /* ixv_sysctl_debug */
   2323 
   2324 /************************************************************************
   2325  * ixv_init_device_features
   2326  ************************************************************************/
   2327 static void
   2328 ixv_init_device_features(struct adapter *adapter)
   2329 {
   2330 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2331 	                  | IXGBE_FEATURE_VF
   2332 	                  | IXGBE_FEATURE_RSS
   2333 	                  | IXGBE_FEATURE_LEGACY_TX;
   2334 
   2335 	/* VFs have relatively few feature flags at the moment. */
   2336 	switch (adapter->hw.mac.type) {
   2337 	case ixgbe_mac_82599_vf:
   2338 		break;
   2339 	case ixgbe_mac_X540_vf:
   2340 		break;
   2341 	case ixgbe_mac_X550_vf:
   2342 	case ixgbe_mac_X550EM_x_vf:
   2343 	case ixgbe_mac_X550EM_a_vf:
   2344 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2345 		break;
   2346 	default:
   2347 		break;
   2348 	}
   2349 
   2350 	/* Enabled by default... */
   2351 	/* Is a virtual function (VF) */
   2352 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2353 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2354 	/* Netmap */
   2355 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2356 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2357 	/* Receive-Side Scaling (RSS) */
   2358 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2359 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2360 	/* Needs advanced context descriptor regardless of offloads req'd */
   2361 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2362 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2363 
   2364 	/* Enabled via sysctl... */
   2365 	/* Legacy (single queue) transmit */
   2366 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2367 	    ixv_enable_legacy_tx)
   2368 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2369 } /* ixv_init_device_features */
   2370 
   2371 /************************************************************************
   2372  * ixv_shutdown - Shutdown entry point
   2373  ************************************************************************/
   2374 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2375 static int
   2376 ixv_shutdown(device_t dev)
   2377 {
   2378 	struct adapter *adapter = device_private(dev);
   2379 	IXGBE_CORE_LOCK(adapter);
   2380 	ixv_stop(adapter);
   2381 	IXGBE_CORE_UNLOCK(adapter);
   2382 
   2383 	return (0);
   2384 } /* ixv_shutdown */
   2385 #endif
   2386 
   2387 static int
   2388 ixv_ifflags_cb(struct ethercom *ec)
   2389 {
   2390 	struct ifnet *ifp = &ec->ec_if;
   2391 	struct adapter *adapter = ifp->if_softc;
   2392 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2393 
   2394 	IXGBE_CORE_LOCK(adapter);
   2395 
   2396 	if (change != 0)
   2397 		adapter->if_flags = ifp->if_flags;
   2398 
   2399 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2400 		rc = ENETRESET;
   2401 
   2402 	IXGBE_CORE_UNLOCK(adapter);
   2403 
   2404 	return rc;
   2405 }
   2406 
   2407 
   2408 /************************************************************************
   2409  * ixv_ioctl - Ioctl entry point
   2410  *
   2411  *   Called when the user wants to configure the interface.
   2412  *
   2413  *   return 0 on success, positive on failure
   2414  ************************************************************************/
   2415 static int
   2416 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2417 {
   2418 	struct adapter	*adapter = ifp->if_softc;
   2419 	struct ifcapreq *ifcr = data;
   2420 	struct ifreq	*ifr = data;
   2421 	int             error = 0;
   2422 	int l4csum_en;
   2423 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   2424 	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2425 
   2426 	switch (command) {
   2427 	case SIOCSIFFLAGS:
   2428 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2429 		break;
   2430 	case SIOCADDMULTI:
   2431 	case SIOCDELMULTI:
   2432 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2433 		break;
   2434 	case SIOCSIFMEDIA:
   2435 	case SIOCGIFMEDIA:
   2436 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2437 		break;
   2438 	case SIOCSIFCAP:
   2439 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2440 		break;
   2441 	case SIOCSIFMTU:
   2442 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2443 		break;
   2444 	default:
   2445 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2446 		break;
   2447 	}
   2448 
   2449 	switch (command) {
   2450 	case SIOCSIFMEDIA:
   2451 	case SIOCGIFMEDIA:
   2452 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2453 	case SIOCSIFCAP:
   2454 		/* Layer-4 Rx checksum offload has to be turned on and
   2455 		 * off as a unit.
   2456 		 */
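        		/*
        		 * That is, the request must enable either all four
        		 * TCP/UDP v4/v6 Rx checksum capabilities or none of
        		 * them; anything in between is rejected with EINVAL.
        		 */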
   2457 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2458 		if (l4csum_en != l4csum && l4csum_en != 0)
   2459 			return EINVAL;
   2460 		/*FALLTHROUGH*/
   2461 	case SIOCADDMULTI:
   2462 	case SIOCDELMULTI:
   2463 	case SIOCSIFFLAGS:
   2464 	case SIOCSIFMTU:
   2465 	default:
   2466 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2467 			return error;
   2468 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2469 			;
   2470 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2471 			IXGBE_CORE_LOCK(adapter);
   2472 			ixv_init_locked(adapter);
   2473 			IXGBE_CORE_UNLOCK(adapter);
   2474 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2475 			/*
   2476 			 * Multicast list has changed; set the hardware filter
   2477 			 * accordingly.
   2478 			 */
   2479 			IXGBE_CORE_LOCK(adapter);
   2480 			ixv_disable_intr(adapter);
   2481 			ixv_set_multi(adapter);
   2482 			ixv_enable_intr(adapter);
   2483 			IXGBE_CORE_UNLOCK(adapter);
   2484 		}
   2485 		return 0;
   2486 	}
   2487 } /* ixv_ioctl */
   2488 
   2489 /************************************************************************
   2490  * ixv_init
   2491  ************************************************************************/
   2492 static int
   2493 ixv_init(struct ifnet *ifp)
   2494 {
   2495 	struct adapter *adapter = ifp->if_softc;
   2496 
   2497 	IXGBE_CORE_LOCK(adapter);
   2498 	ixv_init_locked(adapter);
   2499 	IXGBE_CORE_UNLOCK(adapter);
   2500 
   2501 	return 0;
   2502 } /* ixv_init */
   2503 
   2504 
   2505 /************************************************************************
   2506  * ixv_handle_que
   2507  ************************************************************************/
   2508 static void
   2509 ixv_handle_que(void *context)
   2510 {
   2511 	struct ix_queue *que = context;
   2512 	struct adapter  *adapter = que->adapter;
   2513 	struct tx_ring	*txr = que->txr;
   2514 	struct ifnet    *ifp = adapter->ifp;
   2515 	bool		more;
   2516 
   2517 	adapter->handleq.ev_count++;
   2518 
   2519 	if (ifp->if_flags & IFF_RUNNING) {
   2520 		more = ixgbe_rxeof(que);
   2521 		IXGBE_TX_LOCK(txr);
   2522 		ixgbe_txeof(txr);
   2523 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2524 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2525 				ixgbe_mq_start_locked(ifp, txr);
   2526 		/* Only for queue 0 */
   2527 		if ((&adapter->queues[0] == que)
   2528 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2529 			ixgbe_legacy_start_locked(ifp, txr);
   2530 		IXGBE_TX_UNLOCK(txr);
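        		/*
        		 * If ixgbe_rxeof() left receive work pending,
        		 * reschedule the softint and return without unmasking
        		 * the queue vector; it is re-enabled only once the
        		 * backlog has been drained.
        		 */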
   2531 		if (more) {
   2532 			adapter->req.ev_count++;
   2533 			softint_schedule(que->que_si);
   2534 			return;
   2535 		}
   2536 	}
   2537 
   2538 	/* Re-enable this interrupt */
   2539 	ixv_enable_queue(adapter, que->msix);
   2540 
   2541 	return;
   2542 } /* ixv_handle_que */
   2543 
   2544 /************************************************************************
   2545  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2546  ************************************************************************/
   2547 static int
   2548 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2549 {
   2550 	device_t	dev = adapter->dev;
   2551 	struct ix_queue *que = adapter->queues;
   2552 	struct		tx_ring *txr = adapter->tx_rings;
   2553 	int 		error, msix_ctrl, rid, vector = 0;
   2554 	pci_chipset_tag_t pc;
   2555 	pcitag_t	tag;
   2556 	char		intrbuf[PCI_INTRSTR_LEN];
   2557 	char		intr_xname[32];
   2558 	const char	*intrstr = NULL;
   2559 	kcpuset_t	*affinity;
   2560 	int		cpu_id = 0;
   2561 
   2562 	pc = adapter->osdep.pc;
   2563 	tag = adapter->osdep.tag;
   2564 
   2565 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2566 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2567 	    adapter->osdep.nintrs) != 0) {
   2568 		aprint_error_dev(dev,
   2569 		    "failed to allocate MSI-X interrupt\n");
   2570 		return (ENXIO);
   2571 	}
   2572 
   2573 	kcpuset_create(&affinity, false);
   2574 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2575 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2576 		    device_xname(dev), i);
   2577 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2578 		    sizeof(intrbuf));
   2579 #ifdef IXGBE_MPSAFE
   2580 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2581 		    true);
   2582 #endif
   2583 		/* Set the handler function */
   2584 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2585 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2586 		    intr_xname);
   2587 		if (que->res == NULL) {
   2588 			pci_intr_release(pc, adapter->osdep.intrs,
   2589 			    adapter->osdep.nintrs);
   2590 			aprint_error_dev(dev,
   2591 			    "Failed to register QUE handler\n");
   2592 			kcpuset_destroy(affinity);
   2593 			return (ENXIO);
   2594 		}
   2595 		que->msix = vector;
   2596 		adapter->active_queues |= (u64)1 << que->msix;
   2597 
   2598 		cpu_id = i;
   2599 		/* Round-robin affinity */
   2600 		kcpuset_zero(affinity);
   2601 		kcpuset_set(affinity, cpu_id % ncpu);
   2602 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2603 		    NULL);
   2604 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2605 		    intrstr);
   2606 		if (error == 0)
   2607 			aprint_normal(", bound queue %d to cpu %d\n",
   2608 			    i, cpu_id % ncpu);
   2609 		else
   2610 			aprint_normal("\n");
   2611 
   2612 #ifndef IXGBE_LEGACY_TX
   2613 		txr->txr_si
   2614 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2615 			ixgbe_deferred_mq_start, txr);
   2616 #endif
   2617 		que->que_si
   2618 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2619 			ixv_handle_que, que);
   2620 		if (que->que_si == NULL) {
   2621 			aprint_error_dev(dev,
   2622 			    "could not establish software interrupt\n");
   2623 		}
   2624 	}
   2625 
   2626 	/* and Mailbox */
   2627 	cpu_id++;
   2628 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2629 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2630 	    sizeof(intrbuf));
   2631 #ifdef IXGBE_MPSAFE
   2632 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2633 	    true);
   2634 #endif
   2635 	/* Set the mbx handler function */
   2636 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2637 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2638 	    intr_xname);
   2639 	if (adapter->osdep.ihs[vector] == NULL) {
   2640 		adapter->res = NULL;
   2641 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2642 		kcpuset_destroy(affinity);
   2643 		return (ENXIO);
   2644 	}
   2645 	/* Round-robin affinity */
   2646 	kcpuset_zero(affinity);
   2647 	kcpuset_set(affinity, cpu_id % ncpu);
   2648 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   2649 
   2650 	aprint_normal_dev(dev,
   2651 	    "for link, interrupting at %s", intrstr);
   2652 	if (error == 0)
   2653 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2654 	else
   2655 		aprint_normal("\n");
   2656 
   2657 	adapter->vector = vector;
   2658 	/* Tasklets for Mailbox */
   2659 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2660 	    ixv_handle_link, adapter);
   2661 	/*
   2662 	 * Due to a broken design, QEMU will fail to properly
   2663 	 * enable the guest for MSI-X unless the vectors in
   2664 	 * the table are all set up, so we must rewrite the
   2665 	 * ENABLE in the MSI-X control register again at this
   2666 	 * point to cause it to successfully initialize us.
   2667 	 */
   2668 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2669 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2670 		rid += PCI_MSIX_CTL;
   2671 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2672 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2673 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2674 	}
   2675 
   2676 	kcpuset_destroy(affinity);
   2677 	return (0);
   2678 } /* ixv_allocate_msix */
   2679 
   2680 /************************************************************************
   2681  * ixv_configure_interrupts - Setup MSI-X resources
   2682  *
   2683  *   Note: The VF device MUST use MSI-X, there is no fallback.
   2684  ************************************************************************/
   2685 static int
   2686 ixv_configure_interrupts(struct adapter *adapter)
   2687 {
   2688 	device_t dev = adapter->dev;
   2689 	int want, queues, msgs;
   2690 
   2691 	/* Must have at least 2 MSI-X vectors */
   2692 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2693 	if (msgs < 2) {
   2694 		aprint_error_dev(dev, "MSI-X config error\n");
   2695 		return (ENXIO);
   2696 	}
   2697 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2698 
   2699 	/* Figure out a reasonable auto config value */
   2700 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2701 
   2702 	if (ixv_num_queues != 0)
   2703 		queues = ixv_num_queues;
   2704 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   2705 		queues = IXGBE_VF_MAX_TX_QUEUES;
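        	/*
        	 * At this point "queues" is min(ncpu, msgs - 1), capped at
        	 * IXGBE_VF_MAX_TX_QUEUES unless overridden by the
        	 * ixv_num_queues tunable.
        	 */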
   2706 
   2707 	/*
   2708 	 * Want vectors for the queues,
   2709 	 * plus an additional for mailbox.
   2710 	 */
   2711 	want = queues + 1;
   2712 	if (msgs >= want)
   2713 		msgs = want;
   2714 	else {
   2715 		aprint_error_dev(dev,
   2716 		    "MSI-X configuration problem: "
   2717 		    "%d vectors available but %d wanted!\n",
   2718 		    msgs, want);
   2719 		return -1;
   2720 	}
   2721 
   2722 	adapter->msix_mem = (void *)1; /* XXX */
   2723 	aprint_normal_dev(dev,
   2724 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2725 	adapter->num_queues = queues;
   2726 
   2727 	return (0);
   2728 } /* ixv_configure_interrupts */
   2729 
   2730 
   2731 /************************************************************************
   2732  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2733  *
   2734  *   Done outside of interrupt context since the driver might sleep
   2735  ************************************************************************/
   2736 static void
   2737 ixv_handle_link(void *context)
   2738 {
   2739 	struct adapter *adapter = context;
   2740 
   2741 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2742 	    &adapter->link_up, FALSE);
   2743 	ixv_update_link_status(adapter);
   2744 } /* ixv_handle_link */
   2745 
   2746 /************************************************************************
   2747  * ixv_check_link - Used in the local timer to poll for link changes
   2748  ************************************************************************/
   2749 static void
   2750 ixv_check_link(struct adapter *adapter)
   2751 {
   2752 	adapter->hw.mac.get_link_status = TRUE;
   2753 
   2754 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2755 	    &adapter->link_up, FALSE);
   2756 	ixv_update_link_status(adapter);
   2757 } /* ixv_check_link */
   2758