      1 /*$NetBSD: ixv.c,v 1.61 2017/09/12 05:28:31 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
      55  *   Used by probe to select the devices the driver loads on
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
     148  * Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
    171 #define	TUNABLE_INT(__x, __y)
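         /*
          * Note: TUNABLE_INT() is defined away above, so on NetBSD the
          * hw.ixv.* settings in this file are compile-time defaults rather
          * than boot-time tunables; some of them are re-exported through
          * sysctl at attach time.
          */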
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    173 
     174 /*
     175  * AIM: Adaptive Interrupt Moderation,
     176  * which means that the interrupt rate
     177  * is varied over time based on the
     178  * traffic seen on that interrupt vector.
     179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
     191 /*
     192  * Number of TX descriptors per ring;
     193  * set higher than RX as this seems to be
     194  * the better performing choice.
     195  */
    196 static int ixv_txd = DEFAULT_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = DEFAULT_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
     207 /*
     208  * Shadow VFTA table; this is needed because
     209  * the real filter table gets cleared during
     210  * a soft reset and we need to repopulate it afterwards.
     211  */
    212 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    213 
    214 #ifdef NET_MPSAFE
    215 #define IXGBE_MPSAFE		1
    216 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    217 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    218 #else
    219 #define IXGBE_CALLOUT_FLAGS	0
    220 #define IXGBE_SOFTINFT_FLAGS	0
    221 #endif
    222 
    223 #if 0
    224 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    225 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    226 #endif
    227 
    228 /************************************************************************
    229  * ixv_probe - Device identification routine
    230  *
     231  *   Determines whether the driver should be loaded on
     232  *   this adapter based on its PCI vendor/device ID.
     233  *
     234  *   return 1 on a match, 0 otherwise
    235  ************************************************************************/
    236 static int
    237 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    238 {
    239 #ifdef __HAVE_PCI_MSI_MSIX
    240 	const struct pci_attach_args *pa = aux;
    241 
    242 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    243 #else
    244 	return 0;
    245 #endif
    246 } /* ixv_probe */
    247 
    248 static ixgbe_vendor_info_t *
    249 ixv_lookup(const struct pci_attach_args *pa)
    250 {
    251 	ixgbe_vendor_info_t *ent;
    252 	pcireg_t subid;
    253 
    254 	INIT_DEBUGOUT("ixv_lookup: begin");
    255 
    256 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    257 		return NULL;
    258 
    259 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    260 
    261 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    262 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    263 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    264 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    265 		     (ent->subvendor_id == 0)) &&
    266 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    267 		     (ent->subdevice_id == 0))) {
    268 			return ent;
    269 		}
    270 	}
    271 
    272 	return NULL;
    273 }
    274 
    275 /************************************************************************
    276  * ixv_attach - Device initialization routine
    277  *
    278  *   Called when the driver is being loaded.
    279  *   Identifies the type of hardware, allocates all resources
    280  *   and initializes the hardware.
    281  *
     282  *   On failure, resources are released and the device is left unconfigured.
    283  ************************************************************************/
    284 static void
    285 ixv_attach(device_t parent, device_t dev, void *aux)
    286 {
    287 	struct adapter *adapter;
    288 	struct ixgbe_hw *hw;
    289 	int             error = 0;
    290 	pcireg_t	id, subid;
    291 	ixgbe_vendor_info_t *ent;
    292 	const struct pci_attach_args *pa = aux;
    293 	const char *apivstr;
    294 	INIT_DEBUGOUT("ixv_attach: begin");
    295 
     296 	/*
     297 	 * Make sure BUSMASTER is set; on a VM under
     298 	 * KVM it may not be, and that will break things.
     299 	 */
    300 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    301 
    302 	/* Allocate, clear, and link in our adapter structure */
    303 	adapter = device_private(dev);
    304 	adapter->dev = dev;
    305 	adapter->hw.back = adapter;
    306 	hw = &adapter->hw;
    307 
    308 	adapter->init_locked = ixv_init_locked;
    309 	adapter->stop_locked = ixv_stop;
    310 
    311 	adapter->osdep.pc = pa->pa_pc;
    312 	adapter->osdep.tag = pa->pa_tag;
    313 	if (pci_dma64_available(pa))
    314 		adapter->osdep.dmat = pa->pa_dmat64;
    315 	else
    316 		adapter->osdep.dmat = pa->pa_dmat;
    317 	adapter->osdep.attached = false;
    318 
    319 	ent = ixv_lookup(pa);
    320 
    321 	KASSERT(ent != NULL);
    322 
    323 	aprint_normal(": %s, Version - %s\n",
    324 	    ixv_strings[ent->index], ixv_driver_version);
    325 
    326 	/* Core Lock Init*/
    327 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    328 
    329 	/* Do base PCI setup - map BAR0 */
    330 	if (ixv_allocate_pci_resources(adapter, pa)) {
    331 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    332 		error = ENXIO;
    333 		goto err_out;
    334 	}
    335 
    336 	/* SYSCTL APIs */
    337 	ixv_add_device_sysctls(adapter);
    338 
    339 	/* Set up the timer callout */
    340 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    341 
    342 	/* Save off the information about this board */
    343 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    344 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    345 	hw->vendor_id = PCI_VENDOR(id);
    346 	hw->device_id = PCI_PRODUCT(id);
    347 	hw->revision_id =
    348 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    349 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    350 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    351 
    352 	/* A subset of set_mac_type */
    353 	switch (hw->device_id) {
    354 	case IXGBE_DEV_ID_82599_VF:
    355 		hw->mac.type = ixgbe_mac_82599_vf;
    356 		break;
    357 	case IXGBE_DEV_ID_X540_VF:
    358 		hw->mac.type = ixgbe_mac_X540_vf;
    359 		break;
    360 	case IXGBE_DEV_ID_X550_VF:
    361 		hw->mac.type = ixgbe_mac_X550_vf;
    362 		break;
    363 	case IXGBE_DEV_ID_X550EM_X_VF:
    364 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    365 		break;
    366 	case IXGBE_DEV_ID_X550EM_A_VF:
    367 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    368 		break;
    369 	default:
    370 		/* Shouldn't get here since probe succeeded */
    371 		aprint_error_dev(dev, "Unknown device ID!\n");
    372 		error = ENXIO;
    373 		goto err_out;
    374 		break;
    375 	}
    376 
    377 	ixv_init_device_features(adapter);
    378 
    379 	/* Initialize the shared code */
    380 	error = ixgbe_init_ops_vf(hw);
    381 	if (error) {
    382 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    383 		error = EIO;
    384 		goto err_out;
    385 	}
    386 
    387 	/* Setup the mailbox */
    388 	ixgbe_init_mbx_params_vf(hw);
    389 
    390 	/* Set the right number of segments */
    391 	adapter->num_segs = IXGBE_82599_SCATTER;
    392 
    393 	/* Reset mbox api to 1.0 */
    394 	error = hw->mac.ops.reset_hw(hw);
    395 	if (error == IXGBE_ERR_RESET_FAILED)
    396 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    397 	else if (error)
    398 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    399 		    error);
    400 	if (error) {
    401 		error = EIO;
    402 		goto err_out;
    403 	}
    404 
    405 	error = hw->mac.ops.init_hw(hw);
    406 	if (error) {
    407 		aprint_error_dev(dev, "...init_hw() failed!\n");
    408 		error = EIO;
    409 		goto err_out;
    410 	}
    411 
    412 	/* Negotiate mailbox API version */
    413 	error = ixv_negotiate_api(adapter);
    414 	if (error)
    415 		aprint_normal_dev(dev,
    416 		    "MBX API negotiation failed during attach!\n");
    417 	switch (hw->api_version) {
    418 	case ixgbe_mbox_api_10:
    419 		apivstr = "1.0";
    420 		break;
    421 	case ixgbe_mbox_api_20:
    422 		apivstr = "2.0";
    423 		break;
    424 	case ixgbe_mbox_api_11:
    425 		apivstr = "1.1";
    426 		break;
    427 	case ixgbe_mbox_api_12:
    428 		apivstr = "1.2";
    429 		break;
    430 	case ixgbe_mbox_api_13:
    431 		apivstr = "1.3";
    432 		break;
    433 	default:
    434 		apivstr = "unknown";
    435 		break;
    436 	}
    437 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    438 
     439 	/* If no MAC address was assigned, make a random, locally administered one */
    440 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    441 		u8 addr[ETHER_ADDR_LEN];
    442 		uint64_t rndval = cprng_strong64();
    443 
    444 		memcpy(addr, &rndval, sizeof(addr));
     445 		addr[0] &= 0xFE;	/* clear the multicast (I/G) bit */
     446 		addr[0] |= 0x02;	/* set the locally administered (U/L) bit */
    447 		bcopy(addr, hw->mac.addr, sizeof(addr));
    448 	}
    449 
    450 	/* Register for VLAN events */
    451 #if 0 /* XXX delete after write? */
    452 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    453 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    454 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    455 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 #endif
    457 
    458 	/* Sysctls for limiting the amount of work done in the taskqueues */
    459 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    460 	    "max number of rx packets to process",
    461 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    462 
    463 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    464 	    "max number of tx packets to process",
    465 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    466 
    467 	/* Do descriptor calc and sanity checks */
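         	/* (each ring's byte size must be a multiple of DBA_ALIGN and its length within MIN/MAX) */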
    468 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    469 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    470 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    471 		adapter->num_tx_desc = DEFAULT_TXD;
    472 	} else
    473 		adapter->num_tx_desc = ixv_txd;
    474 
    475 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    476 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    477 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    478 		adapter->num_rx_desc = DEFAULT_RXD;
    479 	} else
    480 		adapter->num_rx_desc = ixv_rxd;
    481 
    482 	/* Setup MSI-X */
    483 	error = ixv_configure_interrupts(adapter);
    484 	if (error)
    485 		goto err_out;
    486 
    487 	/* Allocate our TX/RX Queues */
    488 	if (ixgbe_allocate_queues(adapter)) {
    489 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    490 		error = ENOMEM;
    491 		goto err_out;
    492 	}
    493 
     494 	/* hw.ixv defaults init */
    495 	adapter->enable_aim = ixv_enable_aim;
    496 
    497 	/* Setup OS specific network interface */
    498 	ixv_setup_interface(dev, adapter);
    499 
    500 	error = ixv_allocate_msix(adapter, pa);
    501 	if (error) {
    502 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    503 		goto err_late;
    504 	}
    505 
    506 	/* Do the stats setup */
    507 	ixv_save_stats(adapter);
    508 	ixv_init_stats(adapter);
    509 	ixv_add_stats_sysctls(adapter);
    510 
    511 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    512 		ixgbe_netmap_attach(adapter);
    513 
    514 	INIT_DEBUGOUT("ixv_attach: end");
    515 	adapter->osdep.attached = true;
    516 
    517 	return;
    518 
    519 err_late:
    520 	ixgbe_free_transmit_structures(adapter);
    521 	ixgbe_free_receive_structures(adapter);
    522 	free(adapter->queues, M_DEVBUF);
    523 err_out:
    524 	ixv_free_pci_resources(adapter);
    525 	IXGBE_CORE_LOCK_DESTROY(adapter);
    526 
    527 	return;
    528 } /* ixv_attach */
    529 
    530 /************************************************************************
    531  * ixv_detach - Device removal routine
    532  *
    533  *   Called when the driver is being removed.
    534  *   Stops the adapter and deallocates all the resources
    535  *   that were allocated for driver operation.
    536  *
    537  *   return 0 on success, positive on failure
    538  ************************************************************************/
    539 static int
    540 ixv_detach(device_t dev, int flags)
    541 {
    542 	struct adapter  *adapter = device_private(dev);
    543 	struct ix_queue *que = adapter->queues;
    544 	struct tx_ring *txr = adapter->tx_rings;
    545 	struct rx_ring *rxr = adapter->rx_rings;
    546 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    547 
    548 	INIT_DEBUGOUT("ixv_detach: begin");
    549 	if (adapter->osdep.attached == false)
    550 		return 0;
    551 
     552 	/* Stop the interface; callouts are stopped as part of the stop path. */
    553 	ixv_ifstop(adapter->ifp, 1);
    554 
    555 #if NVLAN > 0
    556 	/* Make sure VLANs are not using driver */
    557 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    558 		;	/* nothing to do: no VLANs */
    559 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    560 		vlan_ifdetach(adapter->ifp);
    561 	else {
    562 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    563 		return EBUSY;
    564 	}
    565 #endif
    566 
    567 	IXGBE_CORE_LOCK(adapter);
    568 	ixv_stop(adapter);
    569 	IXGBE_CORE_UNLOCK(adapter);
    570 
    571 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    572 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    573 			softint_disestablish(txr->txr_si);
    574 		softint_disestablish(que->que_si);
    575 	}
    576 
     577 	/* Drain the Mailbox (link) queue */
    578 	softint_disestablish(adapter->link_si);
    579 
    580 	/* Unregister VLAN events */
    581 #if 0 /* XXX msaitoh delete after write? */
    582 	if (adapter->vlan_attach != NULL)
    583 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    584 	if (adapter->vlan_detach != NULL)
    585 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    586 #endif
    587 
    588 	ether_ifdetach(adapter->ifp);
    589 	callout_halt(&adapter->timer, NULL);
    590 
    591 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    592 		netmap_detach(adapter->ifp);
    593 
    594 	ixv_free_pci_resources(adapter);
    595 #if 0 /* XXX the NetBSD port is probably missing something here */
    596 	bus_generic_detach(dev);
    597 #endif
    598 	if_detach(adapter->ifp);
    599 	if_percpuq_destroy(adapter->ipq);
    600 
    601 	sysctl_teardown(&adapter->sysctllog);
    602 	evcnt_detach(&adapter->handleq);
    603 	evcnt_detach(&adapter->req);
    604 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    605 	evcnt_detach(&adapter->mbuf_defrag_failed);
    606 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    607 	evcnt_detach(&adapter->einval_tx_dma_setup);
    608 	evcnt_detach(&adapter->other_tx_dma_setup);
    609 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    610 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    611 	evcnt_detach(&adapter->watchdog_events);
    612 	evcnt_detach(&adapter->tso_err);
    613 	evcnt_detach(&adapter->link_irq);
    614 
    615 	txr = adapter->tx_rings;
    616 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    617 		evcnt_detach(&adapter->queues[i].irqs);
    618 		evcnt_detach(&txr->no_desc_avail);
    619 		evcnt_detach(&txr->total_packets);
    620 		evcnt_detach(&txr->tso_tx);
    621 #ifndef IXGBE_LEGACY_TX
    622 		evcnt_detach(&txr->pcq_drops);
    623 #endif
    624 
    625 		evcnt_detach(&rxr->rx_packets);
    626 		evcnt_detach(&rxr->rx_bytes);
    627 		evcnt_detach(&rxr->rx_copies);
    628 		evcnt_detach(&rxr->no_jmbuf);
    629 		evcnt_detach(&rxr->rx_discarded);
    630 	}
    631 	evcnt_detach(&stats->ipcs);
    632 	evcnt_detach(&stats->l4cs);
    633 	evcnt_detach(&stats->ipcs_bad);
    634 	evcnt_detach(&stats->l4cs_bad);
    635 
    636 	/* Packet Reception Stats */
    637 	evcnt_detach(&stats->vfgorc);
    638 	evcnt_detach(&stats->vfgprc);
    639 	evcnt_detach(&stats->vfmprc);
    640 
    641 	/* Packet Transmission Stats */
    642 	evcnt_detach(&stats->vfgotc);
    643 	evcnt_detach(&stats->vfgptc);
    644 
    645 	ixgbe_free_transmit_structures(adapter);
    646 	ixgbe_free_receive_structures(adapter);
    647 	free(adapter->queues, M_DEVBUF);
    648 
    649 	IXGBE_CORE_LOCK_DESTROY(adapter);
    650 
    651 	return (0);
    652 } /* ixv_detach */
    653 
    654 /************************************************************************
    655  * ixv_init_locked - Init entry point
    656  *
     657  *   Used in two ways: by the stack, via ixv_init(), as the network
     658  *   interface init entry point, and by the driver itself as a hw/sw
     659  *   initialization routine to bring the adapter to a consistent
     660  *   state.
     661  *
     662  *   Called with the core lock held; returns nothing.
    663  ************************************************************************/
    664 static void
    665 ixv_init_locked(struct adapter *adapter)
    666 {
    667 	struct ifnet	*ifp = adapter->ifp;
    668 	device_t 	dev = adapter->dev;
    669 	struct ixgbe_hw *hw = &adapter->hw;
    670 	int             error = 0;
    671 
    672 	INIT_DEBUGOUT("ixv_init_locked: begin");
    673 	KASSERT(mutex_owned(&adapter->core_mtx));
    674 	hw->adapter_stopped = FALSE;
    675 	hw->mac.ops.stop_adapter(hw);
     676 	callout_stop(&adapter->timer);
    677 
    678 	/* reprogram the RAR[0] in case user changed it. */
    679 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    680 
     681 	/* Get the latest MAC address; the user may have set an LAA */
    682 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    683 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    684 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    685 
    686 	/* Prepare transmit descriptors and buffers */
    687 	if (ixgbe_setup_transmit_structures(adapter)) {
    688 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    689 		ixv_stop(adapter);
    690 		return;
    691 	}
    692 
    693 	/* Reset VF and renegotiate mailbox API version */
    694 	hw->mac.ops.reset_hw(hw);
    695 	error = ixv_negotiate_api(adapter);
    696 	if (error)
    697 		device_printf(dev,
    698 		    "Mailbox API negotiation failed in init_locked!\n");
    699 
    700 	ixv_initialize_transmit_units(adapter);
    701 
    702 	/* Setup Multicast table */
    703 	ixv_set_multi(adapter);
    704 
    705 	/*
    706 	 * Determine the correct mbuf pool
    707 	 * for doing jumbo/headersplit
    708 	 */
    709 	if (ifp->if_mtu > ETHERMTU)
    710 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    711 	else
    712 		adapter->rx_mbuf_sz = MCLBYTES;
    713 
    714 	/* Prepare receive descriptors and buffers */
    715 	if (ixgbe_setup_receive_structures(adapter)) {
    716 		device_printf(dev, "Could not setup receive structures\n");
    717 		ixv_stop(adapter);
    718 		return;
    719 	}
    720 
    721 	/* Configure RX settings */
    722 	ixv_initialize_receive_units(adapter);
    723 
    724 #if 0 /* XXX isn't it required? -- msaitoh  */
    725 	/* Set the various hardware offload abilities */
    726 	ifp->if_hwassist = 0;
    727 	if (ifp->if_capenable & IFCAP_TSO4)
    728 		ifp->if_hwassist |= CSUM_TSO;
    729 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    730 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    731 #if __FreeBSD_version >= 800000
    732 		ifp->if_hwassist |= CSUM_SCTP;
    733 #endif
    734 	}
    735 #endif
    736 
    737 	/* Set up VLAN offload and filter */
    738 	ixv_setup_vlan_support(adapter);
    739 
    740 	/* Set up MSI-X routing */
    741 	ixv_configure_ivars(adapter);
    742 
    743 	/* Set up auto-mask */
    744 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    745 
    746 	/* Set moderation on the Link interrupt */
    747 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    748 
    749 	/* Stats init */
    750 	ixv_init_stats(adapter);
    751 
    752 	/* Config/Enable Link */
    753 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    754 	    FALSE);
    755 
    756 	/* Start watchdog */
    757 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    758 
    759 	/* And now turn on interrupts */
    760 	ixv_enable_intr(adapter);
    761 
    762 	/* Now inform the stack we're ready */
    763 	ifp->if_flags |= IFF_RUNNING;
    764 	ifp->if_flags &= ~IFF_OACTIVE;
    765 
    766 	return;
    767 } /* ixv_init_locked */
    768 
    769 /*
    770  * MSI-X Interrupt Handlers and Tasklets
    771  */
    772 
    773 static inline void
    774 ixv_enable_queue(struct adapter *adapter, u32 vector)
    775 {
    776 	struct ixgbe_hw *hw = &adapter->hw;
    777 	u32             queue = 1 << vector;
    778 	u32             mask;
    779 
    780 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    781 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    782 } /* ixv_enable_queue */
    783 
    784 static inline void
    785 ixv_disable_queue(struct adapter *adapter, u32 vector)
    786 {
    787 	struct ixgbe_hw *hw = &adapter->hw;
     788 	u64             queue = (u64)1 << vector;
    789 	u32             mask;
    790 
    791 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    792 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    793 } /* ixv_disable_queue */
    794 
    795 static inline void
    796 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    797 {
    798 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    799 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    800 } /* ixv_rearm_queues */
    801 
    802 
    803 /************************************************************************
     804  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    805  ************************************************************************/
    806 static int
    807 ixv_msix_que(void *arg)
    808 {
    809 	struct ix_queue	*que = arg;
    810 	struct adapter  *adapter = que->adapter;
    811 	struct tx_ring	*txr = que->txr;
    812 	struct rx_ring	*rxr = que->rxr;
    813 	bool		more;
    814 	u32		newitr = 0;
    815 
    816 	ixv_disable_queue(adapter, que->msix);
    817 	++que->irqs.ev_count;
    818 
    819 #ifdef __NetBSD__
    820 	/* Don't run ixgbe_rxeof in interrupt context */
    821 	more = true;
    822 #else
    823 	more = ixgbe_rxeof(que);
    824 #endif
    825 
    826 	IXGBE_TX_LOCK(txr);
    827 	ixgbe_txeof(txr);
    828 	IXGBE_TX_UNLOCK(txr);
    829 
    830 	/* Do AIM now? */
    831 
    832 	if (adapter->enable_aim == false)
    833 		goto no_calc;
    834 	/*
    835 	 * Do Adaptive Interrupt Moderation:
    836 	 *  - Write out last calculated setting
    837 	 *  - Calculate based on average size over
    838 	 *    the last interval.
    839 	 */
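         	/*
         	 * Rough worked example: if the last interval saw 64 RX packets
         	 * totalling 96000 bytes, the average frame is 1500 bytes; adding
         	 * 24 for frame overhead/CRC gives 1524, which is above the
         	 * 300-1200 mid range and so is halved to 762 before being saved
         	 * and written to VTEITR on the next interrupt.
         	 */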
     840 	if (que->eitr_setting)
    841 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    842 		    que->eitr_setting);
    843 
    844 	que->eitr_setting = 0;
    845 
    846 	/* Idle, do nothing */
    847 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    848 		goto no_calc;
    849 
    850 	if ((txr->bytes) && (txr->packets))
    851 		newitr = txr->bytes/txr->packets;
    852 	if ((rxr->bytes) && (rxr->packets))
    853 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    854 	newitr += 24; /* account for hardware frame, crc */
    855 
    856 	/* set an upper boundary */
    857 	newitr = min(newitr, 3000);
    858 
    859 	/* Be nice to the mid range */
    860 	if ((newitr > 300) && (newitr < 1200))
    861 		newitr = (newitr / 3);
    862 	else
    863 		newitr = (newitr / 2);
    864 
    865 	newitr |= newitr << 16;
    866 
    867 	/* save for next interrupt */
    868 	que->eitr_setting = newitr;
    869 
    870 	/* Reset state */
    871 	txr->bytes = 0;
    872 	txr->packets = 0;
    873 	rxr->bytes = 0;
    874 	rxr->packets = 0;
    875 
    876 no_calc:
    877 	if (more)
    878 		softint_schedule(que->que_si);
    879 	else /* Re-enable this interrupt */
    880 		ixv_enable_queue(adapter, que->msix);
    881 
    882 	return 1;
    883 } /* ixv_msix_que */
    884 
    885 /************************************************************************
    886  * ixv_msix_mbx
    887  ************************************************************************/
    888 static int
    889 ixv_msix_mbx(void *arg)
    890 {
    891 	struct adapter	*adapter = arg;
    892 	struct ixgbe_hw *hw = &adapter->hw;
    893 	u32		reg;
    894 
    895 	++adapter->link_irq.ev_count;
    896 
    897 	/* First get the cause */
    898 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    899 	/* Clear interrupt with write */
    900 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    901 
    902 	/* Link status change */
    903 	if (reg & IXGBE_EICR_LSC)
    904 		softint_schedule(adapter->link_si);
    905 
    906 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    907 
    908 	return 1;
    909 } /* ixv_msix_mbx */
    910 
    911 /************************************************************************
    912  * ixv_media_status - Media Ioctl callback
    913  *
    914  *   Called whenever the user queries the status of
    915  *   the interface using ifconfig.
    916  ************************************************************************/
    917 static void
    918 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
    919 {
    920 	struct adapter *adapter = ifp->if_softc;
    921 
    922 	INIT_DEBUGOUT("ixv_media_status: begin");
    923 	IXGBE_CORE_LOCK(adapter);
    924 	ixv_update_link_status(adapter);
    925 
    926 	ifmr->ifm_status = IFM_AVALID;
    927 	ifmr->ifm_active = IFM_ETHER;
    928 
    929 	if (!adapter->link_active) {
    930 		ifmr->ifm_active |= IFM_NONE;
    931 		IXGBE_CORE_UNLOCK(adapter);
    932 		return;
    933 	}
    934 
    935 	ifmr->ifm_status |= IFM_ACTIVE;
    936 
    937 	switch (adapter->link_speed) {
    938 		case IXGBE_LINK_SPEED_10GB_FULL:
    939 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    940 			break;
    941 		case IXGBE_LINK_SPEED_1GB_FULL:
    942 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    943 			break;
    944 		case IXGBE_LINK_SPEED_100_FULL:
    945 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    946 			break;
    947 		case IXGBE_LINK_SPEED_10_FULL:
    948 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    949 			break;
    950 	}
    951 
    952 	IXGBE_CORE_UNLOCK(adapter);
    953 
    954 	return;
    955 } /* ixv_media_status */
    956 
    957 /************************************************************************
    958  * ixv_media_change - Media Ioctl callback
    959  *
    960  *   Called when the user changes speed/duplex using
     961  *   media/mediaopt options with ifconfig.
    962  ************************************************************************/
    963 static int
    964 ixv_media_change(struct ifnet *ifp)
    965 {
    966 	struct adapter *adapter = ifp->if_softc;
    967 	struct ifmedia *ifm = &adapter->media;
    968 
    969 	INIT_DEBUGOUT("ixv_media_change: begin");
    970 
    971 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    972 		return (EINVAL);
    973 
    974 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    975 	case IFM_AUTO:
    976 		break;
    977 	default:
    978 		device_printf(adapter->dev, "Only auto media type\n");
    979 		return (EINVAL);
    980 	}
    981 
    982 	return (0);
    983 } /* ixv_media_change */
    984 
    985 
    986 /************************************************************************
    987  * ixv_negotiate_api
    988  *
    989  *   Negotiate the Mailbox API with the PF;
    990  *   start with the most featured API first.
    991  ************************************************************************/
    992 static int
    993 ixv_negotiate_api(struct adapter *adapter)
    994 {
    995 	struct ixgbe_hw *hw = &adapter->hw;
    996 	int             mbx_api[] = { ixgbe_mbox_api_11,
    997 	                              ixgbe_mbox_api_10,
    998 	                              ixgbe_mbox_api_unknown };
    999 	int             i = 0;
   1000 
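         	/*
         	 * Note: only API versions 1.1 and 1.0 are attempted here, so
         	 * hw->api_version will never exceed ixgbe_mbox_api_11 even if
         	 * the PF supports something newer.
         	 */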
   1001 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1002 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1003 			return (0);
   1004 		i++;
   1005 	}
   1006 
   1007 	return (EINVAL);
   1008 } /* ixv_negotiate_api */
   1009 
   1010 
   1011 /************************************************************************
   1012  * ixv_set_multi - Multicast Update
   1013  *
    1014  *   Called whenever the multicast address list is updated.
   1015  ************************************************************************/
   1016 static void
   1017 ixv_set_multi(struct adapter *adapter)
   1018 {
   1019 	struct ether_multi *enm;
   1020 	struct ether_multistep step;
   1021 	struct ethercom *ec = &adapter->osdep.ec;
   1022 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1023 	u8                 *update_ptr;
   1024 	int                mcnt = 0;
   1025 
   1026 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1027 
   1028 	ETHER_FIRST_MULTI(step, ec, enm);
   1029 	while (enm != NULL) {
   1030 		bcopy(enm->enm_addrlo,
   1031 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1032 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1033 		mcnt++;
   1034 		/* XXX This might be required --msaitoh */
   1035 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1036 			break;
   1037 		ETHER_NEXT_MULTI(step, enm);
   1038 	}
   1039 
   1040 	update_ptr = mta;
   1041 
   1042 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1043 	    ixv_mc_array_itr, TRUE);
   1044 
   1045 	return;
   1046 } /* ixv_set_multi */
   1047 
   1048 /************************************************************************
   1049  * ixv_mc_array_itr
   1050  *
   1051  *   An iterator function needed by the multicast shared code.
    1052  *   It feeds the shared code routine the addresses in the
    1053  *   array built by ixv_set_multi() one by one.
   1054  ************************************************************************/
   1055 static u8 *
   1056 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1057 {
   1058 	u8 *addr = *update_ptr;
   1059 	u8 *newptr;
   1060 	*vmdq = 0;
   1061 
   1062 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1063 	*update_ptr = newptr;
   1064 
   1065 	return addr;
   1066 } /* ixv_mc_array_itr */
   1067 
   1068 /************************************************************************
   1069  * ixv_local_timer - Timer routine
   1070  *
   1071  *   Checks for link status, updates statistics,
   1072  *   and runs the watchdog check.
   1073  ************************************************************************/
   1074 static void
   1075 ixv_local_timer(void *arg)
   1076 {
   1077 	struct adapter *adapter = arg;
   1078 
   1079 	IXGBE_CORE_LOCK(adapter);
   1080 	ixv_local_timer_locked(adapter);
   1081 	IXGBE_CORE_UNLOCK(adapter);
   1082 }
   1083 
   1084 static void
   1085 ixv_local_timer_locked(void *arg)
   1086 {
   1087 	struct adapter	*adapter = arg;
   1088 	device_t	dev = adapter->dev;
   1089 	struct ix_queue	*que = adapter->queues;
   1090 	u64		queues = 0;
   1091 	int		hung = 0;
   1092 
   1093 	KASSERT(mutex_owned(&adapter->core_mtx));
   1094 
   1095 	ixv_check_link(adapter);
   1096 
   1097 	/* Stats Update */
   1098 	ixv_update_stats(adapter);
   1099 
   1100 	/*
   1101 	 * Check the TX queues status
   1102 	 *      - mark hung queues so we don't schedule on them
   1103 	 *      - watchdog only if all queues show hung
   1104 	 */
   1105 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1106 		/* Keep track of queues with work for soft irq */
   1107 		if (que->txr->busy)
   1108 			queues |= ((u64)1 << que->me);
   1109 		/*
    1110 		 * Each time txeof runs without cleaning while uncleaned
    1111 		 * descriptors remain, it increments 'busy'. If that reaches
    1112 		 * the MAX we declare the queue hung.
   1113 		 */
   1114 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1115 			++hung;
   1116 			/* Mark the queue as inactive */
   1117 			adapter->active_queues &= ~((u64)1 << que->me);
   1118 			continue;
   1119 		} else {
   1120 			/* Check if we've come back from hung */
   1121 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1122 				adapter->active_queues |= ((u64)1 << que->me);
   1123 		}
   1124 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1125 			device_printf(dev,
   1126 			    "Warning queue %d appears to be hung!\n", i);
   1127 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1128 			++hung;
   1129 		}
   1130 	}
   1131 
   1132 	/* Only truly watchdog if all queues show hung */
   1133 	if (hung == adapter->num_queues)
   1134 		goto watchdog;
   1135 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1136 		ixv_rearm_queues(adapter, queues);
   1137 	}
   1138 
   1139 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1140 
   1141 	return;
   1142 
   1143 watchdog:
   1144 
   1145 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1146 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1147 	adapter->watchdog_events.ev_count++;
   1148 	ixv_init_locked(adapter);
   1149 } /* ixv_local_timer */
   1150 
   1151 /************************************************************************
   1152  * ixv_update_link_status - Update OS on link state
   1153  *
   1154  * Note: Only updates the OS on the cached link state.
   1155  *       The real check of the hardware only happens with
   1156  *       a link interrupt.
   1157  ************************************************************************/
   1158 static void
   1159 ixv_update_link_status(struct adapter *adapter)
   1160 {
   1161 	struct ifnet *ifp = adapter->ifp;
   1162 	device_t     dev = adapter->dev;
   1163 
   1164 	if (adapter->link_up) {
   1165 		if (adapter->link_active == FALSE) {
   1166 			if (bootverbose) {
   1167 				const char *bpsmsg;
   1168 
   1169 				switch (adapter->link_speed) {
   1170 				case IXGBE_LINK_SPEED_10GB_FULL:
   1171 					bpsmsg = "10 Gbps";
   1172 					break;
   1173 				case IXGBE_LINK_SPEED_5GB_FULL:
   1174 					bpsmsg = "5 Gbps";
   1175 					break;
   1176 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1177 					bpsmsg = "2.5 Gbps";
   1178 					break;
   1179 				case IXGBE_LINK_SPEED_1GB_FULL:
   1180 					bpsmsg = "1 Gbps";
   1181 					break;
   1182 				case IXGBE_LINK_SPEED_100_FULL:
   1183 					bpsmsg = "100 Mbps";
   1184 					break;
   1185 				case IXGBE_LINK_SPEED_10_FULL:
   1186 					bpsmsg = "10 Mbps";
   1187 					break;
   1188 				default:
   1189 					bpsmsg = "unknown speed";
   1190 					break;
   1191 				}
    1192 				device_printf(dev, "Link is up %s %s\n",
    1193 				    bpsmsg, "Full Duplex");
   1194 			}
   1195 			adapter->link_active = TRUE;
   1196 			if_link_state_change(ifp, LINK_STATE_UP);
   1197 		}
   1198 	} else { /* Link down */
   1199 		if (adapter->link_active == TRUE) {
   1200 			if (bootverbose)
   1201 				device_printf(dev,"Link is Down\n");
   1202 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1203 			adapter->link_active = FALSE;
   1204 		}
   1205 	}
   1206 
   1207 	return;
   1208 } /* ixv_update_link_status */
   1209 
   1210 
   1211 /************************************************************************
   1212  * ixv_stop - Stop the hardware
   1213  *
   1214  *   Disables all traffic on the adapter by issuing a
   1215  *   global reset on the MAC and deallocates TX/RX buffers.
   1216  ************************************************************************/
   1217 static void
   1218 ixv_ifstop(struct ifnet *ifp, int disable)
   1219 {
   1220 	struct adapter *adapter = ifp->if_softc;
   1221 
   1222 	IXGBE_CORE_LOCK(adapter);
   1223 	ixv_stop(adapter);
   1224 	IXGBE_CORE_UNLOCK(adapter);
   1225 }
   1226 
   1227 static void
   1228 ixv_stop(void *arg)
   1229 {
   1230 	struct ifnet    *ifp;
   1231 	struct adapter  *adapter = arg;
   1232 	struct ixgbe_hw *hw = &adapter->hw;
   1233 
   1234 	ifp = adapter->ifp;
   1235 
   1236 	KASSERT(mutex_owned(&adapter->core_mtx));
   1237 
   1238 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1239 	ixv_disable_intr(adapter);
   1240 
   1241 	/* Tell the stack that the interface is no longer active */
   1242 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1243 
   1244 	hw->mac.ops.reset_hw(hw);
   1245 	adapter->hw.adapter_stopped = FALSE;
   1246 	hw->mac.ops.stop_adapter(hw);
   1247 	callout_stop(&adapter->timer);
   1248 
   1249 	/* reprogram the RAR[0] in case user changed it. */
   1250 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1251 
   1252 	return;
   1253 } /* ixv_stop */
   1254 
   1255 
   1256 /************************************************************************
   1257  * ixv_allocate_pci_resources
   1258  ************************************************************************/
   1259 static int
   1260 ixv_allocate_pci_resources(struct adapter *adapter,
   1261     const struct pci_attach_args *pa)
   1262 {
   1263 	pcireg_t	memtype;
   1264 	device_t        dev = adapter->dev;
   1265 	bus_addr_t addr;
   1266 	int flags;
   1267 
   1268 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1269 	switch (memtype) {
   1270 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1271 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1272 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1273 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1274 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1275 			goto map_err;
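         		/*
         		 * BAR0 holds device registers, so map it non-prefetchable
         		 * even when the BAR advertises prefetchable: some register
         		 * reads have side effects.
         		 */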
   1276 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1277 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1278 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1279 		}
   1280 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1281 		     adapter->osdep.mem_size, flags,
   1282 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1283 map_err:
   1284 			adapter->osdep.mem_size = 0;
   1285 			aprint_error_dev(dev, "unable to map BAR0\n");
   1286 			return ENXIO;
   1287 		}
   1288 		break;
   1289 	default:
   1290 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1291 		return ENXIO;
   1292 	}
   1293 
   1294 	/* Pick up the tuneable queues */
   1295 	adapter->num_queues = ixv_num_queues;
   1296 
   1297 	return (0);
   1298 } /* ixv_allocate_pci_resources */
   1299 
   1300 /************************************************************************
   1301  * ixv_free_pci_resources
   1302  ************************************************************************/
   1303 static void
   1304 ixv_free_pci_resources(struct adapter * adapter)
   1305 {
    1306 	struct ix_queue	*que = adapter->queues;
   1307 	int		rid;
   1308 
    1309 	/*
    1310 	 * Release all MSI-X queue resources.
    1311 	 */
   1312 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1313 		if (que->res != NULL)
   1314 			pci_intr_disestablish(adapter->osdep.pc,
   1315 			    adapter->osdep.ihs[i]);
   1316 	}
   1317 
   1318 
   1319 	/* Clean the Mailbox interrupt last */
   1320 	rid = adapter->vector;
   1321 
   1322 	if (adapter->osdep.ihs[rid] != NULL) {
   1323 		pci_intr_disestablish(adapter->osdep.pc,
   1324 		    adapter->osdep.ihs[rid]);
   1325 		adapter->osdep.ihs[rid] = NULL;
   1326 	}
   1327 
   1328 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1329 	    adapter->osdep.nintrs);
   1330 
   1331 	if (adapter->osdep.mem_size != 0) {
   1332 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1333 		    adapter->osdep.mem_bus_space_handle,
   1334 		    adapter->osdep.mem_size);
   1335 	}
   1336 
   1337 	return;
   1338 } /* ixv_free_pci_resources */
   1339 
   1340 /************************************************************************
   1341  * ixv_setup_interface
   1342  *
   1343  *   Setup networking device structure and register an interface.
   1344  ************************************************************************/
   1345 static void
   1346 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1347 {
   1348 	struct ethercom *ec = &adapter->osdep.ec;
   1349 	struct ifnet   *ifp;
   1350 
   1351 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1352 
   1353 	ifp = adapter->ifp = &ec->ec_if;
   1354 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1355 	ifp->if_baudrate = IF_Gbps(10);
   1356 	ifp->if_init = ixv_init;
   1357 	ifp->if_stop = ixv_ifstop;
   1358 	ifp->if_softc = adapter;
   1359 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1360 #ifdef IXGBE_MPSAFE
   1361 	ifp->if_extflags = IFEF_START_MPSAFE;
   1362 #endif
   1363 	ifp->if_ioctl = ixv_ioctl;
   1364 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1365 #if 0
   1366 		ixv_start_locked = ixgbe_legacy_start_locked;
   1367 #endif
   1368 	} else {
   1369 		ifp->if_transmit = ixgbe_mq_start;
   1370 #if 0
   1371 		ixv_start_locked = ixgbe_mq_start_locked;
   1372 #endif
   1373 	}
   1374 	ifp->if_start = ixgbe_legacy_start;
   1375 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1376 	IFQ_SET_READY(&ifp->if_snd);
   1377 
   1378 	if_initialize(ifp);
   1379 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1380 	ether_ifattach(ifp, adapter->hw.mac.addr);
    1381 	/*
    1382 	 * We use a per-TX-queue softint, so if_deferred_start_init()
    1383 	 * isn't used.
    1384 	 */
   1385 	if_register(ifp);
   1386 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1387 
   1388 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1389 
   1390 	/*
   1391 	 * Tell the upper layer(s) we support long frames.
   1392 	 */
   1393 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1394 
   1395 	/* Set capability flags */
   1396 	ifp->if_capabilities |= IFCAP_HWCSUM
   1397 	                     |  IFCAP_TSOv4
   1398 	                     |  IFCAP_TSOv6;
   1399 	ifp->if_capenable = 0;
   1400 
   1401 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1402 			    |  ETHERCAP_VLAN_HWCSUM
   1403 			    |  ETHERCAP_JUMBO_MTU
   1404 			    |  ETHERCAP_VLAN_MTU;
   1405 
   1406 	/* Enable the above capabilities by default */
   1407 	ec->ec_capenable = ec->ec_capabilities;
   1408 
   1409 	/* Don't enable LRO by default */
   1410 	ifp->if_capabilities |= IFCAP_LRO;
   1411 #if 0
   1412 	ifp->if_capenable = ifp->if_capabilities;
   1413 #endif
   1414 
   1415 	/*
   1416 	 * Specify the media types supported by this adapter and register
   1417 	 * callbacks to update media and link information
   1418 	 */
   1419 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1420 	    ixv_media_status);
   1421 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1422 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1423 
   1424 	return;
   1425 } /* ixv_setup_interface */
   1426 
   1427 
   1428 /************************************************************************
   1429  * ixv_initialize_transmit_units - Enable transmit unit.
   1430  ************************************************************************/
   1431 static void
   1432 ixv_initialize_transmit_units(struct adapter *adapter)
   1433 {
   1434 	struct tx_ring	*txr = adapter->tx_rings;
   1435 	struct ixgbe_hw	*hw = &adapter->hw;
   1436 
   1437 
   1438 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1439 		u64 tdba = txr->txdma.dma_paddr;
   1440 		u32 txctrl, txdctl;
   1441 
   1442 		/* Set WTHRESH to 8, burst writeback */
   1443 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1444 		txdctl |= (8 << 16);
   1445 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1446 
   1447 		/* Set the HW Tx Head and Tail indices */
   1448 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1449 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1450 
   1451 		/* Set Tx Tail register */
   1452 		txr->tail = IXGBE_VFTDT(i);
   1453 
   1454 		/* Set Ring parameters */
   1455 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1456 		    (tdba & 0x00000000ffffffffULL));
   1457 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1458 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1459 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1460 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1461 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1462 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1463 
   1464 		/* Now enable */
   1465 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1466 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1467 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1468 	}
   1469 
   1470 	return;
   1471 } /* ixv_initialize_transmit_units */
   1472 
   1473 
   1474 /************************************************************************
   1475  * ixv_initialize_rss_mapping
   1476  ************************************************************************/
   1477 static void
   1478 ixv_initialize_rss_mapping(struct adapter *adapter)
   1479 {
   1480 	struct ixgbe_hw *hw = &adapter->hw;
   1481 	u32             reta = 0, mrqc, rss_key[10];
   1482 	int             queue_id;
   1483 	int             i, j;
   1484 	u32             rss_hash_config;
   1485 
   1486 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1487 		/* Fetch the configured RSS key */
   1488 		rss_getkey((uint8_t *)&rss_key);
   1489 	} else {
   1490 		/* set up random bits */
   1491 		cprng_fast(&rss_key, sizeof(rss_key));
   1492 	}
   1493 
   1494 	/* Now fill out hash function seeds */
   1495 	for (i = 0; i < 10; i++)
   1496 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1497 
   1498 	/* Set up the redirection table */
   1499 	for (i = 0, j = 0; i < 64; i++, j++) {
   1500 		if (j == adapter->num_queues)
   1501 			j = 0;
   1502 
   1503 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1504 			/*
   1505 			 * Fetch the RSS bucket id for the given indirection
   1506 			 * entry. Cap it at the number of configured buckets
   1507 			 * (which is num_queues.)
   1508 			 */
   1509 			queue_id = rss_get_indirection_to_bucket(i);
   1510 			queue_id = queue_id % adapter->num_queues;
   1511 		} else
   1512 			queue_id = j;
   1513 
   1514 		/*
   1515 		 * The low 8 bits are for hash value (n+0);
   1516 		 * The next 8 bits are for hash value (n+1), etc.
   1517 		 */
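         		/*
         		 * For example, with two queues and no RSS the word written
         		 * for entries 0-3 ends up as 0x01000100, i.e. the table
         		 * alternates queue 0, 1, 0, 1.
         		 */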
   1518 		reta >>= 8;
   1519 		reta |= ((uint32_t)queue_id) << 24;
   1520 		if ((i & 3) == 3) {
   1521 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1522 			reta = 0;
   1523 		}
   1524 	}
   1525 
   1526 	/* Perform hash on these packet types */
   1527 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1528 		rss_hash_config = rss_gethashconfig();
   1529 	else {
   1530 		/*
   1531 		 * Disable UDP - IP fragments aren't currently being handled
   1532 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1533 		 * traffic.
   1534 		 */
   1535 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1536 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1537 		                | RSS_HASHTYPE_RSS_IPV6
   1538 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1539 	}
   1540 
   1541 	mrqc = IXGBE_MRQC_RSSEN;
   1542 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1543 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1544 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1545 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1546 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1547 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1548 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1549 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1550 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1551 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1552 		    __func__);
   1553 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1554 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1555 		    __func__);
   1556 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1557 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1558 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1559 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1560 		    __func__);
   1561 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1562 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1563 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1564 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1565 		    __func__);
   1566 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1567 } /* ixv_initialize_rss_mapping */
   1568 
   1569 
   1570 /************************************************************************
   1571  * ixv_initialize_receive_units - Setup receive registers and features.
   1572  ************************************************************************/
   1573 static void
   1574 ixv_initialize_receive_units(struct adapter *adapter)
   1575 {
   1576 	struct	rx_ring	*rxr = adapter->rx_rings;
   1577 	struct ixgbe_hw	*hw = &adapter->hw;
   1578 	struct ifnet	*ifp = adapter->ifp;
   1579 	u32		bufsz, rxcsum, psrtype;
   1580 
   1581 	if (ifp->if_mtu > ETHERMTU)
   1582 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1583 	else
   1584 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
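	/*
	 * Note (illustrative): the SRRCTL packet buffer size field is in
	 * 1 KB units, so with IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10 the values
	 * above select 4 KB buffers for jumbo MTUs and 2 KB otherwise.
	 */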
   1585 
   1586 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1587 	        | IXGBE_PSRTYPE_UDPHDR
   1588 	        | IXGBE_PSRTYPE_IPV4HDR
   1589 	        | IXGBE_PSRTYPE_IPV6HDR
   1590 	        | IXGBE_PSRTYPE_L2HDR;
   1591 
   1592 	if (adapter->num_queues > 1)
   1593 		psrtype |= 1 << 29;
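	/*
	 * Bit 29 falls in the PSRTYPE RQPL field, advertising that this VF
	 * spreads receive traffic over more than one RSS queue; consult the
	 * datasheet for the exact field encoding.
	 */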
   1594 
   1595 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1596 
   1597 	/* Tell PF our max_frame size */
   1598 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1599 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1600 	}
   1601 
   1602 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1603 		u64 rdba = rxr->rxdma.dma_paddr;
   1604 		u32 reg, rxdctl;
   1605 
   1606 		/* Disable the queue */
   1607 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1608 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1609 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1610 		for (int j = 0; j < 10; j++) {
   1611 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1612 			    IXGBE_RXDCTL_ENABLE)
   1613 				msec_delay(1);
   1614 			else
   1615 				break;
   1616 		}
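		/*
		 * The loop above polled RXDCTL.ENABLE for up to ~10 ms so
		 * the queue is quiesced before the ring base and length
		 * registers are rewritten below.
		 */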
   1617 		wmb();
   1618 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1619 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1620 		    (rdba & 0x00000000ffffffffULL));
   1621 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1622 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1623 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1624 
   1625 		/* Reset the ring indices */
   1626 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1627 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1628 
   1629 		/* Set up the SRRCTL register */
   1630 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1631 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1632 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1633 		reg |= bufsz;
   1634 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1635 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1636 
   1637 		/* Capture Rx Tail index */
   1638 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1639 
   1640 		/* Do the queue enabling last */
   1641 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1642 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1643 		for (int k = 0; k < 10; k++) {
   1644 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1645 			    IXGBE_RXDCTL_ENABLE)
   1646 				break;
   1647 			msec_delay(1);
   1648 		}
   1649 		wmb();
   1650 
   1651 		/* Set the Tail Pointer */
   1652 		/*
   1653 		 * In netmap mode, we must preserve the buffers made
   1654 		 * available to userspace before the if_init()
   1655 		 * (this is true by default on the TX side, because
   1656 		 * init makes all buffers available to userspace).
   1657 		 *
   1658 		 * netmap_reset() and the device specific routines
   1659 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1660 		 * buffers at the end of the NIC ring, so here we
   1661 		 * must set the RDT (tail) register to make sure
   1662 		 * they are not overwritten.
   1663 		 *
   1664 		 * In this driver the NIC ring starts at RDH = 0,
   1665 		 * RDT points to the last slot available for reception (?),
   1666 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1667 		 */
   1668 #ifdef DEV_NETMAP
   1669 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1670 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1671 			struct netmap_adapter *na = NA(adapter->ifp);
   1672 			struct netmap_kring *kring = &na->rx_rings[i];
   1673 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1674 
   1675 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1676 		} else
   1677 #endif /* DEV_NETMAP */
   1678 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1679 			    adapter->num_rx_desc - 1);
   1680 	}
   1681 
   1682 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1683 
   1684 	ixv_initialize_rss_mapping(adapter);
   1685 
   1686 	if (adapter->num_queues > 1) {
   1687 		/* RSS and RX IPP Checksum are mutually exclusive */
   1688 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1689 	}
   1690 
   1691 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1692 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1693 
   1694 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1695 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1696 
   1697 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1698 
   1699 	return;
   1700 } /* ixv_initialize_receive_units */
   1701 
   1702 /************************************************************************
   1703  * ixv_setup_vlan_support
   1704  ************************************************************************/
   1705 static void
   1706 ixv_setup_vlan_support(struct adapter *adapter)
   1707 {
   1708 	struct ixgbe_hw *hw = &adapter->hw;
   1709 	u32		ctrl, vid, vfta, retry;
   1710 
   1711 	/*
   1712 	 * We get here through init_locked, meaning a soft
   1713 	 * reset.  That reset has already cleared the VFTA and
   1714 	 * other state, so if no VLANs have been registered
   1715 	 * there is nothing to do.
   1716 	 */
   1717 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1718 		return;
   1719 
   1720 	/* Enable the queues */
   1721 	for (int i = 0; i < adapter->num_queues; i++) {
   1722 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1723 		ctrl |= IXGBE_RXDCTL_VME;
   1724 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1725 		/*
   1726 		 * Let Rx path know that it needs to store VLAN tag
   1727 		 * as part of extra mbuf info.
   1728 		 */
   1729 		adapter->rx_rings[i].vtag_strip = TRUE;
   1730 	}
   1731 
   1732 	/*
   1733 	 * A soft reset zeroes out the VFTA, so
   1734 	 * we need to repopulate it now.
   1735 	 */
   1736 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1737 		if (ixv_shadow_vfta[i] == 0)
   1738 			continue;
   1739 		vfta = ixv_shadow_vfta[i];
   1740 		/*
   1741 		 * Reconstruct the VLAN IDs from the
   1742 		 * bits set in each of the shadow
   1743 		 * VFTA words.
   1744 		 */
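		/*
		 * Worked example (illustrative): bit 5 set in
		 * ixv_shadow_vfta[3] corresponds to VID 3 * 32 + 5 = 101.
		 */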
   1745 		for (int j = 0; j < 32; j++) {
   1746 			retry = 0;
   1747 			if ((vfta & (1 << j)) == 0)
   1748 				continue;
   1749 			vid = (i * 32) + j;
   1750 			/* Call the shared code mailbox routine */
   1751 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1752 				if (++retry > 5)
   1753 					break;
   1754 			}
   1755 		}
   1756 	}
   1757 } /* ixv_setup_vlan_support */
   1758 
   1759 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1760 /************************************************************************
   1761  * ixv_register_vlan
   1762  *
   1763  *   Run via a vlan config EVENT, it enables us to use the
   1764  *   HW Filter table since we can get the vlan id. This just
   1765  *   creates the entry in the soft version of the VFTA, init
   1766  *   will repopulate the real table.
   1767  ************************************************************************/
   1768 static void
   1769 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1770 {
   1771 	struct adapter	*adapter = ifp->if_softc;
   1772 	u16		index, bit;
   1773 
   1774 	if (ifp->if_softc != arg) /* Not our event */
   1775 		return;
   1776 
   1777 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1778 		return;
   1779 
   1780 	IXGBE_CORE_LOCK(adapter);
   1781 	index = (vtag >> 5) & 0x7F;
   1782 	bit = vtag & 0x1F;
   1783 	ixv_shadow_vfta[index] |= (1 << bit);
   1784 	/* Re-init to load the changes */
   1785 	ixv_init_locked(adapter);
   1786 	IXGBE_CORE_UNLOCK(adapter);
   1787 } /* ixv_register_vlan */
   1788 
   1789 /************************************************************************
   1790  * ixv_unregister_vlan
   1791  *
   1792  *   Run via a vlan unconfig EVENT, remove our entry
   1793  *   in the soft vfta.
   1794  ************************************************************************/
   1795 static void
   1796 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1797 {
   1798 	struct adapter	*adapter = ifp->if_softc;
   1799 	u16		index, bit;
   1800 
   1801 	if (ifp->if_softc !=  arg)
   1802 		return;
   1803 
   1804 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1805 		return;
   1806 
   1807 	IXGBE_CORE_LOCK(adapter);
   1808 	index = (vtag >> 5) & 0x7F;
   1809 	bit = vtag & 0x1F;
   1810 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1811 	/* Re-init to load the changes */
   1812 	ixv_init_locked(adapter);
   1813 	IXGBE_CORE_UNLOCK(adapter);
   1814 } /* ixv_unregister_vlan */
   1815 #endif
   1816 
   1817 /************************************************************************
   1818  * ixv_enable_intr
   1819  ************************************************************************/
   1820 static void
   1821 ixv_enable_intr(struct adapter *adapter)
   1822 {
   1823 	struct ixgbe_hw *hw = &adapter->hw;
   1824 	struct ix_queue *que = adapter->queues;
   1825 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1826 
   1828 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1829 
   1830 	mask = IXGBE_EIMS_ENABLE_MASK;
   1831 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1832 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1833 
   1834 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1835 		ixv_enable_queue(adapter, que->msix);
   1836 
   1837 	IXGBE_WRITE_FLUSH(hw);
   1838 
   1839 	return;
   1840 } /* ixv_enable_intr */
   1841 
   1842 /************************************************************************
   1843  * ixv_disable_intr
   1844  ************************************************************************/
   1845 static void
   1846 ixv_disable_intr(struct adapter *adapter)
   1847 {
   1848 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1849 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1850 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1851 
   1852 	return;
   1853 } /* ixv_disable_intr */
   1854 
   1855 /************************************************************************
   1856  * ixv_set_ivar
   1857  *
   1858  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1859  *    - entry is the register array entry
   1860  *    - vector is the MSI-X vector for this queue
   1861  *    - type is RX/TX/MISC
   1862  ************************************************************************/
   1863 static void
   1864 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1865 {
   1866 	struct ixgbe_hw *hw = &adapter->hw;
   1867 	u32             ivar, index;
   1868 
   1869 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1870 
   1871 	if (type == -1) { /* MISC IVAR */
   1872 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1873 		ivar &= ~0xFF;
   1874 		ivar |= vector;
   1875 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1876 	} else {          /* RX/TX IVARS */
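		/*
		 * Each 32-bit VTIVAR register holds the vectors for two
		 * queues, 8 bits per entry: Rx of the even queue in bits
		 * 7:0, its Tx in 15:8, then Rx/Tx of the odd queue in
		 * 23:16 and 31:24.  That layout is what the index math
		 * below computes.
		 */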
   1877 		index = (16 * (entry & 1)) + (8 * type);
   1878 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1879 		ivar &= ~(0xFF << index);
   1880 		ivar |= (vector << index);
   1881 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1882 	}
   1883 } /* ixv_set_ivar */
   1884 
   1885 /************************************************************************
   1886  * ixv_configure_ivars
   1887  ************************************************************************/
   1888 static void
   1889 ixv_configure_ivars(struct adapter *adapter)
   1890 {
   1891 	struct ix_queue *que = adapter->queues;
   1892 
   1893 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1894 		/* First the RX queue entry */
   1895 		ixv_set_ivar(adapter, i, que->msix, 0);
   1896 		/* ... and the TX */
   1897 		ixv_set_ivar(adapter, i, que->msix, 1);
   1898 		/* Set an initial value in EITR */
   1899 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1900 		    IXGBE_EITR_DEFAULT);
   1901 	}
   1902 
   1903 	/* For the mailbox interrupt */
   1904 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1905 } /* ixv_configure_ivars */
   1906 
   1907 
   1908 /************************************************************************
   1909  * ixv_save_stats
   1910  *
   1911  *   The VF stats registers never have a truly virgin
   1912  *   starting point, so this routine creates an
   1913  *   artificial baseline by marking ground zero
   1914  *   at attach time.
   1915  ************************************************************************/
   1916 static void
   1917 ixv_save_stats(struct adapter *adapter)
   1918 {
   1919 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1920 
   1921 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1922 		stats->saved_reset_vfgprc +=
   1923 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1924 		stats->saved_reset_vfgptc +=
   1925 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1926 		stats->saved_reset_vfgorc +=
   1927 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1928 		stats->saved_reset_vfgotc +=
   1929 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1930 		stats->saved_reset_vfmprc +=
   1931 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1932 	}
   1933 } /* ixv_save_stats */
   1934 
   1935 /************************************************************************
   1936  * ixv_init_stats
   1937  ************************************************************************/
   1938 static void
   1939 ixv_init_stats(struct adapter *adapter)
   1940 {
   1941 	struct ixgbe_hw *hw = &adapter->hw;
   1942 
   1943 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1944 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1945 	adapter->stats.vf.last_vfgorc |=
   1946 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1947 
   1948 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1949 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1950 	adapter->stats.vf.last_vfgotc |=
   1951 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1952 
   1953 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1954 
   1955 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1956 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1957 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1958 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1959 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1960 } /* ixv_init_stats */
   1961 
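/*
 * The hardware counters are free-running 32-bit (or 36-bit, split across
 * LSB/MSB registers) values.  These macros fold a fresh reading into the
 * 64-bit evcnt total: if the reading is smaller than the previous one the
 * counter wrapped, so the accumulated high bits are advanced by 2^32
 * (resp. 2^36) before the low bits are replaced with the current value.
 */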
   1962 #define UPDATE_STAT_32(reg, last, count)		\
   1963 {                                                       \
   1964 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   1965 	if (current < (last))				\
   1966 		count.ev_count += 0x100000000LL;	\
   1967 	(last) = current;				\
   1968 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   1969 	count.ev_count |= current;			\
   1970 }
   1971 
   1972 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   1973 {                                                       \
   1974 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   1975 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   1976 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   1977 	if (current < (last))				\
   1978 		count.ev_count += 0x1000000000LL;	\
   1979 	(last) = current;				\
   1980 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   1981 	count.ev_count |= current;			\
   1982 }
   1983 
   1984 /************************************************************************
   1985  * ixv_update_stats - Update the board statistics counters.
   1986  ************************************************************************/
   1987 void
   1988 ixv_update_stats(struct adapter *adapter)
   1989 {
   1990 	struct ixgbe_hw *hw = &adapter->hw;
   1991 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1992 
   1993 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   1994 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   1995 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   1996 	    stats->vfgorc);
   1997 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   1998 	    stats->vfgotc);
   1999 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2000 
   2001 	/* Fill out the OS statistics structure */
   2002 	/*
   2003 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   2004 	 * adapter->stats counters. It's required to make ifconfig -z
   2005 	 * (SOICZIFDATA) work.
   2006 	 */
   2007 } /* ixv_update_stats */
   2008 
   2009 const struct sysctlnode *
   2010 ixv_sysctl_instance(struct adapter *adapter)
   2011 {
   2012 	const char *dvname;
   2013 	struct sysctllog **log;
   2014 	int rc;
   2015 	const struct sysctlnode *rnode;
   2016 
   2017 	log = &adapter->sysctllog;
   2018 	dvname = device_xname(adapter->dev);
   2019 
   2020 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2021 	    0, CTLTYPE_NODE, dvname,
   2022 	    SYSCTL_DESCR("ixv information and settings"),
   2023 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2024 		goto err;
   2025 
   2026 	return rnode;
   2027 err:
   2028 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2029 	return NULL;
   2030 }
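/*
 * The instance node is created under CTL_HW using the device name, so for
 * the first VF the sysctl subtree is hw.ixv0; leaf nodes added in
 * ixv_add_device_sysctls() then appear as e.g. hw.ixv0.debug and
 * hw.ixv0.enable_aim.  Illustrative usage from userland:
 *	sysctl -w hw.ixv0.debug=1	# dump ixv_print_debug_info() output
 */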
   2031 
   2032 static void
   2033 ixv_add_device_sysctls(struct adapter *adapter)
   2034 {
   2035 	struct sysctllog **log;
   2036 	const struct sysctlnode *rnode, *cnode;
   2037 	device_t dev;
   2038 
   2039 	dev = adapter->dev;
   2040 	log = &adapter->sysctllog;
   2041 
   2042 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2043 		aprint_error_dev(dev, "could not create sysctl root\n");
   2044 		return;
   2045 	}
   2046 
   2047 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2048 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2049 	    "debug", SYSCTL_DESCR("Debug Info"),
   2050 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2051 		aprint_error_dev(dev, "could not create sysctl\n");
   2052 
   2053 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2054 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2055 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2056 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2057 		aprint_error_dev(dev, "could not create sysctl\n");
   2058 }
   2059 
   2060 /************************************************************************
   2061  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2062  ************************************************************************/
   2063 static void
   2064 ixv_add_stats_sysctls(struct adapter *adapter)
   2065 {
   2066 	device_t                dev = adapter->dev;
   2067 	struct tx_ring          *txr = adapter->tx_rings;
   2068 	struct rx_ring          *rxr = adapter->rx_rings;
   2069 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2070 	const struct sysctlnode *rnode;
   2071 	struct sysctllog **log = &adapter->sysctllog;
   2072 	const char *xname = device_xname(dev);
   2073 
   2074 	/* Driver Statistics */
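	/*
	 * These are evcnt(9) counters, so they can be inspected from
	 * userland with vmstat -e (illustrative: vmstat -e | grep ixv0).
	 */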
   2075 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2076 	    NULL, xname, "Handled queue in softint");
   2077 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2078 	    NULL, xname, "Requeued in softint");
   2079 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2080 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2081 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2082 	    NULL, xname, "m_defrag() failed");
   2083 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2084 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2085 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2086 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2087 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2088 	    NULL, xname, "Driver tx dma hard fail other");
   2089 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2090 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2091 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2092 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2093 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2094 	    NULL, xname, "Watchdog timeouts");
   2095 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2096 	    NULL, xname, "TSO errors");
   2097 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2098 	    NULL, xname, "Link MSI-X IRQ Handled");
   2099 
   2100 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2101 		snprintf(adapter->queues[i].evnamebuf,
   2102 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2103 		    xname, i);
   2104 		snprintf(adapter->queues[i].namebuf,
   2105 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2106 
   2107 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2108 			aprint_error_dev(dev, "could not create sysctl root\n");
   2109 			break;
   2110 		}
   2111 
   2112 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2113 		    0, CTLTYPE_NODE,
   2114 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2115 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2116 			break;
   2117 
   2118 #if 0 /* not yet */
   2119 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2120 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2121 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2122 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2123 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2124 			break;
   2125 
   2126 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2127 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2128 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2129 			NULL, 0, &(adapter->queues[i].irqs),
   2130 		    0, CTL_CREATE, CTL_EOL) != 0)
   2131 			break;
   2132 
   2133 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2134 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2135 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2136 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2137 		    0, CTL_CREATE, CTL_EOL) != 0)
   2138 			break;
   2139 
   2140 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2141 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2142 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2143 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2144 		    0, CTL_CREATE, CTL_EOL) != 0)
   2145 			break;
   2146 #endif
   2147 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2148 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2149 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2150 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2151 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2152 		    NULL, adapter->queues[i].evnamebuf,
   2153 		    "Queue No Descriptor Available");
   2154 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2155 		    NULL, adapter->queues[i].evnamebuf,
   2156 		    "Queue Packets Transmitted");
   2157 #ifndef IXGBE_LEGACY_TX
   2158 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2159 		    NULL, adapter->queues[i].evnamebuf,
   2160 		    "Packets dropped in pcq");
   2161 #endif
   2162 
   2163 #ifdef LRO
   2164 		struct lro_ctrl *lro = &rxr->lro;
   2165 #endif /* LRO */
   2166 
   2167 #if 0 /* not yet */
   2168 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2169 		    CTLFLAG_READONLY,
   2170 		    CTLTYPE_INT,
   2171 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2172 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2173 		    CTL_CREATE, CTL_EOL) != 0)
   2174 			break;
   2175 
   2176 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2177 		    CTLFLAG_READONLY,
   2178 		    CTLTYPE_INT,
   2179 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2180 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2181 		    CTL_CREATE, CTL_EOL) != 0)
   2182 			break;
   2183 #endif
   2184 
   2185 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2186 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2187 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2188 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2189 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2190 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2191 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2192 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2193 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2194 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2195 #ifdef LRO
   2196 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2197 				CTLFLAG_RD, &lro->lro_queued, 0,
   2198 				"LRO Queued");
   2199 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2200 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2201 				"LRO Flushed");
   2202 #endif /* LRO */
   2203 	}
   2204 
   2205 	/* MAC stats get their own sub node */
   2206 
   2207 	snprintf(stats->namebuf,
   2208 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2209 
   2210 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2211 	    stats->namebuf, "rx csum offload - IP");
   2212 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2213 	    stats->namebuf, "rx csum offload - L4");
   2214 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2215 	    stats->namebuf, "rx csum offload - IP bad");
   2216 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2217 	    stats->namebuf, "rx csum offload - L4 bad");
   2218 
   2219 	/* Packet Reception Stats */
   2220 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2221 	    xname, "Good Packets Received");
   2222 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2223 	    xname, "Good Octets Received");
   2224 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2225 	    xname, "Multicast Packets Received");
   2226 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2227 	    xname, "Good Packets Transmitted");
   2228 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2229 	    xname, "Good Octets Transmitted");
   2230 } /* ixv_add_stats_sysctls */
   2231 
   2232 /************************************************************************
   2233  * ixv_set_sysctl_value
   2234  ************************************************************************/
   2235 static void
   2236 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2237 	const char *description, int *limit, int value)
   2238 {
   2239 	device_t dev =  adapter->dev;
   2240 	struct sysctllog **log;
   2241 	const struct sysctlnode *rnode, *cnode;
   2242 
   2243 	log = &adapter->sysctllog;
   2244 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2245 		aprint_error_dev(dev, "could not create sysctl root\n");
   2246 		return;
   2247 	}
   2248 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2249 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2250 	    name, SYSCTL_DESCR(description),
   2251 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2252 		aprint_error_dev(dev, "could not create sysctl\n");
   2253 	*limit = value;
   2254 } /* ixv_set_sysctl_value */
   2255 
   2256 /************************************************************************
   2257  * ixv_print_debug_info
   2258  *
   2259  *   Called when the per-device debug sysctl is set to 1.
   2260  *   Provides a way to take a look at important statistics
   2261  *   maintained by the driver and hardware.
   2262  ************************************************************************/
   2263 static void
   2264 ixv_print_debug_info(struct adapter *adapter)
   2265 {
   2266 	device_t        dev = adapter->dev;
   2267 	struct ixgbe_hw *hw = &adapter->hw;
   2268 	struct ix_queue *que = adapter->queues;
   2269 	struct rx_ring  *rxr;
   2270 	struct tx_ring  *txr;
   2271 #ifdef LRO
   2272 	struct lro_ctrl *lro;
   2273 #endif /* LRO */
   2274 
   2275 	device_printf(dev,"Error Byte Count = %u \n",
   2276 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2277 
   2278 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2279 		txr = que->txr;
   2280 		rxr = que->rxr;
   2281 #ifdef LRO
   2282 		lro = &rxr->lro;
   2283 #endif /* LRO */
   2284 		device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
   2285 		    que->msix, (long)que->irqs.ev_count);
   2286 		device_printf(dev,"RX(%d) Packets Received: %lld\n",
   2287 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2288 		device_printf(dev,"RX(%d) Bytes Received: %lu\n",
   2289 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2290 #ifdef LRO
   2291 		device_printf(dev,"RX(%d) LRO Queued= %lld\n",
   2292 		    rxr->me, (long long)lro->lro_queued);
   2293 		device_printf(dev,"RX(%d) LRO Flushed= %lld\n",
   2294 		    rxr->me, (long long)lro->lro_flushed);
   2295 #endif /* LRO */
   2296 		device_printf(dev,"TX(%d) Packets Sent: %lu\n",
   2297 		    txr->me, (long)txr->total_packets.ev_count);
   2298 		device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
   2299 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2300 	}
   2301 
   2302 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2303 	    (long)adapter->link_irq.ev_count);
   2304 } /* ixv_print_debug_info */
   2305 
   2306 /************************************************************************
   2307  * ixv_sysctl_debug
   2308  ************************************************************************/
   2309 static int
   2310 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2311 {
   2312 	struct sysctlnode node;
   2313 	struct adapter *adapter;
   2314 	int            error, result = 0;
   2315 
   2316 	node = *rnode;
   2317 	/* Grab the softc before sysctl_data is redirected to &result */
   2318 	adapter = (struct adapter *)node.sysctl_data;
   2319 	node.sysctl_data = &result;
   2320 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2321 
   2322 	if (error || newp == NULL)
   2323 		return error;
   2324 
   2325 	if (result == 1)
   2326 		ixv_print_debug_info(adapter);
   2327 
   2328 	return 0;
   2329 } /* ixv_sysctl_debug */
   2330 
   2331 /************************************************************************
   2332  * ixv_init_device_features
   2333  ************************************************************************/
   2334 static void
   2335 ixv_init_device_features(struct adapter *adapter)
   2336 {
   2337 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2338 	                  | IXGBE_FEATURE_VF
   2339 	                  | IXGBE_FEATURE_RSS
   2340 	                  | IXGBE_FEATURE_LEGACY_TX;
   2341 
   2342 	/* A tad short on feature flags for VFs, atm. */
   2343 	switch (adapter->hw.mac.type) {
   2344 	case ixgbe_mac_82599_vf:
   2345 		break;
   2346 	case ixgbe_mac_X540_vf:
   2347 		break;
   2348 	case ixgbe_mac_X550_vf:
   2349 	case ixgbe_mac_X550EM_x_vf:
   2350 	case ixgbe_mac_X550EM_a_vf:
   2351 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2352 		break;
   2353 	default:
   2354 		break;
   2355 	}
   2356 
   2357 	/* Enabled by default... */
   2358 	/* Is a virtual function (VF) */
   2359 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2360 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2361 	/* Netmap */
   2362 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2363 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2364 	/* Receive-Side Scaling (RSS) */
   2365 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2366 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2367 	/* Needs advanced context descriptor regardless of offloads req'd */
   2368 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2369 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2370 
   2371 	/* Enabled via sysctl... */
   2372 	/* Legacy (single queue) transmit */
   2373 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2374 	    ixv_enable_legacy_tx)
   2375 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2376 } /* ixv_init_device_features */
   2377 
   2378 /************************************************************************
   2379  * ixv_shutdown - Shutdown entry point
   2380  ************************************************************************/
   2381 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2382 static int
   2383 ixv_shutdown(device_t dev)
   2384 {
   2385 	struct adapter *adapter = device_private(dev);
   2386 	IXGBE_CORE_LOCK(adapter);
   2387 	ixv_stop(adapter);
   2388 	IXGBE_CORE_UNLOCK(adapter);
   2389 
   2390 	return (0);
   2391 } /* ixv_shutdown */
   2392 #endif
   2393 
   2394 static int
   2395 ixv_ifflags_cb(struct ethercom *ec)
   2396 {
   2397 	struct ifnet *ifp = &ec->ec_if;
   2398 	struct adapter *adapter = ifp->if_softc;
   2399 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2400 
   2401 	IXGBE_CORE_LOCK(adapter);
   2402 
   2403 	if (change != 0)
   2404 		adapter->if_flags = ifp->if_flags;
   2405 
   2406 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2407 		rc = ENETRESET;
   2408 
   2409 	IXGBE_CORE_UNLOCK(adapter);
   2410 
   2411 	return rc;
   2412 }
   2413 
   2414 
   2415 /************************************************************************
   2416  * ixv_ioctl - Ioctl entry point
   2417  *
   2418  *   Called when the user wants to configure the interface.
   2419  *
   2420  *   return 0 on success, positive on failure
   2421  ************************************************************************/
   2422 static int
   2423 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2424 {
   2425 	struct adapter	*adapter = ifp->if_softc;
   2426 	struct ifcapreq *ifcr = data;
   2427 	struct ifreq	*ifr = data;
   2428 	int             error = 0;
   2429 	int l4csum_en;
   2430 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2431 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2432 
   2433 	switch (command) {
   2434 	case SIOCSIFFLAGS:
   2435 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2436 		break;
   2437 	case SIOCADDMULTI:
   2438 	case SIOCDELMULTI:
   2439 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2440 		break;
   2441 	case SIOCSIFMEDIA:
   2442 	case SIOCGIFMEDIA:
   2443 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2444 		break;
   2445 	case SIOCSIFCAP:
   2446 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2447 		break;
   2448 	case SIOCSIFMTU:
   2449 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2450 		break;
   2451 	default:
   2452 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2453 		break;
   2454 	}
   2455 
   2456 	switch (command) {
   2457 	case SIOCSIFMEDIA:
   2458 	case SIOCGIFMEDIA:
   2459 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2460 	case SIOCSIFCAP:
   2461 		/* Layer-4 Rx checksum offload has to be turned on and
   2462 		 * off as a unit.
   2463 		 */
   2464 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2465 		if (l4csum_en != l4csum && l4csum_en != 0)
   2466 			return EINVAL;
   2467 		/*FALLTHROUGH*/
   2468 	case SIOCADDMULTI:
   2469 	case SIOCDELMULTI:
   2470 	case SIOCSIFFLAGS:
   2471 	case SIOCSIFMTU:
   2472 	default:
   2473 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2474 			return error;
   2475 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2476 			;
   2477 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2478 			IXGBE_CORE_LOCK(adapter);
   2479 			ixv_init_locked(adapter);
   2480 			IXGBE_CORE_UNLOCK(adapter);
   2481 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2482 			/*
   2483 			 * Multicast list has changed; set the hardware filter
   2484 			 * accordingly.
   2485 			 */
   2486 			IXGBE_CORE_LOCK(adapter);
   2487 			ixv_disable_intr(adapter);
   2488 			ixv_set_multi(adapter);
   2489 			ixv_enable_intr(adapter);
   2490 			IXGBE_CORE_UNLOCK(adapter);
   2491 		}
   2492 		return 0;
   2493 	}
   2494 } /* ixv_ioctl */
   2495 
   2496 /************************************************************************
   2497  * ixv_init
   2498  ************************************************************************/
   2499 static int
   2500 ixv_init(struct ifnet *ifp)
   2501 {
   2502 	struct adapter *adapter = ifp->if_softc;
   2503 
   2504 	IXGBE_CORE_LOCK(adapter);
   2505 	ixv_init_locked(adapter);
   2506 	IXGBE_CORE_UNLOCK(adapter);
   2507 
   2508 	return 0;
   2509 } /* ixv_init */
   2510 
   2511 
   2512 /************************************************************************
   2513  * ixv_handle_que
   2514  ************************************************************************/
   2515 static void
   2516 ixv_handle_que(void *context)
   2517 {
   2518 	struct ix_queue *que = context;
   2519 	struct adapter  *adapter = que->adapter;
   2520 	struct tx_ring	*txr = que->txr;
   2521 	struct ifnet    *ifp = adapter->ifp;
   2522 	bool		more;
   2523 
   2524 	adapter->handleq.ev_count++;
   2525 
   2526 	if (ifp->if_flags & IFF_RUNNING) {
   2527 		more = ixgbe_rxeof(que);
   2528 		IXGBE_TX_LOCK(txr);
   2529 		ixgbe_txeof(txr);
   2530 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2531 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2532 				ixgbe_mq_start_locked(ifp, txr);
   2533 		/* Only for queue 0 */
   2534 		/* NetBSD still needs this for CBQ */
   2535 		if ((&adapter->queues[0] == que)
   2536 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2537 			ixgbe_legacy_start_locked(ifp, txr);
   2538 		IXGBE_TX_UNLOCK(txr);
   2539 		if (more) {
   2540 			adapter->req.ev_count++;
   2541 			softint_schedule(que->que_si);
   2542 			return;
   2543 		}
   2544 	}
   2545 
   2546 	/* Re-enable this interrupt */
   2547 	ixv_enable_queue(adapter, que->msix);
   2548 
   2549 	return;
   2550 } /* ixv_handle_que */
   2551 
   2552 /************************************************************************
   2553  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2554  ************************************************************************/
   2555 static int
   2556 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2557 {
   2558 	device_t	dev = adapter->dev;
   2559 	struct ix_queue *que = adapter->queues;
   2560 	struct		tx_ring *txr = adapter->tx_rings;
   2561 	int 		error, msix_ctrl, rid, vector = 0;
   2562 	pci_chipset_tag_t pc;
   2563 	pcitag_t	tag;
   2564 	char		intrbuf[PCI_INTRSTR_LEN];
   2565 	char		intr_xname[32];
   2566 	const char	*intrstr = NULL;
   2567 	kcpuset_t	*affinity;
   2568 	int		cpu_id = 0;
   2569 
   2570 	pc = adapter->osdep.pc;
   2571 	tag = adapter->osdep.tag;
   2572 
   2573 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2574 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2575 	    adapter->osdep.nintrs) != 0) {
   2576 		aprint_error_dev(dev,
   2577 		    "failed to allocate MSI-X interrupt\n");
   2578 		return (ENXIO);
   2579 	}
   2580 
   2581 	kcpuset_create(&affinity, false);
   2582 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2583 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2584 		    device_xname(dev), i);
   2585 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2586 		    sizeof(intrbuf));
   2587 #ifdef IXGBE_MPSAFE
   2588 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2589 		    true);
   2590 #endif
   2591 		/* Set the handler function */
   2592 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2593 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2594 		    intr_xname);
   2595 		if (que->res == NULL) {
   2596 			pci_intr_release(pc, adapter->osdep.intrs,
   2597 			    adapter->osdep.nintrs);
   2598 			aprint_error_dev(dev,
   2599 			    "Failed to register QUE handler\n");
   2600 			kcpuset_destroy(affinity);
   2601 			return (ENXIO);
   2602 		}
   2603 		que->msix = vector;
   2604 		adapter->active_queues |= (u64)1 << que->msix;
   2605 
   2606 		cpu_id = i;
   2607 		/* Round-robin affinity */
   2608 		kcpuset_zero(affinity);
   2609 		kcpuset_set(affinity, cpu_id % ncpu);
   2610 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2611 		    NULL);
   2612 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2613 		    intrstr);
   2614 		if (error == 0)
   2615 			aprint_normal(", bound queue %d to cpu %d\n",
   2616 			    i, cpu_id % ncpu);
   2617 		else
   2618 			aprint_normal("\n");
   2619 
   2620 #ifndef IXGBE_LEGACY_TX
   2621 		txr->txr_si
   2622 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2623 			ixgbe_deferred_mq_start, txr);
   2624 #endif
   2625 		que->que_si
   2626 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2627 			ixv_handle_que, que);
   2628 		if (que->que_si == NULL) {
   2629 			aprint_error_dev(dev,
   2630 			    "could not establish software interrupt\n");
   2631 		}
   2632 	}
   2633 
   2634 	/* and Mailbox */
   2635 	cpu_id++;
   2636 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2637 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2638 	    sizeof(intrbuf));
   2639 #ifdef IXGBE_MPSAFE
   2640 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2641 	    true);
   2642 #endif
   2643 	/* Set the mbx handler function */
   2644 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2645 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2646 	    intr_xname);
   2647 	if (adapter->osdep.ihs[vector] == NULL) {
   2648 		adapter->res = NULL;
   2649 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2650 		kcpuset_destroy(affinity);
   2651 		return (ENXIO);
   2652 	}
   2653 	/* Round-robin affinity */
   2654 	kcpuset_zero(affinity);
   2655 	kcpuset_set(affinity, cpu_id % ncpu);
   2656 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   2657 
   2658 	aprint_normal_dev(dev,
   2659 	    "for link, interrupting at %s", intrstr);
   2660 	if (error == 0)
   2661 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2662 	else
   2663 		aprint_normal("\n");
   2664 
   2665 	adapter->vector = vector;
   2666 	/* Tasklets for Mailbox */
   2667 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2668 	    ixv_handle_link, adapter);
   2669 	/*
   2670 	 * Due to a broken design QEMU will fail to properly
   2671 	 * enable the guest for MSI-X unless the vectors in
   2672 	 * the table are all set up, so we must rewrite the
   2673 	 * ENABLE in the MSI-X control register again at this
   2674 	 * point to cause it to successfully initialize us.
   2675 	 */
   2676 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2677 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2678 		rid += PCI_MSIX_CTL;
   2679 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2680 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2681 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2682 	}
   2683 
   2684 	kcpuset_destroy(affinity);
   2685 	return (0);
   2686 } /* ixv_allocate_msix */
   2687 
   2688 /************************************************************************
   2689  * ixv_configure_interrupts - Setup MSI-X resources
   2690  *
   2691  *   Note: The VF device MUST use MSI-X, there is no fallback.
   2692  ************************************************************************/
   2693 static int
   2694 ixv_configure_interrupts(struct adapter *adapter)
   2695 {
   2696 	device_t dev = adapter->dev;
   2697 	int want, queues, msgs;
   2698 
   2699 	/* Must have at least 2 MSI-X vectors */
   2700 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2701 	if (msgs < 2) {
   2702 		aprint_error_dev(dev,"MSIX config error\n");
   2703 		return (ENXIO);
   2704 	}
   2705 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2706 
   2707 	/* Figure out a reasonable auto config value */
   2708 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2709 
   2710 	if (ixv_num_queues != 0)
   2711 		queues = ixv_num_queues;
   2712 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   2713 		queues = IXGBE_VF_MAX_TX_QUEUES;
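	/*
	 * Worked example (illustrative): on a 4-CPU guest with
	 * ixv_num_queues left at 0 and 3 MSI-X messages available,
	 * queues = MIN(ncpu, msgs - 1) = 2; the code below then wants
	 * queues + 1 = 3 vectors, which matches msgs, so the device ends
	 * up with two queue vectors plus the mailbox vector (assuming
	 * IXG_MAX_NINTR and IXGBE_VF_MAX_TX_QUEUES are >= those values).
	 */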
   2714 
   2715 	/*
   2716 	 * Want vectors for the queues,
   2717 	 * plus an additional for mailbox.
   2718 	 */
   2719 	want = queues + 1;
   2720 	if (msgs >= want)
   2721 		msgs = want;
   2722 	else {
   2723 		aprint_error_dev(dev,
   2724 		    "MSI-X Configuration Problem, "
   2725 		    "%d vectors available but %d wanted!\n",
   2726 		    msgs, want);
   2727 		return -1;
   2728 	}
   2729 
   2730 	adapter->msix_mem = (void *)1; /* XXX */
   2731 	aprint_normal_dev(dev,
   2732 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2733 	adapter->num_queues = queues;
   2734 
   2735 	return (0);
   2736 } /* ixv_configure_interrupts */
   2737 
   2738 
   2739 /************************************************************************
   2740  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2741  *
   2742  *   Done outside of interrupt context since the driver might sleep
   2743  ************************************************************************/
   2744 static void
   2745 ixv_handle_link(void *context)
   2746 {
   2747 	struct adapter *adapter = context;
   2748 
   2749 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2750 	    &adapter->link_up, FALSE);
   2751 	ixv_update_link_status(adapter);
   2752 } /* ixv_handle_link */
   2753 
   2754 /************************************************************************
   2755  * ixv_check_link - Used in the local timer to poll for link changes
   2756  ************************************************************************/
   2757 static void
   2758 ixv_check_link(struct adapter *adapter)
   2759 {
   2760 	adapter->hw.mac.get_link_status = TRUE;
   2761 
   2762 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2763 	    &adapter->link_up, FALSE);
   2764 	ixv_update_link_status(adapter);
   2765 } /* ixv_check_link */
   2766