      1 /*$NetBSD: ixv.c,v 1.77 2017/12/21 06:49:26 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
     55  *   Used by probe to select which devices to attach to
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static int      ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
    148  * FreeBSD Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
    171 #define	TUNABLE_INT(__x, __y)
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    173 
    174 /*
    175  * AIM: Adaptive Interrupt Moderation
    176  * which means that the interrupt rate
    177  * is varied over time based on the
    178  * traffic for that interrupt vector
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
    191 /*
    192  * Number of TX descriptors per ring; set higher
    193  * than the RX default, as that seems to be the
    194  * better performing choice.
    195  */
    196 static int ixv_txd = PERFORM_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = PERFORM_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
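        /*
         * Note that TUNABLE_INT is defined to nothing above, so on this port
         * names such as "hw.ixv.num_queues" are documentation only and the
         * values above serve as compile-time defaults (on FreeBSD they would
         * be boot-time loader tunables, e.g. hw.ixv.num_queues=2 in
         * loader.conf).  Runtime adjustment, where offered, goes through the
         * sysctl nodes created by ixv_add_device_sysctls() and
         * ixv_set_sysctl_value().
         */
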
    207 #ifdef NET_MPSAFE
    208 #define IXGBE_MPSAFE		1
    209 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    210 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    211 #else
    212 #define IXGBE_CALLOUT_FLAGS	0
    213 #define IXGBE_SOFTINFT_FLAGS	0
    214 #endif
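
        /*
         * Under NET_MPSAFE the callout and softints are established MP-safe,
         * i.e. they run without the kernel lock and rely on the driver's own
         * core/TX/RX mutexes for serialization.  The IXGBE_SOFTINFT_FLAGS
         * spelling ("SOFTINFT" for "SOFTINT") is a historical typo kept as is
         * for consistency with the softint_establish() callers later in this
         * file.
         */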
    215 
    216 #if 0
    217 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    218 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    219 #endif
    220 
    221 /************************************************************************
    222  * ixv_probe - Device identification routine
    223  *
    224  *   Determines if the driver should be loaded on
    225  *   adapter based on its PCI vendor/device ID.
    226  *
    227  *   return 1 on match, 0 otherwise (NetBSD autoconf match value)
    228  ************************************************************************/
    229 static int
    230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    231 {
    232 #ifdef __HAVE_PCI_MSI_MSIX
    233 	const struct pci_attach_args *pa = aux;
    234 
    235 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    236 #else
    237 	return 0;
    238 #endif
    239 } /* ixv_probe */
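
        /*
         * The VF is MSI-X only (there is no INTx or MSI fallback), so on
         * platforms without __HAVE_PCI_MSI_MSIX the match is refused outright
         * rather than attaching a device whose interrupts could never be
         * established.
         */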
    240 
    241 static ixgbe_vendor_info_t *
    242 ixv_lookup(const struct pci_attach_args *pa)
    243 {
    244 	ixgbe_vendor_info_t *ent;
    245 	pcireg_t subid;
    246 
    247 	INIT_DEBUGOUT("ixv_lookup: begin");
    248 
    249 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    250 		return NULL;
    251 
    252 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    253 
    254 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    255 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    256 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    257 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    258 		     (ent->subvendor_id == 0)) &&
    259 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    260 		     (ent->subdevice_id == 0))) {
    261 			return ent;
    262 		}
    263 	}
    264 
    265 	return NULL;
    266 }
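
        /*
         * All subvendor/subdevice fields in ixv_vendor_info_array are 0,
         * which acts as a wildcard in the loop above: any subsystem ID
         * matches as long as the vendor and device IDs do.
         */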
    267 
    268 /************************************************************************
    269  * ixv_attach - Device initialization routine
    270  *
    271  *   Called when the driver is being loaded.
    272  *   Identifies the type of hardware, allocates all resources
    273  *   and initializes the hardware.
    274  *
    275  *   attach returns void; failures are logged and resources released
    276  ************************************************************************/
    277 static void
    278 ixv_attach(device_t parent, device_t dev, void *aux)
    279 {
    280 	struct adapter *adapter;
    281 	struct ixgbe_hw *hw;
    282 	int             error = 0;
    283 	pcireg_t	id, subid;
    284 	ixgbe_vendor_info_t *ent;
    285 	const struct pci_attach_args *pa = aux;
    286 	const char *apivstr;
    287 	const char *str;
    288 	char buf[256];
    289 
    290 	INIT_DEBUGOUT("ixv_attach: begin");
    291 
    292 	/*
    293 	 * Make sure BUSMASTER is set; on a VM under KVM it
    294 	 * may not be, and that will break things.
    295 	 */
    296 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    297 
    298 	/* Allocate, clear, and link in our adapter structure */
    299 	adapter = device_private(dev);
    300 	adapter->dev = dev;
    301 	adapter->hw.back = adapter;
    302 	hw = &adapter->hw;
    303 
    304 	adapter->init_locked = ixv_init_locked;
    305 	adapter->stop_locked = ixv_stop;
    306 
    307 	adapter->osdep.pc = pa->pa_pc;
    308 	adapter->osdep.tag = pa->pa_tag;
    309 	if (pci_dma64_available(pa))
    310 		adapter->osdep.dmat = pa->pa_dmat64;
    311 	else
    312 		adapter->osdep.dmat = pa->pa_dmat;
    313 	adapter->osdep.attached = false;
    314 
    315 	ent = ixv_lookup(pa);
    316 
    317 	KASSERT(ent != NULL);
    318 
    319 	aprint_normal(": %s, Version - %s\n",
    320 	    ixv_strings[ent->index], ixv_driver_version);
    321 
    322 	/* Core Lock Init*/
    323 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    324 
    325 	/* Do base PCI setup - map BAR0 */
    326 	if (ixv_allocate_pci_resources(adapter, pa)) {
    327 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    328 		error = ENXIO;
    329 		goto err_out;
    330 	}
    331 
    332 	/* SYSCTL APIs */
    333 	ixv_add_device_sysctls(adapter);
    334 
    335 	/* Set up the timer callout */
    336 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    337 
    338 	/* Save off the information about this board */
    339 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    340 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    341 	hw->vendor_id = PCI_VENDOR(id);
    342 	hw->device_id = PCI_PRODUCT(id);
    343 	hw->revision_id =
    344 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    345 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    346 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    347 
    348 	/* A subset of set_mac_type */
    349 	switch (hw->device_id) {
    350 	case IXGBE_DEV_ID_82599_VF:
    351 		hw->mac.type = ixgbe_mac_82599_vf;
    352 		str = "82599 VF";
    353 		break;
    354 	case IXGBE_DEV_ID_X540_VF:
    355 		hw->mac.type = ixgbe_mac_X540_vf;
    356 		str = "X540 VF";
    357 		break;
    358 	case IXGBE_DEV_ID_X550_VF:
    359 		hw->mac.type = ixgbe_mac_X550_vf;
    360 		str = "X550 VF";
    361 		break;
    362 	case IXGBE_DEV_ID_X550EM_X_VF:
    363 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    364 		str = "X550EM X VF";
    365 		break;
    366 	case IXGBE_DEV_ID_X550EM_A_VF:
    367 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    368 		str = "X550EM A VF";
    369 		break;
    370 	default:
    371 		/* Shouldn't get here since probe succeeded */
    372 		aprint_error_dev(dev, "Unknown device ID!\n");
    373 		error = ENXIO;
    374 		goto err_out;
    375 		break;
    376 	}
    377 	aprint_normal_dev(dev, "device %s\n", str);
    378 
    379 	ixv_init_device_features(adapter);
    380 
    381 	/* Initialize the shared code */
    382 	error = ixgbe_init_ops_vf(hw);
    383 	if (error) {
    384 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    385 		error = EIO;
    386 		goto err_out;
    387 	}
    388 
    389 	/* Setup the mailbox */
    390 	ixgbe_init_mbx_params_vf(hw);
    391 
    392 	/* Set the right number of segments */
    393 	adapter->num_segs = IXGBE_82599_SCATTER;
    394 
    395 	/* Reset mbox api to 1.0 */
    396 	error = hw->mac.ops.reset_hw(hw);
    397 	if (error == IXGBE_ERR_RESET_FAILED)
    398 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    399 	else if (error)
    400 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    401 		    error);
    402 	if (error) {
    403 		error = EIO;
    404 		goto err_out;
    405 	}
    406 
    407 	error = hw->mac.ops.init_hw(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "...init_hw() failed!\n");
    410 		error = EIO;
    411 		goto err_out;
    412 	}
    413 
    414 	/* Negotiate mailbox API version */
    415 	error = ixv_negotiate_api(adapter);
    416 	if (error)
    417 		aprint_normal_dev(dev,
    418 		    "MBX API negotiation failed during attach!\n");
    419 	switch (hw->api_version) {
    420 	case ixgbe_mbox_api_10:
    421 		apivstr = "1.0";
    422 		break;
    423 	case ixgbe_mbox_api_20:
    424 		apivstr = "2.0";
    425 		break;
    426 	case ixgbe_mbox_api_11:
    427 		apivstr = "1.1";
    428 		break;
    429 	case ixgbe_mbox_api_12:
    430 		apivstr = "1.2";
    431 		break;
    432 	case ixgbe_mbox_api_13:
    433 		apivstr = "1.3";
    434 		break;
    435 	default:
    436 		apivstr = "unknown";
    437 		break;
    438 	}
    439 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    440 
    441 	/* If no mac address was assigned, make a random one */
    442 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    443 		u8 addr[ETHER_ADDR_LEN];
    444 		uint64_t rndval = cprng_strong64();
    445 
    446 		memcpy(addr, &rndval, sizeof(addr));
    447 		addr[0] &= 0xFE;
    448 		addr[0] |= 0x02;
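        		/*
        		 * The two stores above clear the I/G (multicast) bit and
        		 * set the U/L (locally administered) bit of the first
        		 * octet, so e.g. a random 0x35 becomes 0x36: a valid,
        		 * locally administered unicast address.
        		 */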
    449 		bcopy(addr, hw->mac.addr, sizeof(addr));
    450 	}
    451 
    452 	/* Register for VLAN events */
    453 #if 0 /* XXX delete after write? */
    454 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    455 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    457 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    458 #endif
    459 
    460 	/* Sysctls for limiting the amount of work done in the taskqueues */
    461 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    462 	    "max number of rx packets to process",
    463 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    464 
    465 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    466 	    "max number of tx packets to process",
    467 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    468 
    469 	/* Do descriptor calc and sanity checks */
    470 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    471 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    472 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    473 		adapter->num_tx_desc = DEFAULT_TXD;
    474 	} else
    475 		adapter->num_tx_desc = ixv_txd;
    476 
    477 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    478 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    479 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    480 		adapter->num_rx_desc = DEFAULT_RXD;
    481 	} else
    482 		adapter->num_rx_desc = ixv_rxd;
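        	/*
        	 * Both checks require the ring byte size to be a multiple of
        	 * DBA_ALIGN: each advanced descriptor is 16 bytes, so e.g. 2048
        	 * TX descriptors occupy 32768 bytes, a multiple of the
        	 * (typically 128-byte) DBA_ALIGN and within [MIN_TXD, MAX_TXD].
        	 */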
    483 
    484 	/* Setup MSI-X */
    485 	error = ixv_configure_interrupts(adapter);
    486 	if (error)
    487 		goto err_out;
    488 
    489 	/* Allocate our TX/RX Queues */
    490 	if (ixgbe_allocate_queues(adapter)) {
    491 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    492 		error = ENOMEM;
    493 		goto err_out;
    494 	}
    495 
    496 	/* hw.ix defaults init */
    497 	adapter->enable_aim = ixv_enable_aim;
    498 
    499 	error = ixv_allocate_msix(adapter, pa);
    500 	if (error) {
    501 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    502 		goto err_late;
    503 	}
    504 
    505 	/* Setup OS specific network interface */
    506 	error = ixv_setup_interface(dev, adapter);
    507 	if (error != 0) {
    508 		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
    509 		goto err_late;
    510 	}
    511 
    512 	/* Do the stats setup */
    513 	ixv_save_stats(adapter);
    514 	ixv_init_stats(adapter);
    515 	ixv_add_stats_sysctls(adapter);
    516 
    517 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    518 		ixgbe_netmap_attach(adapter);
    519 
    520 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    521 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    522 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    523 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    524 
    525 	INIT_DEBUGOUT("ixv_attach: end");
    526 	adapter->osdep.attached = true;
    527 
    528 	return;
    529 
    530 err_late:
    531 	ixgbe_free_transmit_structures(adapter);
    532 	ixgbe_free_receive_structures(adapter);
    533 	free(adapter->queues, M_DEVBUF);
    534 err_out:
    535 	ixv_free_pci_resources(adapter);
    536 	IXGBE_CORE_LOCK_DESTROY(adapter);
    537 
    538 	return;
    539 } /* ixv_attach */
    540 
    541 /************************************************************************
    542  * ixv_detach - Device removal routine
    543  *
    544  *   Called when the driver is being removed.
    545  *   Stops the adapter and deallocates all the resources
    546  *   that were allocated for driver operation.
    547  *
    548  *   return 0 on success, positive on failure
    549  ************************************************************************/
    550 static int
    551 ixv_detach(device_t dev, int flags)
    552 {
    553 	struct adapter  *adapter = device_private(dev);
    554 	struct ixgbe_hw *hw = &adapter->hw;
    555 	struct ix_queue *que = adapter->queues;
    556 	struct tx_ring *txr = adapter->tx_rings;
    557 	struct rx_ring *rxr = adapter->rx_rings;
    558 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    559 
    560 	INIT_DEBUGOUT("ixv_detach: begin");
    561 	if (adapter->osdep.attached == false)
    562 		return 0;
    563 
    564 	/* Stop the interface. Callouts are stopped in it. */
    565 	ixv_ifstop(adapter->ifp, 1);
    566 
    567 #if NVLAN > 0
    568 	/* Make sure VLANs are not using driver */
    569 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    570 		;	/* nothing to do: no VLANs */
    571 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    572 		vlan_ifdetach(adapter->ifp);
    573 	else {
    574 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    575 		return EBUSY;
    576 	}
    577 #endif
    578 
    579 	IXGBE_CORE_LOCK(adapter);
    580 	ixv_stop(adapter);
    581 	IXGBE_CORE_UNLOCK(adapter);
    582 
    583 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    584 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    585 			softint_disestablish(txr->txr_si);
    586 		softint_disestablish(que->que_si);
    587 	}
    588 
    589 	/* Drain the Mailbox(link) queue */
    590 	softint_disestablish(adapter->link_si);
    591 
    592 	/* Unregister VLAN events */
    593 #if 0 /* XXX msaitoh delete after write? */
    594 	if (adapter->vlan_attach != NULL)
    595 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    596 	if (adapter->vlan_detach != NULL)
    597 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    598 #endif
    599 
    600 	ether_ifdetach(adapter->ifp);
    601 	callout_halt(&adapter->timer, NULL);
    602 
    603 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    604 		netmap_detach(adapter->ifp);
    605 
    606 	ixv_free_pci_resources(adapter);
    607 #if 0 /* XXX the NetBSD port is probably missing something here */
    608 	bus_generic_detach(dev);
    609 #endif
    610 	if_detach(adapter->ifp);
    611 	if_percpuq_destroy(adapter->ipq);
    612 
    613 	sysctl_teardown(&adapter->sysctllog);
    614 	evcnt_detach(&adapter->handleq);
    615 	evcnt_detach(&adapter->req);
    616 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    617 	evcnt_detach(&adapter->mbuf_defrag_failed);
    618 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    619 	evcnt_detach(&adapter->einval_tx_dma_setup);
    620 	evcnt_detach(&adapter->other_tx_dma_setup);
    621 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    622 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    623 	evcnt_detach(&adapter->watchdog_events);
    624 	evcnt_detach(&adapter->tso_err);
    625 	evcnt_detach(&adapter->link_irq);
    626 
    627 	txr = adapter->tx_rings;
    628 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    629 		evcnt_detach(&adapter->queues[i].irqs);
    630 		evcnt_detach(&txr->no_desc_avail);
    631 		evcnt_detach(&txr->total_packets);
    632 		evcnt_detach(&txr->tso_tx);
    633 #ifndef IXGBE_LEGACY_TX
    634 		evcnt_detach(&txr->pcq_drops);
    635 #endif
    636 
    637 		evcnt_detach(&rxr->rx_packets);
    638 		evcnt_detach(&rxr->rx_bytes);
    639 		evcnt_detach(&rxr->rx_copies);
    640 		evcnt_detach(&rxr->no_jmbuf);
    641 		evcnt_detach(&rxr->rx_discarded);
    642 	}
    643 	evcnt_detach(&stats->ipcs);
    644 	evcnt_detach(&stats->l4cs);
    645 	evcnt_detach(&stats->ipcs_bad);
    646 	evcnt_detach(&stats->l4cs_bad);
    647 
    648 	/* Packet Reception Stats */
    649 	evcnt_detach(&stats->vfgorc);
    650 	evcnt_detach(&stats->vfgprc);
    651 	evcnt_detach(&stats->vfmprc);
    652 
    653 	/* Packet Transmission Stats */
    654 	evcnt_detach(&stats->vfgotc);
    655 	evcnt_detach(&stats->vfgptc);
    656 
    657 	/* Mailbox Stats */
    658 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    659 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    660 	evcnt_detach(&hw->mbx.stats.acks);
    661 	evcnt_detach(&hw->mbx.stats.reqs);
    662 	evcnt_detach(&hw->mbx.stats.rsts);
    663 
    664 	ixgbe_free_transmit_structures(adapter);
    665 	ixgbe_free_receive_structures(adapter);
    666 	free(adapter->queues, M_DEVBUF);
    667 
    668 	IXGBE_CORE_LOCK_DESTROY(adapter);
    669 
    670 	return (0);
    671 } /* ixv_detach */
    672 
    673 /************************************************************************
    674  * ixv_init_locked - Init entry point
    675  *
    676  *   Used in two ways: by the stack, as the init entry point of
    677  *   the network interface (via ixv_init()), and by the driver
    678  *   itself as a hw/sw initialization routine to bring the
    679  *   adapter to a consistent state.
    680  *
    681  *   Returns nothing; on failure the adapter is stopped.
    682  ************************************************************************/
    683 static void
    684 ixv_init_locked(struct adapter *adapter)
    685 {
    686 	struct ifnet	*ifp = adapter->ifp;
    687 	device_t 	dev = adapter->dev;
    688 	struct ixgbe_hw *hw = &adapter->hw;
    689 	struct ix_queue	*que = adapter->queues;
    690 	int             error = 0;
    691 	uint32_t mask;
    692 	int i;
    693 
    694 	INIT_DEBUGOUT("ixv_init_locked: begin");
    695 	KASSERT(mutex_owned(&adapter->core_mtx));
    696 	hw->adapter_stopped = FALSE;
    697 	hw->mac.ops.stop_adapter(hw);
    698 	callout_stop(&adapter->timer);
    699 
    700 	/* reprogram the RAR[0] in case user changed it. */
    701 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    702 
    703 	/* Get the latest mac address, User can use a LAA */
    704 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    705 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    706 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    707 
    708 	/* Prepare transmit descriptors and buffers */
    709 	if (ixgbe_setup_transmit_structures(adapter)) {
    710 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    711 		ixv_stop(adapter);
    712 		return;
    713 	}
    714 
    715 	/* Reset VF and renegotiate mailbox API version */
    716 	hw->mac.ops.reset_hw(hw);
    717 	error = ixv_negotiate_api(adapter);
    718 	if (error)
    719 		device_printf(dev,
    720 		    "Mailbox API negotiation failed in init_locked!\n");
    721 
    722 	ixv_initialize_transmit_units(adapter);
    723 
    724 	/* Setup Multicast table */
    725 	ixv_set_multi(adapter);
    726 
    727 	/*
    728 	 * Determine the correct mbuf pool
    729 	 * for doing jumbo/headersplit
    730 	 */
    731 	if (ifp->if_mtu > ETHERMTU)
    732 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    733 	else
    734 		adapter->rx_mbuf_sz = MCLBYTES;
    735 
    736 	/* Prepare receive descriptors and buffers */
    737 	if (ixgbe_setup_receive_structures(adapter)) {
    738 		device_printf(dev, "Could not setup receive structures\n");
    739 		ixv_stop(adapter);
    740 		return;
    741 	}
    742 
    743 	/* Configure RX settings */
    744 	ixv_initialize_receive_units(adapter);
    745 
    746 #if 0 /* XXX isn't it required? -- msaitoh  */
    747 	/* Set the various hardware offload abilities */
    748 	ifp->if_hwassist = 0;
    749 	if (ifp->if_capenable & IFCAP_TSO4)
    750 		ifp->if_hwassist |= CSUM_TSO;
    751 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    752 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    753 #if __FreeBSD_version >= 800000
    754 		ifp->if_hwassist |= CSUM_SCTP;
    755 #endif
    756 	}
    757 #endif
    758 
    759 	/* Set up VLAN offload and filter */
    760 	ixv_setup_vlan_support(adapter);
    761 
    762 	/* Set up MSI-X routing */
    763 	ixv_configure_ivars(adapter);
    764 
    765 	/* Set up auto-mask */
    766 	mask = (1 << adapter->vector);
    767 	for (i = 0; i < adapter->num_queues; i++, que++)
    768 		mask |= (1 << que->msix);
    769 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
    770 
    771 	/* Set moderation on the Link interrupt */
    772 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    773 
    774 	/* Stats init */
    775 	ixv_init_stats(adapter);
    776 
    777 	/* Config/Enable Link */
    778 	hw->mac.get_link_status = TRUE;
    779 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    780 	    FALSE);
    781 
    782 	/* Start watchdog */
    783 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    784 
    785 	/* And now turn on interrupts */
    786 	ixv_enable_intr(adapter);
    787 
    788 	/* Now inform the stack we're ready */
    789 	ifp->if_flags |= IFF_RUNNING;
    790 	ifp->if_flags &= ~IFF_OACTIVE;
    791 
    792 	return;
    793 } /* ixv_init_locked */
    794 
    795 /*
    796  * MSI-X Interrupt Handlers and Tasklets
    797  */
    798 
    799 static inline void
    800 ixv_enable_queue(struct adapter *adapter, u32 vector)
    801 {
    802 	struct ixgbe_hw *hw = &adapter->hw;
    803 	u32             queue = 1 << vector;
    804 	u32             mask;
    805 
    806 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    807 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    808 } /* ixv_enable_queue */
    809 
    810 static inline void
    811 ixv_disable_queue(struct adapter *adapter, u32 vector)
    812 {
    813 	struct ixgbe_hw *hw = &adapter->hw;
    814 	u64             queue = (u64)(1 << vector);
    815 	u32             mask;
    816 
    817 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    818 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    819 } /* ixv_disable_queue */
    820 
    821 static inline void
    822 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    823 {
    824 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    825 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    826 } /* ixv_rearm_queues */
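
        /*
         * The three helpers above use the VF interrupt mask registers:
         * VTEIMS sets mask bits (enable), VTEIMC clears them (disable) and
         * VTEICS sets interrupt-cause bits, forcing the corresponding MSI-X
         * vectors to fire; the watchdog uses the latter to kick queues that
         * still have work pending.
         */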
    827 
    828 
    829 /************************************************************************
    830  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    831  ************************************************************************/
    832 static int
    833 ixv_msix_que(void *arg)
    834 {
    835 	struct ix_queue	*que = arg;
    836 	struct adapter  *adapter = que->adapter;
    837 	struct tx_ring	*txr = que->txr;
    838 	struct rx_ring	*rxr = que->rxr;
    839 	bool		more;
    840 	u32		newitr = 0;
    841 
    842 	ixv_disable_queue(adapter, que->msix);
    843 	++que->irqs.ev_count;
    844 
    845 #ifdef __NetBSD__
    846 	/* Don't run ixgbe_rxeof in interrupt context */
    847 	more = true;
    848 #else
    849 	more = ixgbe_rxeof(que);
    850 #endif
    851 
    852 	IXGBE_TX_LOCK(txr);
    853 	ixgbe_txeof(txr);
    854 	IXGBE_TX_UNLOCK(txr);
    855 
    856 	/* Do AIM now? */
    857 
    858 	if (adapter->enable_aim == false)
    859 		goto no_calc;
    860 	/*
    861 	 * Do Adaptive Interrupt Moderation:
    862 	 *  - Write out last calculated setting
    863 	 *  - Calculate based on average size over
    864 	 *    the last interval.
    865 	 */
    866 	if (que->eitr_setting)
    867 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    868 		    que->eitr_setting);
    869 
    870 	que->eitr_setting = 0;
    871 
    872 	/* Idle, do nothing */
    873 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    874 		goto no_calc;
    875 
    876 	if ((txr->bytes) && (txr->packets))
    877 		newitr = txr->bytes/txr->packets;
    878 	if ((rxr->bytes) && (rxr->packets))
    879 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    880 	newitr += 24; /* account for hardware frame, crc */
    881 
    882 	/* set an upper boundary */
    883 	newitr = min(newitr, 3000);
    884 
    885 	/* Be nice to the mid range */
    886 	if ((newitr > 300) && (newitr < 1200))
    887 		newitr = (newitr / 3);
    888 	else
    889 		newitr = (newitr / 2);
    890 
    891 	newitr |= newitr << 16;
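        	/*
        	 * Example: an interval averaging 900 bytes per packet gives
        	 * newitr = 900 + 24 = 924, which falls in the mid range and is
        	 * scaled down to 308; the result is mirrored into the upper 16
        	 * bits and latched in eitr_setting for the VTEITR write at the
        	 * top of the next interrupt.
        	 */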
    892 
    893 	/* save for next interrupt */
    894 	que->eitr_setting = newitr;
    895 
    896 	/* Reset state */
    897 	txr->bytes = 0;
    898 	txr->packets = 0;
    899 	rxr->bytes = 0;
    900 	rxr->packets = 0;
    901 
    902 no_calc:
    903 	if (more)
    904 		softint_schedule(que->que_si);
    905 	else /* Re-enable this interrupt */
    906 		ixv_enable_queue(adapter, que->msix);
    907 
    908 	return 1;
    909 } /* ixv_msix_que */
    910 
    911 /************************************************************************
    912  * ixv_msix_mbx
    913  ************************************************************************/
    914 static int
    915 ixv_msix_mbx(void *arg)
    916 {
    917 	struct adapter	*adapter = arg;
    918 	struct ixgbe_hw *hw = &adapter->hw;
    919 
    920 	++adapter->link_irq.ev_count;
    921 	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    922 
    923 	/* Link status change */
    924 	hw->mac.get_link_status = TRUE;
    925 	softint_schedule(adapter->link_si);
    926 
    927 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
    928 
    929 	return 1;
    930 } /* ixv_msix_mbx */
    931 
    932 /************************************************************************
    933  * ixv_media_status - Media Ioctl callback
    934  *
    935  *   Called whenever the user queries the status of
    936  *   the interface using ifconfig.
    937  ************************************************************************/
    938 static void
    939 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    940 {
    941 	struct adapter *adapter = ifp->if_softc;
    942 
    943 	INIT_DEBUGOUT("ixv_media_status: begin");
    944 	IXGBE_CORE_LOCK(adapter);
    945 	ixv_update_link_status(adapter);
    946 
    947 	ifmr->ifm_status = IFM_AVALID;
    948 	ifmr->ifm_active = IFM_ETHER;
    949 
    950 	if (!adapter->link_active) {
    951 		ifmr->ifm_active |= IFM_NONE;
    952 		IXGBE_CORE_UNLOCK(adapter);
    953 		return;
    954 	}
    955 
    956 	ifmr->ifm_status |= IFM_ACTIVE;
    957 
    958 	switch (adapter->link_speed) {
    959 	case IXGBE_LINK_SPEED_10GB_FULL:
    960 		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    961 		break;
    962 	case IXGBE_LINK_SPEED_5GB_FULL:
    963 		ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
    964 		break;
    965 	case IXGBE_LINK_SPEED_2_5GB_FULL:
    966 		ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
    967 		break;
    968 	case IXGBE_LINK_SPEED_1GB_FULL:
    969 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    970 		break;
    971 	case IXGBE_LINK_SPEED_100_FULL:
    972 		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    973 		break;
    974 	case IXGBE_LINK_SPEED_10_FULL:
    975 		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    976 		break;
    977 	}
    978 
    979 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
    980 
    981 	IXGBE_CORE_UNLOCK(adapter);
    982 
    983 	return;
    984 } /* ixv_media_status */
    985 
    986 /************************************************************************
    987  * ixv_media_change - Media Ioctl callback
    988  *
    989  *   Called when the user changes speed/duplex using
    990  *   media/mediaopt option with ifconfig.
    991  ************************************************************************/
    992 static int
    993 ixv_media_change(struct ifnet *ifp)
    994 {
    995 	struct adapter *adapter = ifp->if_softc;
    996 	struct ifmedia *ifm = &adapter->media;
    997 
    998 	INIT_DEBUGOUT("ixv_media_change: begin");
    999 
   1000 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1001 		return (EINVAL);
   1002 
   1003 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1004 	case IFM_AUTO:
   1005 		break;
   1006 	default:
   1007 		device_printf(adapter->dev, "Only auto media type\n");
   1008 		return (EINVAL);
   1009 	}
   1010 
   1011 	return (0);
   1012 } /* ixv_media_change */
   1013 
   1014 
   1015 /************************************************************************
   1016  * ixv_negotiate_api
   1017  *
   1018  *   Negotiate the Mailbox API with the PF;
   1019  *   start with the most featured API first.
   1020  ************************************************************************/
   1021 static int
   1022 ixv_negotiate_api(struct adapter *adapter)
   1023 {
   1024 	struct ixgbe_hw *hw = &adapter->hw;
   1025 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1026 	                              ixgbe_mbox_api_10,
   1027 	                              ixgbe_mbox_api_unknown };
   1028 	int             i = 0;
   1029 
   1030 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1031 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1032 			return (0);
   1033 		i++;
   1034 	}
   1035 
   1036 	return (EINVAL);
   1037 } /* ixv_negotiate_api */
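
        /*
         * Only mailbox API versions 1.1 and 1.0 are attempted above, in that
         * order; the 1.2/1.3 cases recognized by ixv_attach() are not
         * negotiated here.
         */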
   1038 
   1039 
   1040 /************************************************************************
   1041  * ixv_set_multi - Multicast Update
   1042  *
   1043  *   Called whenever multicast address list is updated.
   1044  ************************************************************************/
   1045 static void
   1046 ixv_set_multi(struct adapter *adapter)
   1047 {
   1048 	struct ether_multi *enm;
   1049 	struct ether_multistep step;
   1050 	struct ethercom *ec = &adapter->osdep.ec;
   1051 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1052 	u8                 *update_ptr;
   1053 	int                mcnt = 0;
   1054 
   1055 	KASSERT(mutex_owned(&adapter->core_mtx));
   1056 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1057 
   1058 	ETHER_LOCK(ec);
   1059 	ETHER_FIRST_MULTI(step, ec, enm);
   1060 	while (enm != NULL) {
   1061 		bcopy(enm->enm_addrlo,
   1062 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1063 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1064 		mcnt++;
   1065 		/* XXX This might be required --msaitoh */
   1066 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1067 			break;
   1068 		ETHER_NEXT_MULTI(step, enm);
   1069 	}
   1070 	ETHER_UNLOCK(ec);
   1071 
   1072 	update_ptr = mta;
   1073 
   1074 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1075 	    ixv_mc_array_itr, TRUE);
   1076 
   1077 	return;
   1078 } /* ixv_set_multi */
   1079 
   1080 /************************************************************************
   1081  * ixv_mc_array_itr
   1082  *
   1083  *   An iterator function needed by the multicast shared code.
   1084  *   It feeds the shared code routine the addresses in the
   1085  *   array of ixv_set_multi() one by one.
   1086  ************************************************************************/
   1087 static u8 *
   1088 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1089 {
   1090 	u8 *addr = *update_ptr;
   1091 	u8 *newptr;
   1092 	*vmdq = 0;
   1093 
   1094 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1095 	*update_ptr = newptr;
   1096 
   1097 	return addr;
   1098 } /* ixv_mc_array_itr */
   1099 
   1100 /************************************************************************
   1101  * ixv_local_timer - Timer routine
   1102  *
   1103  *   Checks for link status, updates statistics,
   1104  *   and runs the watchdog check.
   1105  ************************************************************************/
   1106 static void
   1107 ixv_local_timer(void *arg)
   1108 {
   1109 	struct adapter *adapter = arg;
   1110 
   1111 	IXGBE_CORE_LOCK(adapter);
   1112 	ixv_local_timer_locked(adapter);
   1113 	IXGBE_CORE_UNLOCK(adapter);
   1114 }
   1115 
   1116 static void
   1117 ixv_local_timer_locked(void *arg)
   1118 {
   1119 	struct adapter	*adapter = arg;
   1120 	device_t	dev = adapter->dev;
   1121 	struct ix_queue	*que = adapter->queues;
   1122 	u64		queues = 0;
   1123 	int		hung = 0;
   1124 
   1125 	KASSERT(mutex_owned(&adapter->core_mtx));
   1126 
   1127 	ixv_check_link(adapter);
   1128 
   1129 	/* Stats Update */
   1130 	ixv_update_stats(adapter);
   1131 
   1132 	/*
   1133 	 * Check the TX queues status
   1134 	 *      - mark hung queues so we don't schedule on them
   1135 	 *      - watchdog only if all queues show hung
   1136 	 */
   1137 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1138 		/* Keep track of queues with work for soft irq */
   1139 		if (que->txr->busy)
   1140 			queues |= ((u64)1 << que->me);
   1141 		/*
   1142 		 * Each time txeof runs without cleaning while there are
   1143 		 * still uncleaned descriptors, it increments busy.  Once
   1144 		 * busy reaches the MAX, the queue is declared hung.
   1145 		 */
   1146 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1147 			++hung;
   1148 			/* Mark the queue as inactive */
   1149 			adapter->active_queues &= ~((u64)1 << que->me);
   1150 			continue;
   1151 		} else {
   1152 			/* Check if we've come back from hung */
   1153 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1154 				adapter->active_queues |= ((u64)1 << que->me);
   1155 		}
   1156 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1157 			device_printf(dev,
   1158 			    "Warning queue %d appears to be hung!\n", i);
   1159 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1160 			++hung;
   1161 		}
   1162 	}
   1163 
   1164 	/* Only truly watchdog if all queues show hung */
   1165 	if (hung == adapter->num_queues)
   1166 		goto watchdog;
   1167 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1168 		ixv_rearm_queues(adapter, queues);
   1169 	}
   1170 
   1171 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1172 
   1173 	return;
   1174 
   1175 watchdog:
   1176 
   1177 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1178 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1179 	adapter->watchdog_events.ev_count++;
   1180 	ixv_init_locked(adapter);
   1181 } /* ixv_local_timer */
   1182 
   1183 /************************************************************************
   1184  * ixv_update_link_status - Update OS on link state
   1185  *
   1186  * Note: Only updates the OS on the cached link state.
   1187  *       The real check of the hardware only happens with
   1188  *       a link interrupt.
   1189  ************************************************************************/
   1190 static void
   1191 ixv_update_link_status(struct adapter *adapter)
   1192 {
   1193 	struct ifnet *ifp = adapter->ifp;
   1194 	device_t     dev = adapter->dev;
   1195 
   1196 	if (adapter->link_up) {
   1197 		if (adapter->link_active == FALSE) {
   1198 			if (bootverbose) {
   1199 				const char *bpsmsg;
   1200 
   1201 				switch (adapter->link_speed) {
   1202 				case IXGBE_LINK_SPEED_10GB_FULL:
   1203 					bpsmsg = "10 Gbps";
   1204 					break;
   1205 				case IXGBE_LINK_SPEED_5GB_FULL:
   1206 					bpsmsg = "5 Gbps";
   1207 					break;
   1208 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1209 					bpsmsg = "2.5 Gbps";
   1210 					break;
   1211 				case IXGBE_LINK_SPEED_1GB_FULL:
   1212 					bpsmsg = "1 Gbps";
   1213 					break;
   1214 				case IXGBE_LINK_SPEED_100_FULL:
   1215 					bpsmsg = "100 Mbps";
   1216 					break;
   1217 				case IXGBE_LINK_SPEED_10_FULL:
   1218 					bpsmsg = "10 Mbps";
   1219 					break;
   1220 				default:
   1221 					bpsmsg = "unknown speed";
   1222 					break;
   1223 				}
   1224 				device_printf(dev, "Link is up %s %s \n",
   1225 				    bpsmsg, "Full Duplex");
   1226 			}
   1227 			adapter->link_active = TRUE;
   1228 			if_link_state_change(ifp, LINK_STATE_UP);
   1229 		}
   1230 	} else { /* Link down */
   1231 		if (adapter->link_active == TRUE) {
   1232 			if (bootverbose)
   1233 				device_printf(dev, "Link is Down\n");
   1234 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1235 			adapter->link_active = FALSE;
   1236 		}
   1237 	}
   1238 
   1239 	return;
   1240 } /* ixv_update_link_status */
   1241 
   1242 
   1243 /************************************************************************
   1244  * ixv_stop - Stop the hardware
   1245  *
   1246  *   Disables all traffic on the adapter by issuing a
   1247  *   global reset on the MAC and stopping the watchdog callout.
   1248  ************************************************************************/
   1249 static void
   1250 ixv_ifstop(struct ifnet *ifp, int disable)
   1251 {
   1252 	struct adapter *adapter = ifp->if_softc;
   1253 
   1254 	IXGBE_CORE_LOCK(adapter);
   1255 	ixv_stop(adapter);
   1256 	IXGBE_CORE_UNLOCK(adapter);
   1257 }
   1258 
   1259 static void
   1260 ixv_stop(void *arg)
   1261 {
   1262 	struct ifnet    *ifp;
   1263 	struct adapter  *adapter = arg;
   1264 	struct ixgbe_hw *hw = &adapter->hw;
   1265 
   1266 	ifp = adapter->ifp;
   1267 
   1268 	KASSERT(mutex_owned(&adapter->core_mtx));
   1269 
   1270 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1271 	ixv_disable_intr(adapter);
   1272 
   1273 	/* Tell the stack that the interface is no longer active */
   1274 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1275 
   1276 	hw->mac.ops.reset_hw(hw);
   1277 	adapter->hw.adapter_stopped = FALSE;
   1278 	hw->mac.ops.stop_adapter(hw);
   1279 	callout_stop(&adapter->timer);
   1280 
   1281 	/* reprogram the RAR[0] in case user changed it. */
   1282 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1283 
   1284 	return;
   1285 } /* ixv_stop */
   1286 
   1287 
   1288 /************************************************************************
   1289  * ixv_allocate_pci_resources
   1290  ************************************************************************/
   1291 static int
   1292 ixv_allocate_pci_resources(struct adapter *adapter,
   1293     const struct pci_attach_args *pa)
   1294 {
   1295 	pcireg_t	memtype;
   1296 	device_t        dev = adapter->dev;
   1297 	bus_addr_t addr;
   1298 	int flags;
   1299 
   1300 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1301 	switch (memtype) {
   1302 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1303 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1304 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1305 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1306 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1307 			goto map_err;
   1308 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1309 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1310 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1311 		}
   1312 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1313 		     adapter->osdep.mem_size, flags,
   1314 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1315 map_err:
   1316 			adapter->osdep.mem_size = 0;
   1317 			aprint_error_dev(dev, "unable to map BAR0\n");
   1318 			return ENXIO;
   1319 		}
   1320 		break;
   1321 	default:
   1322 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1323 		return ENXIO;
   1324 	}
   1325 
   1326 	/* Pick up the tuneable queues */
   1327 	adapter->num_queues = ixv_num_queues;
   1328 
   1329 	return (0);
   1330 } /* ixv_allocate_pci_resources */
   1331 
   1332 /************************************************************************
   1333  * ixv_free_pci_resources
   1334  ************************************************************************/
   1335 static void
   1336 ixv_free_pci_resources(struct adapter * adapter)
   1337 {
   1338 	struct 		ix_queue *que = adapter->queues;
   1339 	int		rid;
   1340 
   1341 	/*
   1342 	 *  Release all msix queue resources:
   1343 	 */
   1344 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1345 		if (que->res != NULL)
   1346 			pci_intr_disestablish(adapter->osdep.pc,
   1347 			    adapter->osdep.ihs[i]);
   1348 	}
   1349 
   1350 
   1351 	/* Clean the Mailbox interrupt last */
   1352 	rid = adapter->vector;
   1353 
   1354 	if (adapter->osdep.ihs[rid] != NULL) {
   1355 		pci_intr_disestablish(adapter->osdep.pc,
   1356 		    adapter->osdep.ihs[rid]);
   1357 		adapter->osdep.ihs[rid] = NULL;
   1358 	}
   1359 
   1360 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1361 	    adapter->osdep.nintrs);
   1362 
   1363 	if (adapter->osdep.mem_size != 0) {
   1364 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1365 		    adapter->osdep.mem_bus_space_handle,
   1366 		    adapter->osdep.mem_size);
   1367 	}
   1368 
   1369 	return;
   1370 } /* ixv_free_pci_resources */
   1371 
   1372 /************************************************************************
   1373  * ixv_setup_interface
   1374  *
   1375  *   Setup networking device structure and register an interface.
   1376  ************************************************************************/
   1377 static int
   1378 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1379 {
   1380 	struct ethercom *ec = &adapter->osdep.ec;
   1381 	struct ifnet   *ifp;
   1382 	int rv;
   1383 
   1384 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1385 
   1386 	ifp = adapter->ifp = &ec->ec_if;
   1387 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1388 	ifp->if_baudrate = IF_Gbps(10);
   1389 	ifp->if_init = ixv_init;
   1390 	ifp->if_stop = ixv_ifstop;
   1391 	ifp->if_softc = adapter;
   1392 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1393 #ifdef IXGBE_MPSAFE
   1394 	ifp->if_extflags = IFEF_MPSAFE;
   1395 #endif
   1396 	ifp->if_ioctl = ixv_ioctl;
   1397 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1398 #if 0
   1399 		ixv_start_locked = ixgbe_legacy_start_locked;
   1400 #endif
   1401 	} else {
   1402 		ifp->if_transmit = ixgbe_mq_start;
   1403 #if 0
   1404 		ixv_start_locked = ixgbe_mq_start_locked;
   1405 #endif
   1406 	}
   1407 	ifp->if_start = ixgbe_legacy_start;
   1408 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1409 	IFQ_SET_READY(&ifp->if_snd);
   1410 
   1411 	rv = if_initialize(ifp);
   1412 	if (rv != 0) {
   1413 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1414 		return rv;
   1415 	}
   1416 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1417 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1418 	/*
   1419 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1420 	 * used.
   1421 	 */
   1422 	if_register(ifp);
   1423 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1424 
   1425 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1426 
   1427 	/*
   1428 	 * Tell the upper layer(s) we support long frames.
   1429 	 */
   1430 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1431 
   1432 	/* Set capability flags */
   1433 	ifp->if_capabilities |= IFCAP_HWCSUM
   1434 	                     |  IFCAP_TSOv4
   1435 	                     |  IFCAP_TSOv6;
   1436 	ifp->if_capenable = 0;
   1437 
   1438 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1439 			    |  ETHERCAP_VLAN_HWCSUM
   1440 			    |  ETHERCAP_JUMBO_MTU
   1441 			    |  ETHERCAP_VLAN_MTU;
   1442 
   1443 	/* Enable the above capabilities by default */
   1444 	ec->ec_capenable = ec->ec_capabilities;
   1445 
   1446 	/* Don't enable LRO by default */
   1447 	ifp->if_capabilities |= IFCAP_LRO;
   1448 #if 0
   1449 	ifp->if_capenable = ifp->if_capabilities;
   1450 #endif
   1451 
   1452 	/*
   1453 	 * Specify the media types supported by this adapter and register
   1454 	 * callbacks to update media and link information
   1455 	 */
   1456 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1457 	    ixv_media_status);
   1458 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1459 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1460 
   1461 	return 0;
   1462 } /* ixv_setup_interface */
   1463 
   1464 
   1465 /************************************************************************
   1466  * ixv_initialize_transmit_units - Enable transmit unit.
   1467  ************************************************************************/
   1468 static void
   1469 ixv_initialize_transmit_units(struct adapter *adapter)
   1470 {
   1471 	struct tx_ring	*txr = adapter->tx_rings;
   1472 	struct ixgbe_hw	*hw = &adapter->hw;
   1473 
   1474 
   1475 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1476 		u64 tdba = txr->txdma.dma_paddr;
   1477 		u32 txctrl, txdctl;
   1478 
   1479 		/* Set WTHRESH to 8, burst writeback */
   1480 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1481 		txdctl |= (8 << 16);
   1482 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
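        		/*
        		 * The (8 << 16) above lands in the WTHRESH field (bits
        		 * 22:16 of TXDCTL), so completed descriptors are written
        		 * back in bursts of eight rather than one at a time.
        		 */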
   1483 
   1484 		/* Set the HW Tx Head and Tail indices */
   1485 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1486 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1487 
   1488 		/* Set Tx Tail register */
   1489 		txr->tail = IXGBE_VFTDT(i);
   1490 
   1491 		/* Set Ring parameters */
   1492 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1493 		    (tdba & 0x00000000ffffffffULL));
   1494 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1495 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1496 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1497 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1498 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1499 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1500 
   1501 		/* Now enable */
   1502 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1503 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1504 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1505 	}
   1506 
   1507 	return;
   1508 } /* ixv_initialize_transmit_units */
   1509 
   1510 
   1511 /************************************************************************
   1512  * ixv_initialize_rss_mapping
   1513  ************************************************************************/
   1514 static void
   1515 ixv_initialize_rss_mapping(struct adapter *adapter)
   1516 {
   1517 	struct ixgbe_hw *hw = &adapter->hw;
   1518 	u32             reta = 0, mrqc, rss_key[10];
   1519 	int             queue_id;
   1520 	int             i, j;
   1521 	u32             rss_hash_config;
   1522 
   1523 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1524 		/* Fetch the configured RSS key */
   1525 		rss_getkey((uint8_t *)&rss_key);
   1526 	} else {
   1527 		/* set up random bits */
   1528 		cprng_fast(&rss_key, sizeof(rss_key));
   1529 	}
   1530 
   1531 	/* Now fill out hash function seeds */
   1532 	for (i = 0; i < 10; i++)
   1533 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1534 
   1535 	/* Set up the redirection table */
   1536 	for (i = 0, j = 0; i < 64; i++, j++) {
   1537 		if (j == adapter->num_queues)
   1538 			j = 0;
   1539 
   1540 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1541 			/*
   1542 			 * Fetch the RSS bucket id for the given indirection
   1543 			 * entry. Cap it at the number of configured buckets
   1544 			 * (which is num_queues.)
   1545 			 */
   1546 			queue_id = rss_get_indirection_to_bucket(i);
   1547 			queue_id = queue_id % adapter->num_queues;
   1548 		} else
   1549 			queue_id = j;
   1550 
   1551 		/*
   1552 		 * The low 8 bits are for hash value (n+0);
   1553 		 * The next 8 bits are for hash value (n+1), etc.
   1554 		 */
   1555 		reta >>= 8;
   1556 		reta |= ((uint32_t)queue_id) << 24;
   1557 		if ((i & 3) == 3) {
   1558 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1559 			reta = 0;
   1560 		}
   1561 	}
   1562 
   1563 	/* Perform hash on these packet types */
   1564 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1565 		rss_hash_config = rss_gethashconfig();
   1566 	else {
   1567 		/*
   1568 		 * Disable UDP - IP fragments aren't currently being handled
   1569 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1570 		 * traffic.
   1571 		 */
   1572 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1573 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1574 		                | RSS_HASHTYPE_RSS_IPV6
   1575 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1576 	}
   1577 
   1578 	mrqc = IXGBE_MRQC_RSSEN;
   1579 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1580 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1581 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1582 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1583 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1584 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1585 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1586 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1587 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1588 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1589 		    __func__);
   1590 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1591 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1592 		    __func__);
   1593 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1594 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1595 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1596 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1597 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1598 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1599 		    __func__);
   1600 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1601 } /* ixv_initialize_rss_mapping */
   1602 
   1603 
   1604 /************************************************************************
   1605  * ixv_initialize_receive_units - Setup receive registers and features.
   1606  ************************************************************************/
   1607 static void
   1608 ixv_initialize_receive_units(struct adapter *adapter)
   1609 {
   1610 	struct	rx_ring	*rxr = adapter->rx_rings;
   1611 	struct ixgbe_hw	*hw = &adapter->hw;
   1612 	struct ifnet	*ifp = adapter->ifp;
   1613 	u32		bufsz, rxcsum, psrtype;
   1614 
   1615 	if (ifp->if_mtu > ETHERMTU)
   1616 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1617 	else
   1618 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1619 
   1620 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1621 	        | IXGBE_PSRTYPE_UDPHDR
   1622 	        | IXGBE_PSRTYPE_IPV4HDR
   1623 	        | IXGBE_PSRTYPE_IPV6HDR
   1624 	        | IXGBE_PSRTYPE_L2HDR;
   1625 
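	/*
	 * With more than one queue, PSRTYPE must also advertise the
	 * RSS queue count; bit 29 is the low bit of that field, so
	 * setting it announces two RSS queues.
	 */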
   1626 	if (adapter->num_queues > 1)
   1627 		psrtype |= 1 << 29;
   1628 
   1629 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1630 
   1631 	/* Tell PF our max_frame size */
   1632 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1633 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1634 	}
   1635 
   1636 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1637 		u64 rdba = rxr->rxdma.dma_paddr;
   1638 		u32 reg, rxdctl;
   1639 
   1640 		/* Disable the queue */
   1641 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1642 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1643 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
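		/* Poll (for up to 10ms) until the queue reports disabled */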
   1644 		for (int j = 0; j < 10; j++) {
   1645 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1646 			    IXGBE_RXDCTL_ENABLE)
   1647 				msec_delay(1);
   1648 			else
   1649 				break;
   1650 		}
   1651 		wmb();
   1652 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1653 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1654 		    (rdba & 0x00000000ffffffffULL));
   1655 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1656 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1657 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1658 
   1659 		/* Reset the ring indices */
   1660 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1661 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1662 
   1663 		/* Set up the SRRCTL register */
   1664 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1665 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1666 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1667 		reg |= bufsz;
   1668 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1669 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1670 
   1671 		/* Capture Rx Tail index */
   1672 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1673 
   1674 		/* Do the queue enabling last */
   1675 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1676 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
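		/* Poll (for up to 10ms) until the queue reports enabled */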
   1677 		for (int k = 0; k < 10; k++) {
   1678 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1679 			    IXGBE_RXDCTL_ENABLE)
   1680 				break;
   1681 			msec_delay(1);
   1682 		}
   1683 		wmb();
   1684 
   1685 		/* Set the Tail Pointer */
   1686 		/*
   1687 		 * In netmap mode, we must preserve the buffers made
   1688 		 * available to userspace before the if_init()
   1689 		 * (this is true by default on the TX side, because
   1690 		 * init makes all buffers available to userspace).
   1691 		 *
   1692 		 * netmap_reset() and the device specific routines
   1693 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1694 		 * buffers at the end of the NIC ring, so here we
   1695 		 * must set the RDT (tail) register to make sure
   1696 		 * they are not overwritten.
   1697 		 *
   1698 		 * In this driver the NIC ring starts at RDH = 0,
   1699 		 * RDT points to the last slot available for reception (?),
   1700 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1701 		 */
   1702 #ifdef DEV_NETMAP
   1703 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1704 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1705 			struct netmap_adapter *na = NA(adapter->ifp);
   1706 			struct netmap_kring *kring = &na->rx_rings[i];
   1707 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1708 
   1709 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1710 		} else
   1711 #endif /* DEV_NETMAP */
   1712 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1713 			    adapter->num_rx_desc - 1);
   1714 	}
   1715 
   1716 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1717 
   1718 	ixv_initialize_rss_mapping(adapter);
   1719 
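	/*
	 * RXCSUM.PCSD makes the hardware report the RSS hash in the Rx
	 * descriptor instead of the fragment checksum, so it must be set
	 * whenever RSS is in use; IPPCSE is only meaningful with PCSD
	 * clear.
	 */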
   1720 	if (adapter->num_queues > 1) {
   1721 		/* RSS and RX IPP Checksum are mutually exclusive */
   1722 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1723 	}
   1724 
   1725 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1726 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1727 
   1728 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1729 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1730 
   1731 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1732 
   1733 	return;
   1734 } /* ixv_initialize_receive_units */
   1735 
   1736 /************************************************************************
   1737  * ixv_setup_vlan_support
   1738  ************************************************************************/
   1739 static void
   1740 ixv_setup_vlan_support(struct adapter *adapter)
   1741 {
   1742 	struct ethercom *ec = &adapter->osdep.ec;
   1743 	struct ixgbe_hw *hw = &adapter->hw;
   1744 	struct rx_ring  *rxr;
   1745 	u32		ctrl, vid, vfta, retry;
   1746 
	/*
	 * We get here via init_locked, which means a soft reset has
	 * already cleared the VFTA and other VLAN state.  If no VLANs
	 * have been registered there is nothing to restore, so simply
	 * return.
	 */
   1753 	if (!VLAN_ATTACHED(ec))
   1754 		return;
   1755 
   1756 	/* Enable the queues */
   1757 	for (int i = 0; i < adapter->num_queues; i++) {
   1758 		rxr = &adapter->rx_rings[i];
   1759 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1760 		ctrl |= IXGBE_RXDCTL_VME;
   1761 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1762 		/*
   1763 		 * Let Rx path know that it needs to store VLAN tag
   1764 		 * as part of extra mbuf info.
   1765 		 */
   1766 		rxr->vtag_strip = TRUE;
   1767 	}
   1768 
   1769 #if 1
   1770 	/* XXX dirty hack. Enable all VIDs */
   1771 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0xffffffff;
   1773 #endif
	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
   1778 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1779 		if (adapter->shadow_vfta[i] == 0)
   1780 			continue;
   1781 		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the VLAN IDs from
		 * the bits set in each 32-bit
		 * word of the shadow VFTA.
		 */
   1787 		for (int j = 0; j < 32; j++) {
   1788 			retry = 0;
   1789 			if ((vfta & (1 << j)) == 0)
   1790 				continue;
   1791 			vid = (i * 32) + j;
   1792 			/* Call the shared code mailbox routine */
   1793 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1794 				if (++retry > 5)
   1795 					break;
   1796 			}
   1797 		}
   1798 	}
   1799 } /* ixv_setup_vlan_support */
   1800 
   1801 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1802 /************************************************************************
   1803  * ixv_register_vlan
   1804  *
 *   Run via a vlan config EVENT.  It enables us to use the
 *   HW filter table since we can get the VLAN ID.  This just
 *   creates the entry in the soft version of the VFTA; init
 *   will repopulate the real table.
   1809  ************************************************************************/
   1810 static void
   1811 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1812 {
   1813 	struct adapter	*adapter = ifp->if_softc;
   1814 	u16		index, bit;
   1815 
   1816 	if (ifp->if_softc != arg) /* Not our event */
   1817 		return;
   1818 
   1819 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1820 		return;
   1821 
   1822 	IXGBE_CORE_LOCK(adapter);
   1823 	index = (vtag >> 5) & 0x7F;
   1824 	bit = vtag & 0x1F;
   1825 	adapter->shadow_vfta[index] |= (1 << bit);
   1826 	/* Re-init to load the changes */
   1827 	ixv_init_locked(adapter);
   1828 	IXGBE_CORE_UNLOCK(adapter);
   1829 } /* ixv_register_vlan */
   1830 
   1831 /************************************************************************
   1832  * ixv_unregister_vlan
   1833  *
   1834  *   Run via a vlan unconfig EVENT, remove our entry
   1835  *   in the soft vfta.
   1836  ************************************************************************/
   1837 static void
   1838 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1839 {
   1840 	struct adapter	*adapter = ifp->if_softc;
   1841 	u16		index, bit;
   1842 
	if (ifp->if_softc != arg)
   1844 		return;
   1845 
   1846 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1847 		return;
   1848 
   1849 	IXGBE_CORE_LOCK(adapter);
   1850 	index = (vtag >> 5) & 0x7F;
   1851 	bit = vtag & 0x1F;
   1852 	adapter->shadow_vfta[index] &= ~(1 << bit);
   1853 	/* Re-init to load the changes */
   1854 	ixv_init_locked(adapter);
   1855 	IXGBE_CORE_UNLOCK(adapter);
   1856 } /* ixv_unregister_vlan */
   1857 #endif
   1858 
   1859 /************************************************************************
   1860  * ixv_enable_intr
   1861  ************************************************************************/
   1862 static void
   1863 ixv_enable_intr(struct adapter *adapter)
   1864 {
   1865 	struct ixgbe_hw *hw = &adapter->hw;
   1866 	struct ix_queue *que = adapter->queues;
   1867 	u32             mask;
   1868 	int i;
   1869 
   1870 	/* For VTEIAC */
   1871 	mask = (1 << adapter->vector);
   1872 	for (i = 0; i < adapter->num_queues; i++, que++)
   1873 		mask |= (1 << que->msix);
   1874 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1875 
   1876 	/* For VTEIMS */
   1877 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
   1878 	que = adapter->queues;
   1879 	for (i = 0; i < adapter->num_queues; i++, que++)
   1880 		ixv_enable_queue(adapter, que->msix);
   1881 
   1882 	IXGBE_WRITE_FLUSH(hw);
   1883 
   1884 	return;
   1885 } /* ixv_enable_intr */
   1886 
   1887 /************************************************************************
   1888  * ixv_disable_intr
   1889  ************************************************************************/
   1890 static void
   1891 ixv_disable_intr(struct adapter *adapter)
   1892 {
   1893 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1894 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1895 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1896 
   1897 	return;
   1898 } /* ixv_disable_intr */
   1899 
   1900 /************************************************************************
   1901  * ixv_set_ivar
   1902  *
   1903  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1904  *    - entry is the register array entry
   1905  *    - vector is the MSI-X vector for this queue
   1906  *    - type is RX/TX/MISC
   1907  ************************************************************************/
   1908 static void
   1909 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1910 {
   1911 	struct ixgbe_hw *hw = &adapter->hw;
   1912 	u32             ivar, index;
   1913 
   1914 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1915 
   1916 	if (type == -1) { /* MISC IVAR */
   1917 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1918 		ivar &= ~0xFF;
   1919 		ivar |= vector;
   1920 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1921 	} else {          /* RX/TX IVARS */
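		/*
		 * Each 32-bit VTIVAR register holds the vectors for two
		 * queues, one byte per Rx and Tx entry: 'entry & 1'
		 * selects the upper or lower half and 'type' (0 = Rx,
		 * 1 = Tx) selects the byte within it.
		 */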
   1922 		index = (16 * (entry & 1)) + (8 * type);
   1923 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1924 		ivar &= ~(0xFF << index);
   1925 		ivar |= (vector << index);
   1926 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1927 	}
   1928 } /* ixv_set_ivar */
   1929 
   1930 /************************************************************************
   1931  * ixv_configure_ivars
   1932  ************************************************************************/
   1933 static void
   1934 ixv_configure_ivars(struct adapter *adapter)
   1935 {
   1936 	struct ix_queue *que = adapter->queues;
   1937 
   1938 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1939 		/* First the RX queue entry */
   1940 		ixv_set_ivar(adapter, i, que->msix, 0);
   1941 		/* ... and the TX */
   1942 		ixv_set_ivar(adapter, i, que->msix, 1);
   1943 		/* Set an initial value in EITR */
   1944 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1945 		    IXGBE_EITR_DEFAULT);
   1946 	}
   1947 
   1948 	/* For the mailbox interrupt */
   1949 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1950 } /* ixv_configure_ivars */
   1951 
   1952 
   1953 /************************************************************************
   1954  * ixv_save_stats
   1955  *
 *   The VF stats registers never have a truly clean
 *   starting point, so this routine creates an artificial
 *   one by treating the values present at attach time as
 *   ground zero.
   1960  ************************************************************************/
   1961 static void
   1962 ixv_save_stats(struct adapter *adapter)
   1963 {
   1964 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1965 
   1966 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1967 		stats->saved_reset_vfgprc +=
   1968 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1969 		stats->saved_reset_vfgptc +=
   1970 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1971 		stats->saved_reset_vfgorc +=
   1972 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1973 		stats->saved_reset_vfgotc +=
   1974 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1975 		stats->saved_reset_vfmprc +=
   1976 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1977 	}
   1978 } /* ixv_save_stats */
   1979 
   1980 /************************************************************************
   1981  * ixv_init_stats
   1982  ************************************************************************/
   1983 static void
   1984 ixv_init_stats(struct adapter *adapter)
   1985 {
   1986 	struct ixgbe_hw *hw = &adapter->hw;
   1987 
   1988 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1989 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1990 	adapter->stats.vf.last_vfgorc |=
   1991 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1992 
   1993 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1994 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1995 	adapter->stats.vf.last_vfgotc |=
   1996 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1997 
   1998 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1999 
   2000 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2001 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2002 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2003 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2004 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2005 } /* ixv_init_stats */
   2006 
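/*
 * The VF statistics registers are only 32 bits wide (36 bits for the
 * octet counters), so wrap-around is detected by the current value
 * dropping below the previous one; the overflow is carried into the
 * upper bits of the 64-bit event counter.
 */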
   2007 #define UPDATE_STAT_32(reg, last, count)		\
   2008 {                                                       \
   2009 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   2010 	if (current < (last))				\
   2011 		count.ev_count += 0x100000000LL;	\
   2012 	(last) = current;				\
   2013 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2014 	count.ev_count |= current;			\
   2015 }
   2016 
   2017 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   2018 {                                                       \
   2019 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   2020 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   2021 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   2022 	if (current < (last))				\
   2023 		count.ev_count += 0x1000000000LL;	\
   2024 	(last) = current;				\
   2025 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2026 	count.ev_count |= current;			\
   2027 }
   2028 
   2029 /************************************************************************
   2030  * ixv_update_stats - Update the board statistics counters.
   2031  ************************************************************************/
   2032 void
   2033 ixv_update_stats(struct adapter *adapter)
   2034 {
   2035 	struct ixgbe_hw *hw = &adapter->hw;
   2036 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2037 
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2045 
	/*
	 * We deliberately do not fill out the OS statistics structure
	 * (if_{i|o}{packets|bytes|mcasts}) from adapter->stats here:
	 * leaving those fields alone is required to make "ifconfig -z"
	 * (SIOCZIFDATA) work on NetBSD.
	 */
   2052 } /* ixv_update_stats */
   2053 
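/************************************************************************
 * ixv_sysctl_instance - Return the device's root sysctl node
 *   (hw.<devicename>), creating it with sysctl_createv.
 ************************************************************************/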
   2054 const struct sysctlnode *
   2055 ixv_sysctl_instance(struct adapter *adapter)
   2056 {
   2057 	const char *dvname;
   2058 	struct sysctllog **log;
   2059 	int rc;
   2060 	const struct sysctlnode *rnode;
   2061 
   2062 	log = &adapter->sysctllog;
   2063 	dvname = device_xname(adapter->dev);
   2064 
   2065 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2066 	    0, CTLTYPE_NODE, dvname,
   2067 	    SYSCTL_DESCR("ixv information and settings"),
   2068 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2069 		goto err;
   2070 
   2071 	return rnode;
   2072 err:
   2073 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2074 	return NULL;
   2075 }
   2076 
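/************************************************************************
 * ixv_add_device_sysctls - Register the per-device "debug" and
 *   "enable_aim" sysctl knobs under the device's sysctl node.
 ************************************************************************/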
   2077 static void
   2078 ixv_add_device_sysctls(struct adapter *adapter)
   2079 {
   2080 	struct sysctllog **log;
   2081 	const struct sysctlnode *rnode, *cnode;
   2082 	device_t dev;
   2083 
   2084 	dev = adapter->dev;
   2085 	log = &adapter->sysctllog;
   2086 
   2087 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2088 		aprint_error_dev(dev, "could not create sysctl root\n");
   2089 		return;
   2090 	}
   2091 
   2092 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2093 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2094 	    "debug", SYSCTL_DESCR("Debug Info"),
   2095 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2096 		aprint_error_dev(dev, "could not create sysctl\n");
   2097 
   2098 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2099 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2100 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2101 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2102 		aprint_error_dev(dev, "could not create sysctl\n");
   2103 }
   2104 
   2105 /************************************************************************
   2106  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2107  ************************************************************************/
   2108 static void
   2109 ixv_add_stats_sysctls(struct adapter *adapter)
   2110 {
   2111 	device_t                dev = adapter->dev;
   2112 	struct tx_ring          *txr = adapter->tx_rings;
   2113 	struct rx_ring          *rxr = adapter->rx_rings;
   2114 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2115 	struct ixgbe_hw *hw = &adapter->hw;
   2116 	const struct sysctlnode *rnode;
   2117 	struct sysctllog **log = &adapter->sysctllog;
   2118 	const char *xname = device_xname(dev);
   2119 
   2120 	/* Driver Statistics */
   2121 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2122 	    NULL, xname, "Handled queue in softint");
   2123 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2124 	    NULL, xname, "Requeued in softint");
   2125 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2126 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2127 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2128 	    NULL, xname, "m_defrag() failed");
   2129 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2130 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2131 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2132 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2133 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2134 	    NULL, xname, "Driver tx dma hard fail other");
   2135 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2136 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2137 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2138 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2139 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2140 	    NULL, xname, "Watchdog timeouts");
   2141 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2142 	    NULL, xname, "TSO errors");
   2143 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2144 	    NULL, xname, "Link MSI-X IRQ Handled");
   2145 
   2146 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2147 		snprintf(adapter->queues[i].evnamebuf,
   2148 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2149 		    xname, i);
   2150 		snprintf(adapter->queues[i].namebuf,
   2151 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2152 
   2153 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2154 			aprint_error_dev(dev, "could not create sysctl root\n");
   2155 			break;
   2156 		}
   2157 
   2158 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2159 		    0, CTLTYPE_NODE,
   2160 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2161 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2162 			break;
   2163 
   2164 #if 0 /* not yet */
   2165 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2166 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2167 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2168 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2169 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2170 			break;
   2171 
   2172 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2173 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2174 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2175 			NULL, 0, &(adapter->queues[i].irqs),
   2176 		    0, CTL_CREATE, CTL_EOL) != 0)
   2177 			break;
   2178 
   2179 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2180 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2181 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2182 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2183 		    0, CTL_CREATE, CTL_EOL) != 0)
   2184 			break;
   2185 
   2186 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2187 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2188 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2189 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2190 		    0, CTL_CREATE, CTL_EOL) != 0)
   2191 			break;
   2192 #endif
   2193 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2194 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2195 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2196 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2197 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2198 		    NULL, adapter->queues[i].evnamebuf,
   2199 		    "Queue No Descriptor Available");
   2200 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2201 		    NULL, adapter->queues[i].evnamebuf,
   2202 		    "Queue Packets Transmitted");
   2203 #ifndef IXGBE_LEGACY_TX
   2204 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2205 		    NULL, adapter->queues[i].evnamebuf,
   2206 		    "Packets dropped in pcq");
   2207 #endif
   2208 
   2209 #ifdef LRO
   2210 		struct lro_ctrl *lro = &rxr->lro;
   2211 #endif /* LRO */
   2212 
   2213 #if 0 /* not yet */
   2214 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2215 		    CTLFLAG_READONLY,
   2216 		    CTLTYPE_INT,
   2217 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2218 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2219 		    CTL_CREATE, CTL_EOL) != 0)
   2220 			break;
   2221 
   2222 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2223 		    CTLFLAG_READONLY,
   2224 		    CTLTYPE_INT,
   2225 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2226 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2227 		    CTL_CREATE, CTL_EOL) != 0)
   2228 			break;
   2229 #endif
   2230 
   2231 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2232 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2233 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2234 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2235 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2236 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2237 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2238 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2239 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2240 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2241 #ifdef LRO
   2242 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2243 				CTLFLAG_RD, &lro->lro_queued, 0,
   2244 				"LRO Queued");
   2245 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2246 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2247 				"LRO Flushed");
   2248 #endif /* LRO */
   2249 	}
   2250 
   2251 	/* MAC stats get their own sub node */
   2252 
   2253 	snprintf(stats->namebuf,
   2254 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2255 
   2256 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2257 	    stats->namebuf, "rx csum offload - IP");
   2258 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2259 	    stats->namebuf, "rx csum offload - L4");
   2260 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2261 	    stats->namebuf, "rx csum offload - IP bad");
   2262 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2263 	    stats->namebuf, "rx csum offload - L4 bad");
   2264 
   2265 	/* Packet Reception Stats */
   2266 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2267 	    xname, "Good Packets Received");
   2268 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2269 	    xname, "Good Octets Received");
   2270 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2271 	    xname, "Multicast Packets Received");
   2272 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2273 	    xname, "Good Packets Transmitted");
   2274 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2275 	    xname, "Good Octets Transmitted");
   2276 
   2277 	/* Mailbox Stats */
   2278 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2279 	    xname, "message TXs");
   2280 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2281 	    xname, "message RXs");
   2282 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2283 	    xname, "ACKs");
   2284 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2285 	    xname, "REQs");
   2286 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2287 	    xname, "RSTs");
   2288 
   2289 } /* ixv_add_stats_sysctls */
   2290 
   2291 /************************************************************************
   2292  * ixv_set_sysctl_value
   2293  ************************************************************************/
   2294 static void
   2295 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2296 	const char *description, int *limit, int value)
   2297 {
	device_t dev = adapter->dev;
   2299 	struct sysctllog **log;
   2300 	const struct sysctlnode *rnode, *cnode;
   2301 
   2302 	log = &adapter->sysctllog;
   2303 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2304 		aprint_error_dev(dev, "could not create sysctl root\n");
   2305 		return;
   2306 	}
   2307 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2308 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2309 	    name, SYSCTL_DESCR(description),
   2310 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2311 		aprint_error_dev(dev, "could not create sysctl\n");
   2312 	*limit = value;
   2313 } /* ixv_set_sysctl_value */
   2314 
   2315 /************************************************************************
   2316  * ixv_print_debug_info
   2317  *
 *   Called from the "debug" sysctl handler (ixv_sysctl_debug).
 *   Provides a way to take a look at important statistics
 *   maintained by the driver and hardware.
   2321  ************************************************************************/
   2322 static void
   2323 ixv_print_debug_info(struct adapter *adapter)
   2324 {
	device_t	dev = adapter->dev;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	struct rx_ring	*rxr;
	struct tx_ring	*txr;
#ifdef LRO
	struct lro_ctrl	*lro;
#endif /* LRO */
   2333 
   2334 	device_printf(dev, "Error Byte Count = %u \n",
   2335 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2336 
   2337 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2338 		txr = que->txr;
   2339 		rxr = que->rxr;
   2340 #ifdef LRO
   2341 		lro = &rxr->lro;
   2342 #endif /* LRO */
   2343 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2344 		    que->msix, (long)que->irqs.ev_count);
   2345 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2346 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2347 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2348 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2349 #ifdef LRO
   2350 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2351 		    rxr->me, (long long)lro->lro_queued);
   2352 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2353 		    rxr->me, (long long)lro->lro_flushed);
   2354 #endif /* LRO */
   2355 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2356 		    txr->me, (long)txr->total_packets.ev_count);
   2357 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2358 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2359 	}
   2360 
   2361 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2362 	    (long)adapter->link_irq.ev_count);
   2363 } /* ixv_print_debug_info */
   2364 
   2365 /************************************************************************
   2366  * ixv_sysctl_debug
   2367  ************************************************************************/
   2368 static int
   2369 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2370 {
   2371 	struct sysctlnode node;
   2372 	struct adapter *adapter;
	int            error, result = 0;

	node = *rnode;
	adapter = (struct adapter *)node.sysctl_data;
	node.sysctl_data = &result;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	if (result == 1)
		ixv_print_debug_info(adapter);
   2386 
   2387 	return 0;
   2388 } /* ixv_sysctl_debug */
   2389 
   2390 /************************************************************************
   2391  * ixv_init_device_features
   2392  ************************************************************************/
   2393 static void
   2394 ixv_init_device_features(struct adapter *adapter)
   2395 {
   2396 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2397 	                  | IXGBE_FEATURE_VF
   2398 	                  | IXGBE_FEATURE_RSS
   2399 	                  | IXGBE_FEATURE_LEGACY_TX;
   2400 
   2401 	/* A tad short on feature flags for VFs, atm. */
   2402 	switch (adapter->hw.mac.type) {
   2403 	case ixgbe_mac_82599_vf:
   2404 		break;
   2405 	case ixgbe_mac_X540_vf:
   2406 		break;
   2407 	case ixgbe_mac_X550_vf:
   2408 	case ixgbe_mac_X550EM_x_vf:
   2409 	case ixgbe_mac_X550EM_a_vf:
   2410 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2411 		break;
   2412 	default:
   2413 		break;
   2414 	}
   2415 
   2416 	/* Enabled by default... */
   2417 	/* Is a virtual function (VF) */
   2418 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2419 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2420 	/* Netmap */
   2421 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2422 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2423 	/* Receive-Side Scaling (RSS) */
   2424 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2425 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2426 	/* Needs advanced context descriptor regardless of offloads req'd */
   2427 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2428 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2429 
   2430 	/* Enabled via sysctl... */
   2431 	/* Legacy (single queue) transmit */
   2432 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2433 	    ixv_enable_legacy_tx)
   2434 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2435 } /* ixv_init_device_features */
   2436 
   2437 /************************************************************************
   2438  * ixv_shutdown - Shutdown entry point
   2439  ************************************************************************/
   2440 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2441 static int
   2442 ixv_shutdown(device_t dev)
   2443 {
   2444 	struct adapter *adapter = device_private(dev);
   2445 	IXGBE_CORE_LOCK(adapter);
   2446 	ixv_stop(adapter);
   2447 	IXGBE_CORE_UNLOCK(adapter);
   2448 
   2449 	return (0);
   2450 } /* ixv_shutdown */
   2451 #endif
   2452 
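/************************************************************************
 * ixv_ifflags_cb - Interface flags change callback.
 *
 *   Records the new flags, refreshes VLAN support, and returns
 *   ENETRESET when a change requires reinitialization.
 ************************************************************************/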
   2453 static int
   2454 ixv_ifflags_cb(struct ethercom *ec)
   2455 {
   2456 	struct ifnet *ifp = &ec->ec_if;
   2457 	struct adapter *adapter = ifp->if_softc;
   2458 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2459 
   2460 	IXGBE_CORE_LOCK(adapter);
   2461 
   2462 	if (change != 0)
   2463 		adapter->if_flags = ifp->if_flags;
   2464 
   2465 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2466 		rc = ENETRESET;
   2467 
   2468 	/* Set up VLAN support and filter */
   2469 	ixv_setup_vlan_support(adapter);
   2470 
   2471 	IXGBE_CORE_UNLOCK(adapter);
   2472 
   2473 	return rc;
   2474 }
   2475 
   2476 
   2477 /************************************************************************
   2478  * ixv_ioctl - Ioctl entry point
   2479  *
   2480  *   Called when the user wants to configure the interface.
   2481  *
   2482  *   return 0 on success, positive on failure
   2483  ************************************************************************/
   2484 static int
   2485 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2486 {
   2487 	struct adapter	*adapter = ifp->if_softc;
   2488 	struct ifcapreq *ifcr = data;
   2489 	struct ifreq	*ifr = data;
   2490 	int             error = 0;
   2491 	int l4csum_en;
   2492 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2493 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2494 
   2495 	switch (command) {
   2496 	case SIOCSIFFLAGS:
   2497 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2498 		break;
   2499 	case SIOCADDMULTI:
   2500 	case SIOCDELMULTI:
   2501 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2502 		break;
   2503 	case SIOCSIFMEDIA:
   2504 	case SIOCGIFMEDIA:
   2505 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2506 		break;
   2507 	case SIOCSIFCAP:
   2508 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2509 		break;
   2510 	case SIOCSIFMTU:
   2511 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2512 		break;
   2513 	default:
   2514 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2515 		break;
   2516 	}
   2517 
   2518 	switch (command) {
   2519 	case SIOCSIFMEDIA:
   2520 	case SIOCGIFMEDIA:
   2521 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2522 	case SIOCSIFCAP:
   2523 		/* Layer-4 Rx checksum offload has to be turned on and
   2524 		 * off as a unit.
   2525 		 */
   2526 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2527 		if (l4csum_en != l4csum && l4csum_en != 0)
   2528 			return EINVAL;
   2529 		/*FALLTHROUGH*/
   2530 	case SIOCADDMULTI:
   2531 	case SIOCDELMULTI:
   2532 	case SIOCSIFFLAGS:
   2533 	case SIOCSIFMTU:
   2534 	default:
   2535 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2536 			return error;
   2537 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2538 			;
   2539 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2540 			IXGBE_CORE_LOCK(adapter);
   2541 			ixv_init_locked(adapter);
   2542 			IXGBE_CORE_UNLOCK(adapter);
   2543 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2544 			/*
   2545 			 * Multicast list has changed; set the hardware filter
   2546 			 * accordingly.
   2547 			 */
   2548 			IXGBE_CORE_LOCK(adapter);
   2549 			ixv_disable_intr(adapter);
   2550 			ixv_set_multi(adapter);
   2551 			ixv_enable_intr(adapter);
   2552 			IXGBE_CORE_UNLOCK(adapter);
   2553 		}
   2554 		return 0;
   2555 	}
   2556 } /* ixv_ioctl */
   2557 
   2558 /************************************************************************
   2559  * ixv_init
   2560  ************************************************************************/
   2561 static int
   2562 ixv_init(struct ifnet *ifp)
   2563 {
   2564 	struct adapter *adapter = ifp->if_softc;
   2565 
   2566 	IXGBE_CORE_LOCK(adapter);
   2567 	ixv_init_locked(adapter);
   2568 	IXGBE_CORE_UNLOCK(adapter);
   2569 
   2570 	return 0;
   2571 } /* ixv_init */
   2572 
   2573 
   2574 /************************************************************************
   2575  * ixv_handle_que
   2576  ************************************************************************/
   2577 static void
   2578 ixv_handle_que(void *context)
   2579 {
   2580 	struct ix_queue *que = context;
   2581 	struct adapter  *adapter = que->adapter;
   2582 	struct tx_ring	*txr = que->txr;
   2583 	struct ifnet    *ifp = adapter->ifp;
   2584 	bool		more;
   2585 
   2586 	adapter->handleq.ev_count++;
   2587 
   2588 	if (ifp->if_flags & IFF_RUNNING) {
   2589 		more = ixgbe_rxeof(que);
   2590 		IXGBE_TX_LOCK(txr);
   2591 		ixgbe_txeof(txr);
   2592 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2593 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2594 				ixgbe_mq_start_locked(ifp, txr);
   2595 		/* Only for queue 0 */
   2596 		/* NetBSD still needs this for CBQ */
   2597 		if ((&adapter->queues[0] == que)
   2598 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2599 			ixgbe_legacy_start_locked(ifp, txr);
   2600 		IXGBE_TX_UNLOCK(txr);
   2601 		if (more) {
   2602 			adapter->req.ev_count++;
   2603 			softint_schedule(que->que_si);
   2604 			return;
   2605 		}
   2606 	}
   2607 
   2608 	/* Re-enable this interrupt */
   2609 	ixv_enable_queue(adapter, que->msix);
   2610 
   2611 	return;
   2612 } /* ixv_handle_que */
   2613 
   2614 /************************************************************************
   2615  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2616  ************************************************************************/
   2617 static int
   2618 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2619 {
   2620 	device_t	dev = adapter->dev;
   2621 	struct ix_queue *que = adapter->queues;
   2622 	struct		tx_ring *txr = adapter->tx_rings;
   2623 	int 		error, msix_ctrl, rid, vector = 0;
   2624 	pci_chipset_tag_t pc;
   2625 	pcitag_t	tag;
   2626 	char		intrbuf[PCI_INTRSTR_LEN];
   2627 	char		intr_xname[32];
   2628 	const char	*intrstr = NULL;
   2629 	kcpuset_t	*affinity;
   2630 	int		cpu_id = 0;
   2631 
   2632 	pc = adapter->osdep.pc;
   2633 	tag = adapter->osdep.tag;
   2634 
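	/* One vector per queue pair plus one for the mailbox/link */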
   2635 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2636 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2637 	    adapter->osdep.nintrs) != 0) {
   2638 		aprint_error_dev(dev,
   2639 		    "failed to allocate MSI-X interrupt\n");
   2640 		return (ENXIO);
   2641 	}
   2642 
   2643 	kcpuset_create(&affinity, false);
   2644 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2645 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2646 		    device_xname(dev), i);
   2647 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2648 		    sizeof(intrbuf));
   2649 #ifdef IXGBE_MPSAFE
   2650 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2651 		    true);
   2652 #endif
   2653 		/* Set the handler function */
   2654 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2655 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2656 		    intr_xname);
   2657 		if (que->res == NULL) {
   2658 			pci_intr_release(pc, adapter->osdep.intrs,
   2659 			    adapter->osdep.nintrs);
   2660 			aprint_error_dev(dev,
   2661 			    "Failed to register QUE handler\n");
   2662 			kcpuset_destroy(affinity);
   2663 			return (ENXIO);
   2664 		}
   2665 		que->msix = vector;
		adapter->active_queues |= (u64)1 << que->msix;
   2667 
   2668 		cpu_id = i;
   2669 		/* Round-robin affinity */
   2670 		kcpuset_zero(affinity);
   2671 		kcpuset_set(affinity, cpu_id % ncpu);
   2672 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2673 		    NULL);
   2674 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2675 		    intrstr);
   2676 		if (error == 0)
   2677 			aprint_normal(", bound queue %d to cpu %d\n",
   2678 			    i, cpu_id % ncpu);
   2679 		else
   2680 			aprint_normal("\n");
   2681 
   2682 #ifndef IXGBE_LEGACY_TX
   2683 		txr->txr_si
   2684 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2685 			ixgbe_deferred_mq_start, txr);
   2686 #endif
   2687 		que->que_si
   2688 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2689 			ixv_handle_que, que);
   2690 		if (que->que_si == NULL) {
   2691 			aprint_error_dev(dev,
   2692 			    "could not establish software interrupt\n");
   2693 		}
   2694 	}
   2695 
   2696 	/* and Mailbox */
   2697 	cpu_id++;
   2698 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2699 	adapter->vector = vector;
   2700 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2701 	    sizeof(intrbuf));
   2702 #ifdef IXGBE_MPSAFE
   2703 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2704 	    true);
   2705 #endif
   2706 	/* Set the mbx handler function */
   2707 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2708 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2709 	    intr_xname);
   2710 	if (adapter->osdep.ihs[vector] == NULL) {
   2711 		adapter->res = NULL;
   2712 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2713 		kcpuset_destroy(affinity);
   2714 		return (ENXIO);
   2715 	}
   2716 	/* Round-robin affinity */
   2717 	kcpuset_zero(affinity);
   2718 	kcpuset_set(affinity, cpu_id % ncpu);
	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
	    NULL);
   2720 
   2721 	aprint_normal_dev(dev,
   2722 	    "for link, interrupting at %s", intrstr);
   2723 	if (error == 0)
   2724 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2725 	else
   2726 		aprint_normal("\n");
   2727 
   2728 	/* Tasklets for Mailbox */
   2729 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2730 	    ixv_handle_link, adapter);
	/*
	 * Due to a broken design, QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE bit in the MSI-X control register again at
	 * this point to cause it to successfully initialize us.
	 */
   2738 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2739 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2740 		rid += PCI_MSIX_CTL;
   2741 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2742 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2743 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2744 	}
   2745 
   2746 	kcpuset_destroy(affinity);
   2747 	return (0);
   2748 } /* ixv_allocate_msix */
   2749 
   2750 /************************************************************************
   2751  * ixv_configure_interrupts - Setup MSI-X resources
   2752  *
   2753  *   Note: The VF device MUST use MSI-X, there is no fallback.
   2754  ************************************************************************/
   2755 static int
   2756 ixv_configure_interrupts(struct adapter *adapter)
   2757 {
   2758 	device_t dev = adapter->dev;
   2759 	int want, queues, msgs;
   2760 
   2761 	/* Must have at least 2 MSI-X vectors */
   2762 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2763 	if (msgs < 2) {
   2764 		aprint_error_dev(dev, "MSIX config error\n");
   2765 		return (ENXIO);
   2766 	}
   2767 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2768 
   2769 	/* Figure out a reasonable auto config value */
   2770 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2771 
   2772 	if (ixv_num_queues != 0)
   2773 		queues = ixv_num_queues;
	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
   2775 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2776 
   2777 	/*
   2778 	 * Want vectors for the queues,
   2779 	 * plus an additional for mailbox.
   2780 	 */
   2781 	want = queues + 1;
   2782 	if (msgs >= want)
   2783 		msgs = want;
   2784 	else {
		aprint_error_dev(dev,
   2786 		    "MSI-X Configuration Problem, "
   2787 		    "%d vectors but %d queues wanted!\n",
   2788 		    msgs, want);
   2789 		return -1;
   2790 	}
   2791 
   2792 	adapter->msix_mem = (void *)1; /* XXX */
   2793 	aprint_normal_dev(dev,
   2794 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2795 	adapter->num_queues = queues;
   2796 
   2797 	return (0);
   2798 } /* ixv_configure_interrupts */
   2799 
   2800 
   2801 /************************************************************************
   2802  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2803  *
   2804  *   Done outside of interrupt context since the driver might sleep
   2805  ************************************************************************/
   2806 static void
   2807 ixv_handle_link(void *context)
   2808 {
   2809 	struct adapter *adapter = context;
   2810 
   2811 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2812 	    &adapter->link_up, FALSE);
   2813 	ixv_update_link_status(adapter);
   2814 } /* ixv_handle_link */
   2815 
   2816 /************************************************************************
   2817  * ixv_check_link - Used in the local timer to poll for link changes
   2818  ************************************************************************/
   2819 static void
   2820 ixv_check_link(struct adapter *adapter)
   2821 {
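	/* Mark the cached link state stale so check_link really queries */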
   2822 	adapter->hw.mac.get_link_status = TRUE;
   2823 
   2824 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2825 	    &adapter->link_up, FALSE);
   2826 	ixv_update_link_status(adapter);
   2827 } /* ixv_check_link */
   2828