      1 /*$NetBSD: ixv.c,v 1.66 2017/09/27 10:31:29 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
      55  *   Used by probe to select the devices to attach to
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
     148  * Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
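         /*
          * NetBSD has no FreeBSD-style loader tunables, so TUNABLE_INT()
          * is stubbed out below and these declarations become no-ops.
          */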
    171 #define	TUNABLE_INT(__x, __y)
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    173 
    174 /*
    175  * AIM: Adaptive Interrupt Moderation
    176  * which means that the interrupt rate
    177  * is varied over time based on the
    178  * traffic for that interrupt vector
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
     191 /*
     192  * Number of TX descriptors per ring;
     193  * set higher than RX as this seems to
     194  * be the better performing choice.
     195  */
    196 static int ixv_txd = PERFORM_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = PERFORM_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
    207 #ifdef NET_MPSAFE
    208 #define IXGBE_MPSAFE		1
    209 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    210 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    211 #else
    212 #define IXGBE_CALLOUT_FLAGS	0
    213 #define IXGBE_SOFTINFT_FLAGS	0
    214 #endif
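         /*
          * With NET_MPSAFE, callouts and softints established with these
          * flags run without holding the kernel lock.
          */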
    215 
    216 #if 0
    217 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    218 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    219 #endif
    220 
    221 /************************************************************************
    222  * ixv_probe - Device identification routine
    223  *
     224  *   Determines if the driver should be loaded on the
     225  *   adapter based on its PCI vendor/device ID.
     226  *
     227  *   return 1 on a match, 0 otherwise
    228  ************************************************************************/
    229 static int
    230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    231 {
    232 #ifdef __HAVE_PCI_MSI_MSIX
    233 	const struct pci_attach_args *pa = aux;
    234 
    235 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    236 #else
    237 	return 0;
    238 #endif
    239 } /* ixv_probe */
    240 
    241 static ixgbe_vendor_info_t *
    242 ixv_lookup(const struct pci_attach_args *pa)
    243 {
    244 	ixgbe_vendor_info_t *ent;
    245 	pcireg_t subid;
    246 
    247 	INIT_DEBUGOUT("ixv_lookup: begin");
    248 
    249 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    250 		return NULL;
    251 
    252 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    253 
    254 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    255 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    256 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    257 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    258 		     (ent->subvendor_id == 0)) &&
    259 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    260 		     (ent->subdevice_id == 0))) {
    261 			return ent;
    262 		}
    263 	}
    264 
    265 	return NULL;
    266 }
    267 
    268 /************************************************************************
    269  * ixv_attach - Device initialization routine
    270  *
    271  *   Called when the driver is being loaded.
    272  *   Identifies the type of hardware, allocates all resources
    273  *   and initializes the hardware.
    274  *
     275  *   No return value; on failure, resources are released and attach aborts.
    276  ************************************************************************/
    277 static void
    278 ixv_attach(device_t parent, device_t dev, void *aux)
    279 {
    280 	struct adapter *adapter;
    281 	struct ixgbe_hw *hw;
    282 	int             error = 0;
    283 	pcireg_t	id, subid;
    284 	ixgbe_vendor_info_t *ent;
    285 	const struct pci_attach_args *pa = aux;
    286 	const char *apivstr;
    287 	const char *str;
    288 	char buf[256];
    289 
    290 	INIT_DEBUGOUT("ixv_attach: begin");
    291 
     292 	/*
     293 	 * Make sure BUSMASTER is set; on a VM under
     294 	 * KVM it may not be, which breaks things.
     295 	 */
    296 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    297 
    298 	/* Allocate, clear, and link in our adapter structure */
    299 	adapter = device_private(dev);
    300 	adapter->dev = dev;
    301 	adapter->hw.back = adapter;
    302 	hw = &adapter->hw;
    303 
    304 	adapter->init_locked = ixv_init_locked;
    305 	adapter->stop_locked = ixv_stop;
    306 
    307 	adapter->osdep.pc = pa->pa_pc;
    308 	adapter->osdep.tag = pa->pa_tag;
    309 	if (pci_dma64_available(pa))
    310 		adapter->osdep.dmat = pa->pa_dmat64;
    311 	else
    312 		adapter->osdep.dmat = pa->pa_dmat;
    313 	adapter->osdep.attached = false;
    314 
    315 	ent = ixv_lookup(pa);
    316 
    317 	KASSERT(ent != NULL);
    318 
    319 	aprint_normal(": %s, Version - %s\n",
    320 	    ixv_strings[ent->index], ixv_driver_version);
    321 
     322 	/* Core Lock Init */
    323 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    324 
    325 	/* Do base PCI setup - map BAR0 */
    326 	if (ixv_allocate_pci_resources(adapter, pa)) {
    327 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    328 		error = ENXIO;
    329 		goto err_out;
    330 	}
    331 
    332 	/* SYSCTL APIs */
    333 	ixv_add_device_sysctls(adapter);
    334 
    335 	/* Set up the timer callout */
    336 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    337 
    338 	/* Save off the information about this board */
    339 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    340 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    341 	hw->vendor_id = PCI_VENDOR(id);
    342 	hw->device_id = PCI_PRODUCT(id);
    343 	hw->revision_id =
    344 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    345 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    346 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    347 
    348 	/* A subset of set_mac_type */
    349 	switch (hw->device_id) {
    350 	case IXGBE_DEV_ID_82599_VF:
    351 		hw->mac.type = ixgbe_mac_82599_vf;
    352 		str = "82599 VF";
    353 		break;
    354 	case IXGBE_DEV_ID_X540_VF:
    355 		hw->mac.type = ixgbe_mac_X540_vf;
    356 		str = "X540 VF";
    357 		break;
    358 	case IXGBE_DEV_ID_X550_VF:
    359 		hw->mac.type = ixgbe_mac_X550_vf;
    360 		str = "X550 VF";
    361 		break;
    362 	case IXGBE_DEV_ID_X550EM_X_VF:
    363 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    364 		str = "X550EM X VF";
    365 		break;
    366 	case IXGBE_DEV_ID_X550EM_A_VF:
    367 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    368 		str = "X550EM A VF";
    369 		break;
    370 	default:
    371 		/* Shouldn't get here since probe succeeded */
    372 		aprint_error_dev(dev, "Unknown device ID!\n");
    373 		error = ENXIO;
    374 		goto err_out;
    375 		break;
    376 	}
    377 	aprint_normal_dev(dev, "device %s\n", str);
    378 
    379 	ixv_init_device_features(adapter);
    380 
    381 	/* Initialize the shared code */
    382 	error = ixgbe_init_ops_vf(hw);
    383 	if (error) {
    384 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    385 		error = EIO;
    386 		goto err_out;
    387 	}
    388 
    389 	/* Setup the mailbox */
    390 	ixgbe_init_mbx_params_vf(hw);
    391 
    392 	/* Set the right number of segments */
    393 	adapter->num_segs = IXGBE_82599_SCATTER;
    394 
    395 	/* Reset mbox api to 1.0 */
    396 	error = hw->mac.ops.reset_hw(hw);
    397 	if (error == IXGBE_ERR_RESET_FAILED)
    398 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    399 	else if (error)
    400 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    401 		    error);
    402 	if (error) {
    403 		error = EIO;
    404 		goto err_out;
    405 	}
    406 
    407 	error = hw->mac.ops.init_hw(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "...init_hw() failed!\n");
    410 		error = EIO;
    411 		goto err_out;
    412 	}
    413 
    414 	/* Negotiate mailbox API version */
    415 	error = ixv_negotiate_api(adapter);
    416 	if (error)
    417 		aprint_normal_dev(dev,
    418 		    "MBX API negotiation failed during attach!\n");
    419 	switch (hw->api_version) {
    420 	case ixgbe_mbox_api_10:
    421 		apivstr = "1.0";
    422 		break;
    423 	case ixgbe_mbox_api_20:
    424 		apivstr = "2.0";
    425 		break;
    426 	case ixgbe_mbox_api_11:
    427 		apivstr = "1.1";
    428 		break;
    429 	case ixgbe_mbox_api_12:
    430 		apivstr = "1.2";
    431 		break;
    432 	case ixgbe_mbox_api_13:
    433 		apivstr = "1.3";
    434 		break;
    435 	default:
    436 		apivstr = "unknown";
    437 		break;
    438 	}
    439 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    440 
     441 	/* If no MAC address was assigned, make a random one */
    442 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    443 		u8 addr[ETHER_ADDR_LEN];
    444 		uint64_t rndval = cprng_strong64();
    445 
    446 		memcpy(addr, &rndval, sizeof(addr));
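         		/*
         		 * Clear the multicast (I/G) bit and set the locally
         		 * administered (U/L) bit so the result is a valid
         		 * unicast LAA.
         		 */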
    447 		addr[0] &= 0xFE;
    448 		addr[0] |= 0x02;
    449 		bcopy(addr, hw->mac.addr, sizeof(addr));
    450 	}
    451 
    452 	/* Register for VLAN events */
    453 #if 0 /* XXX delete after write? */
    454 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    455 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    457 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    458 #endif
    459 
    460 	/* Sysctls for limiting the amount of work done in the taskqueues */
    461 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    462 	    "max number of rx packets to process",
    463 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    464 
    465 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    466 	    "max number of tx packets to process",
    467 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    468 
    469 	/* Do descriptor calc and sanity checks */
    470 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    471 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    472 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    473 		adapter->num_tx_desc = DEFAULT_TXD;
    474 	} else
    475 		adapter->num_tx_desc = ixv_txd;
    476 
    477 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    478 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    479 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    480 		adapter->num_rx_desc = DEFAULT_RXD;
    481 	} else
    482 		adapter->num_rx_desc = ixv_rxd;
    483 
    484 	/* Setup MSI-X */
    485 	error = ixv_configure_interrupts(adapter);
    486 	if (error)
    487 		goto err_out;
    488 
    489 	/* Allocate our TX/RX Queues */
    490 	if (ixgbe_allocate_queues(adapter)) {
    491 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    492 		error = ENOMEM;
    493 		goto err_out;
    494 	}
    495 
    496 	/* hw.ix defaults init */
    497 	adapter->enable_aim = ixv_enable_aim;
    498 
    499 	/* Setup OS specific network interface */
    500 	ixv_setup_interface(dev, adapter);
    501 
    502 	error = ixv_allocate_msix(adapter, pa);
    503 	if (error) {
    504 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    505 		goto err_late;
    506 	}
    507 
    508 	/* Do the stats setup */
    509 	ixv_save_stats(adapter);
    510 	ixv_init_stats(adapter);
    511 	ixv_add_stats_sysctls(adapter);
    512 
    513 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    514 		ixgbe_netmap_attach(adapter);
    515 
    516 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    517 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    518 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    519 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    520 
    521 	INIT_DEBUGOUT("ixv_attach: end");
    522 	adapter->osdep.attached = true;
    523 
    524 	return;
    525 
    526 err_late:
    527 	ixgbe_free_transmit_structures(adapter);
    528 	ixgbe_free_receive_structures(adapter);
    529 	free(adapter->queues, M_DEVBUF);
    530 err_out:
    531 	ixv_free_pci_resources(adapter);
    532 	IXGBE_CORE_LOCK_DESTROY(adapter);
    533 
    534 	return;
    535 } /* ixv_attach */
    536 
    537 /************************************************************************
    538  * ixv_detach - Device removal routine
    539  *
    540  *   Called when the driver is being removed.
    541  *   Stops the adapter and deallocates all the resources
    542  *   that were allocated for driver operation.
    543  *
    544  *   return 0 on success, positive on failure
    545  ************************************************************************/
    546 static int
    547 ixv_detach(device_t dev, int flags)
    548 {
    549 	struct adapter  *adapter = device_private(dev);
    550 	struct ix_queue *que = adapter->queues;
    551 	struct tx_ring *txr = adapter->tx_rings;
    552 	struct rx_ring *rxr = adapter->rx_rings;
    553 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    554 
    555 	INIT_DEBUGOUT("ixv_detach: begin");
    556 	if (adapter->osdep.attached == false)
    557 		return 0;
    558 
    559 	/* Stop the interface. Callouts are stopped in it. */
    560 	ixv_ifstop(adapter->ifp, 1);
    561 
    562 #if NVLAN > 0
    563 	/* Make sure VLANs are not using driver */
    564 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    565 		;	/* nothing to do: no VLANs */
    566 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    567 		vlan_ifdetach(adapter->ifp);
    568 	else {
    569 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    570 		return EBUSY;
    571 	}
    572 #endif
    573 
    574 	IXGBE_CORE_LOCK(adapter);
    575 	ixv_stop(adapter);
    576 	IXGBE_CORE_UNLOCK(adapter);
    577 
    578 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    579 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    580 			softint_disestablish(txr->txr_si);
    581 		softint_disestablish(que->que_si);
    582 	}
    583 
    584 	/* Drain the Mailbox(link) queue */
    585 	softint_disestablish(adapter->link_si);
    586 
    587 	/* Unregister VLAN events */
    588 #if 0 /* XXX msaitoh delete after write? */
    589 	if (adapter->vlan_attach != NULL)
    590 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    591 	if (adapter->vlan_detach != NULL)
    592 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    593 #endif
    594 
    595 	ether_ifdetach(adapter->ifp);
    596 	callout_halt(&adapter->timer, NULL);
    597 
    598 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    599 		netmap_detach(adapter->ifp);
    600 
    601 	ixv_free_pci_resources(adapter);
    602 #if 0 /* XXX the NetBSD port is probably missing something here */
    603 	bus_generic_detach(dev);
    604 #endif
    605 	if_detach(adapter->ifp);
    606 	if_percpuq_destroy(adapter->ipq);
    607 
    608 	sysctl_teardown(&adapter->sysctllog);
    609 	evcnt_detach(&adapter->handleq);
    610 	evcnt_detach(&adapter->req);
    611 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    612 	evcnt_detach(&adapter->mbuf_defrag_failed);
    613 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    614 	evcnt_detach(&adapter->einval_tx_dma_setup);
    615 	evcnt_detach(&adapter->other_tx_dma_setup);
    616 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    617 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    618 	evcnt_detach(&adapter->watchdog_events);
    619 	evcnt_detach(&adapter->tso_err);
    620 	evcnt_detach(&adapter->link_irq);
    621 
    622 	txr = adapter->tx_rings;
    623 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    624 		evcnt_detach(&adapter->queues[i].irqs);
    625 		evcnt_detach(&txr->no_desc_avail);
    626 		evcnt_detach(&txr->total_packets);
    627 		evcnt_detach(&txr->tso_tx);
    628 #ifndef IXGBE_LEGACY_TX
    629 		evcnt_detach(&txr->pcq_drops);
    630 #endif
    631 
    632 		evcnt_detach(&rxr->rx_packets);
    633 		evcnt_detach(&rxr->rx_bytes);
    634 		evcnt_detach(&rxr->rx_copies);
    635 		evcnt_detach(&rxr->no_jmbuf);
    636 		evcnt_detach(&rxr->rx_discarded);
    637 	}
    638 	evcnt_detach(&stats->ipcs);
    639 	evcnt_detach(&stats->l4cs);
    640 	evcnt_detach(&stats->ipcs_bad);
    641 	evcnt_detach(&stats->l4cs_bad);
    642 
    643 	/* Packet Reception Stats */
    644 	evcnt_detach(&stats->vfgorc);
    645 	evcnt_detach(&stats->vfgprc);
    646 	evcnt_detach(&stats->vfmprc);
    647 
    648 	/* Packet Transmission Stats */
    649 	evcnt_detach(&stats->vfgotc);
    650 	evcnt_detach(&stats->vfgptc);
    651 
    652 	ixgbe_free_transmit_structures(adapter);
    653 	ixgbe_free_receive_structures(adapter);
    654 	free(adapter->queues, M_DEVBUF);
    655 
    656 	IXGBE_CORE_LOCK_DESTROY(adapter);
    657 
    658 	return (0);
    659 } /* ixv_detach */
    660 
    661 /************************************************************************
    662  * ixv_init_locked - Init entry point
    663  *
     664  *   Used in two ways: it is used by the stack as an init entry
     665  *   point in the network interface structure, and it is also used
     666  *   by the driver as a hw/sw initialization routine to get the
     667  *   hardware to a consistent state.
     668  *
     669  *   No return value; on failure the adapter is stopped.
    670  ************************************************************************/
    671 static void
    672 ixv_init_locked(struct adapter *adapter)
    673 {
    674 	struct ifnet	*ifp = adapter->ifp;
    675 	device_t 	dev = adapter->dev;
    676 	struct ixgbe_hw *hw = &adapter->hw;
    677 	int             error = 0;
    678 
    679 	INIT_DEBUGOUT("ixv_init_locked: begin");
    680 	KASSERT(mutex_owned(&adapter->core_mtx));
    681 	hw->adapter_stopped = FALSE;
    682 	hw->mac.ops.stop_adapter(hw);
    683 	callout_stop(&adapter->timer);
    684 
    685 	/* reprogram the RAR[0] in case user changed it. */
    686 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    687 
    688 	/* Get the latest mac address, User can use a LAA */
    689 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    690 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    691 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    692 
    693 	/* Prepare transmit descriptors and buffers */
    694 	if (ixgbe_setup_transmit_structures(adapter)) {
    695 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    696 		ixv_stop(adapter);
    697 		return;
    698 	}
    699 
    700 	/* Reset VF and renegotiate mailbox API version */
    701 	hw->mac.ops.reset_hw(hw);
    702 	error = ixv_negotiate_api(adapter);
    703 	if (error)
    704 		device_printf(dev,
    705 		    "Mailbox API negotiation failed in init_locked!\n");
    706 
    707 	ixv_initialize_transmit_units(adapter);
    708 
    709 	/* Setup Multicast table */
    710 	ixv_set_multi(adapter);
    711 
    712 	/*
    713 	 * Determine the correct mbuf pool
    714 	 * for doing jumbo/headersplit
    715 	 */
    716 	if (ifp->if_mtu > ETHERMTU)
    717 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    718 	else
    719 		adapter->rx_mbuf_sz = MCLBYTES;
    720 
    721 	/* Prepare receive descriptors and buffers */
    722 	if (ixgbe_setup_receive_structures(adapter)) {
    723 		device_printf(dev, "Could not setup receive structures\n");
    724 		ixv_stop(adapter);
    725 		return;
    726 	}
    727 
    728 	/* Configure RX settings */
    729 	ixv_initialize_receive_units(adapter);
    730 
    731 #if 0 /* XXX isn't it required? -- msaitoh  */
    732 	/* Set the various hardware offload abilities */
    733 	ifp->if_hwassist = 0;
    734 	if (ifp->if_capenable & IFCAP_TSO4)
    735 		ifp->if_hwassist |= CSUM_TSO;
    736 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    737 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    738 #if __FreeBSD_version >= 800000
    739 		ifp->if_hwassist |= CSUM_SCTP;
    740 #endif
    741 	}
    742 #endif
    743 
    744 	/* Set up VLAN offload and filter */
    745 	ixv_setup_vlan_support(adapter);
    746 
    747 	/* Set up MSI-X routing */
    748 	ixv_configure_ivars(adapter);
    749 
    750 	/* Set up auto-mask */
    751 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    752 
    753 	/* Set moderation on the Link interrupt */
    754 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    755 
    756 	/* Stats init */
    757 	ixv_init_stats(adapter);
    758 
    759 	/* Config/Enable Link */
    760 	hw->mac.get_link_status = TRUE;
    761 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    762 	    FALSE);
    763 
    764 	/* Start watchdog */
    765 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    766 
    767 	/* And now turn on interrupts */
    768 	ixv_enable_intr(adapter);
    769 
    770 	/* Now inform the stack we're ready */
    771 	ifp->if_flags |= IFF_RUNNING;
    772 	ifp->if_flags &= ~IFF_OACTIVE;
    773 
    774 	return;
    775 } /* ixv_init_locked */
    776 
    777 /*
    778  * MSI-X Interrupt Handlers and Tasklets
    779  */
    780 
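         /*
          * A note on the VF interrupt registers used below: writing a
          * queue's bit to VTEIMS unmasks (enables) that vector, writing
          * it to VTEIMC masks (disables) it, and writing it to VTEICS
          * sets the interrupt cause, forcing the vector to fire.
          */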
    781 static inline void
    782 ixv_enable_queue(struct adapter *adapter, u32 vector)
    783 {
    784 	struct ixgbe_hw *hw = &adapter->hw;
    785 	u32             queue = 1 << vector;
    786 	u32             mask;
    787 
    788 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    789 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    790 } /* ixv_enable_queue */
    791 
    792 static inline void
    793 ixv_disable_queue(struct adapter *adapter, u32 vector)
    794 {
    795 	struct ixgbe_hw *hw = &adapter->hw;
     796 	u64             queue = (u64)1 << vector; /* shift in 64 bits */
    797 	u32             mask;
    798 
    799 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    800 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    801 } /* ixv_disable_queue */
    802 
    803 static inline void
    804 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    805 {
    806 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    807 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    808 } /* ixv_rearm_queues */
    809 
    810 
    811 /************************************************************************
    812  * ixv_msix_que - MSI Queue Interrupt Service routine
    813  ************************************************************************/
    814 static int
    815 ixv_msix_que(void *arg)
    816 {
    817 	struct ix_queue	*que = arg;
    818 	struct adapter  *adapter = que->adapter;
    819 	struct tx_ring	*txr = que->txr;
    820 	struct rx_ring	*rxr = que->rxr;
    821 	bool		more;
    822 	u32		newitr = 0;
    823 
    824 	ixv_disable_queue(adapter, que->msix);
    825 	++que->irqs.ev_count;
    826 
    827 #ifdef __NetBSD__
    828 	/* Don't run ixgbe_rxeof in interrupt context */
    829 	more = true;
    830 #else
    831 	more = ixgbe_rxeof(que);
    832 #endif
    833 
    834 	IXGBE_TX_LOCK(txr);
    835 	ixgbe_txeof(txr);
    836 	IXGBE_TX_UNLOCK(txr);
    837 
    838 	/* Do AIM now? */
    839 
    840 	if (adapter->enable_aim == false)
    841 		goto no_calc;
    842 	/*
    843 	 * Do Adaptive Interrupt Moderation:
    844 	 *  - Write out last calculated setting
    845 	 *  - Calculate based on average size over
    846 	 *    the last interval.
    847 	 */
    848 	if (que->eitr_setting)
    849 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    850 		    que->eitr_setting);
    851 
    852 	que->eitr_setting = 0;
    853 
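         	/*
         	 * Compute a new setting from the average bytes per packet
         	 * seen on this queue since the last interrupt; it is written
         	 * to VTEITR on the next interrupt.
         	 */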
    854 	/* Idle, do nothing */
    855 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    856 		goto no_calc;
    857 
    858 	if ((txr->bytes) && (txr->packets))
    859 		newitr = txr->bytes/txr->packets;
    860 	if ((rxr->bytes) && (rxr->packets))
    861 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    862 	newitr += 24; /* account for hardware frame, crc */
    863 
    864 	/* set an upper boundary */
    865 	newitr = min(newitr, 3000);
    866 
    867 	/* Be nice to the mid range */
    868 	if ((newitr > 300) && (newitr < 1200))
    869 		newitr = (newitr / 3);
    870 	else
    871 		newitr = (newitr / 2);
    872 
    873 	newitr |= newitr << 16;
    874 
    875 	/* save for next interrupt */
    876 	que->eitr_setting = newitr;
    877 
    878 	/* Reset state */
    879 	txr->bytes = 0;
    880 	txr->packets = 0;
    881 	rxr->bytes = 0;
    882 	rxr->packets = 0;
    883 
    884 no_calc:
    885 	if (more)
    886 		softint_schedule(que->que_si);
    887 	else /* Re-enable this interrupt */
    888 		ixv_enable_queue(adapter, que->msix);
    889 
    890 	return 1;
    891 } /* ixv_msix_que */
    892 
    893 /************************************************************************
    894  * ixv_msix_mbx
    895  ************************************************************************/
    896 static int
    897 ixv_msix_mbx(void *arg)
    898 {
    899 	struct adapter	*adapter = arg;
    900 	struct ixgbe_hw *hw = &adapter->hw;
    901 	u32		reg;
    902 
    903 	++adapter->link_irq.ev_count;
    904 
    905 	/* First get the cause */
    906 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    907 	/* Clear interrupt with write */
    908 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    909 
    910 	/* Link status change */
    911 	if (reg & IXGBE_EICR_LSC)
    912 		softint_schedule(adapter->link_si);
    913 
    914 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    915 
    916 	return 1;
    917 } /* ixv_msix_mbx */
    918 
    919 /************************************************************************
    920  * ixv_media_status - Media Ioctl callback
    921  *
    922  *   Called whenever the user queries the status of
    923  *   the interface using ifconfig.
    924  ************************************************************************/
    925 static void
    926 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    927 {
    928 	struct adapter *adapter = ifp->if_softc;
    929 
    930 	INIT_DEBUGOUT("ixv_media_status: begin");
    931 	IXGBE_CORE_LOCK(adapter);
    932 	ixv_update_link_status(adapter);
    933 
    934 	ifmr->ifm_status = IFM_AVALID;
    935 	ifmr->ifm_active = IFM_ETHER;
    936 
    937 	if (!adapter->link_active) {
    938 		ifmr->ifm_active |= IFM_NONE;
    939 		IXGBE_CORE_UNLOCK(adapter);
    940 		return;
    941 	}
    942 
    943 	ifmr->ifm_status |= IFM_ACTIVE;
    944 
    945 	switch (adapter->link_speed) {
    946 		case IXGBE_LINK_SPEED_10GB_FULL:
    947 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    948 			break;
    949 		case IXGBE_LINK_SPEED_1GB_FULL:
    950 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    951 			break;
    952 		case IXGBE_LINK_SPEED_100_FULL:
    953 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    954 			break;
    955 		case IXGBE_LINK_SPEED_10_FULL:
    956 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    957 			break;
    958 	}
    959 
    960 	IXGBE_CORE_UNLOCK(adapter);
    961 
    962 	return;
    963 } /* ixv_media_status */
    964 
    965 /************************************************************************
    966  * ixv_media_change - Media Ioctl callback
    967  *
    968  *   Called when the user changes speed/duplex using
     969  *   the media/mediaopt options of ifconfig.
    970  ************************************************************************/
    971 static int
    972 ixv_media_change(struct ifnet *ifp)
    973 {
    974 	struct adapter *adapter = ifp->if_softc;
    975 	struct ifmedia *ifm = &adapter->media;
    976 
    977 	INIT_DEBUGOUT("ixv_media_change: begin");
    978 
    979 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    980 		return (EINVAL);
    981 
    982 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    983 	case IFM_AUTO:
    984 		break;
    985 	default:
    986 		device_printf(adapter->dev, "Only auto media type\n");
    987 		return (EINVAL);
    988 	}
    989 
    990 	return (0);
    991 } /* ixv_media_change */
    992 
    993 
    994 /************************************************************************
    995  * ixv_negotiate_api
    996  *
    997  *   Negotiate the Mailbox API with the PF;
    998  *   start with the most featured API first.
    999  ************************************************************************/
   1000 static int
   1001 ixv_negotiate_api(struct adapter *adapter)
   1002 {
   1003 	struct ixgbe_hw *hw = &adapter->hw;
   1004 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1005 	                              ixgbe_mbox_api_10,
   1006 	                              ixgbe_mbox_api_unknown };
   1007 	int             i = 0;
   1008 
   1009 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1010 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1011 			return (0);
   1012 		i++;
   1013 	}
   1014 
   1015 	return (EINVAL);
   1016 } /* ixv_negotiate_api */
   1017 
   1018 
   1019 /************************************************************************
   1020  * ixv_set_multi - Multicast Update
   1021  *
   1022  *   Called whenever multicast address list is updated.
   1023  ************************************************************************/
   1024 static void
   1025 ixv_set_multi(struct adapter *adapter)
   1026 {
   1027 	struct ether_multi *enm;
   1028 	struct ether_multistep step;
   1029 	struct ethercom *ec = &adapter->osdep.ec;
   1030 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1031 	u8                 *update_ptr;
   1032 	int                mcnt = 0;
   1033 
   1034 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1035 
   1036 	ETHER_FIRST_MULTI(step, ec, enm);
   1037 	while (enm != NULL) {
   1038 		bcopy(enm->enm_addrlo,
   1039 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1040 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1041 		mcnt++;
   1042 		/* XXX This might be required --msaitoh */
   1043 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1044 			break;
   1045 		ETHER_NEXT_MULTI(step, enm);
   1046 	}
   1047 
   1048 	update_ptr = mta;
   1049 
   1050 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1051 	    ixv_mc_array_itr, TRUE);
   1052 
   1053 	return;
   1054 } /* ixv_set_multi */
   1055 
   1056 /************************************************************************
   1057  * ixv_mc_array_itr
   1058  *
   1059  *   An iterator function needed by the multicast shared code.
   1060  *   It feeds the shared code routine the addresses in the
   1061  *   array of ixv_set_multi() one by one.
   1062  ************************************************************************/
   1063 static u8 *
   1064 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1065 {
   1066 	u8 *addr = *update_ptr;
   1067 	u8 *newptr;
   1068 	*vmdq = 0;
   1069 
   1070 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1071 	*update_ptr = newptr;
   1072 
   1073 	return addr;
   1074 } /* ixv_mc_array_itr */
   1075 
   1076 /************************************************************************
   1077  * ixv_local_timer - Timer routine
   1078  *
   1079  *   Checks for link status, updates statistics,
   1080  *   and runs the watchdog check.
   1081  ************************************************************************/
   1082 static void
   1083 ixv_local_timer(void *arg)
   1084 {
   1085 	struct adapter *adapter = arg;
   1086 
   1087 	IXGBE_CORE_LOCK(adapter);
   1088 	ixv_local_timer_locked(adapter);
   1089 	IXGBE_CORE_UNLOCK(adapter);
   1090 }
   1091 
   1092 static void
   1093 ixv_local_timer_locked(void *arg)
   1094 {
   1095 	struct adapter	*adapter = arg;
   1096 	device_t	dev = adapter->dev;
   1097 	struct ix_queue	*que = adapter->queues;
   1098 	u64		queues = 0;
   1099 	int		hung = 0;
   1100 
   1101 	KASSERT(mutex_owned(&adapter->core_mtx));
   1102 
   1103 	ixv_check_link(adapter);
   1104 
   1105 	/* Stats Update */
   1106 	ixv_update_stats(adapter);
   1107 
   1108 	/*
   1109 	 * Check the TX queues status
   1110 	 *      - mark hung queues so we don't schedule on them
   1111 	 *      - watchdog only if all queues show hung
   1112 	 */
   1113 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1114 		/* Keep track of queues with work for soft irq */
   1115 		if (que->txr->busy)
   1116 			queues |= ((u64)1 << que->me);
    1117 		/*
    1118 		 * Each time txeof runs without cleaning while there
    1119 		 * are uncleaned descriptors, it increments busy.  If
    1120 		 * busy reaches the MAX we declare the queue hung.
    1121 		 */
   1122 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1123 			++hung;
   1124 			/* Mark the queue as inactive */
   1125 			adapter->active_queues &= ~((u64)1 << que->me);
   1126 			continue;
   1127 		} else {
   1128 			/* Check if we've come back from hung */
   1129 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1130 				adapter->active_queues |= ((u64)1 << que->me);
   1131 		}
   1132 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1133 			device_printf(dev,
   1134 			    "Warning queue %d appears to be hung!\n", i);
   1135 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1136 			++hung;
   1137 		}
   1138 	}
   1139 
   1140 	/* Only truly watchdog if all queues show hung */
   1141 	if (hung == adapter->num_queues)
   1142 		goto watchdog;
   1143 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1144 		ixv_rearm_queues(adapter, queues);
   1145 	}
   1146 
   1147 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1148 
   1149 	return;
   1150 
   1151 watchdog:
   1152 
   1153 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1154 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1155 	adapter->watchdog_events.ev_count++;
   1156 	ixv_init_locked(adapter);
   1157 } /* ixv_local_timer */
   1158 
   1159 /************************************************************************
   1160  * ixv_update_link_status - Update OS on link state
   1161  *
   1162  * Note: Only updates the OS on the cached link state.
   1163  *       The real check of the hardware only happens with
   1164  *       a link interrupt.
   1165  ************************************************************************/
   1166 static void
   1167 ixv_update_link_status(struct adapter *adapter)
   1168 {
   1169 	struct ifnet *ifp = adapter->ifp;
   1170 	device_t     dev = adapter->dev;
   1171 
   1172 	if (adapter->link_up) {
   1173 		if (adapter->link_active == FALSE) {
   1174 			if (bootverbose) {
   1175 				const char *bpsmsg;
   1176 
   1177 				switch (adapter->link_speed) {
   1178 				case IXGBE_LINK_SPEED_10GB_FULL:
   1179 					bpsmsg = "10 Gbps";
   1180 					break;
   1181 				case IXGBE_LINK_SPEED_5GB_FULL:
   1182 					bpsmsg = "5 Gbps";
   1183 					break;
   1184 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1185 					bpsmsg = "2.5 Gbps";
   1186 					break;
   1187 				case IXGBE_LINK_SPEED_1GB_FULL:
   1188 					bpsmsg = "1 Gbps";
   1189 					break;
   1190 				case IXGBE_LINK_SPEED_100_FULL:
   1191 					bpsmsg = "100 Mbps";
   1192 					break;
   1193 				case IXGBE_LINK_SPEED_10_FULL:
   1194 					bpsmsg = "10 Mbps";
   1195 					break;
   1196 				default:
   1197 					bpsmsg = "unknown speed";
   1198 					break;
   1199 				}
    1200 				device_printf(dev, "Link is up %s %s\n",
   1201 				    bpsmsg, "Full Duplex");
   1202 			}
   1203 			adapter->link_active = TRUE;
   1204 			if_link_state_change(ifp, LINK_STATE_UP);
   1205 		}
   1206 	} else { /* Link down */
   1207 		if (adapter->link_active == TRUE) {
   1208 			if (bootverbose)
   1209 				device_printf(dev, "Link is Down\n");
   1210 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1211 			adapter->link_active = FALSE;
   1212 		}
   1213 	}
   1214 
   1215 	return;
   1216 } /* ixv_update_link_status */
   1217 
   1218 
   1219 /************************************************************************
   1220  * ixv_stop - Stop the hardware
   1221  *
    1222  *   Disables all traffic on the adapter by issuing a
    1223  *   global reset on the MAC; TX/RX buffers are freed in detach.
   1224  ************************************************************************/
   1225 static void
   1226 ixv_ifstop(struct ifnet *ifp, int disable)
   1227 {
   1228 	struct adapter *adapter = ifp->if_softc;
   1229 
   1230 	IXGBE_CORE_LOCK(adapter);
   1231 	ixv_stop(adapter);
   1232 	IXGBE_CORE_UNLOCK(adapter);
   1233 }
   1234 
   1235 static void
   1236 ixv_stop(void *arg)
   1237 {
   1238 	struct ifnet    *ifp;
   1239 	struct adapter  *adapter = arg;
   1240 	struct ixgbe_hw *hw = &adapter->hw;
   1241 
   1242 	ifp = adapter->ifp;
   1243 
   1244 	KASSERT(mutex_owned(&adapter->core_mtx));
   1245 
   1246 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1247 	ixv_disable_intr(adapter);
   1248 
   1249 	/* Tell the stack that the interface is no longer active */
   1250 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1251 
   1252 	hw->mac.ops.reset_hw(hw);
   1253 	adapter->hw.adapter_stopped = FALSE;
   1254 	hw->mac.ops.stop_adapter(hw);
   1255 	callout_stop(&adapter->timer);
   1256 
   1257 	/* reprogram the RAR[0] in case user changed it. */
   1258 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1259 
   1260 	return;
   1261 } /* ixv_stop */
   1262 
   1263 
   1264 /************************************************************************
   1265  * ixv_allocate_pci_resources
   1266  ************************************************************************/
   1267 static int
   1268 ixv_allocate_pci_resources(struct adapter *adapter,
   1269     const struct pci_attach_args *pa)
   1270 {
   1271 	pcireg_t	memtype;
   1272 	device_t        dev = adapter->dev;
   1273 	bus_addr_t addr;
   1274 	int flags;
   1275 
   1276 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1277 	switch (memtype) {
   1278 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1279 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1280 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1281 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1282 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1283 			goto map_err;
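         		/*
         		 * Device registers must be mapped non-prefetchable so
         		 * that register accesses are not prefetched or combined.
         		 */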
   1284 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1285 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1286 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1287 		}
   1288 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1289 		     adapter->osdep.mem_size, flags,
   1290 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1291 map_err:
   1292 			adapter->osdep.mem_size = 0;
   1293 			aprint_error_dev(dev, "unable to map BAR0\n");
   1294 			return ENXIO;
   1295 		}
   1296 		break;
   1297 	default:
   1298 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1299 		return ENXIO;
   1300 	}
   1301 
   1302 	/* Pick up the tuneable queues */
   1303 	adapter->num_queues = ixv_num_queues;
   1304 
   1305 	return (0);
   1306 } /* ixv_allocate_pci_resources */
   1307 
   1308 /************************************************************************
   1309  * ixv_free_pci_resources
   1310  ************************************************************************/
   1311 static void
   1312 ixv_free_pci_resources(struct adapter * adapter)
   1313 {
   1314 	struct 		ix_queue *que = adapter->queues;
   1315 	int		rid;
   1316 
   1317 	/*
   1318 	 *  Release all msix queue resources:
   1319 	 */
   1320 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1321 		if (que->res != NULL)
   1322 			pci_intr_disestablish(adapter->osdep.pc,
   1323 			    adapter->osdep.ihs[i]);
   1324 	}
   1325 
   1326 
   1327 	/* Clean the Mailbox interrupt last */
   1328 	rid = adapter->vector;
   1329 
   1330 	if (adapter->osdep.ihs[rid] != NULL) {
   1331 		pci_intr_disestablish(adapter->osdep.pc,
   1332 		    adapter->osdep.ihs[rid]);
   1333 		adapter->osdep.ihs[rid] = NULL;
   1334 	}
   1335 
   1336 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1337 	    adapter->osdep.nintrs);
   1338 
   1339 	if (adapter->osdep.mem_size != 0) {
   1340 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1341 		    adapter->osdep.mem_bus_space_handle,
   1342 		    adapter->osdep.mem_size);
   1343 	}
   1344 
   1345 	return;
   1346 } /* ixv_free_pci_resources */
   1347 
   1348 /************************************************************************
   1349  * ixv_setup_interface
   1350  *
   1351  *   Setup networking device structure and register an interface.
   1352  ************************************************************************/
   1353 static void
   1354 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1355 {
   1356 	struct ethercom *ec = &adapter->osdep.ec;
   1357 	struct ifnet   *ifp;
   1358 
   1359 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1360 
   1361 	ifp = adapter->ifp = &ec->ec_if;
   1362 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1363 	ifp->if_baudrate = IF_Gbps(10);
   1364 	ifp->if_init = ixv_init;
   1365 	ifp->if_stop = ixv_ifstop;
   1366 	ifp->if_softc = adapter;
   1367 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1368 #ifdef IXGBE_MPSAFE
   1369 	ifp->if_extflags = IFEF_START_MPSAFE;
   1370 #endif
   1371 	ifp->if_ioctl = ixv_ioctl;
   1372 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1373 #if 0
   1374 		ixv_start_locked = ixgbe_legacy_start_locked;
   1375 #endif
   1376 	} else {
   1377 		ifp->if_transmit = ixgbe_mq_start;
   1378 #if 0
   1379 		ixv_start_locked = ixgbe_mq_start_locked;
   1380 #endif
   1381 	}
   1382 	ifp->if_start = ixgbe_legacy_start;
   1383 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1384 	IFQ_SET_READY(&ifp->if_snd);
   1385 
   1386 	if_initialize(ifp);
   1387 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1388 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1389 	/*
   1390 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1391 	 * used.
   1392 	 */
   1393 	if_register(ifp);
   1394 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1395 
   1396 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1397 
   1398 	/*
   1399 	 * Tell the upper layer(s) we support long frames.
   1400 	 */
   1401 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1402 
   1403 	/* Set capability flags */
   1404 	ifp->if_capabilities |= IFCAP_HWCSUM
   1405 	                     |  IFCAP_TSOv4
   1406 	                     |  IFCAP_TSOv6;
   1407 	ifp->if_capenable = 0;
   1408 
   1409 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1410 			    |  ETHERCAP_VLAN_HWCSUM
   1411 			    |  ETHERCAP_JUMBO_MTU
   1412 			    |  ETHERCAP_VLAN_MTU;
   1413 
   1414 	/* Enable the above capabilities by default */
   1415 	ec->ec_capenable = ec->ec_capabilities;
   1416 
   1417 	/* Don't enable LRO by default */
   1418 	ifp->if_capabilities |= IFCAP_LRO;
   1419 #if 0
   1420 	ifp->if_capenable = ifp->if_capabilities;
   1421 #endif
   1422 
   1423 	/*
   1424 	 * Specify the media types supported by this adapter and register
   1425 	 * callbacks to update media and link information
   1426 	 */
   1427 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1428 	    ixv_media_status);
   1429 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1430 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1431 
   1432 	return;
   1433 } /* ixv_setup_interface */
   1434 
   1435 
   1436 /************************************************************************
   1437  * ixv_initialize_transmit_units - Enable transmit unit.
   1438  ************************************************************************/
   1439 static void
   1440 ixv_initialize_transmit_units(struct adapter *adapter)
   1441 {
   1442 	struct tx_ring	*txr = adapter->tx_rings;
   1443 	struct ixgbe_hw	*hw = &adapter->hw;
   1444 
   1445 
   1446 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1447 		u64 tdba = txr->txdma.dma_paddr;
   1448 		u32 txctrl, txdctl;
   1449 
    1450 		/* Set WTHRESH (TXDCTL bits 22:16) to 8, burst writeback */
   1451 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1452 		txdctl |= (8 << 16);
   1453 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1454 
   1455 		/* Set the HW Tx Head and Tail indices */
   1456 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1457 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1458 
   1459 		/* Set Tx Tail register */
   1460 		txr->tail = IXGBE_VFTDT(i);
   1461 
   1462 		/* Set Ring parameters */
   1463 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1464 		    (tdba & 0x00000000ffffffffULL));
   1465 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1466 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1467 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1468 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1469 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1470 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1471 
   1472 		/* Now enable */
   1473 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1474 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1475 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1476 	}
   1477 
   1478 	return;
   1479 } /* ixv_initialize_transmit_units */
   1480 
   1481 
   1482 /************************************************************************
   1483  * ixv_initialize_rss_mapping
   1484  ************************************************************************/
   1485 static void
   1486 ixv_initialize_rss_mapping(struct adapter *adapter)
   1487 {
   1488 	struct ixgbe_hw *hw = &adapter->hw;
   1489 	u32             reta = 0, mrqc, rss_key[10];
   1490 	int             queue_id;
   1491 	int             i, j;
   1492 	u32             rss_hash_config;
   1493 
   1494 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1495 		/* Fetch the configured RSS key */
   1496 		rss_getkey((uint8_t *)&rss_key);
   1497 	} else {
   1498 		/* set up random bits */
   1499 		cprng_fast(&rss_key, sizeof(rss_key));
   1500 	}
   1501 
   1502 	/* Now fill out hash function seeds */
   1503 	for (i = 0; i < 10; i++)
   1504 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1505 
   1506 	/* Set up the redirection table */
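         	/*
         	 * Each 32-bit VFRETA register holds four 8-bit entries, so we
         	 * accumulate four queue ids in 'reta' and write the register
         	 * on every fourth iteration.
         	 */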
   1507 	for (i = 0, j = 0; i < 64; i++, j++) {
   1508 		if (j == adapter->num_queues)
   1509 			j = 0;
   1510 
   1511 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1512 			/*
   1513 			 * Fetch the RSS bucket id for the given indirection
   1514 			 * entry. Cap it at the number of configured buckets
   1515 			 * (which is num_queues.)
   1516 			 */
   1517 			queue_id = rss_get_indirection_to_bucket(i);
   1518 			queue_id = queue_id % adapter->num_queues;
   1519 		} else
   1520 			queue_id = j;
   1521 
   1522 		/*
   1523 		 * The low 8 bits are for hash value (n+0);
   1524 		 * The next 8 bits are for hash value (n+1), etc.
   1525 		 */
   1526 		reta >>= 8;
   1527 		reta |= ((uint32_t)queue_id) << 24;
   1528 		if ((i & 3) == 3) {
   1529 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1530 			reta = 0;
   1531 		}
   1532 	}
   1533 
   1534 	/* Perform hash on these packet types */
   1535 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1536 		rss_hash_config = rss_gethashconfig();
   1537 	else {
   1538 		/*
   1539 		 * Disable UDP - IP fragments aren't currently being handled
   1540 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1541 		 * traffic.
   1542 		 */
   1543 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1544 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1545 		                | RSS_HASHTYPE_RSS_IPV6
   1546 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1547 	}
   1548 
   1549 	mrqc = IXGBE_MRQC_RSSEN;
   1550 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1551 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1552 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1553 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1554 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1555 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1556 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1557 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1558 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1559 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1560 		    __func__);
   1561 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1562 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1563 		    __func__);
   1564 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1565 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1566 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1567 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1568 		    __func__);
   1569 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1570 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1571 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1572 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1573 		    __func__);
   1574 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1575 } /* ixv_initialize_rss_mapping */
   1576 
   1577 
   1578 /************************************************************************
   1579  * ixv_initialize_receive_units - Setup receive registers and features.
   1580  ************************************************************************/
   1581 static void
   1582 ixv_initialize_receive_units(struct adapter *adapter)
   1583 {
   1584 	struct	rx_ring	*rxr = adapter->rx_rings;
   1585 	struct ixgbe_hw	*hw = &adapter->hw;
   1586 	struct ifnet	*ifp = adapter->ifp;
   1587 	u32		bufsz, rxcsum, psrtype;
   1588 
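         	/* The SRRCTL packet buffer size field is in 1KB units, hence the shift. */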
   1589 	if (ifp->if_mtu > ETHERMTU)
   1590 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1591 	else
   1592 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1593 
   1594 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1595 	        | IXGBE_PSRTYPE_UDPHDR
   1596 	        | IXGBE_PSRTYPE_IPV4HDR
   1597 	        | IXGBE_PSRTYPE_IPV6HDR
   1598 	        | IXGBE_PSRTYPE_L2HDR;
   1599 
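         	/*
         	 * With more than one queue, also advertise multiple RSS
         	 * queues per pool via the high-order bits of VFPSRTYPE.
         	 */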
   1600 	if (adapter->num_queues > 1)
   1601 		psrtype |= 1 << 29;
   1602 
   1603 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1604 
   1605 	/* Tell PF our max_frame size */
   1606 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1607 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1608 	}
   1609 
   1610 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1611 		u64 rdba = rxr->rxdma.dma_paddr;
   1612 		u32 reg, rxdctl;
   1613 
   1614 		/* Disable the queue */
   1615 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1616 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1617 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1618 		for (int j = 0; j < 10; j++) {
   1619 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1620 			    IXGBE_RXDCTL_ENABLE)
   1621 				msec_delay(1);
   1622 			else
   1623 				break;
   1624 		}
   1625 		wmb();
   1626 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1627 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1628 		    (rdba & 0x00000000ffffffffULL));
   1629 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1630 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1631 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1632 
   1633 		/* Reset the ring indices */
   1634 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1635 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1636 
   1637 		/* Set up the SRRCTL register */
   1638 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1639 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1640 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1641 		reg |= bufsz;
   1642 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1643 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1644 
   1645 		/* Capture Rx Tail index */
   1646 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1647 
   1648 		/* Do the queue enabling last */
   1649 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1650 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1651 		for (int k = 0; k < 10; k++) {
   1652 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1653 			    IXGBE_RXDCTL_ENABLE)
   1654 				break;
   1655 			msec_delay(1);
   1656 		}
   1657 		wmb();
   1658 
   1659 		/* Set the Tail Pointer */
   1660 		/*
   1661 		 * In netmap mode, we must preserve the buffers made
   1662 		 * available to userspace before the if_init()
   1663 		 * (this is true by default on the TX side, because
   1664 		 * init makes all buffers available to userspace).
   1665 		 *
   1666 		 * netmap_reset() and the device specific routines
   1667 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1668 		 * buffers at the end of the NIC ring, so here we
   1669 		 * must set the RDT (tail) register to make sure
   1670 		 * they are not overwritten.
   1671 		 *
   1672 		 * In this driver the NIC ring starts at RDH = 0,
   1673 		 * RDT points to the last slot available for reception (?),
   1674 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1675 		 */
   1676 #ifdef DEV_NETMAP
   1677 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1678 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1679 			struct netmap_adapter *na = NA(adapter->ifp);
   1680 			struct netmap_kring *kring = &na->rx_rings[i];
   1681 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1682 
   1683 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1684 		} else
   1685 #endif /* DEV_NETMAP */
   1686 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1687 			    adapter->num_rx_desc - 1);
   1688 	}
   1689 
   1690 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1691 
   1692 	ixv_initialize_rss_mapping(adapter);
   1693 
   1694 	if (adapter->num_queues > 1) {
    1695 		/* RSS and Rx IP payload checksum (IPPCSE) are mutually exclusive */
   1696 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1697 	}
   1698 
   1699 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1700 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1701 
   1702 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1703 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1704 
   1705 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1706 
   1707 	return;
   1708 } /* ixv_initialize_receive_units */
   1709 
   1710 /************************************************************************
   1711  * ixv_setup_vlan_support
   1712  ************************************************************************/
   1713 static void
   1714 ixv_setup_vlan_support(struct adapter *adapter)
   1715 {
   1716 	struct ethercom *ec = &adapter->osdep.ec;
   1717 	struct ixgbe_hw *hw = &adapter->hw;
   1718 	struct rx_ring  *rxr;
   1719 	u32		ctrl, vid, vfta, retry;
   1720 
    1721 	/*
    1722 	 * We get here via init_locked, meaning a soft reset;
    1723 	 * that has already cleared the VFTA and other state,
    1724 	 * so if no VLANs have been registered there is
    1725 	 * nothing to do.
    1726 	 */
   1727 	if (!VLAN_ATTACHED(ec))
   1728 		return;
   1729 
   1730 	/* Enable the queues */
   1731 	for (int i = 0; i < adapter->num_queues; i++) {
   1732 		rxr = &adapter->rx_rings[i];
   1733 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1734 		ctrl |= IXGBE_RXDCTL_VME;
   1735 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1736 		/*
   1737 		 * Let Rx path know that it needs to store VLAN tag
   1738 		 * as part of extra mbuf info.
   1739 		 */
   1740 		rxr->vtag_strip = TRUE;
   1741 	}
   1742 
   1743 #if 1
   1744 	/* XXX dirty hack. Enable all VIDs */
   1745 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
    1746 		adapter->shadow_vfta[i] = 0xffffffff;
   1747 #endif
    1748 	/*
    1749 	 * A soft reset zeroes out the VFTA, so
    1750 	 * we need to repopulate it now.
    1751 	 */
   1752 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1753 		if (adapter->shadow_vfta[i] == 0)
   1754 			continue;
   1755 		vfta = adapter->shadow_vfta[i];
    1756 		/*
    1757 		 * Reconstruct the VLAN IDs from the
    1758 		 * bits set in each 32-bit word of
    1759 		 * the shadow VFTA.
    1760 		 */
   1761 		for (int j = 0; j < 32; j++) {
   1762 			retry = 0;
   1763 			if ((vfta & (1 << j)) == 0)
   1764 				continue;
   1765 			vid = (i * 32) + j;
   1766 			/* Call the shared code mailbox routine */
   1767 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1768 				if (++retry > 5)
   1769 					break;
   1770 			}
   1771 		}
   1772 	}
   1773 } /* ixv_setup_vlan_support */
   1774 
   1775 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1776 /************************************************************************
   1777  * ixv_register_vlan
   1778  *
   1779  *   Run via a vlan config EVENT, it enables us to use the
   1780  *   HW Filter table since we can get the vlan id. This just
   1781  *   creates the entry in the soft version of the VFTA, init
   1782  *   will repopulate the real table.
   1783  ************************************************************************/
   1784 static void
   1785 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1786 {
   1787 	struct adapter	*adapter = ifp->if_softc;
   1788 	u16		index, bit;
   1789 
   1790 	if (ifp->if_softc != arg) /* Not our event */
   1791 		return;
   1792 
   1793 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1794 		return;
   1795 
   1796 	IXGBE_CORE_LOCK(adapter);
   1797 	index = (vtag >> 5) & 0x7F;
   1798 	bit = vtag & 0x1F;
   1799 	adapter->shadow_vfta[index] |= (1 << bit);
   1800 	/* Re-init to load the changes */
   1801 	ixv_init_locked(adapter);
   1802 	IXGBE_CORE_UNLOCK(adapter);
   1803 } /* ixv_register_vlan */
   1804 
   1805 /************************************************************************
   1806  * ixv_unregister_vlan
   1807  *
   1808  *   Run via a vlan unconfig EVENT, remove our entry
   1809  *   in the soft vfta.
   1810  ************************************************************************/
   1811 static void
   1812 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1813 {
   1814 	struct adapter	*adapter = ifp->if_softc;
   1815 	u16		index, bit;
   1816 
   1817 	if (ifp->if_softc !=  arg)
   1818 		return;
   1819 
   1820 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1821 		return;
   1822 
   1823 	IXGBE_CORE_LOCK(adapter);
   1824 	index = (vtag >> 5) & 0x7F;
   1825 	bit = vtag & 0x1F;
   1826 	adapter->shadow_vfta[index] &= ~(1 << bit);
   1827 	/* Re-init to load the changes */
   1828 	ixv_init_locked(adapter);
   1829 	IXGBE_CORE_UNLOCK(adapter);
   1830 } /* ixv_unregister_vlan */
   1831 #endif
   1832 
   1833 /************************************************************************
   1834  * ixv_enable_intr
   1835  ************************************************************************/
   1836 static void
   1837 ixv_enable_intr(struct adapter *adapter)
   1838 {
   1839 	struct ixgbe_hw *hw = &adapter->hw;
   1840 	struct ix_queue *que = adapter->queues;
   1841 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
    1842 
   1844 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1845 
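         	/* Auto-clear the queue causes; OTHER/LSC are cleared explicitly. */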
   1846 	mask = IXGBE_EIMS_ENABLE_MASK;
   1847 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1848 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1849 
   1850 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1851 		ixv_enable_queue(adapter, que->msix);
   1852 
   1853 	IXGBE_WRITE_FLUSH(hw);
   1854 
   1855 	return;
   1856 } /* ixv_enable_intr */
   1857 
   1858 /************************************************************************
   1859  * ixv_disable_intr
   1860  ************************************************************************/
   1861 static void
   1862 ixv_disable_intr(struct adapter *adapter)
   1863 {
   1864 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1865 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1866 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1867 
   1868 	return;
   1869 } /* ixv_disable_intr */
   1870 
   1871 /************************************************************************
   1872  * ixv_set_ivar
   1873  *
   1874  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1875  *    - entry is the register array entry
   1876  *    - vector is the MSI-X vector for this queue
   1877  *    - type is RX/TX/MISC
   1878  ************************************************************************/
   1879 static void
   1880 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1881 {
   1882 	struct ixgbe_hw *hw = &adapter->hw;
   1883 	u32             ivar, index;
   1884 
   1885 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1886 
   1887 	if (type == -1) { /* MISC IVAR */
   1888 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1889 		ivar &= ~0xFF;
   1890 		ivar |= vector;
   1891 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1892 	} else {          /* RX/TX IVARS */
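         		/*
         		 * Each VTIVAR register holds the 8-bit Rx and Tx vector
         		 * fields for two queues; select the byte for this
         		 * entry/type pair.
         		 */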
   1893 		index = (16 * (entry & 1)) + (8 * type);
   1894 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1895 		ivar &= ~(0xFF << index);
   1896 		ivar |= (vector << index);
   1897 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1898 	}
   1899 } /* ixv_set_ivar */
   1900 
   1901 /************************************************************************
   1902  * ixv_configure_ivars
   1903  ************************************************************************/
   1904 static void
   1905 ixv_configure_ivars(struct adapter *adapter)
   1906 {
   1907 	struct ix_queue *que = adapter->queues;
   1908 
   1909 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1910 		/* First the RX queue entry */
   1911 		ixv_set_ivar(adapter, i, que->msix, 0);
   1912 		/* ... and the TX */
   1913 		ixv_set_ivar(adapter, i, que->msix, 1);
   1914 		/* Set an initial value in EITR */
   1915 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1916 		    IXGBE_EITR_DEFAULT);
   1917 	}
   1918 
   1919 	/* For the mailbox interrupt */
   1920 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1921 } /* ixv_configure_ivars */
   1922 
   1923 
   1924 /************************************************************************
   1925  * ixv_save_stats
   1926  *
   1927  *   The VF stats registers never have a truly virgin
   1928  *   starting point, so this routine tries to make an
   1929  *   artificial one, marking ground zero on attach as
   1930  *   it were.
   1931  ************************************************************************/
   1932 static void
   1933 ixv_save_stats(struct adapter *adapter)
   1934 {
   1935 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1936 
   1937 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1938 		stats->saved_reset_vfgprc +=
   1939 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1940 		stats->saved_reset_vfgptc +=
   1941 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1942 		stats->saved_reset_vfgorc +=
   1943 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1944 		stats->saved_reset_vfgotc +=
   1945 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1946 		stats->saved_reset_vfmprc +=
   1947 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1948 	}
   1949 } /* ixv_save_stats */
   1950 
   1951 /************************************************************************
   1952  * ixv_init_stats
   1953  ************************************************************************/
   1954 static void
   1955 ixv_init_stats(struct adapter *adapter)
   1956 {
   1957 	struct ixgbe_hw *hw = &adapter->hw;
   1958 
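         	/* The octet counters are split across LSB/MSB register pairs. */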
   1959 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1960 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1961 	adapter->stats.vf.last_vfgorc |=
   1962 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1963 
   1964 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1965 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1966 	adapter->stats.vf.last_vfgotc |=
   1967 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1968 
   1969 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1970 
   1971 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1972 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1973 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1974 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1975 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1976 } /* ixv_init_stats */
   1977 
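         /*
          * The VF statistics registers are read-only and narrow (32-bit packet
          * counts, 36-bit octet counts), so these helpers detect a wrap since
          * the previous read and carry it into the 64-bit event counters.
          */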
   1978 #define UPDATE_STAT_32(reg, last, count)		\
   1979 {                                                       \
   1980 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   1981 	if (current < (last))				\
   1982 		count.ev_count += 0x100000000LL;	\
   1983 	(last) = current;				\
   1984 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   1985 	count.ev_count |= current;			\
   1986 }
   1987 
   1988 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   1989 {                                                       \
   1990 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   1991 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   1992 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   1993 	if (current < (last))				\
   1994 		count.ev_count += 0x1000000000LL;	\
   1995 	(last) = current;				\
   1996 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   1997 	count.ev_count |= current;			\
   1998 }
   1999 
   2000 /************************************************************************
   2001  * ixv_update_stats - Update the board statistics counters.
   2002  ************************************************************************/
   2003 void
   2004 ixv_update_stats(struct adapter *adapter)
   2005 {
   2006 	struct ixgbe_hw *hw = &adapter->hw;
   2007 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2008 
    2009 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
    2010 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
    2011 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
    2012 	    stats->vfgorc);
    2013 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
    2014 	    stats->vfgotc);
    2015 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2016 
    2017 	/*
    2018 	 * Fill out the OS statistics structure.
    2019 	 * NetBSD: don't override if_{i|o}{packets|bytes|mcasts} with
    2020 	 * the adapter->stats counters; leaving them alone is required
    2021 	 * for ifconfig -z (SIOCZIFDATA) to work.
    2022 	 */
   2023 } /* ixv_update_stats */
   2024 
   2025 const struct sysctlnode *
   2026 ixv_sysctl_instance(struct adapter *adapter)
   2027 {
   2028 	const char *dvname;
   2029 	struct sysctllog **log;
   2030 	int rc;
   2031 	const struct sysctlnode *rnode;
   2032 
   2033 	log = &adapter->sysctllog;
   2034 	dvname = device_xname(adapter->dev);
   2035 
   2036 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2037 	    0, CTLTYPE_NODE, dvname,
   2038 	    SYSCTL_DESCR("ixv information and settings"),
   2039 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2040 		goto err;
   2041 
   2042 	return rnode;
   2043 err:
   2044 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2045 	return NULL;
   2046 }
   2047 
   2048 static void
   2049 ixv_add_device_sysctls(struct adapter *adapter)
   2050 {
   2051 	struct sysctllog **log;
   2052 	const struct sysctlnode *rnode, *cnode;
   2053 	device_t dev;
   2054 
   2055 	dev = adapter->dev;
   2056 	log = &adapter->sysctllog;
   2057 
   2058 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2059 		aprint_error_dev(dev, "could not create sysctl root\n");
   2060 		return;
   2061 	}
   2062 
   2063 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2064 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2065 	    "debug", SYSCTL_DESCR("Debug Info"),
   2066 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2067 		aprint_error_dev(dev, "could not create sysctl\n");
   2068 
   2069 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2070 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2071 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2072 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2073 		aprint_error_dev(dev, "could not create sysctl\n");
   2074 }
   2075 
   2076 /************************************************************************
   2077  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2078  ************************************************************************/
   2079 static void
   2080 ixv_add_stats_sysctls(struct adapter *adapter)
   2081 {
   2082 	device_t                dev = adapter->dev;
   2083 	struct tx_ring          *txr = adapter->tx_rings;
   2084 	struct rx_ring          *rxr = adapter->rx_rings;
   2085 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2086 	const struct sysctlnode *rnode;
   2087 	struct sysctllog **log = &adapter->sysctllog;
   2088 	const char *xname = device_xname(dev);
   2089 
   2090 	/* Driver Statistics */
   2091 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2092 	    NULL, xname, "Handled queue in softint");
   2093 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2094 	    NULL, xname, "Requeued in softint");
   2095 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2096 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2097 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2098 	    NULL, xname, "m_defrag() failed");
   2099 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2100 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2101 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2102 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2103 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2104 	    NULL, xname, "Driver tx dma hard fail other");
   2105 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2106 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2107 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2108 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2109 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2110 	    NULL, xname, "Watchdog timeouts");
   2111 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2112 	    NULL, xname, "TSO errors");
   2113 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2114 	    NULL, xname, "Link MSI-X IRQ Handled");
   2115 
   2116 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2117 		snprintf(adapter->queues[i].evnamebuf,
   2118 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2119 		    xname, i);
   2120 		snprintf(adapter->queues[i].namebuf,
   2121 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2122 
   2123 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2124 			aprint_error_dev(dev, "could not create sysctl root\n");
   2125 			break;
   2126 		}
   2127 
   2128 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2129 		    0, CTLTYPE_NODE,
   2130 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2131 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2132 			break;
   2133 
   2134 #if 0 /* not yet */
   2135 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2136 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2137 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2138 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2139 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2140 			break;
   2141 
   2142 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2143 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2144 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2145 			NULL, 0, &(adapter->queues[i].irqs),
   2146 		    0, CTL_CREATE, CTL_EOL) != 0)
   2147 			break;
   2148 
   2149 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2150 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2151 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2152 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2153 		    0, CTL_CREATE, CTL_EOL) != 0)
   2154 			break;
   2155 
   2156 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2157 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2158 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2159 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2160 		    0, CTL_CREATE, CTL_EOL) != 0)
   2161 			break;
   2162 #endif
   2163 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2164 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2165 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2166 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2167 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2168 		    NULL, adapter->queues[i].evnamebuf,
   2169 		    "Queue No Descriptor Available");
   2170 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2171 		    NULL, adapter->queues[i].evnamebuf,
   2172 		    "Queue Packets Transmitted");
   2173 #ifndef IXGBE_LEGACY_TX
   2174 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2175 		    NULL, adapter->queues[i].evnamebuf,
   2176 		    "Packets dropped in pcq");
   2177 #endif
   2178 
   2179 #ifdef LRO
   2180 		struct lro_ctrl *lro = &rxr->lro;
   2181 #endif /* LRO */
   2182 
   2183 #if 0 /* not yet */
   2184 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2185 		    CTLFLAG_READONLY,
   2186 		    CTLTYPE_INT,
   2187 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2188 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2189 		    CTL_CREATE, CTL_EOL) != 0)
   2190 			break;
   2191 
   2192 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2193 		    CTLFLAG_READONLY,
   2194 		    CTLTYPE_INT,
   2195 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2196 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2197 		    CTL_CREATE, CTL_EOL) != 0)
   2198 			break;
   2199 #endif
   2200 
   2201 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2202 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2203 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2204 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2205 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2206 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2207 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2208 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2209 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2210 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2211 #ifdef LRO
   2212 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2213 				CTLFLAG_RD, &lro->lro_queued, 0,
   2214 				"LRO Queued");
   2215 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2216 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2217 				"LRO Flushed");
   2218 #endif /* LRO */
   2219 	}
   2220 
   2221 	/* MAC stats get their own sub node */
   2222 
   2223 	snprintf(stats->namebuf,
   2224 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2225 
   2226 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2227 	    stats->namebuf, "rx csum offload - IP");
   2228 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2229 	    stats->namebuf, "rx csum offload - L4");
   2230 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2231 	    stats->namebuf, "rx csum offload - IP bad");
   2232 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2233 	    stats->namebuf, "rx csum offload - L4 bad");
   2234 
   2235 	/* Packet Reception Stats */
   2236 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2237 	    xname, "Good Packets Received");
   2238 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2239 	    xname, "Good Octets Received");
   2240 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2241 	    xname, "Multicast Packets Received");
   2242 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2243 	    xname, "Good Packets Transmitted");
   2244 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2245 	    xname, "Good Octets Transmitted");
   2246 } /* ixv_add_stats_sysctls */
   2247 
   2248 /************************************************************************
   2249  * ixv_set_sysctl_value
   2250  ************************************************************************/
   2251 static void
   2252 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2253 	const char *description, int *limit, int value)
   2254 {
   2255 	device_t dev =  adapter->dev;
   2256 	struct sysctllog **log;
   2257 	const struct sysctlnode *rnode, *cnode;
   2258 
   2259 	log = &adapter->sysctllog;
   2260 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2261 		aprint_error_dev(dev, "could not create sysctl root\n");
   2262 		return;
   2263 	}
   2264 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2265 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2266 	    name, SYSCTL_DESCR(description),
   2267 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2268 		aprint_error_dev(dev, "could not create sysctl\n");
   2269 	*limit = value;
   2270 } /* ixv_set_sysctl_value */
   2271 
   2272 /************************************************************************
   2273  * ixv_print_debug_info
   2274  *
   2275  *   Called only when em_display_debug_stats is enabled.
   2276  *   Provides a way to take a look at important statistics
   2277  *   maintained by the driver and hardware.
   2278  ************************************************************************/
   2279 static void
   2280 ixv_print_debug_info(struct adapter *adapter)
   2281 {
    2282 	device_t        dev = adapter->dev;
    2283 	struct ixgbe_hw *hw = &adapter->hw;
    2284 	struct ix_queue *que = adapter->queues;
    2285 	struct rx_ring  *rxr;
    2286 	struct tx_ring  *txr;
    2287 #ifdef LRO
    2288 	struct lro_ctrl *lro;
    2289 #endif /* LRO */
   2290 
   2291 	device_printf(dev, "Error Byte Count = %u \n",
   2292 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2293 
   2294 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2295 		txr = que->txr;
   2296 		rxr = que->rxr;
   2297 #ifdef LRO
   2298 		lro = &rxr->lro;
   2299 #endif /* LRO */
   2300 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2301 		    que->msix, (long)que->irqs.ev_count);
   2302 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2303 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2304 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2305 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2306 #ifdef LRO
   2307 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2308 		    rxr->me, (long long)lro->lro_queued);
   2309 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2310 		    rxr->me, (long long)lro->lro_flushed);
   2311 #endif /* LRO */
   2312 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2313 		    txr->me, (long)txr->total_packets.ev_count);
   2314 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2315 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2316 	}
   2317 
   2318 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2319 	    (long)adapter->link_irq.ev_count);
   2320 } /* ixv_print_debug_info */
   2321 
   2322 /************************************************************************
   2323  * ixv_sysctl_debug
   2324  ************************************************************************/
   2325 static int
   2326 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2327 {
    2328 	struct sysctlnode node = *rnode;
    2329 	struct adapter *adapter;
    2330 	int            error, result = 0;
    2331 
    2332 	/* Fetch the adapter before sysctl_data is redirected at result. */
    2333 	adapter = (struct adapter *)node.sysctl_data;
    2334 	node.sysctl_data = &result;
    2335 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    2336 
    2337 	if (error || newp == NULL)
    2338 		return error;
    2339 
    2340 	if (result == 1)
    2341 		ixv_print_debug_info(adapter);
   2343 
   2344 	return 0;
   2345 } /* ixv_sysctl_debug */
   2346 
   2347 /************************************************************************
   2348  * ixv_init_device_features
   2349  ************************************************************************/
   2350 static void
   2351 ixv_init_device_features(struct adapter *adapter)
   2352 {
   2353 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2354 	                  | IXGBE_FEATURE_VF
   2355 	                  | IXGBE_FEATURE_RSS
   2356 	                  | IXGBE_FEATURE_LEGACY_TX;
   2357 
   2358 	/* A tad short on feature flags for VFs, atm. */
   2359 	switch (adapter->hw.mac.type) {
   2360 	case ixgbe_mac_82599_vf:
   2361 		break;
   2362 	case ixgbe_mac_X540_vf:
   2363 		break;
   2364 	case ixgbe_mac_X550_vf:
   2365 	case ixgbe_mac_X550EM_x_vf:
   2366 	case ixgbe_mac_X550EM_a_vf:
   2367 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2368 		break;
   2369 	default:
   2370 		break;
   2371 	}
   2372 
   2373 	/* Enabled by default... */
   2374 	/* Is a virtual function (VF) */
   2375 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2376 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2377 	/* Netmap */
   2378 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2379 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2380 	/* Receive-Side Scaling (RSS) */
   2381 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2382 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2383 	/* Needs advanced context descriptor regardless of offloads req'd */
   2384 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2385 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2386 
   2387 	/* Enabled via sysctl... */
   2388 	/* Legacy (single queue) transmit */
   2389 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2390 	    ixv_enable_legacy_tx)
   2391 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2392 } /* ixv_init_device_features */
   2393 
   2394 /************************************************************************
   2395  * ixv_shutdown - Shutdown entry point
   2396  ************************************************************************/
   2397 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2398 static int
   2399 ixv_shutdown(device_t dev)
   2400 {
   2401 	struct adapter *adapter = device_private(dev);
   2402 	IXGBE_CORE_LOCK(adapter);
   2403 	ixv_stop(adapter);
   2404 	IXGBE_CORE_UNLOCK(adapter);
   2405 
   2406 	return (0);
   2407 } /* ixv_shutdown */
   2408 #endif
   2409 
   2410 static int
   2411 ixv_ifflags_cb(struct ethercom *ec)
   2412 {
   2413 	struct ifnet *ifp = &ec->ec_if;
   2414 	struct adapter *adapter = ifp->if_softc;
   2415 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2416 
   2417 	IXGBE_CORE_LOCK(adapter);
   2418 
   2419 	if (change != 0)
   2420 		adapter->if_flags = ifp->if_flags;
   2421 
   2422 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2423 		rc = ENETRESET;
   2424 
   2425 	/* Set up VLAN support and filter */
   2426 	ixv_setup_vlan_support(adapter);
   2427 
   2428 	IXGBE_CORE_UNLOCK(adapter);
   2429 
   2430 	return rc;
   2431 }
   2432 
   2433 
   2434 /************************************************************************
   2435  * ixv_ioctl - Ioctl entry point
   2436  *
   2437  *   Called when the user wants to configure the interface.
   2438  *
   2439  *   return 0 on success, positive on failure
   2440  ************************************************************************/
   2441 static int
   2442 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2443 {
   2444 	struct adapter	*adapter = ifp->if_softc;
   2445 	struct ifcapreq *ifcr = data;
   2446 	struct ifreq	*ifr = data;
   2447 	int             error = 0;
   2448 	int l4csum_en;
   2449 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2450 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2451 
   2452 	switch (command) {
   2453 	case SIOCSIFFLAGS:
   2454 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2455 		break;
   2456 	case SIOCADDMULTI:
   2457 	case SIOCDELMULTI:
   2458 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2459 		break;
   2460 	case SIOCSIFMEDIA:
   2461 	case SIOCGIFMEDIA:
   2462 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2463 		break;
   2464 	case SIOCSIFCAP:
   2465 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2466 		break;
   2467 	case SIOCSIFMTU:
   2468 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2469 		break;
   2470 	default:
   2471 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2472 		break;
   2473 	}
   2474 
   2475 	switch (command) {
   2476 	case SIOCSIFMEDIA:
   2477 	case SIOCGIFMEDIA:
   2478 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2479 	case SIOCSIFCAP:
   2480 		/* Layer-4 Rx checksum offload has to be turned on and
   2481 		 * off as a unit.
   2482 		 */
   2483 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2484 		if (l4csum_en != l4csum && l4csum_en != 0)
   2485 			return EINVAL;
   2486 		/*FALLTHROUGH*/
   2487 	case SIOCADDMULTI:
   2488 	case SIOCDELMULTI:
   2489 	case SIOCSIFFLAGS:
   2490 	case SIOCSIFMTU:
   2491 	default:
   2492 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2493 			return error;
   2494 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2495 			;
   2496 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2497 			IXGBE_CORE_LOCK(adapter);
   2498 			ixv_init_locked(adapter);
   2499 			IXGBE_CORE_UNLOCK(adapter);
   2500 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2501 			/*
   2502 			 * Multicast list has changed; set the hardware filter
   2503 			 * accordingly.
   2504 			 */
   2505 			IXGBE_CORE_LOCK(adapter);
   2506 			ixv_disable_intr(adapter);
   2507 			ixv_set_multi(adapter);
   2508 			ixv_enable_intr(adapter);
   2509 			IXGBE_CORE_UNLOCK(adapter);
   2510 		}
   2511 		return 0;
   2512 	}
   2513 } /* ixv_ioctl */
   2514 
   2515 /************************************************************************
   2516  * ixv_init
   2517  ************************************************************************/
   2518 static int
   2519 ixv_init(struct ifnet *ifp)
   2520 {
   2521 	struct adapter *adapter = ifp->if_softc;
   2522 
   2523 	IXGBE_CORE_LOCK(adapter);
   2524 	ixv_init_locked(adapter);
   2525 	IXGBE_CORE_UNLOCK(adapter);
   2526 
   2527 	return 0;
   2528 } /* ixv_init */
   2529 
   2530 
   2531 /************************************************************************
   2532  * ixv_handle_que
   2533  ************************************************************************/
   2534 static void
   2535 ixv_handle_que(void *context)
   2536 {
   2537 	struct ix_queue *que = context;
   2538 	struct adapter  *adapter = que->adapter;
   2539 	struct tx_ring	*txr = que->txr;
   2540 	struct ifnet    *ifp = adapter->ifp;
   2541 	bool		more;
   2542 
   2543 	adapter->handleq.ev_count++;
   2544 
   2545 	if (ifp->if_flags & IFF_RUNNING) {
   2546 		more = ixgbe_rxeof(que);
   2547 		IXGBE_TX_LOCK(txr);
   2548 		ixgbe_txeof(txr);
   2549 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2550 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2551 				ixgbe_mq_start_locked(ifp, txr);
   2552 		/* Only for queue 0 */
   2553 		/* NetBSD still needs this for CBQ */
   2554 		if ((&adapter->queues[0] == que)
   2555 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2556 			ixgbe_legacy_start_locked(ifp, txr);
   2557 		IXGBE_TX_UNLOCK(txr);
   2558 		if (more) {
   2559 			adapter->req.ev_count++;
   2560 			softint_schedule(que->que_si);
   2561 			return;
   2562 		}
   2563 	}
   2564 
   2565 	/* Re-enable this interrupt */
   2566 	ixv_enable_queue(adapter, que->msix);
   2567 
   2568 	return;
   2569 } /* ixv_handle_que */
   2570 
   2571 /************************************************************************
   2572  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2573  ************************************************************************/
   2574 static int
   2575 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2576 {
   2577 	device_t	dev = adapter->dev;
   2578 	struct ix_queue *que = adapter->queues;
   2579 	struct		tx_ring *txr = adapter->tx_rings;
   2580 	int 		error, msix_ctrl, rid, vector = 0;
   2581 	pci_chipset_tag_t pc;
   2582 	pcitag_t	tag;
   2583 	char		intrbuf[PCI_INTRSTR_LEN];
   2584 	char		intr_xname[32];
   2585 	const char	*intrstr = NULL;
   2586 	kcpuset_t	*affinity;
   2587 	int		cpu_id = 0;
   2588 
   2589 	pc = adapter->osdep.pc;
   2590 	tag = adapter->osdep.tag;
   2591 
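         	/* One MSI-X vector per queue plus one for the mailbox. */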
   2592 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2593 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2594 	    adapter->osdep.nintrs) != 0) {
   2595 		aprint_error_dev(dev,
   2596 		    "failed to allocate MSI-X interrupt\n");
   2597 		return (ENXIO);
   2598 	}
   2599 
   2600 	kcpuset_create(&affinity, false);
   2601 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2602 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2603 		    device_xname(dev), i);
   2604 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2605 		    sizeof(intrbuf));
   2606 #ifdef IXGBE_MPSAFE
   2607 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2608 		    true);
   2609 #endif
   2610 		/* Set the handler function */
   2611 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2612 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2613 		    intr_xname);
   2614 		if (que->res == NULL) {
   2615 			pci_intr_release(pc, adapter->osdep.intrs,
   2616 			    adapter->osdep.nintrs);
   2617 			aprint_error_dev(dev,
   2618 			    "Failed to register QUE handler\n");
   2619 			kcpuset_destroy(affinity);
   2620 			return (ENXIO);
   2621 		}
   2622 		que->msix = vector;
    2623 		adapter->active_queues |= (u64)1 << que->msix;
   2624 
   2625 		cpu_id = i;
   2626 		/* Round-robin affinity */
   2627 		kcpuset_zero(affinity);
   2628 		kcpuset_set(affinity, cpu_id % ncpu);
   2629 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2630 		    NULL);
   2631 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2632 		    intrstr);
   2633 		if (error == 0)
   2634 			aprint_normal(", bound queue %d to cpu %d\n",
   2635 			    i, cpu_id % ncpu);
   2636 		else
   2637 			aprint_normal("\n");
   2638 
   2639 #ifndef IXGBE_LEGACY_TX
   2640 		txr->txr_si
   2641 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2642 			ixgbe_deferred_mq_start, txr);
   2643 #endif
   2644 		que->que_si
   2645 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2646 			ixv_handle_que, que);
   2647 		if (que->que_si == NULL) {
   2648 			aprint_error_dev(dev,
   2649 			    "could not establish software interrupt\n");
   2650 		}
   2651 	}
   2652 
   2653 	/* and Mailbox */
   2654 	cpu_id++;
   2655 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2656 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2657 	    sizeof(intrbuf));
   2658 #ifdef IXGBE_MPSAFE
   2659 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2660 	    true);
   2661 #endif
   2662 	/* Set the mbx handler function */
   2663 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2664 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2665 	    intr_xname);
   2666 	if (adapter->osdep.ihs[vector] == NULL) {
   2667 		adapter->res = NULL;
   2668 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2669 		kcpuset_destroy(affinity);
   2670 		return (ENXIO);
   2671 	}
   2672 	/* Round-robin affinity */
   2673 	kcpuset_zero(affinity);
   2674 	kcpuset_set(affinity, cpu_id % ncpu);
   2675 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,NULL);
   2676 
   2677 	aprint_normal_dev(dev,
   2678 	    "for link, interrupting at %s", intrstr);
   2679 	if (error == 0)
   2680 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2681 	else
   2682 		aprint_normal("\n");
   2683 
   2684 	adapter->vector = vector;
   2685 	/* Tasklets for Mailbox */
   2686 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2687 	    ixv_handle_link, adapter);
    2688 	/*
    2689 	 * Due to a broken design, QEMU will fail to properly
    2690 	 * enable the guest for MSI-X unless the vectors in
    2691 	 * the table are all set up.  Re-set the ENABLE bit in
    2692 	 * the MSI-X control register at this point so that it
    2693 	 * initializes us successfully.
    2694 	 */
   2695 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2696 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2697 		rid += PCI_MSIX_CTL;
   2698 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2699 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2700 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2701 	}
   2702 
   2703 	kcpuset_destroy(affinity);
   2704 	return (0);
   2705 } /* ixv_allocate_msix */
   2706 
   2707 /************************************************************************
   2708  * ixv_configure_interrupts - Setup MSI-X resources
   2709  *
   2710  *   Note: The VF device MUST use MSI-X, there is no fallback.
   2711  ************************************************************************/
   2712 static int
   2713 ixv_configure_interrupts(struct adapter *adapter)
   2714 {
   2715 	device_t dev = adapter->dev;
   2716 	int want, queues, msgs;
   2717 
   2718 	/* Must have at least 2 MSI-X vectors */
   2719 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2720 	if (msgs < 2) {
   2721 		aprint_error_dev(dev, "MSIX config error\n");
   2722 		return (ENXIO);
   2723 	}
   2724 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2725 
   2726 	/* Figure out a reasonable auto config value */
   2727 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2728 
   2729 	if (ixv_num_queues != 0)
   2730 		queues = ixv_num_queues;
   2731 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   2732 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2733 
   2734 	/*
   2735 	 * Want vectors for the queues,
   2736 	 * plus an additional for mailbox.
   2737 	 */
   2738 	want = queues + 1;
   2739 	if (msgs >= want)
   2740 		msgs = want;
   2741 	else {
    2742 		aprint_error_dev(dev,
   2743 		    "MSI-X Configuration Problem, "
   2744 		    "%d vectors but %d queues wanted!\n",
   2745 		    msgs, want);
   2746 		return -1;
   2747 	}
   2748 
   2749 	adapter->msix_mem = (void *)1; /* XXX */
   2750 	aprint_normal_dev(dev,
   2751 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2752 	adapter->num_queues = queues;
   2753 
   2754 	return (0);
   2755 } /* ixv_configure_interrupts */
   2756 
   2757 
   2758 /************************************************************************
   2759  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2760  *
   2761  *   Done outside of interrupt context since the driver might sleep
   2762  ************************************************************************/
   2763 static void
   2764 ixv_handle_link(void *context)
   2765 {
   2766 	struct adapter *adapter = context;
   2767 
   2768 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2769 	    &adapter->link_up, FALSE);
   2770 	ixv_update_link_status(adapter);
   2771 } /* ixv_handle_link */
   2772 
   2773 /************************************************************************
   2774  * ixv_check_link - Used in the local timer to poll for link changes
   2775  ************************************************************************/
   2776 static void
   2777 ixv_check_link(struct adapter *adapter)
   2778 {
   2779 	adapter->hw.mac.get_link_status = TRUE;
   2780 
   2781 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2782 	    &adapter->link_up, FALSE);
   2783 	ixv_update_link_status(adapter);
   2784 } /* ixv_check_link */
   2785