      1 /*$NetBSD: ixv.c,v 1.67 2017/10/03 02:55:37 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
      55  *   Used by probe to select the devices to attach to
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
    148  * FreeBSD Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
    171 #define	TUNABLE_INT(__x, __y)
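/*
 * Note: in this NetBSD port TUNABLE_INT is a no-op (see the empty #define
 * above), so the hw.ixv.* names below are informational only; the
 * compile-time defaults apply, and some values are additionally exposed
 * as per-device sysctls during attach (see ixv_add_device_sysctls() and
 * ixv_set_sysctl_value()).
 */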
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    173 
    174 /*
     175  * AIM: Adaptive Interrupt Moderation.
     176  * When enabled, the interrupt rate is varied
     177  * over time based on the traffic seen on
     178  * that interrupt vector.
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
    191 /*
     192  * Number of TX descriptors per ring.
     193  * This is set higher than the RX default,
     194  * as that appears to perform better.
    195  */
    196 static int ixv_txd = PERFORM_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = PERFORM_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
    207 #ifdef NET_MPSAFE
    208 #define IXGBE_MPSAFE		1
    209 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    210 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    211 #else
    212 #define IXGBE_CALLOUT_FLAGS	0
    213 #define IXGBE_SOFTINFT_FLAGS	0
    214 #endif
    215 
    216 #if 0
    217 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    218 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    219 #endif
    220 
    221 /************************************************************************
    222  * ixv_probe - Device identification routine
    223  *
     224  *   Determines if the driver should be attached to
     225  *   the adapter based on its PCI vendor/device ID.
     226  *
     227  *   return 1 on a supported device, 0 otherwise
    228  ************************************************************************/
    229 static int
    230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    231 {
    232 #ifdef __HAVE_PCI_MSI_MSIX
    233 	const struct pci_attach_args *pa = aux;
    234 
    235 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    236 #else
    237 	return 0;
    238 #endif
    239 } /* ixv_probe */
    240 
    241 static ixgbe_vendor_info_t *
    242 ixv_lookup(const struct pci_attach_args *pa)
    243 {
    244 	ixgbe_vendor_info_t *ent;
    245 	pcireg_t subid;
    246 
    247 	INIT_DEBUGOUT("ixv_lookup: begin");
    248 
    249 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    250 		return NULL;
    251 
    252 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    253 
    254 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    255 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    256 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    257 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    258 		     (ent->subvendor_id == 0)) &&
    259 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    260 		     (ent->subdevice_id == 0))) {
    261 			return ent;
    262 		}
    263 	}
    264 
    265 	return NULL;
    266 }
    267 
    268 /************************************************************************
    269  * ixv_attach - Device initialization routine
    270  *
    271  *   Called when the driver is being loaded.
    272  *   Identifies the type of hardware, allocates all resources
    273  *   and initializes the hardware.
    274  *
     275  *   On failure, allocated resources are released before returning.
    276  ************************************************************************/
    277 static void
    278 ixv_attach(device_t parent, device_t dev, void *aux)
    279 {
    280 	struct adapter *adapter;
    281 	struct ixgbe_hw *hw;
    282 	int             error = 0;
    283 	pcireg_t	id, subid;
    284 	ixgbe_vendor_info_t *ent;
    285 	const struct pci_attach_args *pa = aux;
    286 	const char *apivstr;
    287 	const char *str;
    288 	char buf[256];
    289 
    290 	INIT_DEBUGOUT("ixv_attach: begin");
    291 
    292 	/*
     293 	 * Make sure BUSMASTER is set; on a VM under
     294 	 * KVM it may not be, and that will break things.
    295 	 */
    296 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    297 
    298 	/* Allocate, clear, and link in our adapter structure */
    299 	adapter = device_private(dev);
    300 	adapter->dev = dev;
    301 	adapter->hw.back = adapter;
    302 	hw = &adapter->hw;
    303 
    304 	adapter->init_locked = ixv_init_locked;
    305 	adapter->stop_locked = ixv_stop;
    306 
    307 	adapter->osdep.pc = pa->pa_pc;
    308 	adapter->osdep.tag = pa->pa_tag;
    309 	if (pci_dma64_available(pa))
    310 		adapter->osdep.dmat = pa->pa_dmat64;
    311 	else
    312 		adapter->osdep.dmat = pa->pa_dmat;
    313 	adapter->osdep.attached = false;
    314 
    315 	ent = ixv_lookup(pa);
    316 
    317 	KASSERT(ent != NULL);
    318 
    319 	aprint_normal(": %s, Version - %s\n",
    320 	    ixv_strings[ent->index], ixv_driver_version);
    321 
     322 	/* Core Lock Init */
    323 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    324 
    325 	/* Do base PCI setup - map BAR0 */
    326 	if (ixv_allocate_pci_resources(adapter, pa)) {
    327 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    328 		error = ENXIO;
    329 		goto err_out;
    330 	}
    331 
    332 	/* SYSCTL APIs */
    333 	ixv_add_device_sysctls(adapter);
    334 
    335 	/* Set up the timer callout */
    336 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    337 
    338 	/* Save off the information about this board */
    339 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    340 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    341 	hw->vendor_id = PCI_VENDOR(id);
    342 	hw->device_id = PCI_PRODUCT(id);
    343 	hw->revision_id =
    344 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    345 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    346 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    347 
    348 	/* A subset of set_mac_type */
    349 	switch (hw->device_id) {
    350 	case IXGBE_DEV_ID_82599_VF:
    351 		hw->mac.type = ixgbe_mac_82599_vf;
    352 		str = "82599 VF";
    353 		break;
    354 	case IXGBE_DEV_ID_X540_VF:
    355 		hw->mac.type = ixgbe_mac_X540_vf;
    356 		str = "X540 VF";
    357 		break;
    358 	case IXGBE_DEV_ID_X550_VF:
    359 		hw->mac.type = ixgbe_mac_X550_vf;
    360 		str = "X550 VF";
    361 		break;
    362 	case IXGBE_DEV_ID_X550EM_X_VF:
    363 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    364 		str = "X550EM X VF";
    365 		break;
    366 	case IXGBE_DEV_ID_X550EM_A_VF:
    367 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    368 		str = "X550EM A VF";
    369 		break;
    370 	default:
    371 		/* Shouldn't get here since probe succeeded */
    372 		aprint_error_dev(dev, "Unknown device ID!\n");
    373 		error = ENXIO;
    374 		goto err_out;
    375 		break;
    376 	}
    377 	aprint_normal_dev(dev, "device %s\n", str);
    378 
    379 	ixv_init_device_features(adapter);
    380 
    381 	/* Initialize the shared code */
    382 	error = ixgbe_init_ops_vf(hw);
    383 	if (error) {
    384 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    385 		error = EIO;
    386 		goto err_out;
    387 	}
    388 
    389 	/* Setup the mailbox */
    390 	ixgbe_init_mbx_params_vf(hw);
    391 
    392 	/* Set the right number of segments */
    393 	adapter->num_segs = IXGBE_82599_SCATTER;
    394 
    395 	/* Reset mbox api to 1.0 */
    396 	error = hw->mac.ops.reset_hw(hw);
    397 	if (error == IXGBE_ERR_RESET_FAILED)
    398 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    399 	else if (error)
    400 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    401 		    error);
    402 	if (error) {
    403 		error = EIO;
    404 		goto err_out;
    405 	}
    406 
    407 	error = hw->mac.ops.init_hw(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "...init_hw() failed!\n");
    410 		error = EIO;
    411 		goto err_out;
    412 	}
    413 
    414 	/* Negotiate mailbox API version */
    415 	error = ixv_negotiate_api(adapter);
    416 	if (error)
    417 		aprint_normal_dev(dev,
    418 		    "MBX API negotiation failed during attach!\n");
    419 	switch (hw->api_version) {
    420 	case ixgbe_mbox_api_10:
    421 		apivstr = "1.0";
    422 		break;
    423 	case ixgbe_mbox_api_20:
    424 		apivstr = "2.0";
    425 		break;
    426 	case ixgbe_mbox_api_11:
    427 		apivstr = "1.1";
    428 		break;
    429 	case ixgbe_mbox_api_12:
    430 		apivstr = "1.2";
    431 		break;
    432 	case ixgbe_mbox_api_13:
    433 		apivstr = "1.3";
    434 		break;
    435 	default:
    436 		apivstr = "unknown";
    437 		break;
    438 	}
    439 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    440 
     441 	/* If no MAC address was assigned, make a random one */
    442 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    443 		u8 addr[ETHER_ADDR_LEN];
    444 		uint64_t rndval = cprng_strong64();
    445 
    446 		memcpy(addr, &rndval, sizeof(addr));
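		/*
		 * Clear the group (multicast) bit and set the
		 * locally administered bit.
		 */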
    447 		addr[0] &= 0xFE;
    448 		addr[0] |= 0x02;
    449 		bcopy(addr, hw->mac.addr, sizeof(addr));
    450 	}
    451 
    452 	/* Register for VLAN events */
    453 #if 0 /* XXX delete after write? */
    454 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    455 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    457 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    458 #endif
    459 
    460 	/* Sysctls for limiting the amount of work done in the taskqueues */
    461 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    462 	    "max number of rx packets to process",
    463 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    464 
    465 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    466 	    "max number of tx packets to process",
    467 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    468 
    469 	/* Do descriptor calc and sanity checks */
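	/*
	 * Illustrative: with the usual 128-byte DBA_ALIGN and 16-byte
	 * advanced descriptors, this requires the ring size to be a
	 * multiple of 8 descriptors and within [MIN_TXD, MAX_TXD]
	 * (and likewise for RX below).
	 */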
    470 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    471 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    472 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    473 		adapter->num_tx_desc = DEFAULT_TXD;
    474 	} else
    475 		adapter->num_tx_desc = ixv_txd;
    476 
    477 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    478 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    479 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    480 		adapter->num_rx_desc = DEFAULT_RXD;
    481 	} else
    482 		adapter->num_rx_desc = ixv_rxd;
    483 
    484 	/* Setup MSI-X */
    485 	error = ixv_configure_interrupts(adapter);
    486 	if (error)
    487 		goto err_out;
    488 
    489 	/* Allocate our TX/RX Queues */
    490 	if (ixgbe_allocate_queues(adapter)) {
    491 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    492 		error = ENOMEM;
    493 		goto err_out;
    494 	}
    495 
     496 	/* hw.ixv defaults init */
    497 	adapter->enable_aim = ixv_enable_aim;
    498 
    499 	/* Setup OS specific network interface */
    500 	ixv_setup_interface(dev, adapter);
    501 
    502 	error = ixv_allocate_msix(adapter, pa);
    503 	if (error) {
    504 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    505 		goto err_late;
    506 	}
    507 
    508 	/* Do the stats setup */
    509 	ixv_save_stats(adapter);
    510 	ixv_init_stats(adapter);
    511 	ixv_add_stats_sysctls(adapter);
    512 
    513 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    514 		ixgbe_netmap_attach(adapter);
    515 
    516 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    517 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    518 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    519 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    520 
    521 	INIT_DEBUGOUT("ixv_attach: end");
    522 	adapter->osdep.attached = true;
    523 
    524 	return;
    525 
    526 err_late:
    527 	ixgbe_free_transmit_structures(adapter);
    528 	ixgbe_free_receive_structures(adapter);
    529 	free(adapter->queues, M_DEVBUF);
    530 err_out:
    531 	ixv_free_pci_resources(adapter);
    532 	IXGBE_CORE_LOCK_DESTROY(adapter);
    533 
    534 	return;
    535 } /* ixv_attach */
    536 
    537 /************************************************************************
    538  * ixv_detach - Device removal routine
    539  *
    540  *   Called when the driver is being removed.
    541  *   Stops the adapter and deallocates all the resources
    542  *   that were allocated for driver operation.
    543  *
    544  *   return 0 on success, positive on failure
    545  ************************************************************************/
    546 static int
    547 ixv_detach(device_t dev, int flags)
    548 {
    549 	struct adapter  *adapter = device_private(dev);
    550 	struct ixgbe_hw *hw = &adapter->hw;
    551 	struct ix_queue *que = adapter->queues;
    552 	struct tx_ring *txr = adapter->tx_rings;
    553 	struct rx_ring *rxr = adapter->rx_rings;
    554 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    555 
    556 	INIT_DEBUGOUT("ixv_detach: begin");
    557 	if (adapter->osdep.attached == false)
    558 		return 0;
    559 
    560 	/* Stop the interface. Callouts are stopped in it. */
    561 	ixv_ifstop(adapter->ifp, 1);
    562 
    563 #if NVLAN > 0
    564 	/* Make sure VLANs are not using driver */
    565 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    566 		;	/* nothing to do: no VLANs */
    567 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    568 		vlan_ifdetach(adapter->ifp);
    569 	else {
    570 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    571 		return EBUSY;
    572 	}
    573 #endif
    574 
    575 	IXGBE_CORE_LOCK(adapter);
    576 	ixv_stop(adapter);
    577 	IXGBE_CORE_UNLOCK(adapter);
    578 
    579 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    580 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    581 			softint_disestablish(txr->txr_si);
    582 		softint_disestablish(que->que_si);
    583 	}
    584 
    585 	/* Drain the Mailbox(link) queue */
    586 	softint_disestablish(adapter->link_si);
    587 
    588 	/* Unregister VLAN events */
    589 #if 0 /* XXX msaitoh delete after write? */
    590 	if (adapter->vlan_attach != NULL)
    591 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    592 	if (adapter->vlan_detach != NULL)
    593 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    594 #endif
    595 
    596 	ether_ifdetach(adapter->ifp);
    597 	callout_halt(&adapter->timer, NULL);
    598 
    599 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    600 		netmap_detach(adapter->ifp);
    601 
    602 	ixv_free_pci_resources(adapter);
    603 #if 0 /* XXX the NetBSD port is probably missing something here */
    604 	bus_generic_detach(dev);
    605 #endif
    606 	if_detach(adapter->ifp);
    607 	if_percpuq_destroy(adapter->ipq);
    608 
    609 	sysctl_teardown(&adapter->sysctllog);
    610 	evcnt_detach(&adapter->handleq);
    611 	evcnt_detach(&adapter->req);
    612 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    613 	evcnt_detach(&adapter->mbuf_defrag_failed);
    614 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    615 	evcnt_detach(&adapter->einval_tx_dma_setup);
    616 	evcnt_detach(&adapter->other_tx_dma_setup);
    617 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    618 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    619 	evcnt_detach(&adapter->watchdog_events);
    620 	evcnt_detach(&adapter->tso_err);
    621 	evcnt_detach(&adapter->link_irq);
    622 
    623 	txr = adapter->tx_rings;
    624 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    625 		evcnt_detach(&adapter->queues[i].irqs);
    626 		evcnt_detach(&txr->no_desc_avail);
    627 		evcnt_detach(&txr->total_packets);
    628 		evcnt_detach(&txr->tso_tx);
    629 #ifndef IXGBE_LEGACY_TX
    630 		evcnt_detach(&txr->pcq_drops);
    631 #endif
    632 
    633 		evcnt_detach(&rxr->rx_packets);
    634 		evcnt_detach(&rxr->rx_bytes);
    635 		evcnt_detach(&rxr->rx_copies);
    636 		evcnt_detach(&rxr->no_jmbuf);
    637 		evcnt_detach(&rxr->rx_discarded);
    638 	}
    639 	evcnt_detach(&stats->ipcs);
    640 	evcnt_detach(&stats->l4cs);
    641 	evcnt_detach(&stats->ipcs_bad);
    642 	evcnt_detach(&stats->l4cs_bad);
    643 
    644 	/* Packet Reception Stats */
    645 	evcnt_detach(&stats->vfgorc);
    646 	evcnt_detach(&stats->vfgprc);
    647 	evcnt_detach(&stats->vfmprc);
    648 
    649 	/* Packet Transmission Stats */
    650 	evcnt_detach(&stats->vfgotc);
    651 	evcnt_detach(&stats->vfgptc);
    652 
    653 	/* Mailbox Stats */
    654 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    655 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    656 	evcnt_detach(&hw->mbx.stats.acks);
    657 	evcnt_detach(&hw->mbx.stats.reqs);
    658 	evcnt_detach(&hw->mbx.stats.rsts);
    659 
    660 	ixgbe_free_transmit_structures(adapter);
    661 	ixgbe_free_receive_structures(adapter);
    662 	free(adapter->queues, M_DEVBUF);
    663 
    664 	IXGBE_CORE_LOCK_DESTROY(adapter);
    665 
    666 	return (0);
    667 } /* ixv_detach */
    668 
    669 /************************************************************************
    670  * ixv_init_locked - Init entry point
    671  *
     672  *   Used in two ways: it is the init entry point in the
     673  *   network interface structure used by the stack, and it is
     674  *   also used by the driver as a hw/sw initialization routine
     675  *   to return the hardware to a consistent state.
     676  *
     677  *   Called with the core lock held; on error the adapter is stopped.
    678  ************************************************************************/
    679 static void
    680 ixv_init_locked(struct adapter *adapter)
    681 {
    682 	struct ifnet	*ifp = adapter->ifp;
    683 	device_t 	dev = adapter->dev;
    684 	struct ixgbe_hw *hw = &adapter->hw;
    685 	int             error = 0;
    686 
    687 	INIT_DEBUGOUT("ixv_init_locked: begin");
    688 	KASSERT(mutex_owned(&adapter->core_mtx));
    689 	hw->adapter_stopped = FALSE;
    690 	hw->mac.ops.stop_adapter(hw);
    691 	callout_stop(&adapter->timer);
    692 
    693 	/* reprogram the RAR[0] in case user changed it. */
    694 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    695 
     696 	/* Get the latest MAC address; the user may have set an LAA */
    697 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    698 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    699 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    700 
    701 	/* Prepare transmit descriptors and buffers */
    702 	if (ixgbe_setup_transmit_structures(adapter)) {
    703 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    704 		ixv_stop(adapter);
    705 		return;
    706 	}
    707 
    708 	/* Reset VF and renegotiate mailbox API version */
    709 	hw->mac.ops.reset_hw(hw);
    710 	error = ixv_negotiate_api(adapter);
    711 	if (error)
    712 		device_printf(dev,
    713 		    "Mailbox API negotiation failed in init_locked!\n");
    714 
    715 	ixv_initialize_transmit_units(adapter);
    716 
    717 	/* Setup Multicast table */
    718 	ixv_set_multi(adapter);
    719 
    720 	/*
    721 	 * Determine the correct mbuf pool
    722 	 * for doing jumbo/headersplit
    723 	 */
    724 	if (ifp->if_mtu > ETHERMTU)
    725 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    726 	else
    727 		adapter->rx_mbuf_sz = MCLBYTES;
    728 
    729 	/* Prepare receive descriptors and buffers */
    730 	if (ixgbe_setup_receive_structures(adapter)) {
    731 		device_printf(dev, "Could not setup receive structures\n");
    732 		ixv_stop(adapter);
    733 		return;
    734 	}
    735 
    736 	/* Configure RX settings */
    737 	ixv_initialize_receive_units(adapter);
    738 
    739 #if 0 /* XXX isn't it required? -- msaitoh  */
    740 	/* Set the various hardware offload abilities */
    741 	ifp->if_hwassist = 0;
    742 	if (ifp->if_capenable & IFCAP_TSO4)
    743 		ifp->if_hwassist |= CSUM_TSO;
    744 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    745 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    746 #if __FreeBSD_version >= 800000
    747 		ifp->if_hwassist |= CSUM_SCTP;
    748 #endif
    749 	}
    750 #endif
    751 
    752 	/* Set up VLAN offload and filter */
    753 	ixv_setup_vlan_support(adapter);
    754 
    755 	/* Set up MSI-X routing */
    756 	ixv_configure_ivars(adapter);
    757 
    758 	/* Set up auto-mask */
    759 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    760 
    761 	/* Set moderation on the Link interrupt */
    762 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    763 
    764 	/* Stats init */
    765 	ixv_init_stats(adapter);
    766 
    767 	/* Config/Enable Link */
    768 	hw->mac.get_link_status = TRUE;
    769 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    770 	    FALSE);
    771 
    772 	/* Start watchdog */
    773 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    774 
    775 	/* And now turn on interrupts */
    776 	ixv_enable_intr(adapter);
    777 
    778 	/* Now inform the stack we're ready */
    779 	ifp->if_flags |= IFF_RUNNING;
    780 	ifp->if_flags &= ~IFF_OACTIVE;
    781 
    782 	return;
    783 } /* ixv_init_locked */
    784 
    785 /*
    786  * MSI-X Interrupt Handlers and Tasklets
    787  */
    788 
    789 static inline void
    790 ixv_enable_queue(struct adapter *adapter, u32 vector)
    791 {
    792 	struct ixgbe_hw *hw = &adapter->hw;
    793 	u32             queue = 1 << vector;
    794 	u32             mask;
    795 
    796 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    797 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    798 } /* ixv_enable_queue */
    799 
    800 static inline void
    801 ixv_disable_queue(struct adapter *adapter, u32 vector)
    802 {
    803 	struct ixgbe_hw *hw = &adapter->hw;
    804 	u64             queue = (u64)(1 << vector);
    805 	u32             mask;
    806 
    807 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    808 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    809 } /* ixv_disable_queue */
    810 
    811 static inline void
    812 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    813 {
    814 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    815 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    816 } /* ixv_rearm_queues */
    817 
    818 
    819 /************************************************************************
     820  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    821  ************************************************************************/
    822 static int
    823 ixv_msix_que(void *arg)
    824 {
    825 	struct ix_queue	*que = arg;
    826 	struct adapter  *adapter = que->adapter;
    827 	struct tx_ring	*txr = que->txr;
    828 	struct rx_ring	*rxr = que->rxr;
    829 	bool		more;
    830 	u32		newitr = 0;
    831 
    832 	ixv_disable_queue(adapter, que->msix);
    833 	++que->irqs.ev_count;
    834 
    835 #ifdef __NetBSD__
    836 	/* Don't run ixgbe_rxeof in interrupt context */
    837 	more = true;
    838 #else
    839 	more = ixgbe_rxeof(que);
    840 #endif
    841 
    842 	IXGBE_TX_LOCK(txr);
    843 	ixgbe_txeof(txr);
    844 	IXGBE_TX_UNLOCK(txr);
    845 
    846 	/* Do AIM now? */
    847 
    848 	if (adapter->enable_aim == false)
    849 		goto no_calc;
    850 	/*
    851 	 * Do Adaptive Interrupt Moderation:
    852 	 *  - Write out last calculated setting
    853 	 *  - Calculate based on average size over
    854 	 *    the last interval.
    855 	 */
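	/*
	 * Worked example (illustrative numbers): if the last interval saw
	 * txr->bytes = 150000 over txr->packets = 100, the average is 1500;
	 * adding 24 for the hardware frame and CRC gives 1524, which is
	 * under the 3000 cap and outside the 300-1200 mid range, so it is
	 * halved to 762 and then replicated into the upper 16 bits before
	 * being saved as the next EITR setting.
	 */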
    856 	if (que->eitr_setting)
    857 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    858 		    que->eitr_setting);
    859 
    860 	que->eitr_setting = 0;
    861 
    862 	/* Idle, do nothing */
    863 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    864 		goto no_calc;
    865 
    866 	if ((txr->bytes) && (txr->packets))
    867 		newitr = txr->bytes/txr->packets;
    868 	if ((rxr->bytes) && (rxr->packets))
    869 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    870 	newitr += 24; /* account for hardware frame, crc */
    871 
    872 	/* set an upper boundary */
    873 	newitr = min(newitr, 3000);
    874 
    875 	/* Be nice to the mid range */
    876 	if ((newitr > 300) && (newitr < 1200))
    877 		newitr = (newitr / 3);
    878 	else
    879 		newitr = (newitr / 2);
    880 
    881 	newitr |= newitr << 16;
    882 
    883 	/* save for next interrupt */
    884 	que->eitr_setting = newitr;
    885 
    886 	/* Reset state */
    887 	txr->bytes = 0;
    888 	txr->packets = 0;
    889 	rxr->bytes = 0;
    890 	rxr->packets = 0;
    891 
    892 no_calc:
    893 	if (more)
    894 		softint_schedule(que->que_si);
    895 	else /* Re-enable this interrupt */
    896 		ixv_enable_queue(adapter, que->msix);
    897 
    898 	return 1;
    899 } /* ixv_msix_que */
    900 
    901 /************************************************************************
    902  * ixv_msix_mbx
    903  ************************************************************************/
    904 static int
    905 ixv_msix_mbx(void *arg)
    906 {
    907 	struct adapter	*adapter = arg;
    908 	struct ixgbe_hw *hw = &adapter->hw;
    909 	u32		reg;
    910 
    911 	++adapter->link_irq.ev_count;
    912 
    913 	/* First get the cause */
    914 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    915 	/* Clear interrupt with write */
    916 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    917 
    918 	/* Link status change */
    919 	if (reg & IXGBE_EICR_LSC)
    920 		softint_schedule(adapter->link_si);
    921 
    922 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    923 
    924 	return 1;
    925 } /* ixv_msix_mbx */
    926 
    927 /************************************************************************
    928  * ixv_media_status - Media Ioctl callback
    929  *
    930  *   Called whenever the user queries the status of
    931  *   the interface using ifconfig.
    932  ************************************************************************/
    933 static void
    934 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    935 {
    936 	struct adapter *adapter = ifp->if_softc;
    937 
    938 	INIT_DEBUGOUT("ixv_media_status: begin");
    939 	IXGBE_CORE_LOCK(adapter);
    940 	ixv_update_link_status(adapter);
    941 
    942 	ifmr->ifm_status = IFM_AVALID;
    943 	ifmr->ifm_active = IFM_ETHER;
    944 
    945 	if (!adapter->link_active) {
    946 		ifmr->ifm_active |= IFM_NONE;
    947 		IXGBE_CORE_UNLOCK(adapter);
    948 		return;
    949 	}
    950 
    951 	ifmr->ifm_status |= IFM_ACTIVE;
    952 
    953 	switch (adapter->link_speed) {
    954 		case IXGBE_LINK_SPEED_10GB_FULL:
    955 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    956 			break;
    957 		case IXGBE_LINK_SPEED_1GB_FULL:
    958 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    959 			break;
    960 		case IXGBE_LINK_SPEED_100_FULL:
    961 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    962 			break;
    963 		case IXGBE_LINK_SPEED_10_FULL:
    964 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    965 			break;
    966 	}
    967 
    968 	IXGBE_CORE_UNLOCK(adapter);
    969 
    970 	return;
    971 } /* ixv_media_status */
    972 
    973 /************************************************************************
    974  * ixv_media_change - Media Ioctl callback
    975  *
     976  *   Called when the user changes speed/duplex using the
     977  *   media/mediaopt options of ifconfig.
    978  ************************************************************************/
    979 static int
    980 ixv_media_change(struct ifnet *ifp)
    981 {
    982 	struct adapter *adapter = ifp->if_softc;
    983 	struct ifmedia *ifm = &adapter->media;
    984 
    985 	INIT_DEBUGOUT("ixv_media_change: begin");
    986 
    987 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    988 		return (EINVAL);
    989 
    990 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    991 	case IFM_AUTO:
    992 		break;
    993 	default:
    994 		device_printf(adapter->dev, "Only auto media type\n");
    995 		return (EINVAL);
    996 	}
    997 
    998 	return (0);
    999 } /* ixv_media_change */
   1000 
   1001 
   1002 /************************************************************************
   1003  * ixv_negotiate_api
   1004  *
    1005  *   Negotiate the Mailbox API with the PF, starting
    1006  *   with the most capable API and falling back from there.
   1007  ************************************************************************/
   1008 static int
   1009 ixv_negotiate_api(struct adapter *adapter)
   1010 {
   1011 	struct ixgbe_hw *hw = &adapter->hw;
   1012 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1013 	                              ixgbe_mbox_api_10,
   1014 	                              ixgbe_mbox_api_unknown };
   1015 	int             i = 0;
   1016 
   1017 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1018 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1019 			return (0);
   1020 		i++;
   1021 	}
   1022 
   1023 	return (EINVAL);
   1024 } /* ixv_negotiate_api */
   1025 
   1026 
   1027 /************************************************************************
   1028  * ixv_set_multi - Multicast Update
   1029  *
   1030  *   Called whenever multicast address list is updated.
   1031  ************************************************************************/
   1032 static void
   1033 ixv_set_multi(struct adapter *adapter)
   1034 {
   1035 	struct ether_multi *enm;
   1036 	struct ether_multistep step;
   1037 	struct ethercom *ec = &adapter->osdep.ec;
   1038 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1039 	u8                 *update_ptr;
   1040 	int                mcnt = 0;
   1041 
   1042 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1043 
   1044 	ETHER_FIRST_MULTI(step, ec, enm);
   1045 	while (enm != NULL) {
   1046 		bcopy(enm->enm_addrlo,
   1047 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1048 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1049 		mcnt++;
   1050 		/* XXX This might be required --msaitoh */
   1051 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1052 			break;
   1053 		ETHER_NEXT_MULTI(step, enm);
   1054 	}
   1055 
   1056 	update_ptr = mta;
   1057 
   1058 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1059 	    ixv_mc_array_itr, TRUE);
   1060 
   1061 	return;
   1062 } /* ixv_set_multi */
   1063 
   1064 /************************************************************************
   1065  * ixv_mc_array_itr
   1066  *
   1067  *   An iterator function needed by the multicast shared code.
   1068  *   It feeds the shared code routine the addresses in the
   1069  *   array of ixv_set_multi() one by one.
   1070  ************************************************************************/
   1071 static u8 *
   1072 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1073 {
   1074 	u8 *addr = *update_ptr;
   1075 	u8 *newptr;
   1076 	*vmdq = 0;
   1077 
   1078 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1079 	*update_ptr = newptr;
   1080 
   1081 	return addr;
   1082 } /* ixv_mc_array_itr */
   1083 
   1084 /************************************************************************
   1085  * ixv_local_timer - Timer routine
   1086  *
   1087  *   Checks for link status, updates statistics,
   1088  *   and runs the watchdog check.
   1089  ************************************************************************/
   1090 static void
   1091 ixv_local_timer(void *arg)
   1092 {
   1093 	struct adapter *adapter = arg;
   1094 
   1095 	IXGBE_CORE_LOCK(adapter);
   1096 	ixv_local_timer_locked(adapter);
   1097 	IXGBE_CORE_UNLOCK(adapter);
   1098 }
   1099 
   1100 static void
   1101 ixv_local_timer_locked(void *arg)
   1102 {
   1103 	struct adapter	*adapter = arg;
   1104 	device_t	dev = adapter->dev;
   1105 	struct ix_queue	*que = adapter->queues;
   1106 	u64		queues = 0;
   1107 	int		hung = 0;
   1108 
   1109 	KASSERT(mutex_owned(&adapter->core_mtx));
   1110 
   1111 	ixv_check_link(adapter);
   1112 
   1113 	/* Stats Update */
   1114 	ixv_update_stats(adapter);
   1115 
   1116 	/*
   1117 	 * Check the TX queues status
   1118 	 *      - mark hung queues so we don't schedule on them
   1119 	 *      - watchdog only if all queues show hung
   1120 	 */
   1121 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1122 		/* Keep track of queues with work for soft irq */
   1123 		if (que->txr->busy)
   1124 			queues |= ((u64)1 << que->me);
   1125 		/*
    1126 		 * Each time txeof runs without cleaning while there
    1127 		 * are uncleaned descriptors, it increments busy. If
    1128 		 * we reach the MAX we declare the queue hung.
   1129 		 */
   1130 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1131 			++hung;
   1132 			/* Mark the queue as inactive */
   1133 			adapter->active_queues &= ~((u64)1 << que->me);
   1134 			continue;
   1135 		} else {
   1136 			/* Check if we've come back from hung */
   1137 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1138 				adapter->active_queues |= ((u64)1 << que->me);
   1139 		}
   1140 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1141 			device_printf(dev,
   1142 			    "Warning queue %d appears to be hung!\n", i);
   1143 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1144 			++hung;
   1145 		}
   1146 	}
   1147 
   1148 	/* Only truly watchdog if all queues show hung */
   1149 	if (hung == adapter->num_queues)
   1150 		goto watchdog;
   1151 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1152 		ixv_rearm_queues(adapter, queues);
   1153 	}
   1154 
   1155 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1156 
   1157 	return;
   1158 
   1159 watchdog:
   1160 
   1161 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1162 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1163 	adapter->watchdog_events.ev_count++;
   1164 	ixv_init_locked(adapter);
   1165 } /* ixv_local_timer */
   1166 
   1167 /************************************************************************
   1168  * ixv_update_link_status - Update OS on link state
   1169  *
   1170  * Note: Only updates the OS on the cached link state.
   1171  *       The real check of the hardware only happens with
   1172  *       a link interrupt.
   1173  ************************************************************************/
   1174 static void
   1175 ixv_update_link_status(struct adapter *adapter)
   1176 {
   1177 	struct ifnet *ifp = adapter->ifp;
   1178 	device_t     dev = adapter->dev;
   1179 
   1180 	if (adapter->link_up) {
   1181 		if (adapter->link_active == FALSE) {
   1182 			if (bootverbose) {
   1183 				const char *bpsmsg;
   1184 
   1185 				switch (adapter->link_speed) {
   1186 				case IXGBE_LINK_SPEED_10GB_FULL:
   1187 					bpsmsg = "10 Gbps";
   1188 					break;
   1189 				case IXGBE_LINK_SPEED_5GB_FULL:
   1190 					bpsmsg = "5 Gbps";
   1191 					break;
   1192 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1193 					bpsmsg = "2.5 Gbps";
   1194 					break;
   1195 				case IXGBE_LINK_SPEED_1GB_FULL:
   1196 					bpsmsg = "1 Gbps";
   1197 					break;
   1198 				case IXGBE_LINK_SPEED_100_FULL:
   1199 					bpsmsg = "100 Mbps";
   1200 					break;
   1201 				case IXGBE_LINK_SPEED_10_FULL:
   1202 					bpsmsg = "10 Mbps";
   1203 					break;
   1204 				default:
   1205 					bpsmsg = "unknown speed";
   1206 					break;
   1207 				}
   1208 				device_printf(dev, "Link is up %s %s \n",
   1209 				    bpsmsg, "Full Duplex");
   1210 			}
   1211 			adapter->link_active = TRUE;
   1212 			if_link_state_change(ifp, LINK_STATE_UP);
   1213 		}
   1214 	} else { /* Link down */
   1215 		if (adapter->link_active == TRUE) {
   1216 			if (bootverbose)
   1217 				device_printf(dev, "Link is Down\n");
   1218 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1219 			adapter->link_active = FALSE;
   1220 		}
   1221 	}
   1222 
   1223 	return;
   1224 } /* ixv_update_link_status */
   1225 
   1226 
   1227 /************************************************************************
   1228  * ixv_stop - Stop the hardware
   1229  *
   1230  *   Disables all traffic on the adapter by issuing a
    1231  *   global reset on the MAC and deallocating TX/RX buffers.
   1232  ************************************************************************/
   1233 static void
   1234 ixv_ifstop(struct ifnet *ifp, int disable)
   1235 {
   1236 	struct adapter *adapter = ifp->if_softc;
   1237 
   1238 	IXGBE_CORE_LOCK(adapter);
   1239 	ixv_stop(adapter);
   1240 	IXGBE_CORE_UNLOCK(adapter);
   1241 }
   1242 
   1243 static void
   1244 ixv_stop(void *arg)
   1245 {
   1246 	struct ifnet    *ifp;
   1247 	struct adapter  *adapter = arg;
   1248 	struct ixgbe_hw *hw = &adapter->hw;
   1249 
   1250 	ifp = adapter->ifp;
   1251 
   1252 	KASSERT(mutex_owned(&adapter->core_mtx));
   1253 
   1254 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1255 	ixv_disable_intr(adapter);
   1256 
   1257 	/* Tell the stack that the interface is no longer active */
   1258 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1259 
   1260 	hw->mac.ops.reset_hw(hw);
   1261 	adapter->hw.adapter_stopped = FALSE;
   1262 	hw->mac.ops.stop_adapter(hw);
   1263 	callout_stop(&adapter->timer);
   1264 
   1265 	/* reprogram the RAR[0] in case user changed it. */
   1266 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1267 
   1268 	return;
   1269 } /* ixv_stop */
   1270 
   1271 
   1272 /************************************************************************
   1273  * ixv_allocate_pci_resources
   1274  ************************************************************************/
   1275 static int
   1276 ixv_allocate_pci_resources(struct adapter *adapter,
   1277     const struct pci_attach_args *pa)
   1278 {
   1279 	pcireg_t	memtype;
   1280 	device_t        dev = adapter->dev;
   1281 	bus_addr_t addr;
   1282 	int flags;
   1283 
   1284 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1285 	switch (memtype) {
   1286 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1287 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1288 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1289 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1290 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1291 			goto map_err;
   1292 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1293 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1294 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1295 		}
   1296 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1297 		     adapter->osdep.mem_size, flags,
   1298 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1299 map_err:
   1300 			adapter->osdep.mem_size = 0;
   1301 			aprint_error_dev(dev, "unable to map BAR0\n");
   1302 			return ENXIO;
   1303 		}
   1304 		break;
   1305 	default:
   1306 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1307 		return ENXIO;
   1308 	}
   1309 
   1310 	/* Pick up the tuneable queues */
   1311 	adapter->num_queues = ixv_num_queues;
   1312 
   1313 	return (0);
   1314 } /* ixv_allocate_pci_resources */
   1315 
   1316 /************************************************************************
   1317  * ixv_free_pci_resources
   1318  ************************************************************************/
   1319 static void
   1320 ixv_free_pci_resources(struct adapter * adapter)
   1321 {
   1322 	struct 		ix_queue *que = adapter->queues;
   1323 	int		rid;
   1324 
   1325 	/*
   1326 	 *  Release all msix queue resources:
   1327 	 */
   1328 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1329 		if (que->res != NULL)
   1330 			pci_intr_disestablish(adapter->osdep.pc,
   1331 			    adapter->osdep.ihs[i]);
   1332 	}
   1333 
   1334 
   1335 	/* Clean the Mailbox interrupt last */
   1336 	rid = adapter->vector;
   1337 
   1338 	if (adapter->osdep.ihs[rid] != NULL) {
   1339 		pci_intr_disestablish(adapter->osdep.pc,
   1340 		    adapter->osdep.ihs[rid]);
   1341 		adapter->osdep.ihs[rid] = NULL;
   1342 	}
   1343 
   1344 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1345 	    adapter->osdep.nintrs);
   1346 
   1347 	if (adapter->osdep.mem_size != 0) {
   1348 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1349 		    adapter->osdep.mem_bus_space_handle,
   1350 		    adapter->osdep.mem_size);
   1351 	}
   1352 
   1353 	return;
   1354 } /* ixv_free_pci_resources */
   1355 
   1356 /************************************************************************
   1357  * ixv_setup_interface
   1358  *
   1359  *   Setup networking device structure and register an interface.
   1360  ************************************************************************/
   1361 static void
   1362 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1363 {
   1364 	struct ethercom *ec = &adapter->osdep.ec;
   1365 	struct ifnet   *ifp;
   1366 
   1367 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1368 
   1369 	ifp = adapter->ifp = &ec->ec_if;
   1370 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1371 	ifp->if_baudrate = IF_Gbps(10);
   1372 	ifp->if_init = ixv_init;
   1373 	ifp->if_stop = ixv_ifstop;
   1374 	ifp->if_softc = adapter;
   1375 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1376 #ifdef IXGBE_MPSAFE
   1377 	ifp->if_extflags = IFEF_START_MPSAFE;
   1378 #endif
   1379 	ifp->if_ioctl = ixv_ioctl;
   1380 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1381 #if 0
   1382 		ixv_start_locked = ixgbe_legacy_start_locked;
   1383 #endif
   1384 	} else {
   1385 		ifp->if_transmit = ixgbe_mq_start;
   1386 #if 0
   1387 		ixv_start_locked = ixgbe_mq_start_locked;
   1388 #endif
   1389 	}
   1390 	ifp->if_start = ixgbe_legacy_start;
   1391 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1392 	IFQ_SET_READY(&ifp->if_snd);
   1393 
   1394 	if_initialize(ifp);
   1395 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1396 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1397 	/*
   1398 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1399 	 * used.
   1400 	 */
   1401 	if_register(ifp);
   1402 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1403 
   1404 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1405 
   1406 	/*
   1407 	 * Tell the upper layer(s) we support long frames.
   1408 	 */
   1409 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1410 
   1411 	/* Set capability flags */
   1412 	ifp->if_capabilities |= IFCAP_HWCSUM
   1413 	                     |  IFCAP_TSOv4
   1414 	                     |  IFCAP_TSOv6;
   1415 	ifp->if_capenable = 0;
   1416 
   1417 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1418 			    |  ETHERCAP_VLAN_HWCSUM
   1419 			    |  ETHERCAP_JUMBO_MTU
   1420 			    |  ETHERCAP_VLAN_MTU;
   1421 
   1422 	/* Enable the above capabilities by default */
   1423 	ec->ec_capenable = ec->ec_capabilities;
   1424 
   1425 	/* Don't enable LRO by default */
   1426 	ifp->if_capabilities |= IFCAP_LRO;
   1427 #if 0
   1428 	ifp->if_capenable = ifp->if_capabilities;
   1429 #endif
   1430 
   1431 	/*
   1432 	 * Specify the media types supported by this adapter and register
   1433 	 * callbacks to update media and link information
   1434 	 */
   1435 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1436 	    ixv_media_status);
   1437 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1438 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1439 
   1440 	return;
   1441 } /* ixv_setup_interface */
   1442 
   1443 
   1444 /************************************************************************
   1445  * ixv_initialize_transmit_units - Enable transmit unit.
   1446  ************************************************************************/
   1447 static void
   1448 ixv_initialize_transmit_units(struct adapter *adapter)
   1449 {
   1450 	struct tx_ring	*txr = adapter->tx_rings;
   1451 	struct ixgbe_hw	*hw = &adapter->hw;
   1452 
   1453 
   1454 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1455 		u64 tdba = txr->txdma.dma_paddr;
   1456 		u32 txctrl, txdctl;
   1457 
   1458 		/* Set WTHRESH to 8, burst writeback */
   1459 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1460 		txdctl |= (8 << 16);
   1461 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1462 
   1463 		/* Set the HW Tx Head and Tail indices */
   1464 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1465 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1466 
   1467 		/* Set Tx Tail register */
   1468 		txr->tail = IXGBE_VFTDT(i);
   1469 
   1470 		/* Set Ring parameters */
   1471 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1472 		    (tdba & 0x00000000ffffffffULL));
   1473 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1474 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1475 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1476 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1477 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1478 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1479 
   1480 		/* Now enable */
   1481 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1482 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1483 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1484 	}
   1485 
   1486 	return;
   1487 } /* ixv_initialize_transmit_units */
   1488 
   1489 
   1490 /************************************************************************
   1491  * ixv_initialize_rss_mapping
   1492  ************************************************************************/
   1493 static void
   1494 ixv_initialize_rss_mapping(struct adapter *adapter)
   1495 {
   1496 	struct ixgbe_hw *hw = &adapter->hw;
   1497 	u32             reta = 0, mrqc, rss_key[10];
   1498 	int             queue_id;
   1499 	int             i, j;
   1500 	u32             rss_hash_config;
   1501 
   1502 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1503 		/* Fetch the configured RSS key */
   1504 		rss_getkey((uint8_t *)&rss_key);
   1505 	} else {
   1506 		/* set up random bits */
   1507 		cprng_fast(&rss_key, sizeof(rss_key));
   1508 	}
   1509 
   1510 	/* Now fill out hash function seeds */
   1511 	for (i = 0; i < 10; i++)
   1512 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1513 
   1514 	/* Set up the redirection table */
   1515 	for (i = 0, j = 0; i < 64; i++, j++) {
   1516 		if (j == adapter->num_queues)
   1517 			j = 0;
   1518 
   1519 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1520 			/*
   1521 			 * Fetch the RSS bucket id for the given indirection
   1522 			 * entry. Cap it at the number of configured buckets
   1523 			 * (which is num_queues.)
   1524 			 */
   1525 			queue_id = rss_get_indirection_to_bucket(i);
   1526 			queue_id = queue_id % adapter->num_queues;
   1527 		} else
   1528 			queue_id = j;
   1529 
   1530 		/*
   1531 		 * The low 8 bits are for hash value (n+0);
   1532 		 * The next 8 bits are for hash value (n+1), etc.
   1533 		 */
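		/*
		 * Illustrative: with two queues (and RSS disabled) the
		 * queue IDs cycle 0,1,0,1..., so the first VFRETA register
		 * written below ends up as 0x01000100 - each group of four
		 * iterations shifts the accumulator down one byte and puts
		 * the newest queue ID in the top byte.
		 */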
   1534 		reta >>= 8;
   1535 		reta |= ((uint32_t)queue_id) << 24;
   1536 		if ((i & 3) == 3) {
   1537 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1538 			reta = 0;
   1539 		}
   1540 	}
   1541 
   1542 	/* Perform hash on these packet types */
   1543 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1544 		rss_hash_config = rss_gethashconfig();
   1545 	else {
   1546 		/*
   1547 		 * Disable UDP - IP fragments aren't currently being handled
   1548 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1549 		 * traffic.
   1550 		 */
   1551 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1552 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1553 		                | RSS_HASHTYPE_RSS_IPV6
   1554 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1555 	}
   1556 
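	/* Translate the enabled hash types into VFMRQC field-enable bits */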
   1557 	mrqc = IXGBE_MRQC_RSSEN;
   1558 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1559 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1560 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1561 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1562 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1563 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1564 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1565 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1566 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1567 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1568 		    __func__);
   1569 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1570 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1571 		    __func__);
   1572 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1573 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1574 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1575 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1576 		    __func__);
   1577 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1578 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1579 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1580 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1581 		    __func__);
   1582 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1583 } /* ixv_initialize_rss_mapping */
   1584 
   1585 
   1586 /************************************************************************
   1587  * ixv_initialize_receive_units - Setup receive registers and features.
   1588  ************************************************************************/
   1589 static void
   1590 ixv_initialize_receive_units(struct adapter *adapter)
   1591 {
   1592 	struct	rx_ring	*rxr = adapter->rx_rings;
   1593 	struct ixgbe_hw	*hw = &adapter->hw;
   1594 	struct ifnet	*ifp = adapter->ifp;
   1595 	u32		bufsz, rxcsum, psrtype;
   1596 
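	/* SRRCTL expresses the receive buffer size in 1 KB units */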
   1597 	if (ifp->if_mtu > ETHERMTU)
   1598 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1599 	else
   1600 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1601 
   1602 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1603 	        | IXGBE_PSRTYPE_UDPHDR
   1604 	        | IXGBE_PSRTYPE_IPV4HDR
   1605 	        | IXGBE_PSRTYPE_IPV6HDR
   1606 	        | IXGBE_PSRTYPE_L2HDR;
   1607 
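	/*
	 * The upper PSRTYPE bits encode the number of RSS queues per
	 * pool as a power of two; writing 1 << 29 selects two queues.
	 */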
   1608 	if (adapter->num_queues > 1)
   1609 		psrtype |= 1 << 29;
   1610 
   1611 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1612 
   1613 	/* Tell PF our max_frame size */
   1614 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1615 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1616 	}
   1617 
   1618 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1619 		u64 rdba = rxr->rxdma.dma_paddr;
   1620 		u32 reg, rxdctl;
   1621 
   1622 		/* Disable the queue */
   1623 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1624 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1625 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1626 		for (int j = 0; j < 10; j++) {
   1627 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1628 			    IXGBE_RXDCTL_ENABLE)
   1629 				msec_delay(1);
   1630 			else
   1631 				break;
   1632 		}
   1633 		wmb();
   1634 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1635 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1636 		    (rdba & 0x00000000ffffffffULL));
   1637 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1638 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1639 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1640 
   1641 		/* Reset the ring indices */
   1642 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1643 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1644 
   1645 		/* Set up the SRRCTL register */
   1646 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1647 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1648 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1649 		reg |= bufsz;
   1650 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1651 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1652 
   1653 		/* Capture Rx Tail index */
   1654 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1655 
   1656 		/* Do the queue enabling last */
   1657 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1658 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1659 		for (int k = 0; k < 10; k++) {
   1660 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1661 			    IXGBE_RXDCTL_ENABLE)
   1662 				break;
   1663 			msec_delay(1);
   1664 		}
   1665 		wmb();
   1666 
   1667 		/* Set the Tail Pointer */
   1668 		/*
   1669 		 * In netmap mode, we must preserve the buffers made
   1670 		 * available to userspace before the if_init()
   1671 		 * (this is true by default on the TX side, because
   1672 		 * init makes all buffers available to userspace).
   1673 		 *
   1674 		 * netmap_reset() and the device specific routines
   1675 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1676 		 * buffers at the end of the NIC ring, so here we
   1677 		 * must set the RDT (tail) register to make sure
   1678 		 * they are not overwritten.
   1679 		 *
   1680 		 * In this driver the NIC ring starts at RDH = 0,
   1681 		 * RDT points to the last slot available for reception (?),
   1682 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1683 		 */
   1684 #ifdef DEV_NETMAP
   1685 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1686 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1687 			struct netmap_adapter *na = NA(adapter->ifp);
   1688 			struct netmap_kring *kring = &na->rx_rings[i];
   1689 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1690 
   1691 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1692 		} else
   1693 #endif /* DEV_NETMAP */
   1694 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1695 			    adapter->num_rx_desc - 1);
   1696 	}
   1697 
   1698 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1699 
   1700 	ixv_initialize_rss_mapping(adapter);
   1701 
   1702 	if (adapter->num_queues > 1) {
   1703 		/* RSS and RX IPP Checksum are mutually exclusive */
   1704 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1705 	}
   1706 
   1707 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1708 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1709 
   1710 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1711 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1712 
   1713 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1714 
   1715 	return;
   1716 } /* ixv_initialize_receive_units */
   1717 
   1718 /************************************************************************
   1719  * ixv_setup_vlan_support
   1720  ************************************************************************/
   1721 static void
   1722 ixv_setup_vlan_support(struct adapter *adapter)
   1723 {
   1724 	struct ethercom *ec = &adapter->osdep.ec;
   1725 	struct ixgbe_hw *hw = &adapter->hw;
   1726 	struct rx_ring  *rxr;
   1727 	u32		ctrl, vid, vfta, retry;
   1728 
    1729 	/*
    1730 	 * We get here through init_locked, meaning a soft reset.
    1731 	 * The reset has already cleared the VFTA and other state,
    1732 	 * so if no VLANs have been registered there is nothing
    1733 	 * to do.
    1734 	 */
   1735 	if (!VLAN_ATTACHED(ec))
   1736 		return;
   1737 
   1738 	/* Enable the queues */
   1739 	for (int i = 0; i < adapter->num_queues; i++) {
   1740 		rxr = &adapter->rx_rings[i];
   1741 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1742 		ctrl |= IXGBE_RXDCTL_VME;
   1743 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1744 		/*
   1745 		 * Let Rx path know that it needs to store VLAN tag
   1746 		 * as part of extra mbuf info.
   1747 		 */
   1748 		rxr->vtag_strip = TRUE;
   1749 	}
   1750 
   1751 #if 1
   1752 	/* XXX dirty hack. Enable all VIDs */
   1753 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
    1754 		adapter->shadow_vfta[i] = 0xffffffff;
   1755 #endif
    1756 	/*
    1757 	 * A soft reset zeroes out the VFTA, so
    1758 	 * we need to repopulate it now.
    1759 	 */
   1760 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1761 		if (adapter->shadow_vfta[i] == 0)
   1762 			continue;
   1763 		vfta = adapter->shadow_vfta[i];
    1764 		/*
    1765 		 * Reconstruct the VLAN IDs based on
    1766 		 * the bits set in each of the
    1767 		 * array entries.
    1768 		 */
   1769 		for (int j = 0; j < 32; j++) {
   1770 			retry = 0;
   1771 			if ((vfta & (1 << j)) == 0)
   1772 				continue;
   1773 			vid = (i * 32) + j;
   1774 			/* Call the shared code mailbox routine */
   1775 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1776 				if (++retry > 5)
   1777 					break;
   1778 			}
   1779 		}
   1780 	}
   1781 } /* ixv_setup_vlan_support */
   1782 
   1783 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1784 /************************************************************************
   1785  * ixv_register_vlan
   1786  *
   1787  *   Run via a vlan config EVENT, it enables us to use the
   1788  *   HW Filter table since we can get the vlan id. This just
   1789  *   creates the entry in the soft version of the VFTA, init
   1790  *   will repopulate the real table.
   1791  ************************************************************************/
   1792 static void
   1793 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1794 {
   1795 	struct adapter	*adapter = ifp->if_softc;
   1796 	u16		index, bit;
   1797 
   1798 	if (ifp->if_softc != arg) /* Not our event */
   1799 		return;
   1800 
   1801 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1802 		return;
   1803 
   1804 	IXGBE_CORE_LOCK(adapter);
   1805 	index = (vtag >> 5) & 0x7F;
   1806 	bit = vtag & 0x1F;
   1807 	adapter->shadow_vfta[index] |= (1 << bit);
   1808 	/* Re-init to load the changes */
   1809 	ixv_init_locked(adapter);
   1810 	IXGBE_CORE_UNLOCK(adapter);
   1811 } /* ixv_register_vlan */
   1812 
   1813 /************************************************************************
   1814  * ixv_unregister_vlan
   1815  *
   1816  *   Run via a vlan unconfig EVENT, remove our entry
   1817  *   in the soft vfta.
   1818  ************************************************************************/
   1819 static void
   1820 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1821 {
   1822 	struct adapter	*adapter = ifp->if_softc;
   1823 	u16		index, bit;
   1824 
    1825 	if (ifp->if_softc != arg) /* Not our event */
   1826 		return;
   1827 
   1828 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1829 		return;
   1830 
   1831 	IXGBE_CORE_LOCK(adapter);
   1832 	index = (vtag >> 5) & 0x7F;
   1833 	bit = vtag & 0x1F;
   1834 	adapter->shadow_vfta[index] &= ~(1 << bit);
   1835 	/* Re-init to load the changes */
   1836 	ixv_init_locked(adapter);
   1837 	IXGBE_CORE_UNLOCK(adapter);
   1838 } /* ixv_unregister_vlan */
   1839 #endif
   1840 
   1841 /************************************************************************
   1842  * ixv_enable_intr
   1843  ************************************************************************/
   1844 static void
   1845 ixv_enable_intr(struct adapter *adapter)
   1846 {
   1847 	struct ixgbe_hw *hw = &adapter->hw;
   1848 	struct ix_queue *que = adapter->queues;
   1849 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1850 
   1851 
   1852 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1853 
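	/* Program auto-clear (VTEIAC) for the queue vectors only */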
   1854 	mask = IXGBE_EIMS_ENABLE_MASK;
   1855 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1856 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1857 
   1858 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1859 		ixv_enable_queue(adapter, que->msix);
   1860 
   1861 	IXGBE_WRITE_FLUSH(hw);
   1862 
   1863 	return;
   1864 } /* ixv_enable_intr */
   1865 
   1866 /************************************************************************
   1867  * ixv_disable_intr
   1868  ************************************************************************/
   1869 static void
   1870 ixv_disable_intr(struct adapter *adapter)
   1871 {
   1872 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1873 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1874 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1875 
   1876 	return;
   1877 } /* ixv_disable_intr */
   1878 
   1879 /************************************************************************
   1880  * ixv_set_ivar
   1881  *
   1882  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1883  *    - entry is the register array entry
   1884  *    - vector is the MSI-X vector for this queue
   1885  *    - type is RX/TX/MISC
   1886  ************************************************************************/
   1887 static void
   1888 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1889 {
   1890 	struct ixgbe_hw *hw = &adapter->hw;
   1891 	u32             ivar, index;
   1892 
   1893 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1894 
   1895 	if (type == -1) { /* MISC IVAR */
   1896 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1897 		ivar &= ~0xFF;
   1898 		ivar |= vector;
   1899 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1900 	} else {          /* RX/TX IVARS */
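		/*
		 * Each 32-bit VTIVAR register holds four 8-bit entries:
		 * Rx and Tx vectors for two queues.  Pick the byte for
		 * this queue/type pair within VTIVAR(entry >> 1).
		 */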
   1901 		index = (16 * (entry & 1)) + (8 * type);
   1902 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1903 		ivar &= ~(0xFF << index);
   1904 		ivar |= (vector << index);
   1905 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1906 	}
   1907 } /* ixv_set_ivar */
   1908 
   1909 /************************************************************************
   1910  * ixv_configure_ivars
   1911  ************************************************************************/
   1912 static void
   1913 ixv_configure_ivars(struct adapter *adapter)
   1914 {
   1915 	struct ix_queue *que = adapter->queues;
   1916 
   1917 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1918 		/* First the RX queue entry */
   1919 		ixv_set_ivar(adapter, i, que->msix, 0);
   1920 		/* ... and the TX */
   1921 		ixv_set_ivar(adapter, i, que->msix, 1);
   1922 		/* Set an initial value in EITR */
   1923 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1924 		    IXGBE_EITR_DEFAULT);
   1925 	}
   1926 
   1927 	/* For the mailbox interrupt */
   1928 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1929 } /* ixv_configure_ivars */
   1930 
   1931 
   1932 /************************************************************************
   1933  * ixv_save_stats
   1934  *
    1935  *   The VF stats registers never have a truly virgin
    1936  *   starting point, so this routine tries to make an
    1937  *   artificial one, recording a baseline ("ground zero")
    1938  *   at attach time.
   1939  ************************************************************************/
   1940 static void
   1941 ixv_save_stats(struct adapter *adapter)
   1942 {
   1943 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1944 
   1945 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1946 		stats->saved_reset_vfgprc +=
   1947 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1948 		stats->saved_reset_vfgptc +=
   1949 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1950 		stats->saved_reset_vfgorc +=
   1951 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1952 		stats->saved_reset_vfgotc +=
   1953 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1954 		stats->saved_reset_vfmprc +=
   1955 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1956 	}
   1957 } /* ixv_save_stats */
   1958 
   1959 /************************************************************************
   1960  * ixv_init_stats
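 *
 *   Snapshot the current hardware counter values as the baseline
 *   for the last_* / base_* bookkeeping.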
   1961  ************************************************************************/
   1962 static void
   1963 ixv_init_stats(struct adapter *adapter)
   1964 {
   1965 	struct ixgbe_hw *hw = &adapter->hw;
   1966 
   1967 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1968 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1969 	adapter->stats.vf.last_vfgorc |=
   1970 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1971 
   1972 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1973 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1974 	adapter->stats.vf.last_vfgotc |=
   1975 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1976 
   1977 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1978 
   1979 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1980 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1981 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1982 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1983 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1984 } /* ixv_init_stats */
   1985 
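/*
 * Update a 64-bit event counter from a 32-bit (or 36-bit) hardware
 * statistics register, accounting for register wraparound between
 * successive reads.
 */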
   1986 #define UPDATE_STAT_32(reg, last, count)		\
   1987 {                                                       \
   1988 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   1989 	if (current < (last))				\
   1990 		count.ev_count += 0x100000000LL;	\
   1991 	(last) = current;				\
   1992 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   1993 	count.ev_count |= current;			\
   1994 }
   1995 
   1996 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   1997 {                                                       \
   1998 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   1999 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   2000 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   2001 	if (current < (last))				\
   2002 		count.ev_count += 0x1000000000LL;	\
   2003 	(last) = current;				\
   2004 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2005 	count.ev_count |= current;			\
   2006 }
   2007 
   2008 /************************************************************************
   2009  * ixv_update_stats - Update the board statistics counters.
   2010  ************************************************************************/
   2011 void
   2012 ixv_update_stats(struct adapter *adapter)
   2013 {
   2014 	struct ixgbe_hw *hw = &adapter->hw;
   2015 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2016 
    2017 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
    2018 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
    2019 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
    2020 	    stats->vfgorc);
    2021 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
    2022 	    stats->vfgotc);
    2023 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2024 
   2025 	/* Fill out the OS statistics structure */
   2026 	/*
    2027 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
    2028 	 * adapter->stats counters. Leaving them alone is required for
    2029 	 * ifconfig -z (SIOCZIFDATA) to work.
   2030 	 */
   2031 } /* ixv_update_stats */
   2032 
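/************************************************************************
 * ixv_sysctl_instance - Create the device's root sysctl node under hw.
 ************************************************************************/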
   2033 const struct sysctlnode *
   2034 ixv_sysctl_instance(struct adapter *adapter)
   2035 {
   2036 	const char *dvname;
   2037 	struct sysctllog **log;
   2038 	int rc;
   2039 	const struct sysctlnode *rnode;
   2040 
   2041 	log = &adapter->sysctllog;
   2042 	dvname = device_xname(adapter->dev);
   2043 
   2044 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2045 	    0, CTLTYPE_NODE, dvname,
   2046 	    SYSCTL_DESCR("ixv information and settings"),
   2047 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2048 		goto err;
   2049 
   2050 	return rnode;
   2051 err:
   2052 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2053 	return NULL;
   2054 }
   2055 
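/************************************************************************
 * ixv_add_device_sysctls - Attach per-device sysctl knobs
 *                          (debug, enable_aim).
 ************************************************************************/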
   2056 static void
   2057 ixv_add_device_sysctls(struct adapter *adapter)
   2058 {
   2059 	struct sysctllog **log;
   2060 	const struct sysctlnode *rnode, *cnode;
   2061 	device_t dev;
   2062 
   2063 	dev = adapter->dev;
   2064 	log = &adapter->sysctllog;
   2065 
   2066 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2067 		aprint_error_dev(dev, "could not create sysctl root\n");
   2068 		return;
   2069 	}
   2070 
   2071 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2072 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2073 	    "debug", SYSCTL_DESCR("Debug Info"),
   2074 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2075 		aprint_error_dev(dev, "could not create sysctl\n");
   2076 
   2077 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2078 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2079 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2080 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2081 		aprint_error_dev(dev, "could not create sysctl\n");
   2082 }
   2083 
   2084 /************************************************************************
   2085  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2086  ************************************************************************/
   2087 static void
   2088 ixv_add_stats_sysctls(struct adapter *adapter)
   2089 {
   2090 	device_t                dev = adapter->dev;
   2091 	struct tx_ring          *txr = adapter->tx_rings;
   2092 	struct rx_ring          *rxr = adapter->rx_rings;
   2093 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2094 	struct ixgbe_hw *hw = &adapter->hw;
   2095 	const struct sysctlnode *rnode;
   2096 	struct sysctllog **log = &adapter->sysctllog;
   2097 	const char *xname = device_xname(dev);
   2098 
   2099 	/* Driver Statistics */
   2100 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2101 	    NULL, xname, "Handled queue in softint");
   2102 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2103 	    NULL, xname, "Requeued in softint");
   2104 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2105 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2106 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2107 	    NULL, xname, "m_defrag() failed");
   2108 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2109 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2110 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2111 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2112 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2113 	    NULL, xname, "Driver tx dma hard fail other");
   2114 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2115 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2116 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2117 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2118 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2119 	    NULL, xname, "Watchdog timeouts");
   2120 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2121 	    NULL, xname, "TSO errors");
   2122 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2123 	    NULL, xname, "Link MSI-X IRQ Handled");
   2124 
   2125 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2126 		snprintf(adapter->queues[i].evnamebuf,
   2127 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2128 		    xname, i);
   2129 		snprintf(adapter->queues[i].namebuf,
   2130 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2131 
   2132 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2133 			aprint_error_dev(dev, "could not create sysctl root\n");
   2134 			break;
   2135 		}
   2136 
   2137 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2138 		    0, CTLTYPE_NODE,
   2139 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2140 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2141 			break;
   2142 
   2143 #if 0 /* not yet */
   2144 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2145 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2146 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2147 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2148 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2149 			break;
   2150 
   2151 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2152 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2153 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2154 			NULL, 0, &(adapter->queues[i].irqs),
   2155 		    0, CTL_CREATE, CTL_EOL) != 0)
   2156 			break;
   2157 
   2158 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2159 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2160 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2161 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2162 		    0, CTL_CREATE, CTL_EOL) != 0)
   2163 			break;
   2164 
   2165 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2166 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2167 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2168 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2169 		    0, CTL_CREATE, CTL_EOL) != 0)
   2170 			break;
   2171 #endif
   2172 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2173 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2174 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2175 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2176 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2177 		    NULL, adapter->queues[i].evnamebuf,
   2178 		    "Queue No Descriptor Available");
   2179 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2180 		    NULL, adapter->queues[i].evnamebuf,
   2181 		    "Queue Packets Transmitted");
   2182 #ifndef IXGBE_LEGACY_TX
   2183 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2184 		    NULL, adapter->queues[i].evnamebuf,
   2185 		    "Packets dropped in pcq");
   2186 #endif
   2187 
   2188 #ifdef LRO
   2189 		struct lro_ctrl *lro = &rxr->lro;
   2190 #endif /* LRO */
   2191 
   2192 #if 0 /* not yet */
   2193 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2194 		    CTLFLAG_READONLY,
   2195 		    CTLTYPE_INT,
   2196 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2197 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2198 		    CTL_CREATE, CTL_EOL) != 0)
   2199 			break;
   2200 
   2201 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2202 		    CTLFLAG_READONLY,
   2203 		    CTLTYPE_INT,
   2204 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2205 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2206 		    CTL_CREATE, CTL_EOL) != 0)
   2207 			break;
   2208 #endif
   2209 
   2210 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2211 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2212 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2213 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2214 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2215 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2216 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2217 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2218 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2219 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2220 #ifdef LRO
   2221 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2222 				CTLFLAG_RD, &lro->lro_queued, 0,
   2223 				"LRO Queued");
   2224 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2225 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2226 				"LRO Flushed");
   2227 #endif /* LRO */
   2228 	}
   2229 
   2230 	/* MAC stats get their own sub node */
   2231 
   2232 	snprintf(stats->namebuf,
   2233 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2234 
   2235 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2236 	    stats->namebuf, "rx csum offload - IP");
   2237 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2238 	    stats->namebuf, "rx csum offload - L4");
   2239 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2240 	    stats->namebuf, "rx csum offload - IP bad");
   2241 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2242 	    stats->namebuf, "rx csum offload - L4 bad");
   2243 
   2244 	/* Packet Reception Stats */
   2245 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2246 	    xname, "Good Packets Received");
   2247 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2248 	    xname, "Good Octets Received");
   2249 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2250 	    xname, "Multicast Packets Received");
   2251 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2252 	    xname, "Good Packets Transmitted");
   2253 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2254 	    xname, "Good Octets Transmitted");
   2255 
   2256 	/* Mailbox Stats */
   2257 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2258 	    xname, "message TXs");
   2259 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2260 	    xname, "message RXs");
   2261 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2262 	    xname, "ACKs");
   2263 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2264 	    xname, "REQs");
   2265 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2266 	    xname, "RSTs");
   2267 
   2268 } /* ixv_add_stats_sysctls */
   2269 
   2270 /************************************************************************
   2271  * ixv_set_sysctl_value
   2272  ************************************************************************/
   2273 static void
   2274 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2275 	const char *description, int *limit, int value)
   2276 {
   2277 	device_t dev =  adapter->dev;
   2278 	struct sysctllog **log;
   2279 	const struct sysctlnode *rnode, *cnode;
   2280 
   2281 	log = &adapter->sysctllog;
   2282 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2283 		aprint_error_dev(dev, "could not create sysctl root\n");
   2284 		return;
   2285 	}
   2286 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2287 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2288 	    name, SYSCTL_DESCR(description),
   2289 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2290 		aprint_error_dev(dev, "could not create sysctl\n");
   2291 	*limit = value;
   2292 } /* ixv_set_sysctl_value */
   2293 
   2294 /************************************************************************
   2295  * ixv_print_debug_info
   2296  *
   2297  *   Called only when em_display_debug_stats is enabled.
   2298  *   Provides a way to take a look at important statistics
   2299  *   maintained by the driver and hardware.
   2300  ************************************************************************/
   2301 static void
   2302 ixv_print_debug_info(struct adapter *adapter)
   2303 {
    2304 	device_t        dev = adapter->dev;
    2305 	struct ixgbe_hw *hw = &adapter->hw;
    2306 	struct ix_queue *que = adapter->queues;
    2307 	struct rx_ring  *rxr;
    2308 	struct tx_ring  *txr;
    2309 #ifdef LRO
    2310 	struct lro_ctrl *lro;
   2311 #endif /* LRO */
   2312 
   2313 	device_printf(dev, "Error Byte Count = %u \n",
   2314 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2315 
   2316 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2317 		txr = que->txr;
   2318 		rxr = que->rxr;
   2319 #ifdef LRO
   2320 		lro = &rxr->lro;
   2321 #endif /* LRO */
   2322 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2323 		    que->msix, (long)que->irqs.ev_count);
   2324 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2325 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2326 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2327 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2328 #ifdef LRO
   2329 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2330 		    rxr->me, (long long)lro->lro_queued);
   2331 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2332 		    rxr->me, (long long)lro->lro_flushed);
   2333 #endif /* LRO */
   2334 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2335 		    txr->me, (long)txr->total_packets.ev_count);
   2336 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2337 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2338 	}
   2339 
   2340 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2341 	    (long)adapter->link_irq.ev_count);
   2342 } /* ixv_print_debug_info */
   2343 
   2344 /************************************************************************
   2345  * ixv_sysctl_debug
   2346  ************************************************************************/
   2347 static int
   2348 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2349 {
   2350 	struct sysctlnode node;
   2351 	struct adapter *adapter;
   2352 	int            error, result;
   2353 
    2354 	node = *rnode;
    2355 	adapter = (struct adapter *)node.sysctl_data;
    2356 	node.sysctl_data = &result;
    2357 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    2358 
    2359 	if (error || newp == NULL)
    2360 		return error;
    2361 
    2362 	if (result == 1) {
    2363 		ixv_print_debug_info(adapter);
    2364 	}
   2365 
   2366 	return 0;
   2367 } /* ixv_sysctl_debug */
   2368 
   2369 /************************************************************************
   2370  * ixv_init_device_features
   2371  ************************************************************************/
   2372 static void
   2373 ixv_init_device_features(struct adapter *adapter)
   2374 {
   2375 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2376 	                  | IXGBE_FEATURE_VF
   2377 	                  | IXGBE_FEATURE_RSS
   2378 	                  | IXGBE_FEATURE_LEGACY_TX;
   2379 
    2380 	/* A tad short on feature flags for VFs at the moment. */
   2381 	switch (adapter->hw.mac.type) {
   2382 	case ixgbe_mac_82599_vf:
   2383 		break;
   2384 	case ixgbe_mac_X540_vf:
   2385 		break;
   2386 	case ixgbe_mac_X550_vf:
   2387 	case ixgbe_mac_X550EM_x_vf:
   2388 	case ixgbe_mac_X550EM_a_vf:
   2389 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2390 		break;
   2391 	default:
   2392 		break;
   2393 	}
   2394 
   2395 	/* Enabled by default... */
   2396 	/* Is a virtual function (VF) */
   2397 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2398 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2399 	/* Netmap */
   2400 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2401 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2402 	/* Receive-Side Scaling (RSS) */
   2403 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2404 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2405 	/* Needs advanced context descriptor regardless of offloads req'd */
   2406 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2407 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2408 
   2409 	/* Enabled via sysctl... */
   2410 	/* Legacy (single queue) transmit */
   2411 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2412 	    ixv_enable_legacy_tx)
   2413 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2414 } /* ixv_init_device_features */
   2415 
   2416 /************************************************************************
   2417  * ixv_shutdown - Shutdown entry point
   2418  ************************************************************************/
   2419 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2420 static int
   2421 ixv_shutdown(device_t dev)
   2422 {
   2423 	struct adapter *adapter = device_private(dev);
   2424 	IXGBE_CORE_LOCK(adapter);
   2425 	ixv_stop(adapter);
   2426 	IXGBE_CORE_UNLOCK(adapter);
   2427 
   2428 	return (0);
   2429 } /* ixv_shutdown */
   2430 #endif
   2431 
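/************************************************************************
 * ixv_ifflags_cb - Interface-flags change callback
 *
 *   Returns ENETRESET when the changed flags require the interface
 *   to be re-initialized.
 ************************************************************************/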
   2432 static int
   2433 ixv_ifflags_cb(struct ethercom *ec)
   2434 {
   2435 	struct ifnet *ifp = &ec->ec_if;
   2436 	struct adapter *adapter = ifp->if_softc;
   2437 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2438 
   2439 	IXGBE_CORE_LOCK(adapter);
   2440 
   2441 	if (change != 0)
   2442 		adapter->if_flags = ifp->if_flags;
   2443 
   2444 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2445 		rc = ENETRESET;
   2446 
   2447 	/* Set up VLAN support and filter */
   2448 	ixv_setup_vlan_support(adapter);
   2449 
   2450 	IXGBE_CORE_UNLOCK(adapter);
   2451 
   2452 	return rc;
   2453 }
   2454 
   2455 
   2456 /************************************************************************
   2457  * ixv_ioctl - Ioctl entry point
   2458  *
   2459  *   Called when the user wants to configure the interface.
   2460  *
   2461  *   return 0 on success, positive on failure
   2462  ************************************************************************/
   2463 static int
   2464 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2465 {
   2466 	struct adapter	*adapter = ifp->if_softc;
   2467 	struct ifcapreq *ifcr = data;
   2468 	struct ifreq	*ifr = data;
   2469 	int             error = 0;
   2470 	int l4csum_en;
   2471 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2472 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2473 
   2474 	switch (command) {
   2475 	case SIOCSIFFLAGS:
   2476 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2477 		break;
   2478 	case SIOCADDMULTI:
   2479 	case SIOCDELMULTI:
   2480 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2481 		break;
   2482 	case SIOCSIFMEDIA:
   2483 	case SIOCGIFMEDIA:
   2484 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2485 		break;
   2486 	case SIOCSIFCAP:
   2487 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2488 		break;
   2489 	case SIOCSIFMTU:
   2490 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2491 		break;
   2492 	default:
   2493 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2494 		break;
   2495 	}
   2496 
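	/* The switch above only logs; handle the request here. */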
   2497 	switch (command) {
   2498 	case SIOCSIFMEDIA:
   2499 	case SIOCGIFMEDIA:
   2500 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2501 	case SIOCSIFCAP:
   2502 		/* Layer-4 Rx checksum offload has to be turned on and
   2503 		 * off as a unit.
   2504 		 */
   2505 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2506 		if (l4csum_en != l4csum && l4csum_en != 0)
   2507 			return EINVAL;
   2508 		/*FALLTHROUGH*/
   2509 	case SIOCADDMULTI:
   2510 	case SIOCDELMULTI:
   2511 	case SIOCSIFFLAGS:
   2512 	case SIOCSIFMTU:
   2513 	default:
   2514 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2515 			return error;
   2516 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2517 			;
   2518 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2519 			IXGBE_CORE_LOCK(adapter);
   2520 			ixv_init_locked(adapter);
   2521 			IXGBE_CORE_UNLOCK(adapter);
   2522 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2523 			/*
   2524 			 * Multicast list has changed; set the hardware filter
   2525 			 * accordingly.
   2526 			 */
   2527 			IXGBE_CORE_LOCK(adapter);
   2528 			ixv_disable_intr(adapter);
   2529 			ixv_set_multi(adapter);
   2530 			ixv_enable_intr(adapter);
   2531 			IXGBE_CORE_UNLOCK(adapter);
   2532 		}
   2533 		return 0;
   2534 	}
   2535 } /* ixv_ioctl */
   2536 
   2537 /************************************************************************
   2538  * ixv_init
   2539  ************************************************************************/
   2540 static int
   2541 ixv_init(struct ifnet *ifp)
   2542 {
   2543 	struct adapter *adapter = ifp->if_softc;
   2544 
   2545 	IXGBE_CORE_LOCK(adapter);
   2546 	ixv_init_locked(adapter);
   2547 	IXGBE_CORE_UNLOCK(adapter);
   2548 
   2549 	return 0;
   2550 } /* ixv_init */
   2551 
   2552 
   2553 /************************************************************************
   2554  * ixv_handle_que
   2555  ************************************************************************/
   2556 static void
   2557 ixv_handle_que(void *context)
   2558 {
   2559 	struct ix_queue *que = context;
   2560 	struct adapter  *adapter = que->adapter;
   2561 	struct tx_ring	*txr = que->txr;
   2562 	struct ifnet    *ifp = adapter->ifp;
   2563 	bool		more;
   2564 
   2565 	adapter->handleq.ev_count++;
   2566 
   2567 	if (ifp->if_flags & IFF_RUNNING) {
   2568 		more = ixgbe_rxeof(que);
   2569 		IXGBE_TX_LOCK(txr);
   2570 		ixgbe_txeof(txr);
   2571 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2572 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2573 				ixgbe_mq_start_locked(ifp, txr);
   2574 		/* Only for queue 0 */
   2575 		/* NetBSD still needs this for CBQ */
   2576 		if ((&adapter->queues[0] == que)
   2577 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2578 			ixgbe_legacy_start_locked(ifp, txr);
   2579 		IXGBE_TX_UNLOCK(txr);
   2580 		if (more) {
   2581 			adapter->req.ev_count++;
   2582 			softint_schedule(que->que_si);
   2583 			return;
   2584 		}
   2585 	}
   2586 
   2587 	/* Re-enable this interrupt */
   2588 	ixv_enable_queue(adapter, que->msix);
   2589 
   2590 	return;
   2591 } /* ixv_handle_que */
   2592 
   2593 /************************************************************************
   2594  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2595  ************************************************************************/
   2596 static int
   2597 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2598 {
   2599 	device_t	dev = adapter->dev;
   2600 	struct ix_queue *que = adapter->queues;
   2601 	struct		tx_ring *txr = adapter->tx_rings;
   2602 	int 		error, msix_ctrl, rid, vector = 0;
   2603 	pci_chipset_tag_t pc;
   2604 	pcitag_t	tag;
   2605 	char		intrbuf[PCI_INTRSTR_LEN];
   2606 	char		intr_xname[32];
   2607 	const char	*intrstr = NULL;
   2608 	kcpuset_t	*affinity;
   2609 	int		cpu_id = 0;
   2610 
   2611 	pc = adapter->osdep.pc;
   2612 	tag = adapter->osdep.tag;
   2613 
   2614 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2615 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2616 	    adapter->osdep.nintrs) != 0) {
   2617 		aprint_error_dev(dev,
   2618 		    "failed to allocate MSI-X interrupt\n");
   2619 		return (ENXIO);
   2620 	}
   2621 
   2622 	kcpuset_create(&affinity, false);
   2623 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2624 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2625 		    device_xname(dev), i);
   2626 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2627 		    sizeof(intrbuf));
   2628 #ifdef IXGBE_MPSAFE
   2629 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2630 		    true);
   2631 #endif
   2632 		/* Set the handler function */
   2633 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2634 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2635 		    intr_xname);
   2636 		if (que->res == NULL) {
   2637 			pci_intr_release(pc, adapter->osdep.intrs,
   2638 			    adapter->osdep.nintrs);
   2639 			aprint_error_dev(dev,
   2640 			    "Failed to register QUE handler\n");
   2641 			kcpuset_destroy(affinity);
   2642 			return (ENXIO);
   2643 		}
   2644 		que->msix = vector;
    2645 		adapter->active_queues |= (u64)1 << que->msix;
   2646 
   2647 		cpu_id = i;
   2648 		/* Round-robin affinity */
   2649 		kcpuset_zero(affinity);
   2650 		kcpuset_set(affinity, cpu_id % ncpu);
   2651 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2652 		    NULL);
   2653 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2654 		    intrstr);
   2655 		if (error == 0)
   2656 			aprint_normal(", bound queue %d to cpu %d\n",
   2657 			    i, cpu_id % ncpu);
   2658 		else
   2659 			aprint_normal("\n");
   2660 
   2661 #ifndef IXGBE_LEGACY_TX
   2662 		txr->txr_si
   2663 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2664 			ixgbe_deferred_mq_start, txr);
   2665 #endif
   2666 		que->que_si
   2667 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2668 			ixv_handle_que, que);
   2669 		if (que->que_si == NULL) {
   2670 			aprint_error_dev(dev,
   2671 			    "could not establish software interrupt\n");
   2672 		}
   2673 	}
   2674 
   2675 	/* and Mailbox */
   2676 	cpu_id++;
   2677 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2678 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2679 	    sizeof(intrbuf));
   2680 #ifdef IXGBE_MPSAFE
   2681 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2682 	    true);
   2683 #endif
   2684 	/* Set the mbx handler function */
   2685 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2686 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2687 	    intr_xname);
   2688 	if (adapter->osdep.ihs[vector] == NULL) {
   2689 		adapter->res = NULL;
   2690 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2691 		kcpuset_destroy(affinity);
   2692 		return (ENXIO);
   2693 	}
   2694 	/* Round-robin affinity */
   2695 	kcpuset_zero(affinity);
   2696 	kcpuset_set(affinity, cpu_id % ncpu);
    2697 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   2698 
   2699 	aprint_normal_dev(dev,
   2700 	    "for link, interrupting at %s", intrstr);
   2701 	if (error == 0)
   2702 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2703 	else
   2704 		aprint_normal("\n");
   2705 
   2706 	adapter->vector = vector;
   2707 	/* Tasklets for Mailbox */
    2708 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2709 	    ixv_handle_link, adapter);
    2710 	/*
    2711 	 * Due to a broken design, QEMU will fail to properly
    2712 	 * enable the guest for MSI-X unless the vectors in
    2713 	 * the table are all set up, so we must rewrite the
    2714 	 * ENABLE bit in the MSI-X control register again at
    2715 	 * this point to cause it to successfully initialize us.
    2716 	 */
   2717 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2718 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2719 		rid += PCI_MSIX_CTL;
   2720 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2721 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2722 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2723 	}
   2724 
   2725 	kcpuset_destroy(affinity);
   2726 	return (0);
   2727 } /* ixv_allocate_msix */
   2728 
   2729 /************************************************************************
   2730  * ixv_configure_interrupts - Setup MSI-X resources
   2731  *
   2732  *   Note: The VF device MUST use MSI-X, there is no fallback.
   2733  ************************************************************************/
   2734 static int
   2735 ixv_configure_interrupts(struct adapter *adapter)
   2736 {
   2737 	device_t dev = adapter->dev;
   2738 	int want, queues, msgs;
   2739 
   2740 	/* Must have at least 2 MSI-X vectors */
   2741 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2742 	if (msgs < 2) {
   2743 		aprint_error_dev(dev, "MSIX config error\n");
   2744 		return (ENXIO);
   2745 	}
   2746 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2747 
   2748 	/* Figure out a reasonable auto config value */
   2749 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2750 
   2751 	if (ixv_num_queues != 0)
   2752 		queues = ixv_num_queues;
   2753 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   2754 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2755 
   2756 	/*
   2757 	 * Want vectors for the queues,
   2758 	 * plus an additional for mailbox.
   2759 	 */
   2760 	want = queues + 1;
   2761 	if (msgs >= want)
   2762 		msgs = want;
   2763 	else {
    2764 		aprint_error_dev(dev,
    2765 		    "MSI-X Configuration Problem, "
    2766 		    "%d vectors available but %d wanted!\n",
    2767 		    msgs, want);
   2768 		return -1;
   2769 	}
   2770 
   2771 	adapter->msix_mem = (void *)1; /* XXX */
   2772 	aprint_normal_dev(dev,
   2773 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2774 	adapter->num_queues = queues;
   2775 
   2776 	return (0);
   2777 } /* ixv_configure_interrupts */
   2778 
   2779 
   2780 /************************************************************************
   2781  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2782  *
   2783  *   Done outside of interrupt context since the driver might sleep
   2784  ************************************************************************/
   2785 static void
   2786 ixv_handle_link(void *context)
   2787 {
   2788 	struct adapter *adapter = context;
   2789 
   2790 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2791 	    &adapter->link_up, FALSE);
   2792 	ixv_update_link_status(adapter);
   2793 } /* ixv_handle_link */
   2794 
   2795 /************************************************************************
   2796  * ixv_check_link - Used in the local timer to poll for link changes
   2797  ************************************************************************/
   2798 static void
   2799 ixv_check_link(struct adapter *adapter)
   2800 {
   2801 	adapter->hw.mac.get_link_status = TRUE;
   2802 
   2803 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2804 	    &adapter->link_up, FALSE);
   2805 	ixv_update_link_status(adapter);
   2806 } /* ixv_check_link */
   2807