      1 /*$NetBSD: ixv.c,v 1.70 2017/10/13 04:52:40 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
     55  *   Used by probe to select devices to load on
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
     148  * Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
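         /*
          * TUNABLE_INT() is defined away below, so on NetBSD these statics
          * act only as compile-time defaults (there is no FreeBSD-style
          * loader tunable mechanism here).
          */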
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
    171 #define	TUNABLE_INT(__x, __y)
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    173 
    174 /*
    175  * AIM: Adaptive Interrupt Moderation
    176  * which means that the interrupt rate
    177  * is varied over time based on the
    178  * traffic for that interrupt vector
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
    191 /*
    192  * Number of TX descriptors per ring,
    193  * setting higher than RX as this seems
    194  * the better performing choice.
    195  */
    196 static int ixv_txd = PERFORM_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = PERFORM_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
    207 #ifdef NET_MPSAFE
    208 #define IXGBE_MPSAFE		1
    209 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    210 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    211 #else
    212 #define IXGBE_CALLOUT_FLAGS	0
    213 #define IXGBE_SOFTINFT_FLAGS	0
    214 #endif
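
         /*
          * With NET_MPSAFE the timer callout and the deferred softints are
          * created MP-safe (run without the kernel lock); otherwise they use
          * the default, kernel-locked behaviour.
          */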
    215 
    216 #if 0
    217 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    218 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    219 #endif
    220 
    221 /************************************************************************
    222  * ixv_probe - Device identification routine
    223  *
    224  *   Determines if the driver should be loaded on
    225  *   adapter based on its PCI vendor/device ID.
    226  *
     227  *   return 1 on a supported device, 0 otherwise
    228  ************************************************************************/
    229 static int
    230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    231 {
    232 #ifdef __HAVE_PCI_MSI_MSIX
    233 	const struct pci_attach_args *pa = aux;
    234 
    235 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    236 #else
    237 	return 0;
    238 #endif
    239 } /* ixv_probe */
    240 
    241 static ixgbe_vendor_info_t *
    242 ixv_lookup(const struct pci_attach_args *pa)
    243 {
    244 	ixgbe_vendor_info_t *ent;
    245 	pcireg_t subid;
    246 
    247 	INIT_DEBUGOUT("ixv_lookup: begin");
    248 
    249 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    250 		return NULL;
    251 
    252 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    253 
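         	/* A zero subvendor/subdevice ID in the table acts as a wildcard */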
    254 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    255 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    256 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    257 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    258 		     (ent->subvendor_id == 0)) &&
    259 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    260 		     (ent->subdevice_id == 0))) {
    261 			return ent;
    262 		}
    263 	}
    264 
    265 	return NULL;
    266 }
    267 
    268 /************************************************************************
    269  * ixv_attach - Device initialization routine
    270  *
    271  *   Called when the driver is being loaded.
    272  *   Identifies the type of hardware, allocates all resources
    273  *   and initializes the hardware.
    274  *
     275  *   (no return value; on failure the attach aborts and releases its resources)
    276  ************************************************************************/
    277 static void
    278 ixv_attach(device_t parent, device_t dev, void *aux)
    279 {
    280 	struct adapter *adapter;
    281 	struct ixgbe_hw *hw;
    282 	int             error = 0;
    283 	pcireg_t	id, subid;
    284 	ixgbe_vendor_info_t *ent;
    285 	const struct pci_attach_args *pa = aux;
    286 	const char *apivstr;
    287 	const char *str;
    288 	char buf[256];
    289 
    290 	INIT_DEBUGOUT("ixv_attach: begin");
    291 
    292 	/*
     293 	 * Make sure BUSMASTER is set; on a VM under
     294 	 * KVM it may not be, which would break things.
    295 	 */
    296 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    297 
    298 	/* Allocate, clear, and link in our adapter structure */
    299 	adapter = device_private(dev);
    300 	adapter->dev = dev;
    301 	adapter->hw.back = adapter;
    302 	hw = &adapter->hw;
    303 
    304 	adapter->init_locked = ixv_init_locked;
    305 	adapter->stop_locked = ixv_stop;
    306 
    307 	adapter->osdep.pc = pa->pa_pc;
    308 	adapter->osdep.tag = pa->pa_tag;
    309 	if (pci_dma64_available(pa))
    310 		adapter->osdep.dmat = pa->pa_dmat64;
    311 	else
    312 		adapter->osdep.dmat = pa->pa_dmat;
    313 	adapter->osdep.attached = false;
    314 
    315 	ent = ixv_lookup(pa);
    316 
    317 	KASSERT(ent != NULL);
    318 
    319 	aprint_normal(": %s, Version - %s\n",
    320 	    ixv_strings[ent->index], ixv_driver_version);
    321 
    322 	/* Core Lock Init*/
    323 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    324 
    325 	/* Do base PCI setup - map BAR0 */
    326 	if (ixv_allocate_pci_resources(adapter, pa)) {
    327 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    328 		error = ENXIO;
    329 		goto err_out;
    330 	}
    331 
    332 	/* SYSCTL APIs */
    333 	ixv_add_device_sysctls(adapter);
    334 
    335 	/* Set up the timer callout */
    336 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    337 
    338 	/* Save off the information about this board */
    339 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    340 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    341 	hw->vendor_id = PCI_VENDOR(id);
    342 	hw->device_id = PCI_PRODUCT(id);
    343 	hw->revision_id =
    344 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    345 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    346 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    347 
    348 	/* A subset of set_mac_type */
    349 	switch (hw->device_id) {
    350 	case IXGBE_DEV_ID_82599_VF:
    351 		hw->mac.type = ixgbe_mac_82599_vf;
    352 		str = "82599 VF";
    353 		break;
    354 	case IXGBE_DEV_ID_X540_VF:
    355 		hw->mac.type = ixgbe_mac_X540_vf;
    356 		str = "X540 VF";
    357 		break;
    358 	case IXGBE_DEV_ID_X550_VF:
    359 		hw->mac.type = ixgbe_mac_X550_vf;
    360 		str = "X550 VF";
    361 		break;
    362 	case IXGBE_DEV_ID_X550EM_X_VF:
    363 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    364 		str = "X550EM X VF";
    365 		break;
    366 	case IXGBE_DEV_ID_X550EM_A_VF:
    367 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    368 		str = "X550EM A VF";
    369 		break;
    370 	default:
    371 		/* Shouldn't get here since probe succeeded */
    372 		aprint_error_dev(dev, "Unknown device ID!\n");
    373 		error = ENXIO;
    374 		goto err_out;
    375 		break;
    376 	}
    377 	aprint_normal_dev(dev, "device %s\n", str);
    378 
    379 	ixv_init_device_features(adapter);
    380 
    381 	/* Initialize the shared code */
    382 	error = ixgbe_init_ops_vf(hw);
    383 	if (error) {
    384 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    385 		error = EIO;
    386 		goto err_out;
    387 	}
    388 
    389 	/* Setup the mailbox */
    390 	ixgbe_init_mbx_params_vf(hw);
    391 
    392 	/* Set the right number of segments */
    393 	adapter->num_segs = IXGBE_82599_SCATTER;
    394 
    395 	/* Reset mbox api to 1.0 */
    396 	error = hw->mac.ops.reset_hw(hw);
    397 	if (error == IXGBE_ERR_RESET_FAILED)
    398 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    399 	else if (error)
    400 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    401 		    error);
    402 	if (error) {
    403 		error = EIO;
    404 		goto err_out;
    405 	}
    406 
    407 	error = hw->mac.ops.init_hw(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "...init_hw() failed!\n");
    410 		error = EIO;
    411 		goto err_out;
    412 	}
    413 
    414 	/* Negotiate mailbox API version */
    415 	error = ixv_negotiate_api(adapter);
    416 	if (error)
    417 		aprint_normal_dev(dev,
    418 		    "MBX API negotiation failed during attach!\n");
    419 	switch (hw->api_version) {
    420 	case ixgbe_mbox_api_10:
    421 		apivstr = "1.0";
    422 		break;
    423 	case ixgbe_mbox_api_20:
    424 		apivstr = "2.0";
    425 		break;
    426 	case ixgbe_mbox_api_11:
    427 		apivstr = "1.1";
    428 		break;
    429 	case ixgbe_mbox_api_12:
    430 		apivstr = "1.2";
    431 		break;
    432 	case ixgbe_mbox_api_13:
    433 		apivstr = "1.3";
    434 		break;
    435 	default:
    436 		apivstr = "unknown";
    437 		break;
    438 	}
    439 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    440 
    441 	/* If no mac address was assigned, make a random one */
    442 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    443 		u8 addr[ETHER_ADDR_LEN];
    444 		uint64_t rndval = cprng_strong64();
    445 
    446 		memcpy(addr, &rndval, sizeof(addr));
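         		/* Clear the multicast bit and set the locally-administered bit */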
    447 		addr[0] &= 0xFE;
    448 		addr[0] |= 0x02;
    449 		bcopy(addr, hw->mac.addr, sizeof(addr));
    450 	}
    451 
    452 	/* Register for VLAN events */
    453 #if 0 /* XXX delete after write? */
    454 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    455 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    457 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    458 #endif
    459 
    460 	/* Sysctls for limiting the amount of work done in the taskqueues */
    461 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    462 	    "max number of rx packets to process",
    463 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    464 
    465 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    466 	    "max number of tx packets to process",
    467 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    468 
    469 	/* Do descriptor calc and sanity checks */
    470 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    471 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    472 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    473 		adapter->num_tx_desc = DEFAULT_TXD;
    474 	} else
    475 		adapter->num_tx_desc = ixv_txd;
    476 
    477 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    478 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    479 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    480 		adapter->num_rx_desc = DEFAULT_RXD;
    481 	} else
    482 		adapter->num_rx_desc = ixv_rxd;
    483 
    484 	/* Setup MSI-X */
    485 	error = ixv_configure_interrupts(adapter);
    486 	if (error)
    487 		goto err_out;
    488 
    489 	/* Allocate our TX/RX Queues */
    490 	if (ixgbe_allocate_queues(adapter)) {
    491 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    492 		error = ENOMEM;
    493 		goto err_out;
    494 	}
    495 
    496 	/* hw.ix defaults init */
    497 	adapter->enable_aim = ixv_enable_aim;
    498 
    499 	/* Setup OS specific network interface */
    500 	ixv_setup_interface(dev, adapter);
    501 
    502 	error = ixv_allocate_msix(adapter, pa);
    503 	if (error) {
    504 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    505 		goto err_late;
    506 	}
    507 
    508 	/* Do the stats setup */
    509 	ixv_save_stats(adapter);
    510 	ixv_init_stats(adapter);
    511 	ixv_add_stats_sysctls(adapter);
    512 
    513 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    514 		ixgbe_netmap_attach(adapter);
    515 
    516 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    517 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    518 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    519 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    520 
    521 	INIT_DEBUGOUT("ixv_attach: end");
    522 	adapter->osdep.attached = true;
    523 
    524 	return;
    525 
    526 err_late:
    527 	ixgbe_free_transmit_structures(adapter);
    528 	ixgbe_free_receive_structures(adapter);
    529 	free(adapter->queues, M_DEVBUF);
    530 err_out:
    531 	ixv_free_pci_resources(adapter);
    532 	IXGBE_CORE_LOCK_DESTROY(adapter);
    533 
    534 	return;
    535 } /* ixv_attach */
    536 
    537 /************************************************************************
    538  * ixv_detach - Device removal routine
    539  *
    540  *   Called when the driver is being removed.
    541  *   Stops the adapter and deallocates all the resources
    542  *   that were allocated for driver operation.
    543  *
    544  *   return 0 on success, positive on failure
    545  ************************************************************************/
    546 static int
    547 ixv_detach(device_t dev, int flags)
    548 {
    549 	struct adapter  *adapter = device_private(dev);
    550 	struct ixgbe_hw *hw = &adapter->hw;
    551 	struct ix_queue *que = adapter->queues;
    552 	struct tx_ring *txr = adapter->tx_rings;
    553 	struct rx_ring *rxr = adapter->rx_rings;
    554 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    555 
    556 	INIT_DEBUGOUT("ixv_detach: begin");
    557 	if (adapter->osdep.attached == false)
    558 		return 0;
    559 
    560 	/* Stop the interface. Callouts are stopped in it. */
    561 	ixv_ifstop(adapter->ifp, 1);
    562 
    563 #if NVLAN > 0
    564 	/* Make sure VLANs are not using driver */
    565 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    566 		;	/* nothing to do: no VLANs */
    567 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    568 		vlan_ifdetach(adapter->ifp);
    569 	else {
    570 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    571 		return EBUSY;
    572 	}
    573 #endif
    574 
    575 	IXGBE_CORE_LOCK(adapter);
    576 	ixv_stop(adapter);
    577 	IXGBE_CORE_UNLOCK(adapter);
    578 
    579 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    580 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    581 			softint_disestablish(txr->txr_si);
    582 		softint_disestablish(que->que_si);
    583 	}
    584 
    585 	/* Drain the Mailbox(link) queue */
    586 	softint_disestablish(adapter->link_si);
    587 
    588 	/* Unregister VLAN events */
    589 #if 0 /* XXX msaitoh delete after write? */
    590 	if (adapter->vlan_attach != NULL)
    591 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    592 	if (adapter->vlan_detach != NULL)
    593 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    594 #endif
    595 
    596 	ether_ifdetach(adapter->ifp);
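         	/* callout_halt() also waits for a running timer to complete */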
    597 	callout_halt(&adapter->timer, NULL);
    598 
    599 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    600 		netmap_detach(adapter->ifp);
    601 
    602 	ixv_free_pci_resources(adapter);
    603 #if 0 /* XXX the NetBSD port is probably missing something here */
    604 	bus_generic_detach(dev);
    605 #endif
    606 	if_detach(adapter->ifp);
    607 	if_percpuq_destroy(adapter->ipq);
    608 
    609 	sysctl_teardown(&adapter->sysctllog);
    610 	evcnt_detach(&adapter->handleq);
    611 	evcnt_detach(&adapter->req);
    612 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    613 	evcnt_detach(&adapter->mbuf_defrag_failed);
    614 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    615 	evcnt_detach(&adapter->einval_tx_dma_setup);
    616 	evcnt_detach(&adapter->other_tx_dma_setup);
    617 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    618 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    619 	evcnt_detach(&adapter->watchdog_events);
    620 	evcnt_detach(&adapter->tso_err);
    621 	evcnt_detach(&adapter->link_irq);
    622 
    623 	txr = adapter->tx_rings;
    624 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    625 		evcnt_detach(&adapter->queues[i].irqs);
    626 		evcnt_detach(&txr->no_desc_avail);
    627 		evcnt_detach(&txr->total_packets);
    628 		evcnt_detach(&txr->tso_tx);
    629 #ifndef IXGBE_LEGACY_TX
    630 		evcnt_detach(&txr->pcq_drops);
    631 #endif
    632 
    633 		evcnt_detach(&rxr->rx_packets);
    634 		evcnt_detach(&rxr->rx_bytes);
    635 		evcnt_detach(&rxr->rx_copies);
    636 		evcnt_detach(&rxr->no_jmbuf);
    637 		evcnt_detach(&rxr->rx_discarded);
    638 	}
    639 	evcnt_detach(&stats->ipcs);
    640 	evcnt_detach(&stats->l4cs);
    641 	evcnt_detach(&stats->ipcs_bad);
    642 	evcnt_detach(&stats->l4cs_bad);
    643 
    644 	/* Packet Reception Stats */
    645 	evcnt_detach(&stats->vfgorc);
    646 	evcnt_detach(&stats->vfgprc);
    647 	evcnt_detach(&stats->vfmprc);
    648 
    649 	/* Packet Transmission Stats */
    650 	evcnt_detach(&stats->vfgotc);
    651 	evcnt_detach(&stats->vfgptc);
    652 
    653 	/* Mailbox Stats */
    654 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    655 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    656 	evcnt_detach(&hw->mbx.stats.acks);
    657 	evcnt_detach(&hw->mbx.stats.reqs);
    658 	evcnt_detach(&hw->mbx.stats.rsts);
    659 
    660 	ixgbe_free_transmit_structures(adapter);
    661 	ixgbe_free_receive_structures(adapter);
    662 	free(adapter->queues, M_DEVBUF);
    663 
    664 	IXGBE_CORE_LOCK_DESTROY(adapter);
    665 
    666 	return (0);
    667 } /* ixv_detach */
    668 
    669 /************************************************************************
    670  * ixv_init_locked - Init entry point
    671  *
    672  *   Used in two ways: It is used by the stack as an init entry
    673  *   point in network interface structure. It is also used
    674  *   by the driver as a hw/sw initialization routine to get
    675  *   to a consistent state.
    676  *
     677  *   (no return value; on failure the adapter is stopped)
    678  ************************************************************************/
    679 static void
    680 ixv_init_locked(struct adapter *adapter)
    681 {
    682 	struct ifnet	*ifp = adapter->ifp;
    683 	device_t 	dev = adapter->dev;
    684 	struct ixgbe_hw *hw = &adapter->hw;
    685 	struct ix_queue	*que = adapter->queues;
    686 	int             error = 0;
    687 	uint32_t mask;
    688 	int i;
    689 
    690 	INIT_DEBUGOUT("ixv_init_locked: begin");
    691 	KASSERT(mutex_owned(&adapter->core_mtx));
    692 	hw->adapter_stopped = FALSE;
    693 	hw->mac.ops.stop_adapter(hw);
    694 	callout_stop(&adapter->timer);
    695 
    696 	/* reprogram the RAR[0] in case user changed it. */
    697 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    698 
    699 	/* Get the latest mac address, User can use a LAA */
    700 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    701 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    702 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    703 
    704 	/* Prepare transmit descriptors and buffers */
    705 	if (ixgbe_setup_transmit_structures(adapter)) {
    706 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    707 		ixv_stop(adapter);
    708 		return;
    709 	}
    710 
    711 	/* Reset VF and renegotiate mailbox API version */
    712 	hw->mac.ops.reset_hw(hw);
    713 	error = ixv_negotiate_api(adapter);
    714 	if (error)
    715 		device_printf(dev,
    716 		    "Mailbox API negotiation failed in init_locked!\n");
    717 
    718 	ixv_initialize_transmit_units(adapter);
    719 
    720 	/* Setup Multicast table */
    721 	ixv_set_multi(adapter);
    722 
    723 	/*
    724 	 * Determine the correct mbuf pool
    725 	 * for doing jumbo/headersplit
    726 	 */
    727 	if (ifp->if_mtu > ETHERMTU)
    728 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    729 	else
    730 		adapter->rx_mbuf_sz = MCLBYTES;
    731 
    732 	/* Prepare receive descriptors and buffers */
    733 	if (ixgbe_setup_receive_structures(adapter)) {
    734 		device_printf(dev, "Could not setup receive structures\n");
    735 		ixv_stop(adapter);
    736 		return;
    737 	}
    738 
    739 	/* Configure RX settings */
    740 	ixv_initialize_receive_units(adapter);
    741 
    742 #if 0 /* XXX isn't it required? -- msaitoh  */
    743 	/* Set the various hardware offload abilities */
    744 	ifp->if_hwassist = 0;
    745 	if (ifp->if_capenable & IFCAP_TSO4)
    746 		ifp->if_hwassist |= CSUM_TSO;
    747 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    748 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    749 #if __FreeBSD_version >= 800000
    750 		ifp->if_hwassist |= CSUM_SCTP;
    751 #endif
    752 	}
    753 #endif
    754 
    755 	/* Set up VLAN offload and filter */
    756 	ixv_setup_vlan_support(adapter);
    757 
    758 	/* Set up MSI-X routing */
    759 	ixv_configure_ivars(adapter);
    760 
    761 	/* Set up auto-mask */
    762 	mask = (1 << adapter->vector);
    763 	for (i = 0; i < adapter->num_queues; i++, que++)
    764 		mask |= (1 << que->msix);
    765 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
    766 
    767 	/* Set moderation on the Link interrupt */
    768 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    769 
    770 	/* Stats init */
    771 	ixv_init_stats(adapter);
    772 
    773 	/* Config/Enable Link */
    774 	hw->mac.get_link_status = TRUE;
    775 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    776 	    FALSE);
    777 
    778 	/* Start watchdog */
    779 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    780 
    781 	/* And now turn on interrupts */
    782 	ixv_enable_intr(adapter);
    783 
    784 	/* Now inform the stack we're ready */
    785 	ifp->if_flags |= IFF_RUNNING;
    786 	ifp->if_flags &= ~IFF_OACTIVE;
    787 
    788 	return;
    789 } /* ixv_init_locked */
    790 
    791 /*
    792  * MSI-X Interrupt Handlers and Tasklets
    793  */
    794 
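         /*
          * ixv_enable_queue()/ixv_disable_queue() set or clear a queue's bit
          * in the VF interrupt mask (VTEIMS enables, VTEIMC masks), and
          * ixv_rearm_queues() writes VTEICS to force an interrupt on queues
          * that still have work pending.
          */
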
    795 static inline void
    796 ixv_enable_queue(struct adapter *adapter, u32 vector)
    797 {
    798 	struct ixgbe_hw *hw = &adapter->hw;
    799 	u32             queue = 1 << vector;
    800 	u32             mask;
    801 
    802 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    803 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    804 } /* ixv_enable_queue */
    805 
    806 static inline void
    807 ixv_disable_queue(struct adapter *adapter, u32 vector)
    808 {
    809 	struct ixgbe_hw *hw = &adapter->hw;
     810 	u64             queue = (u64)1 << vector;
    811 	u32             mask;
    812 
    813 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    814 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    815 } /* ixv_disable_queue */
    816 
    817 static inline void
    818 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    819 {
    820 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    821 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    822 } /* ixv_rearm_queues */
    823 
    824 
    825 /************************************************************************
    826  * ixv_msix_que - MSI Queue Interrupt Service routine
    827  ************************************************************************/
    828 static int
    829 ixv_msix_que(void *arg)
    830 {
    831 	struct ix_queue	*que = arg;
    832 	struct adapter  *adapter = que->adapter;
    833 	struct tx_ring	*txr = que->txr;
    834 	struct rx_ring	*rxr = que->rxr;
    835 	bool		more;
    836 	u32		newitr = 0;
    837 
    838 	ixv_disable_queue(adapter, que->msix);
    839 	++que->irqs.ev_count;
    840 
    841 #ifdef __NetBSD__
    842 	/* Don't run ixgbe_rxeof in interrupt context */
    843 	more = true;
    844 #else
    845 	more = ixgbe_rxeof(que);
    846 #endif
    847 
    848 	IXGBE_TX_LOCK(txr);
    849 	ixgbe_txeof(txr);
    850 	IXGBE_TX_UNLOCK(txr);
    851 
    852 	/* Do AIM now? */
    853 
    854 	if (adapter->enable_aim == false)
    855 		goto no_calc;
    856 	/*
    857 	 * Do Adaptive Interrupt Moderation:
    858 	 *  - Write out last calculated setting
    859 	 *  - Calculate based on average size over
    860 	 *    the last interval.
    861 	 */
    862 	if (que->eitr_setting)
    863 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    864 		    que->eitr_setting);
    865 
    866 	que->eitr_setting = 0;
    867 
    868 	/* Idle, do nothing */
    869 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    870 		goto no_calc;
    871 
    872 	if ((txr->bytes) && (txr->packets))
    873 		newitr = txr->bytes/txr->packets;
    874 	if ((rxr->bytes) && (rxr->packets))
    875 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    876 	newitr += 24; /* account for hardware frame, crc */
    877 
    878 	/* set an upper boundary */
    879 	newitr = min(newitr, 3000);
    880 
    881 	/* Be nice to the mid range */
    882 	if ((newitr > 300) && (newitr < 1200))
    883 		newitr = (newitr / 3);
    884 	else
    885 		newitr = (newitr / 2);
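
         	/*
         	 * Example: an average frame of 1500 bytes gives newitr = 1524,
         	 * which is outside the 300-1200 mid range and is therefore
         	 * halved to 762 before being stored in que->eitr_setting for
         	 * the next interrupt.
         	 */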
    886 
    887 	newitr |= newitr << 16;
    888 
    889 	/* save for next interrupt */
    890 	que->eitr_setting = newitr;
    891 
    892 	/* Reset state */
    893 	txr->bytes = 0;
    894 	txr->packets = 0;
    895 	rxr->bytes = 0;
    896 	rxr->packets = 0;
    897 
    898 no_calc:
    899 	if (more)
    900 		softint_schedule(que->que_si);
    901 	else /* Re-enable this interrupt */
    902 		ixv_enable_queue(adapter, que->msix);
    903 
    904 	return 1;
    905 } /* ixv_msix_que */
    906 
    907 /************************************************************************
    908  * ixv_msix_mbx
    909  ************************************************************************/
    910 static int
    911 ixv_msix_mbx(void *arg)
    912 {
    913 	struct adapter	*adapter = arg;
    914 	struct ixgbe_hw *hw = &adapter->hw;
    915 
    916 	++adapter->link_irq.ev_count;
    917 	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    918 
    919 	/* Link status change */
    920 	hw->mac.get_link_status = TRUE;
    921 	softint_schedule(adapter->link_si);
    922 
    923 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
    924 
    925 	return 1;
    926 } /* ixv_msix_mbx */
    927 
    928 /************************************************************************
    929  * ixv_media_status - Media Ioctl callback
    930  *
    931  *   Called whenever the user queries the status of
    932  *   the interface using ifconfig.
    933  ************************************************************************/
    934 static void
    935 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    936 {
    937 	struct adapter *adapter = ifp->if_softc;
    938 
    939 	INIT_DEBUGOUT("ixv_media_status: begin");
    940 	IXGBE_CORE_LOCK(adapter);
    941 	ixv_update_link_status(adapter);
    942 
    943 	ifmr->ifm_status = IFM_AVALID;
    944 	ifmr->ifm_active = IFM_ETHER;
    945 
    946 	if (!adapter->link_active) {
    947 		ifmr->ifm_active |= IFM_NONE;
    948 		IXGBE_CORE_UNLOCK(adapter);
    949 		return;
    950 	}
    951 
    952 	ifmr->ifm_status |= IFM_ACTIVE;
    953 
    954 	switch (adapter->link_speed) {
    955 		case IXGBE_LINK_SPEED_10GB_FULL:
    956 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    957 			break;
    958 		case IXGBE_LINK_SPEED_1GB_FULL:
    959 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    960 			break;
    961 		case IXGBE_LINK_SPEED_100_FULL:
    962 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    963 			break;
    964 		case IXGBE_LINK_SPEED_10_FULL:
    965 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    966 			break;
    967 	}
    968 
    969 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
    970 
    971 	IXGBE_CORE_UNLOCK(adapter);
    972 
    973 	return;
    974 } /* ixv_media_status */
    975 
    976 /************************************************************************
    977  * ixv_media_change - Media Ioctl callback
    978  *
    979  *   Called when the user changes speed/duplex using
     980  *   media/mediaopt options with ifconfig.
    981  ************************************************************************/
    982 static int
    983 ixv_media_change(struct ifnet *ifp)
    984 {
    985 	struct adapter *adapter = ifp->if_softc;
    986 	struct ifmedia *ifm = &adapter->media;
    987 
    988 	INIT_DEBUGOUT("ixv_media_change: begin");
    989 
    990 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    991 		return (EINVAL);
    992 
    993 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    994 	case IFM_AUTO:
    995 		break;
    996 	default:
    997 		device_printf(adapter->dev, "Only auto media type\n");
    998 		return (EINVAL);
    999 	}
   1000 
   1001 	return (0);
   1002 } /* ixv_media_change */
   1003 
   1004 
   1005 /************************************************************************
   1006  * ixv_negotiate_api
   1007  *
   1008  *   Negotiate the Mailbox API with the PF;
   1009  *   start with the most featured API first.
   1010  ************************************************************************/
   1011 static int
   1012 ixv_negotiate_api(struct adapter *adapter)
   1013 {
   1014 	struct ixgbe_hw *hw = &adapter->hw;
   1015 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1016 	                              ixgbe_mbox_api_10,
   1017 	                              ixgbe_mbox_api_unknown };
   1018 	int             i = 0;
   1019 
   1020 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1021 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1022 			return (0);
   1023 		i++;
   1024 	}
   1025 
   1026 	return (EINVAL);
   1027 } /* ixv_negotiate_api */
   1028 
   1029 
   1030 /************************************************************************
   1031  * ixv_set_multi - Multicast Update
   1032  *
   1033  *   Called whenever multicast address list is updated.
   1034  ************************************************************************/
   1035 static void
   1036 ixv_set_multi(struct adapter *adapter)
   1037 {
   1038 	struct ether_multi *enm;
   1039 	struct ether_multistep step;
   1040 	struct ethercom *ec = &adapter->osdep.ec;
   1041 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1042 	u8                 *update_ptr;
   1043 	int                mcnt = 0;
   1044 
   1045 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1046 
   1047 	ETHER_FIRST_MULTI(step, ec, enm);
   1048 	while (enm != NULL) {
   1049 		bcopy(enm->enm_addrlo,
   1050 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1051 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1052 		mcnt++;
   1053 		/* XXX This might be required --msaitoh */
   1054 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1055 			break;
   1056 		ETHER_NEXT_MULTI(step, enm);
   1057 	}
   1058 
   1059 	update_ptr = mta;
   1060 
   1061 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1062 	    ixv_mc_array_itr, TRUE);
   1063 
   1064 	return;
   1065 } /* ixv_set_multi */
   1066 
   1067 /************************************************************************
   1068  * ixv_mc_array_itr
   1069  *
   1070  *   An iterator function needed by the multicast shared code.
   1071  *   It feeds the shared code routine the addresses in the
   1072  *   array of ixv_set_multi() one by one.
   1073  ************************************************************************/
   1074 static u8 *
   1075 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1076 {
   1077 	u8 *addr = *update_ptr;
   1078 	u8 *newptr;
   1079 	*vmdq = 0;
   1080 
   1081 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1082 	*update_ptr = newptr;
   1083 
   1084 	return addr;
   1085 } /* ixv_mc_array_itr */
   1086 
   1087 /************************************************************************
   1088  * ixv_local_timer - Timer routine
   1089  *
   1090  *   Checks for link status, updates statistics,
   1091  *   and runs the watchdog check.
   1092  ************************************************************************/
   1093 static void
   1094 ixv_local_timer(void *arg)
   1095 {
   1096 	struct adapter *adapter = arg;
   1097 
   1098 	IXGBE_CORE_LOCK(adapter);
   1099 	ixv_local_timer_locked(adapter);
   1100 	IXGBE_CORE_UNLOCK(adapter);
   1101 }
   1102 
   1103 static void
   1104 ixv_local_timer_locked(void *arg)
   1105 {
   1106 	struct adapter	*adapter = arg;
   1107 	device_t	dev = adapter->dev;
   1108 	struct ix_queue	*que = adapter->queues;
   1109 	u64		queues = 0;
   1110 	int		hung = 0;
   1111 
   1112 	KASSERT(mutex_owned(&adapter->core_mtx));
   1113 
   1114 	ixv_check_link(adapter);
   1115 
   1116 	/* Stats Update */
   1117 	ixv_update_stats(adapter);
   1118 
   1119 	/*
   1120 	 * Check the TX queues status
   1121 	 *      - mark hung queues so we don't schedule on them
   1122 	 *      - watchdog only if all queues show hung
   1123 	 */
   1124 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1125 		/* Keep track of queues with work for soft irq */
   1126 		if (que->txr->busy)
   1127 			queues |= ((u64)1 << que->me);
   1128 		/*
   1129 		 * Each time txeof runs without cleaning, but there
   1130 		 * are uncleaned descriptors it increments busy. If
   1131 		 * we get to the MAX we declare it hung.
   1132 		 */
   1133 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1134 			++hung;
   1135 			/* Mark the queue as inactive */
   1136 			adapter->active_queues &= ~((u64)1 << que->me);
   1137 			continue;
   1138 		} else {
   1139 			/* Check if we've come back from hung */
   1140 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1141 				adapter->active_queues |= ((u64)1 << que->me);
   1142 		}
   1143 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1144 			device_printf(dev,
   1145 			    "Warning queue %d appears to be hung!\n", i);
   1146 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1147 			++hung;
   1148 		}
   1149 	}
   1150 
   1151 	/* Only truly watchdog if all queues show hung */
   1152 	if (hung == adapter->num_queues)
   1153 		goto watchdog;
   1154 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1155 		ixv_rearm_queues(adapter, queues);
   1156 	}
   1157 
   1158 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1159 
   1160 	return;
   1161 
   1162 watchdog:
   1163 
   1164 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1165 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1166 	adapter->watchdog_events.ev_count++;
   1167 	ixv_init_locked(adapter);
   1168 } /* ixv_local_timer */
   1169 
   1170 /************************************************************************
   1171  * ixv_update_link_status - Update OS on link state
   1172  *
   1173  * Note: Only updates the OS on the cached link state.
   1174  *       The real check of the hardware only happens with
   1175  *       a link interrupt.
   1176  ************************************************************************/
   1177 static void
   1178 ixv_update_link_status(struct adapter *adapter)
   1179 {
   1180 	struct ifnet *ifp = adapter->ifp;
   1181 	device_t     dev = adapter->dev;
   1182 
   1183 	if (adapter->link_up) {
   1184 		if (adapter->link_active == FALSE) {
   1185 			if (bootverbose) {
   1186 				const char *bpsmsg;
   1187 
   1188 				switch (adapter->link_speed) {
   1189 				case IXGBE_LINK_SPEED_10GB_FULL:
   1190 					bpsmsg = "10 Gbps";
   1191 					break;
   1192 				case IXGBE_LINK_SPEED_5GB_FULL:
   1193 					bpsmsg = "5 Gbps";
   1194 					break;
   1195 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1196 					bpsmsg = "2.5 Gbps";
   1197 					break;
   1198 				case IXGBE_LINK_SPEED_1GB_FULL:
   1199 					bpsmsg = "1 Gbps";
   1200 					break;
   1201 				case IXGBE_LINK_SPEED_100_FULL:
   1202 					bpsmsg = "100 Mbps";
   1203 					break;
   1204 				case IXGBE_LINK_SPEED_10_FULL:
   1205 					bpsmsg = "10 Mbps";
   1206 					break;
   1207 				default:
   1208 					bpsmsg = "unknown speed";
   1209 					break;
   1210 				}
   1211 				device_printf(dev, "Link is up %s %s \n",
   1212 				    bpsmsg, "Full Duplex");
   1213 			}
   1214 			adapter->link_active = TRUE;
   1215 			if_link_state_change(ifp, LINK_STATE_UP);
   1216 		}
   1217 	} else { /* Link down */
   1218 		if (adapter->link_active == TRUE) {
   1219 			if (bootverbose)
   1220 				device_printf(dev, "Link is Down\n");
   1221 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1222 			adapter->link_active = FALSE;
   1223 		}
   1224 	}
   1225 
   1226 	return;
   1227 } /* ixv_update_link_status */
   1228 
   1229 
   1230 /************************************************************************
   1231  * ixv_stop - Stop the hardware
   1232  *
   1233  *   Disables all traffic on the adapter by issuing a
   1234  *   global reset on the MAC and deallocates TX/RX buffers.
   1235  ************************************************************************/
   1236 static void
   1237 ixv_ifstop(struct ifnet *ifp, int disable)
   1238 {
   1239 	struct adapter *adapter = ifp->if_softc;
   1240 
   1241 	IXGBE_CORE_LOCK(adapter);
   1242 	ixv_stop(adapter);
   1243 	IXGBE_CORE_UNLOCK(adapter);
   1244 }
   1245 
   1246 static void
   1247 ixv_stop(void *arg)
   1248 {
   1249 	struct ifnet    *ifp;
   1250 	struct adapter  *adapter = arg;
   1251 	struct ixgbe_hw *hw = &adapter->hw;
   1252 
   1253 	ifp = adapter->ifp;
   1254 
   1255 	KASSERT(mutex_owned(&adapter->core_mtx));
   1256 
   1257 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1258 	ixv_disable_intr(adapter);
   1259 
   1260 	/* Tell the stack that the interface is no longer active */
   1261 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1262 
   1263 	hw->mac.ops.reset_hw(hw);
   1264 	adapter->hw.adapter_stopped = FALSE;
   1265 	hw->mac.ops.stop_adapter(hw);
   1266 	callout_stop(&adapter->timer);
   1267 
   1268 	/* reprogram the RAR[0] in case user changed it. */
   1269 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1270 
   1271 	return;
   1272 } /* ixv_stop */
   1273 
   1274 
   1275 /************************************************************************
   1276  * ixv_allocate_pci_resources
   1277  ************************************************************************/
   1278 static int
   1279 ixv_allocate_pci_resources(struct adapter *adapter,
   1280     const struct pci_attach_args *pa)
   1281 {
   1282 	pcireg_t	memtype;
   1283 	device_t        dev = adapter->dev;
   1284 	bus_addr_t addr;
   1285 	int flags;
   1286 
   1287 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1288 	switch (memtype) {
   1289 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1290 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1291 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1292 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1293 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1294 			goto map_err;
   1295 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1296 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1297 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1298 		}
   1299 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1300 		     adapter->osdep.mem_size, flags,
   1301 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1302 map_err:
   1303 			adapter->osdep.mem_size = 0;
   1304 			aprint_error_dev(dev, "unable to map BAR0\n");
   1305 			return ENXIO;
   1306 		}
   1307 		break;
   1308 	default:
   1309 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1310 		return ENXIO;
   1311 	}
   1312 
   1313 	/* Pick up the tuneable queues */
   1314 	adapter->num_queues = ixv_num_queues;
   1315 
   1316 	return (0);
   1317 } /* ixv_allocate_pci_resources */
   1318 
   1319 /************************************************************************
   1320  * ixv_free_pci_resources
   1321  ************************************************************************/
   1322 static void
   1323 ixv_free_pci_resources(struct adapter * adapter)
   1324 {
   1325 	struct 		ix_queue *que = adapter->queues;
   1326 	int		rid;
   1327 
   1328 	/*
   1329 	 *  Release all msix queue resources:
   1330 	 */
   1331 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1332 		if (que->res != NULL)
   1333 			pci_intr_disestablish(adapter->osdep.pc,
   1334 			    adapter->osdep.ihs[i]);
   1335 	}
   1336 
   1337 
   1338 	/* Clean the Mailbox interrupt last */
   1339 	rid = adapter->vector;
   1340 
   1341 	if (adapter->osdep.ihs[rid] != NULL) {
   1342 		pci_intr_disestablish(adapter->osdep.pc,
   1343 		    adapter->osdep.ihs[rid]);
   1344 		adapter->osdep.ihs[rid] = NULL;
   1345 	}
   1346 
   1347 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1348 	    adapter->osdep.nintrs);
   1349 
   1350 	if (adapter->osdep.mem_size != 0) {
   1351 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1352 		    adapter->osdep.mem_bus_space_handle,
   1353 		    adapter->osdep.mem_size);
   1354 	}
   1355 
   1356 	return;
   1357 } /* ixv_free_pci_resources */
   1358 
   1359 /************************************************************************
   1360  * ixv_setup_interface
   1361  *
   1362  *   Setup networking device structure and register an interface.
   1363  ************************************************************************/
   1364 static void
   1365 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1366 {
   1367 	struct ethercom *ec = &adapter->osdep.ec;
   1368 	struct ifnet   *ifp;
   1369 
   1370 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1371 
   1372 	ifp = adapter->ifp = &ec->ec_if;
   1373 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1374 	ifp->if_baudrate = IF_Gbps(10);
   1375 	ifp->if_init = ixv_init;
   1376 	ifp->if_stop = ixv_ifstop;
   1377 	ifp->if_softc = adapter;
   1378 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1379 #ifdef IXGBE_MPSAFE
   1380 	ifp->if_extflags = IFEF_START_MPSAFE;
   1381 #endif
   1382 	ifp->if_ioctl = ixv_ioctl;
   1383 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1384 #if 0
   1385 		ixv_start_locked = ixgbe_legacy_start_locked;
   1386 #endif
   1387 	} else {
   1388 		ifp->if_transmit = ixgbe_mq_start;
   1389 #if 0
   1390 		ixv_start_locked = ixgbe_mq_start_locked;
   1391 #endif
   1392 	}
   1393 	ifp->if_start = ixgbe_legacy_start;
   1394 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1395 	IFQ_SET_READY(&ifp->if_snd);
   1396 
   1397 	if_initialize(ifp);
   1398 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1399 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1400 	/*
   1401 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1402 	 * used.
   1403 	 */
   1404 	if_register(ifp);
   1405 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1406 
   1407 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1408 
   1409 	/*
   1410 	 * Tell the upper layer(s) we support long frames.
   1411 	 */
   1412 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1413 
   1414 	/* Set capability flags */
   1415 	ifp->if_capabilities |= IFCAP_HWCSUM
   1416 	                     |  IFCAP_TSOv4
   1417 	                     |  IFCAP_TSOv6;
   1418 	ifp->if_capenable = 0;
   1419 
   1420 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1421 			    |  ETHERCAP_VLAN_HWCSUM
   1422 			    |  ETHERCAP_JUMBO_MTU
   1423 			    |  ETHERCAP_VLAN_MTU;
   1424 
   1425 	/* Enable the above capabilities by default */
   1426 	ec->ec_capenable = ec->ec_capabilities;
   1427 
   1428 	/* Don't enable LRO by default */
   1429 	ifp->if_capabilities |= IFCAP_LRO;
   1430 #if 0
   1431 	ifp->if_capenable = ifp->if_capabilities;
   1432 #endif
   1433 
   1434 	/*
   1435 	 * Specify the media types supported by this adapter and register
   1436 	 * callbacks to update media and link information
   1437 	 */
   1438 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1439 	    ixv_media_status);
   1440 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1441 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1442 
   1443 	return;
   1444 } /* ixv_setup_interface */
   1445 
   1446 
   1447 /************************************************************************
   1448  * ixv_initialize_transmit_units - Enable transmit unit.
   1449  ************************************************************************/
   1450 static void
   1451 ixv_initialize_transmit_units(struct adapter *adapter)
   1452 {
   1453 	struct tx_ring	*txr = adapter->tx_rings;
   1454 	struct ixgbe_hw	*hw = &adapter->hw;
   1455 
   1456 
   1457 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1458 		u64 tdba = txr->txdma.dma_paddr;
   1459 		u32 txctrl, txdctl;
   1460 
    1461 		/* Set WTHRESH to 8 (TXDCTL bits 22:16), burst writeback */
   1462 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1463 		txdctl |= (8 << 16);
   1464 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1465 
   1466 		/* Set the HW Tx Head and Tail indices */
   1467 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1468 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1469 
   1470 		/* Set Tx Tail register */
   1471 		txr->tail = IXGBE_VFTDT(i);
   1472 
   1473 		/* Set Ring parameters */
   1474 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1475 		    (tdba & 0x00000000ffffffffULL));
   1476 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1477 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1478 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1479 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1480 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1481 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1482 
   1483 		/* Now enable */
   1484 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1485 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1486 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1487 	}
   1488 
   1489 	return;
   1490 } /* ixv_initialize_transmit_units */
   1491 
   1492 
   1493 /************************************************************************
   1494  * ixv_initialize_rss_mapping
   1495  ************************************************************************/
   1496 static void
   1497 ixv_initialize_rss_mapping(struct adapter *adapter)
   1498 {
   1499 	struct ixgbe_hw *hw = &adapter->hw;
   1500 	u32             reta = 0, mrqc, rss_key[10];
   1501 	int             queue_id;
   1502 	int             i, j;
   1503 	u32             rss_hash_config;
   1504 
   1505 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1506 		/* Fetch the configured RSS key */
   1507 		rss_getkey((uint8_t *)&rss_key);
   1508 	} else {
   1509 		/* set up random bits */
   1510 		cprng_fast(&rss_key, sizeof(rss_key));
   1511 	}
   1512 
   1513 	/* Now fill out hash function seeds */
   1514 	for (i = 0; i < 10; i++)
   1515 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1516 
    1517 	/* Set up the 64-entry redirection table, 4 entries per VFRETA register */
   1518 	for (i = 0, j = 0; i < 64; i++, j++) {
   1519 		if (j == adapter->num_queues)
   1520 			j = 0;
   1521 
   1522 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1523 			/*
   1524 			 * Fetch the RSS bucket id for the given indirection
   1525 			 * entry. Cap it at the number of configured buckets
   1526 			 * (which is num_queues.)
   1527 			 */
   1528 			queue_id = rss_get_indirection_to_bucket(i);
   1529 			queue_id = queue_id % adapter->num_queues;
   1530 		} else
   1531 			queue_id = j;
   1532 
   1533 		/*
   1534 		 * The low 8 bits are for hash value (n+0);
   1535 		 * The next 8 bits are for hash value (n+1), etc.
   1536 		 */
   1537 		reta >>= 8;
   1538 		reta |= ((uint32_t)queue_id) << 24;
   1539 		if ((i & 3) == 3) {
   1540 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1541 			reta = 0;
   1542 		}
   1543 	}
   1544 
   1545 	/* Perform hash on these packet types */
   1546 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1547 		rss_hash_config = rss_gethashconfig();
   1548 	else {
   1549 		/*
   1550 		 * Disable UDP - IP fragments aren't currently being handled
   1551 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1552 		 * traffic.
   1553 		 */
   1554 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1555 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1556 		                | RSS_HASHTYPE_RSS_IPV6
   1557 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1558 	}
   1559 
   1560 	mrqc = IXGBE_MRQC_RSSEN;
   1561 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1562 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1563 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1564 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1565 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1566 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1567 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1568 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1569 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1570 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1571 		    __func__);
   1572 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1573 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1574 		    __func__);
   1575 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1576 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1577 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1578 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1579 		    __func__);
   1580 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1581 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1582 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1583 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1584 		    __func__);
   1585 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1586 } /* ixv_initialize_rss_mapping */
   1587 
   1588 
   1589 /************************************************************************
   1590  * ixv_initialize_receive_units - Setup receive registers and features.
   1591  ************************************************************************/
   1592 static void
   1593 ixv_initialize_receive_units(struct adapter *adapter)
   1594 {
   1595 	struct	rx_ring	*rxr = adapter->rx_rings;
   1596 	struct ixgbe_hw	*hw = &adapter->hw;
   1597 	struct ifnet	*ifp = adapter->ifp;
   1598 	u32		bufsz, rxcsum, psrtype;
   1599 
   1600 	if (ifp->if_mtu > ETHERMTU)
   1601 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1602 	else
   1603 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
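         	/* BSIZEPKT is in 1 KB units: 4 KB buffers for jumbo MTUs, 2 KB otherwise */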
   1604 
   1605 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1606 	        | IXGBE_PSRTYPE_UDPHDR
   1607 	        | IXGBE_PSRTYPE_IPV4HDR
   1608 	        | IXGBE_PSRTYPE_IPV6HDR
   1609 	        | IXGBE_PSRTYPE_L2HDR;
   1610 
   1611 	if (adapter->num_queues > 1)
   1612 		psrtype |= 1 << 29;
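         	/*
         	 * Bits 31:29 of PSRTYPE (the RQPL field) select the number of
         	 * RSS queues per pool as a power of two; setting bit 29
         	 * (RQPL = 1) advertises two queues.
         	 */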
   1613 
   1614 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1615 
   1616 	/* Tell PF our max_frame size */
   1617 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1618 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1619 	}
   1620 
   1621 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1622 		u64 rdba = rxr->rxdma.dma_paddr;
   1623 		u32 reg, rxdctl;
   1624 
   1625 		/* Disable the queue */
   1626 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1627 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1628 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1629 		for (int j = 0; j < 10; j++) {
   1630 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1631 			    IXGBE_RXDCTL_ENABLE)
   1632 				msec_delay(1);
   1633 			else
   1634 				break;
   1635 		}
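         		/*
         		 * The loop above waits up to ~10 ms for RXDCTL.ENABLE
         		 * to clear before the ring is reprogrammed.
         		 */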
   1636 		wmb();
   1637 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1638 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1639 		    (rdba & 0x00000000ffffffffULL));
   1640 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1641 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1642 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1643 
   1644 		/* Reset the ring indices */
   1645 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1646 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1647 
   1648 		/* Set up the SRRCTL register */
   1649 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1650 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1651 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1652 		reg |= bufsz;
   1653 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1654 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1655 
   1656 		/* Capture Rx Tail index */
   1657 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1658 
   1659 		/* Do the queue enabling last */
   1660 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1661 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1662 		for (int k = 0; k < 10; k++) {
   1663 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1664 			    IXGBE_RXDCTL_ENABLE)
   1665 				break;
   1666 			msec_delay(1);
   1667 		}
   1668 		wmb();
   1669 
   1670 		/* Set the Tail Pointer */
   1671 		/*
   1672 		 * In netmap mode, we must preserve the buffers made
   1673 		 * available to userspace before the if_init()
   1674 		 * (this is true by default on the TX side, because
   1675 		 * init makes all buffers available to userspace).
   1676 		 *
   1677 		 * netmap_reset() and the device specific routines
   1678 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1679 		 * buffers at the end of the NIC ring, so here we
   1680 		 * must set the RDT (tail) register to make sure
   1681 		 * they are not overwritten.
   1682 		 *
   1683 		 * In this driver the NIC ring starts at RDH = 0,
   1684 		 * RDT points to the last slot available for reception (?),
   1685 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1686 		 */
   1687 #ifdef DEV_NETMAP
   1688 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1689 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1690 			struct netmap_adapter *na = NA(adapter->ifp);
   1691 			struct netmap_kring *kring = &na->rx_rings[i];
   1692 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1693 
   1694 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1695 		} else
   1696 #endif /* DEV_NETMAP */
   1697 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1698 			    adapter->num_rx_desc - 1);
   1699 	}
   1700 
   1701 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1702 
   1703 	ixv_initialize_rss_mapping(adapter);
   1704 
   1705 	if (adapter->num_queues > 1) {
   1706 		/* RSS and RX IPP Checksum are mutually exclusive */
   1707 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1708 	}
   1709 
   1710 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1711 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1712 
   1713 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1714 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1715 
   1716 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1717 
   1718 	return;
   1719 } /* ixv_initialize_receive_units */
   1720 
   1721 /************************************************************************
   1722  * ixv_setup_vlan_support
   1723  ************************************************************************/
   1724 static void
   1725 ixv_setup_vlan_support(struct adapter *adapter)
   1726 {
   1727 	struct ethercom *ec = &adapter->osdep.ec;
   1728 	struct ixgbe_hw *hw = &adapter->hw;
   1729 	struct rx_ring  *rxr;
   1730 	u32		ctrl, vid, vfta, retry;
   1731 
   1732 	/*
    1733 	 * We get here through init_locked, meaning a soft
    1734 	 * reset; that has already cleared the VFTA and other
    1735 	 * state, so if no VLANs have been registered there
    1736 	 * is nothing to do.
   1737 	 */
   1738 	if (!VLAN_ATTACHED(ec))
   1739 		return;
   1740 
   1741 	/* Enable the queues */
   1742 	for (int i = 0; i < adapter->num_queues; i++) {
   1743 		rxr = &adapter->rx_rings[i];
   1744 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1745 		ctrl |= IXGBE_RXDCTL_VME;
   1746 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1747 		/*
   1748 		 * Let Rx path know that it needs to store VLAN tag
   1749 		 * as part of extra mbuf info.
   1750 		 */
   1751 		rxr->vtag_strip = TRUE;
   1752 	}
   1753 
   1754 #if 1
   1755 	/* XXX dirty hack. Enable all VIDs */
   1756 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
    1757 		adapter->shadow_vfta[i] = 0xffffffff;
   1758 #endif
   1759 	/*
    1760 	 * A soft reset zeroes out the VFTA, so
   1761 	 * we need to repopulate it now.
   1762 	 */
   1763 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1764 		if (adapter->shadow_vfta[i] == 0)
   1765 			continue;
   1766 		vfta = adapter->shadow_vfta[i];
   1767 		/*
    1768 		 * Reconstruct the VLAN IDs from
    1769 		 * the bits set in each entry of
    1770 		 * the shadow VFTA array.
   1771 		 */
   1772 		for (int j = 0; j < 32; j++) {
   1773 			retry = 0;
   1774 			if ((vfta & (1 << j)) == 0)
   1775 				continue;
   1776 			vid = (i * 32) + j;
    1777 			/* Program the filter via the PF mailbox; retry on failure */
   1778 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1779 				if (++retry > 5)
   1780 					break;
   1781 			}
   1782 		}
   1783 	}
   1784 } /* ixv_setup_vlan_support */
   1785 
   1786 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1787 /************************************************************************
   1788  * ixv_register_vlan
   1789  *
    1790  *   Run via a vlan config EVENT. It enables us to use the
    1791  *   HW filter table since we can get the vlan id; this just
    1792  *   creates the entry in the soft version of the VFTA, and
    1793  *   init will repopulate the real table.
   1794  ************************************************************************/
   1795 static void
   1796 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1797 {
   1798 	struct adapter	*adapter = ifp->if_softc;
   1799 	u16		index, bit;
   1800 
   1801 	if (ifp->if_softc != arg) /* Not our event */
   1802 		return;
   1803 
   1804 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1805 		return;
   1806 
   1807 	IXGBE_CORE_LOCK(adapter);
   1808 	index = (vtag >> 5) & 0x7F;
   1809 	bit = vtag & 0x1F;
   1810 	adapter->shadow_vfta[index] |= (1 << bit);
   1811 	/* Re-init to load the changes */
   1812 	ixv_init_locked(adapter);
   1813 	IXGBE_CORE_UNLOCK(adapter);
   1814 } /* ixv_register_vlan */
   1815 
   1816 /************************************************************************
   1817  * ixv_unregister_vlan
   1818  *
    1819  *   Run via a vlan unconfig EVENT; remove our entry
    1820  *   from the soft VFTA.
   1821  ************************************************************************/
   1822 static void
   1823 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1824 {
   1825 	struct adapter	*adapter = ifp->if_softc;
   1826 	u16		index, bit;
   1827 
    1828 	if (ifp->if_softc != arg) /* Not our event */
   1829 		return;
   1830 
   1831 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1832 		return;
   1833 
   1834 	IXGBE_CORE_LOCK(adapter);
   1835 	index = (vtag >> 5) & 0x7F;
   1836 	bit = vtag & 0x1F;
   1837 	adapter->shadow_vfta[index] &= ~(1 << bit);
   1838 	/* Re-init to load the changes */
   1839 	ixv_init_locked(adapter);
   1840 	IXGBE_CORE_UNLOCK(adapter);
   1841 } /* ixv_unregister_vlan */
   1842 #endif
   1843 
   1844 /************************************************************************
   1845  * ixv_enable_intr
   1846  ************************************************************************/
   1847 static void
   1848 ixv_enable_intr(struct adapter *adapter)
   1849 {
   1850 	struct ixgbe_hw *hw = &adapter->hw;
   1851 	struct ix_queue *que = adapter->queues;
   1852 	u32             mask;
   1853 	int i;
   1854 
    1855 	/* VTEIAC: auto-clear the mailbox and queue vectors on interrupt */
   1856 	mask = (1 << adapter->vector);
   1857 	for (i = 0; i < adapter->num_queues; i++, que++)
   1858 		mask |= (1 << que->msix);
   1859 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1860 
    1861 	/* VTEIMS: unmask the mailbox vector; queue vectors are unmasked below */
   1862 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
   1863 	que = adapter->queues;
   1864 	for (i = 0; i < adapter->num_queues; i++, que++)
   1865 		ixv_enable_queue(adapter, que->msix);
   1866 
   1867 	IXGBE_WRITE_FLUSH(hw);
   1868 
   1869 	return;
   1870 } /* ixv_enable_intr */
   1871 
   1872 /************************************************************************
   1873  * ixv_disable_intr
   1874  ************************************************************************/
   1875 static void
   1876 ixv_disable_intr(struct adapter *adapter)
   1877 {
   1878 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1879 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1880 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1881 
   1882 	return;
   1883 } /* ixv_disable_intr */
   1884 
   1885 /************************************************************************
   1886  * ixv_set_ivar
   1887  *
   1888  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1889  *    - entry is the register array entry
   1890  *    - vector is the MSI-X vector for this queue
   1891  *    - type is RX/TX/MISC
   1892  ************************************************************************/
   1893 static void
   1894 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1895 {
   1896 	struct ixgbe_hw *hw = &adapter->hw;
   1897 	u32             ivar, index;
   1898 
   1899 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1900 
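         	/*
         	 * Each VTIVAR register holds four 8-bit entries: Rx and Tx
         	 * for an even/odd queue pair.  The byte offset below is
         	 * 16 * (entry & 1) for the odd queue plus 8 * type
         	 * (0 = Rx, 1 = Tx); type -1 selects the misc/mailbox IVAR.
         	 */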
   1901 	if (type == -1) { /* MISC IVAR */
   1902 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1903 		ivar &= ~0xFF;
   1904 		ivar |= vector;
   1905 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1906 	} else {          /* RX/TX IVARS */
   1907 		index = (16 * (entry & 1)) + (8 * type);
   1908 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1909 		ivar &= ~(0xFF << index);
   1910 		ivar |= (vector << index);
   1911 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1912 	}
   1913 } /* ixv_set_ivar */
   1914 
   1915 /************************************************************************
   1916  * ixv_configure_ivars
   1917  ************************************************************************/
   1918 static void
   1919 ixv_configure_ivars(struct adapter *adapter)
   1920 {
   1921 	struct ix_queue *que = adapter->queues;
   1922 
   1923 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1924 		/* First the RX queue entry */
   1925 		ixv_set_ivar(adapter, i, que->msix, 0);
   1926 		/* ... and the TX */
   1927 		ixv_set_ivar(adapter, i, que->msix, 1);
   1928 		/* Set an initial value in EITR */
   1929 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1930 		    IXGBE_EITR_DEFAULT);
   1931 	}
   1932 
   1933 	/* For the mailbox interrupt */
   1934 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
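         	/* (the entry argument is ignored for misc IVARs; only the vector matters) */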
   1935 } /* ixv_configure_ivars */
   1936 
   1937 
   1938 /************************************************************************
   1939  * ixv_save_stats
   1940  *
   1941  *   The VF stats registers never have a truly virgin
   1942  *   starting point, so this routine tries to make an
   1943  *   artificial one, marking ground zero on attach as
   1944  *   it were.
   1945  ************************************************************************/
   1946 static void
   1947 ixv_save_stats(struct adapter *adapter)
   1948 {
   1949 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1950 
   1951 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1952 		stats->saved_reset_vfgprc +=
   1953 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1954 		stats->saved_reset_vfgptc +=
   1955 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1956 		stats->saved_reset_vfgorc +=
   1957 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1958 		stats->saved_reset_vfgotc +=
   1959 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1960 		stats->saved_reset_vfmprc +=
   1961 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1962 	}
   1963 } /* ixv_save_stats */
   1964 
   1965 /************************************************************************
   1966  * ixv_init_stats
   1967  ************************************************************************/
   1968 static void
   1969 ixv_init_stats(struct adapter *adapter)
   1970 {
   1971 	struct ixgbe_hw *hw = &adapter->hw;
   1972 
   1973 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1974 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1975 	adapter->stats.vf.last_vfgorc |=
   1976 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1977 
   1978 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1979 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1980 	adapter->stats.vf.last_vfgotc |=
   1981 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1982 
   1983 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1984 
   1985 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1986 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1987 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1988 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1989 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1990 } /* ixv_init_stats */
   1991 
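         /*
          * The VF statistics registers are free-running counters, 32 bits
          * wide (or 36 bits split across LSB/MSB pairs), and are never
          * cleared on read.  These macros detect a wrap since the previous
          * sample and fold the current value into the 64-bit event counters.
          */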
   1992 #define UPDATE_STAT_32(reg, last, count)		\
   1993 {                                                       \
   1994 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   1995 	if (current < (last))				\
   1996 		count.ev_count += 0x100000000LL;	\
   1997 	(last) = current;				\
   1998 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   1999 	count.ev_count |= current;			\
   2000 }
   2001 
   2002 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   2003 {                                                       \
   2004 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   2005 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   2006 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   2007 	if (current < (last))				\
   2008 		count.ev_count += 0x1000000000LL;	\
   2009 	(last) = current;				\
   2010 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2011 	count.ev_count |= current;			\
   2012 }
   2013 
   2014 /************************************************************************
   2015  * ixv_update_stats - Update the board statistics counters.
   2016  ************************************************************************/
   2017 void
   2018 ixv_update_stats(struct adapter *adapter)
   2019 {
   2020 	struct ixgbe_hw *hw = &adapter->hw;
   2021 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2022 
    2023 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
    2024 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
    2025 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
    2026 	    stats->vfgorc);
    2027 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
    2028 	    stats->vfgotc);
    2029 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2030 
   2031 	/* Fill out the OS statistics structure */
   2032 	/*
   2033 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
    2034 	 * adapter->stats counters. This is required to make ifconfig -z
    2035 	 * (SIOCZIFDATA) work.
   2036 	 */
   2037 } /* ixv_update_stats */
   2038 
   2039 const struct sysctlnode *
   2040 ixv_sysctl_instance(struct adapter *adapter)
   2041 {
   2042 	const char *dvname;
   2043 	struct sysctllog **log;
   2044 	int rc;
   2045 	const struct sysctlnode *rnode;
   2046 
   2047 	log = &adapter->sysctllog;
   2048 	dvname = device_xname(adapter->dev);
   2049 
   2050 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2051 	    0, CTLTYPE_NODE, dvname,
   2052 	    SYSCTL_DESCR("ixv information and settings"),
   2053 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2054 		goto err;
   2055 
   2056 	return rnode;
   2057 err:
   2058 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2059 	return NULL;
   2060 }
   2061 
   2062 static void
   2063 ixv_add_device_sysctls(struct adapter *adapter)
   2064 {
   2065 	struct sysctllog **log;
   2066 	const struct sysctlnode *rnode, *cnode;
   2067 	device_t dev;
   2068 
   2069 	dev = adapter->dev;
   2070 	log = &adapter->sysctllog;
   2071 
   2072 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2073 		aprint_error_dev(dev, "could not create sysctl root\n");
   2074 		return;
   2075 	}
   2076 
   2077 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2078 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2079 	    "debug", SYSCTL_DESCR("Debug Info"),
   2080 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2081 		aprint_error_dev(dev, "could not create sysctl\n");
   2082 
   2083 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2084 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2085 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2086 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2087 		aprint_error_dev(dev, "could not create sysctl\n");
   2088 }
   2089 
   2090 /************************************************************************
   2091  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2092  ************************************************************************/
   2093 static void
   2094 ixv_add_stats_sysctls(struct adapter *adapter)
   2095 {
   2096 	device_t                dev = adapter->dev;
   2097 	struct tx_ring          *txr = adapter->tx_rings;
   2098 	struct rx_ring          *rxr = adapter->rx_rings;
   2099 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2100 	struct ixgbe_hw *hw = &adapter->hw;
   2101 	const struct sysctlnode *rnode;
   2102 	struct sysctllog **log = &adapter->sysctllog;
   2103 	const char *xname = device_xname(dev);
   2104 
   2105 	/* Driver Statistics */
   2106 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2107 	    NULL, xname, "Handled queue in softint");
   2108 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2109 	    NULL, xname, "Requeued in softint");
   2110 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2111 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2112 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2113 	    NULL, xname, "m_defrag() failed");
   2114 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2115 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2116 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2117 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2118 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2119 	    NULL, xname, "Driver tx dma hard fail other");
   2120 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2121 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2122 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2123 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2124 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2125 	    NULL, xname, "Watchdog timeouts");
   2126 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2127 	    NULL, xname, "TSO errors");
   2128 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2129 	    NULL, xname, "Link MSI-X IRQ Handled");
   2130 
   2131 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2132 		snprintf(adapter->queues[i].evnamebuf,
   2133 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2134 		    xname, i);
   2135 		snprintf(adapter->queues[i].namebuf,
   2136 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2137 
   2138 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2139 			aprint_error_dev(dev, "could not create sysctl root\n");
   2140 			break;
   2141 		}
   2142 
   2143 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2144 		    0, CTLTYPE_NODE,
   2145 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2146 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2147 			break;
   2148 
   2149 #if 0 /* not yet */
   2150 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2151 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2152 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2153 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2154 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2155 			break;
   2156 
   2157 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2158 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2159 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2160 			NULL, 0, &(adapter->queues[i].irqs),
   2161 		    0, CTL_CREATE, CTL_EOL) != 0)
   2162 			break;
   2163 
   2164 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2165 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2166 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2167 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2168 		    0, CTL_CREATE, CTL_EOL) != 0)
   2169 			break;
   2170 
   2171 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2172 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2173 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2174 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2175 		    0, CTL_CREATE, CTL_EOL) != 0)
   2176 			break;
   2177 #endif
   2178 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2179 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2180 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2181 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2182 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2183 		    NULL, adapter->queues[i].evnamebuf,
   2184 		    "Queue No Descriptor Available");
   2185 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2186 		    NULL, adapter->queues[i].evnamebuf,
   2187 		    "Queue Packets Transmitted");
   2188 #ifndef IXGBE_LEGACY_TX
   2189 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2190 		    NULL, adapter->queues[i].evnamebuf,
   2191 		    "Packets dropped in pcq");
   2192 #endif
   2193 
   2194 #ifdef LRO
   2195 		struct lro_ctrl *lro = &rxr->lro;
   2196 #endif /* LRO */
   2197 
   2198 #if 0 /* not yet */
   2199 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2200 		    CTLFLAG_READONLY,
   2201 		    CTLTYPE_INT,
   2202 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2203 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2204 		    CTL_CREATE, CTL_EOL) != 0)
   2205 			break;
   2206 
   2207 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2208 		    CTLFLAG_READONLY,
   2209 		    CTLTYPE_INT,
   2210 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2211 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2212 		    CTL_CREATE, CTL_EOL) != 0)
   2213 			break;
   2214 #endif
   2215 
   2216 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2217 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2218 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2219 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2220 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2221 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2222 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2223 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2224 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2225 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2226 #ifdef LRO
   2227 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2228 				CTLFLAG_RD, &lro->lro_queued, 0,
   2229 				"LRO Queued");
   2230 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2231 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2232 				"LRO Flushed");
   2233 #endif /* LRO */
   2234 	}
   2235 
   2236 	/* MAC stats get their own sub node */
   2237 
   2238 	snprintf(stats->namebuf,
   2239 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2240 
   2241 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2242 	    stats->namebuf, "rx csum offload - IP");
   2243 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2244 	    stats->namebuf, "rx csum offload - L4");
   2245 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2246 	    stats->namebuf, "rx csum offload - IP bad");
   2247 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2248 	    stats->namebuf, "rx csum offload - L4 bad");
   2249 
   2250 	/* Packet Reception Stats */
   2251 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2252 	    xname, "Good Packets Received");
   2253 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2254 	    xname, "Good Octets Received");
   2255 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2256 	    xname, "Multicast Packets Received");
   2257 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2258 	    xname, "Good Packets Transmitted");
   2259 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2260 	    xname, "Good Octets Transmitted");
   2261 
   2262 	/* Mailbox Stats */
   2263 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2264 	    xname, "message TXs");
   2265 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2266 	    xname, "message RXs");
   2267 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2268 	    xname, "ACKs");
   2269 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2270 	    xname, "REQs");
   2271 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2272 	    xname, "RSTs");
   2273 
   2274 } /* ixv_add_stats_sysctls */
   2275 
   2276 /************************************************************************
   2277  * ixv_set_sysctl_value
   2278  ************************************************************************/
   2279 static void
   2280 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2281 	const char *description, int *limit, int value)
   2282 {
   2283 	device_t dev =  adapter->dev;
   2284 	struct sysctllog **log;
   2285 	const struct sysctlnode *rnode, *cnode;
   2286 
   2287 	log = &adapter->sysctllog;
   2288 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2289 		aprint_error_dev(dev, "could not create sysctl root\n");
   2290 		return;
   2291 	}
   2292 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2293 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2294 	    name, SYSCTL_DESCR(description),
   2295 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2296 		aprint_error_dev(dev, "could not create sysctl\n");
   2297 	*limit = value;
   2298 } /* ixv_set_sysctl_value */
   2299 
   2300 /************************************************************************
   2301  * ixv_print_debug_info
   2302  *
    2303  *   Called when the "debug" sysctl is written with 1.
   2304  *   Provides a way to take a look at important statistics
   2305  *   maintained by the driver and hardware.
   2306  ************************************************************************/
   2307 static void
   2308 ixv_print_debug_info(struct adapter *adapter)
   2309 {
    2310 	device_t        dev = adapter->dev;
    2311 	struct ixgbe_hw *hw = &adapter->hw;
    2312 	struct ix_queue *que = adapter->queues;
    2313 	struct rx_ring  *rxr;
    2314 	struct tx_ring  *txr;
    2315 #ifdef LRO
    2316 	struct lro_ctrl *lro;
    2317 #endif /* LRO */
   2318 
   2319 	device_printf(dev, "Error Byte Count = %u \n",
   2320 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2321 
   2322 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2323 		txr = que->txr;
   2324 		rxr = que->rxr;
   2325 #ifdef LRO
   2326 		lro = &rxr->lro;
   2327 #endif /* LRO */
    2328 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
    2329 		    que->msix, (unsigned long)que->irqs.ev_count);
    2330 		device_printf(dev, "RX(%d) Packets Received: %llu\n",
    2331 		    rxr->me, (unsigned long long)rxr->rx_packets.ev_count);
    2332 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
    2333 		    rxr->me, (unsigned long)rxr->rx_bytes.ev_count);
   2334 #ifdef LRO
   2335 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2336 		    rxr->me, (long long)lro->lro_queued);
   2337 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2338 		    rxr->me, (long long)lro->lro_flushed);
   2339 #endif /* LRO */
    2340 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
    2341 		    txr->me, (unsigned long)txr->total_packets.ev_count);
    2342 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
    2343 		    txr->me, (unsigned long)txr->no_desc_avail.ev_count);
   2344 	}
   2345 
    2346 	device_printf(dev, "MBX IRQ Handled: %lu\n",
    2347 	    (unsigned long)adapter->link_irq.ev_count);
   2348 } /* ixv_print_debug_info */
   2349 
   2350 /************************************************************************
   2351  * ixv_sysctl_debug
   2352  ************************************************************************/
   2353 static int
   2354 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2355 {
    2356 	struct sysctlnode node;
    2357 	struct adapter *adapter;
    2358 	int            error, result = 0;
    2359 
    2360 	node = *rnode;
    2361 	/* Grab the adapter before sysctl_data is redirected to &result */
    2362 	adapter = (struct adapter *)node.sysctl_data;
    2363 	node.sysctl_data = &result;
    2364 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    2365 
    2366 	if (error || newp == NULL)
    2367 		return error;
    2368 
    2369 	if (result == 1)
    2370 		ixv_print_debug_info(adapter);
   2371 
   2372 	return 0;
   2373 } /* ixv_sysctl_debug */
   2374 
   2375 /************************************************************************
   2376  * ixv_init_device_features
   2377  ************************************************************************/
   2378 static void
   2379 ixv_init_device_features(struct adapter *adapter)
   2380 {
   2381 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2382 	                  | IXGBE_FEATURE_VF
   2383 	                  | IXGBE_FEATURE_RSS
   2384 	                  | IXGBE_FEATURE_LEGACY_TX;
   2385 
    2386 	/* A tad short on feature flags for VFs, at the moment. */
   2387 	switch (adapter->hw.mac.type) {
   2388 	case ixgbe_mac_82599_vf:
   2389 		break;
   2390 	case ixgbe_mac_X540_vf:
   2391 		break;
   2392 	case ixgbe_mac_X550_vf:
   2393 	case ixgbe_mac_X550EM_x_vf:
   2394 	case ixgbe_mac_X550EM_a_vf:
   2395 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2396 		break;
   2397 	default:
   2398 		break;
   2399 	}
   2400 
   2401 	/* Enabled by default... */
   2402 	/* Is a virtual function (VF) */
   2403 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2404 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2405 	/* Netmap */
   2406 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2407 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2408 	/* Receive-Side Scaling (RSS) */
   2409 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2410 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2411 	/* Needs advanced context descriptor regardless of offloads req'd */
   2412 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2413 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2414 
   2415 	/* Enabled via sysctl... */
   2416 	/* Legacy (single queue) transmit */
   2417 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2418 	    ixv_enable_legacy_tx)
   2419 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2420 } /* ixv_init_device_features */
   2421 
   2422 /************************************************************************
   2423  * ixv_shutdown - Shutdown entry point
   2424  ************************************************************************/
   2425 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2426 static int
   2427 ixv_shutdown(device_t dev)
   2428 {
   2429 	struct adapter *adapter = device_private(dev);
   2430 	IXGBE_CORE_LOCK(adapter);
   2431 	ixv_stop(adapter);
   2432 	IXGBE_CORE_UNLOCK(adapter);
   2433 
   2434 	return (0);
   2435 } /* ixv_shutdown */
   2436 #endif
   2437 
   2438 static int
   2439 ixv_ifflags_cb(struct ethercom *ec)
   2440 {
   2441 	struct ifnet *ifp = &ec->ec_if;
   2442 	struct adapter *adapter = ifp->if_softc;
   2443 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2444 
   2445 	IXGBE_CORE_LOCK(adapter);
   2446 
   2447 	if (change != 0)
   2448 		adapter->if_flags = ifp->if_flags;
   2449 
   2450 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2451 		rc = ENETRESET;
   2452 
   2453 	/* Set up VLAN support and filter */
   2454 	ixv_setup_vlan_support(adapter);
   2455 
   2456 	IXGBE_CORE_UNLOCK(adapter);
   2457 
   2458 	return rc;
   2459 }
   2460 
   2461 
   2462 /************************************************************************
   2463  * ixv_ioctl - Ioctl entry point
   2464  *
   2465  *   Called when the user wants to configure the interface.
   2466  *
   2467  *   return 0 on success, positive on failure
   2468  ************************************************************************/
   2469 static int
   2470 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2471 {
   2472 	struct adapter	*adapter = ifp->if_softc;
   2473 	struct ifcapreq *ifcr = data;
   2474 	struct ifreq	*ifr = data;
   2475 	int             error = 0;
   2476 	int l4csum_en;
   2477 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2478 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2479 
   2480 	switch (command) {
   2481 	case SIOCSIFFLAGS:
   2482 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2483 		break;
   2484 	case SIOCADDMULTI:
   2485 	case SIOCDELMULTI:
   2486 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2487 		break;
   2488 	case SIOCSIFMEDIA:
   2489 	case SIOCGIFMEDIA:
   2490 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2491 		break;
   2492 	case SIOCSIFCAP:
   2493 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2494 		break;
   2495 	case SIOCSIFMTU:
   2496 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2497 		break;
   2498 	default:
   2499 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2500 		break;
   2501 	}
   2502 
   2503 	switch (command) {
   2504 	case SIOCSIFMEDIA:
   2505 	case SIOCGIFMEDIA:
   2506 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2507 	case SIOCSIFCAP:
   2508 		/* Layer-4 Rx checksum offload has to be turned on and
   2509 		 * off as a unit.
   2510 		 */
   2511 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2512 		if (l4csum_en != l4csum && l4csum_en != 0)
   2513 			return EINVAL;
   2514 		/*FALLTHROUGH*/
   2515 	case SIOCADDMULTI:
   2516 	case SIOCDELMULTI:
   2517 	case SIOCSIFFLAGS:
   2518 	case SIOCSIFMTU:
   2519 	default:
   2520 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2521 			return error;
   2522 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2523 			;
   2524 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2525 			IXGBE_CORE_LOCK(adapter);
   2526 			ixv_init_locked(adapter);
   2527 			IXGBE_CORE_UNLOCK(adapter);
   2528 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2529 			/*
   2530 			 * Multicast list has changed; set the hardware filter
   2531 			 * accordingly.
   2532 			 */
   2533 			IXGBE_CORE_LOCK(adapter);
   2534 			ixv_disable_intr(adapter);
   2535 			ixv_set_multi(adapter);
   2536 			ixv_enable_intr(adapter);
   2537 			IXGBE_CORE_UNLOCK(adapter);
   2538 		}
   2539 		return 0;
   2540 	}
   2541 } /* ixv_ioctl */
   2542 
   2543 /************************************************************************
   2544  * ixv_init
   2545  ************************************************************************/
   2546 static int
   2547 ixv_init(struct ifnet *ifp)
   2548 {
   2549 	struct adapter *adapter = ifp->if_softc;
   2550 
   2551 	IXGBE_CORE_LOCK(adapter);
   2552 	ixv_init_locked(adapter);
   2553 	IXGBE_CORE_UNLOCK(adapter);
   2554 
   2555 	return 0;
   2556 } /* ixv_init */
   2557 
   2558 
   2559 /************************************************************************
   2560  * ixv_handle_que
   2561  ************************************************************************/
   2562 static void
   2563 ixv_handle_que(void *context)
   2564 {
   2565 	struct ix_queue *que = context;
   2566 	struct adapter  *adapter = que->adapter;
   2567 	struct tx_ring	*txr = que->txr;
   2568 	struct ifnet    *ifp = adapter->ifp;
   2569 	bool		more;
   2570 
   2571 	adapter->handleq.ev_count++;
   2572 
   2573 	if (ifp->if_flags & IFF_RUNNING) {
   2574 		more = ixgbe_rxeof(que);
   2575 		IXGBE_TX_LOCK(txr);
   2576 		ixgbe_txeof(txr);
   2577 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2578 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2579 				ixgbe_mq_start_locked(ifp, txr);
   2580 		/* Only for queue 0 */
   2581 		/* NetBSD still needs this for CBQ */
   2582 		if ((&adapter->queues[0] == que)
   2583 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2584 			ixgbe_legacy_start_locked(ifp, txr);
   2585 		IXGBE_TX_UNLOCK(txr);
   2586 		if (more) {
   2587 			adapter->req.ev_count++;
   2588 			softint_schedule(que->que_si);
   2589 			return;
   2590 		}
   2591 	}
   2592 
   2593 	/* Re-enable this interrupt */
   2594 	ixv_enable_queue(adapter, que->msix);
   2595 
   2596 	return;
   2597 } /* ixv_handle_que */
   2598 
   2599 /************************************************************************
   2600  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2601  ************************************************************************/
   2602 static int
   2603 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2604 {
   2605 	device_t	dev = adapter->dev;
   2606 	struct ix_queue *que = adapter->queues;
   2607 	struct		tx_ring *txr = adapter->tx_rings;
   2608 	int 		error, msix_ctrl, rid, vector = 0;
   2609 	pci_chipset_tag_t pc;
   2610 	pcitag_t	tag;
   2611 	char		intrbuf[PCI_INTRSTR_LEN];
   2612 	char		intr_xname[32];
   2613 	const char	*intrstr = NULL;
   2614 	kcpuset_t	*affinity;
   2615 	int		cpu_id = 0;
   2616 
   2617 	pc = adapter->osdep.pc;
   2618 	tag = adapter->osdep.tag;
   2619 
   2620 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2621 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2622 	    adapter->osdep.nintrs) != 0) {
   2623 		aprint_error_dev(dev,
   2624 		    "failed to allocate MSI-X interrupt\n");
   2625 		return (ENXIO);
   2626 	}
   2627 
   2628 	kcpuset_create(&affinity, false);
   2629 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2630 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2631 		    device_xname(dev), i);
   2632 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2633 		    sizeof(intrbuf));
   2634 #ifdef IXGBE_MPSAFE
   2635 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2636 		    true);
   2637 #endif
   2638 		/* Set the handler function */
   2639 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2640 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2641 		    intr_xname);
   2642 		if (que->res == NULL) {
   2643 			pci_intr_release(pc, adapter->osdep.intrs,
   2644 			    adapter->osdep.nintrs);
   2645 			aprint_error_dev(dev,
   2646 			    "Failed to register QUE handler\n");
   2647 			kcpuset_destroy(affinity);
   2648 			return (ENXIO);
   2649 		}
   2650 		que->msix = vector;
    2651 		adapter->active_queues |= (u64)1 << que->msix;
   2652 
   2653 		cpu_id = i;
   2654 		/* Round-robin affinity */
   2655 		kcpuset_zero(affinity);
   2656 		kcpuset_set(affinity, cpu_id % ncpu);
   2657 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2658 		    NULL);
   2659 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2660 		    intrstr);
   2661 		if (error == 0)
   2662 			aprint_normal(", bound queue %d to cpu %d\n",
   2663 			    i, cpu_id % ncpu);
   2664 		else
   2665 			aprint_normal("\n");
   2666 
   2667 #ifndef IXGBE_LEGACY_TX
   2668 		txr->txr_si
   2669 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2670 			ixgbe_deferred_mq_start, txr);
   2671 #endif
   2672 		que->que_si
   2673 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2674 			ixv_handle_que, que);
   2675 		if (que->que_si == NULL) {
   2676 			aprint_error_dev(dev,
   2677 			    "could not establish software interrupt\n");
   2678 		}
   2679 	}
   2680 
   2681 	/* and Mailbox */
   2682 	cpu_id++;
   2683 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2684 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2685 	    sizeof(intrbuf));
   2686 #ifdef IXGBE_MPSAFE
   2687 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2688 	    true);
   2689 #endif
   2690 	/* Set the mbx handler function */
   2691 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2692 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2693 	    intr_xname);
   2694 	if (adapter->osdep.ihs[vector] == NULL) {
   2695 		adapter->res = NULL;
   2696 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2697 		kcpuset_destroy(affinity);
   2698 		return (ENXIO);
   2699 	}
   2700 	/* Round-robin affinity */
   2701 	kcpuset_zero(affinity);
   2702 	kcpuset_set(affinity, cpu_id % ncpu);
    2703 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   2704 
   2705 	aprint_normal_dev(dev,
   2706 	    "for link, interrupting at %s", intrstr);
   2707 	if (error == 0)
   2708 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2709 	else
   2710 		aprint_normal("\n");
   2711 
   2712 	adapter->vector = vector;
   2713 	/* Tasklets for Mailbox */
   2714 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2715 	    ixv_handle_link, adapter);
   2716 	/*
    2717 	 * Due to a broken design, QEMU will fail to properly
    2718 	 * enable the guest for MSI-X unless all of the vectors
    2719 	 * in the table are set up, so we must rewrite the
    2720 	 * ENABLE bit in the MSI-X control register again at this
    2721 	 * point to cause it to initialize us successfully.
   2722 	 */
   2723 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2724 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2725 		rid += PCI_MSIX_CTL;
   2726 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2727 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2728 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2729 	}
   2730 
   2731 	kcpuset_destroy(affinity);
   2732 	return (0);
   2733 } /* ixv_allocate_msix */
   2734 
   2735 /************************************************************************
   2736  * ixv_configure_interrupts - Setup MSI-X resources
   2737  *
   2738  *   Note: The VF device MUST use MSI-X, there is no fallback.
   2739  ************************************************************************/
   2740 static int
   2741 ixv_configure_interrupts(struct adapter *adapter)
   2742 {
   2743 	device_t dev = adapter->dev;
   2744 	int want, queues, msgs;
   2745 
   2746 	/* Must have at least 2 MSI-X vectors */
   2747 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2748 	if (msgs < 2) {
   2749 		aprint_error_dev(dev, "MSIX config error\n");
   2750 		return (ENXIO);
   2751 	}
   2752 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2753 
   2754 	/* Figure out a reasonable auto config value */
   2755 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2756 
   2757 	if (ixv_num_queues != 0)
   2758 		queues = ixv_num_queues;
    2759 	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
   2760 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2761 
   2762 	/*
   2763 	 * Want vectors for the queues,
   2764 	 * plus an additional for mailbox.
   2765 	 */
   2766 	want = queues + 1;
   2767 	if (msgs >= want)
   2768 		msgs = want;
   2769 	else {
    2770 		aprint_error_dev(dev,
   2771 		    "MSI-X Configuration Problem, "
   2772 		    "%d vectors but %d queues wanted!\n",
   2773 		    msgs, want);
   2774 		return -1;
   2775 	}
   2776 
   2777 	adapter->msix_mem = (void *)1; /* XXX */
   2778 	aprint_normal_dev(dev,
   2779 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2780 	adapter->num_queues = queues;
   2781 
   2782 	return (0);
   2783 } /* ixv_configure_interrupts */
   2784 
   2785 
   2786 /************************************************************************
   2787  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2788  *
   2789  *   Done outside of interrupt context since the driver might sleep
   2790  ************************************************************************/
   2791 static void
   2792 ixv_handle_link(void *context)
   2793 {
   2794 	struct adapter *adapter = context;
   2795 
   2796 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2797 	    &adapter->link_up, FALSE);
   2798 	ixv_update_link_status(adapter);
   2799 } /* ixv_handle_link */
   2800 
   2801 /************************************************************************
   2802  * ixv_check_link - Used in the local timer to poll for link changes
   2803  ************************************************************************/
   2804 static void
   2805 ixv_check_link(struct adapter *adapter)
   2806 {
   2807 	adapter->hw.mac.get_link_status = TRUE;
   2808 
   2809 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2810 	    &adapter->link_up, FALSE);
   2811 	ixv_update_link_status(adapter);
   2812 } /* ixv_check_link */
   2813