ixv.c revision 1.64
      1 /*$NetBSD: ixv.c,v 1.64 2017/09/15 04:52:32 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
     55  *   Used by probe to select devices to load on
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
    148  * Device Interface Entry Points (NetBSD autoconf)
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
    171 #define	TUNABLE_INT(__x, __y)
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
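         /*
          * Note: TUNABLE_INT is defined away above, so on NetBSD the
          * FreeBSD loader tunables named below are compile-time
          * defaults only.
          */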
    173 
    174 /*
     175  * AIM: Adaptive Interrupt Moderation.
     176  * The interrupt rate is varied over time
     177  * based on the traffic seen on each
     178  * interrupt vector.
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
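         /*
          * Both limits are re-exported per device as the
          * "rx_processing_limit" and "tx_processing_limit" sysctls
          * during attach (see ixv_set_sysctl_value()).
          */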
    190 
    191 /*
     192  * Number of TX descriptors per ring;
     193  * set higher than RX as this seems to be
     194  * the better-performing choice.
    195  */
    196 static int ixv_txd = PERFORM_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = PERFORM_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
    207 /*
     208  * Shadow VFTA table: needed because the
     209  * real filter table gets cleared during
     210  * a soft reset and must be repopulated.
    211  */
    212 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
    213 
    214 #ifdef NET_MPSAFE
    215 #define IXGBE_MPSAFE		1
    216 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    217 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    218 #else
    219 #define IXGBE_CALLOUT_FLAGS	0
    220 #define IXGBE_SOFTINFT_FLAGS	0
    221 #endif
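         /*
          * When the kernel is built with NET_MPSAFE, the callout and
          * softints below are established MP-safe so they may run
          * without holding the kernel lock.
          */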
    222 
    223 #if 0
    224 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    225 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    226 #endif
    227 
    228 /************************************************************************
    229  * ixv_probe - Device identification routine
    230  *
     231  *   Determines if the driver should be loaded on this
     232  *   adapter based on its PCI vendor/device ID.
     233  *
     234  *   return 1 on a supported device, 0 otherwise
    235  ************************************************************************/
    236 static int
    237 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    238 {
    239 #ifdef __HAVE_PCI_MSI_MSIX
    240 	const struct pci_attach_args *pa = aux;
    241 
    242 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    243 #else
    244 	return 0;
    245 #endif
    246 } /* ixv_probe */
    247 
    248 static ixgbe_vendor_info_t *
    249 ixv_lookup(const struct pci_attach_args *pa)
    250 {
    251 	ixgbe_vendor_info_t *ent;
    252 	pcireg_t subid;
    253 
    254 	INIT_DEBUGOUT("ixv_lookup: begin");
    255 
    256 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    257 		return NULL;
    258 
    259 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    260 
    261 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    262 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    263 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    264 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    265 		     (ent->subvendor_id == 0)) &&
    266 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    267 		     (ent->subdevice_id == 0))) {
    268 			return ent;
    269 		}
    270 	}
    271 
    272 	return NULL;
    273 }
    274 
    275 /************************************************************************
    276  * ixv_attach - Device initialization routine
    277  *
    278  *   Called when the driver is being loaded.
    279  *   Identifies the type of hardware, allocates all resources
    280  *   and initializes the hardware.
    281  *
     282  *   On failure, frees any resources already allocated and returns.
    283  ************************************************************************/
    284 static void
    285 ixv_attach(device_t parent, device_t dev, void *aux)
    286 {
    287 	struct adapter *adapter;
    288 	struct ixgbe_hw *hw;
    289 	int             error = 0;
    290 	pcireg_t	id, subid;
    291 	ixgbe_vendor_info_t *ent;
    292 	const struct pci_attach_args *pa = aux;
    293 	const char *apivstr;
    294 	char buf[256];
    295 
    296 	INIT_DEBUGOUT("ixv_attach: begin");
    297 
    298 	/*
     299 	 * Make sure BUSMASTER is set; on a VM under
     300 	 * KVM it may not be, and that breaks things.
    301 	 */
    302 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    303 
    304 	/* Allocate, clear, and link in our adapter structure */
    305 	adapter = device_private(dev);
    306 	adapter->dev = dev;
    307 	adapter->hw.back = adapter;
    308 	hw = &adapter->hw;
    309 
    310 	adapter->init_locked = ixv_init_locked;
    311 	adapter->stop_locked = ixv_stop;
    312 
    313 	adapter->osdep.pc = pa->pa_pc;
    314 	adapter->osdep.tag = pa->pa_tag;
    315 	if (pci_dma64_available(pa))
    316 		adapter->osdep.dmat = pa->pa_dmat64;
    317 	else
    318 		adapter->osdep.dmat = pa->pa_dmat;
    319 	adapter->osdep.attached = false;
    320 
    321 	ent = ixv_lookup(pa);
    322 
    323 	KASSERT(ent != NULL);
    324 
    325 	aprint_normal(": %s, Version - %s\n",
    326 	    ixv_strings[ent->index], ixv_driver_version);
    327 
    328 	/* Core Lock Init*/
    329 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    330 
    331 	/* Do base PCI setup - map BAR0 */
    332 	if (ixv_allocate_pci_resources(adapter, pa)) {
    333 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    334 		error = ENXIO;
    335 		goto err_out;
    336 	}
    337 
    338 	/* SYSCTL APIs */
    339 	ixv_add_device_sysctls(adapter);
    340 
    341 	/* Set up the timer callout */
    342 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    343 
    344 	/* Save off the information about this board */
    345 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    346 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    347 	hw->vendor_id = PCI_VENDOR(id);
    348 	hw->device_id = PCI_PRODUCT(id);
    349 	hw->revision_id =
    350 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    351 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    352 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    353 
    354 	/* A subset of set_mac_type */
    355 	switch (hw->device_id) {
    356 	case IXGBE_DEV_ID_82599_VF:
    357 		hw->mac.type = ixgbe_mac_82599_vf;
    358 		break;
    359 	case IXGBE_DEV_ID_X540_VF:
    360 		hw->mac.type = ixgbe_mac_X540_vf;
    361 		break;
    362 	case IXGBE_DEV_ID_X550_VF:
    363 		hw->mac.type = ixgbe_mac_X550_vf;
    364 		break;
    365 	case IXGBE_DEV_ID_X550EM_X_VF:
    366 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    367 		break;
    368 	case IXGBE_DEV_ID_X550EM_A_VF:
    369 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    370 		break;
    371 	default:
    372 		/* Shouldn't get here since probe succeeded */
    373 		aprint_error_dev(dev, "Unknown device ID!\n");
    374 		error = ENXIO;
    375 		goto err_out;
    376 		break;
    377 	}
    378 
    379 	ixv_init_device_features(adapter);
    380 
    381 	/* Initialize the shared code */
    382 	error = ixgbe_init_ops_vf(hw);
    383 	if (error) {
    384 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    385 		error = EIO;
    386 		goto err_out;
    387 	}
    388 
    389 	/* Setup the mailbox */
    390 	ixgbe_init_mbx_params_vf(hw);
    391 
    392 	/* Set the right number of segments */
    393 	adapter->num_segs = IXGBE_82599_SCATTER;
    394 
    395 	/* Reset mbox api to 1.0 */
    396 	error = hw->mac.ops.reset_hw(hw);
    397 	if (error == IXGBE_ERR_RESET_FAILED)
    398 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    399 	else if (error)
    400 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    401 		    error);
    402 	if (error) {
    403 		error = EIO;
    404 		goto err_out;
    405 	}
    406 
    407 	error = hw->mac.ops.init_hw(hw);
    408 	if (error) {
    409 		aprint_error_dev(dev, "...init_hw() failed!\n");
    410 		error = EIO;
    411 		goto err_out;
    412 	}
    413 
    414 	/* Negotiate mailbox API version */
    415 	error = ixv_negotiate_api(adapter);
    416 	if (error)
    417 		aprint_normal_dev(dev,
    418 		    "MBX API negotiation failed during attach!\n");
    419 	switch (hw->api_version) {
    420 	case ixgbe_mbox_api_10:
    421 		apivstr = "1.0";
    422 		break;
    423 	case ixgbe_mbox_api_20:
    424 		apivstr = "2.0";
    425 		break;
    426 	case ixgbe_mbox_api_11:
    427 		apivstr = "1.1";
    428 		break;
    429 	case ixgbe_mbox_api_12:
    430 		apivstr = "1.2";
    431 		break;
    432 	case ixgbe_mbox_api_13:
    433 		apivstr = "1.3";
    434 		break;
    435 	default:
    436 		apivstr = "unknown";
    437 		break;
    438 	}
    439 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    440 
     441 	/* If no MAC address was assigned, make a random one */
    442 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    443 		u8 addr[ETHER_ADDR_LEN];
    444 		uint64_t rndval = cprng_strong64();
    445 
    446 		memcpy(addr, &rndval, sizeof(addr));
     447 		addr[0] &= 0xFE;	/* clear the multicast bit */
     448 		addr[0] |= 0x02;	/* set the locally administered bit */
    449 		bcopy(addr, hw->mac.addr, sizeof(addr));
    450 	}
    451 
    452 	/* Register for VLAN events */
    453 #if 0 /* XXX delete after write? */
    454 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    455 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    456 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    457 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    458 #endif
    459 
    460 	/* Sysctls for limiting the amount of work done in the taskqueues */
    461 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    462 	    "max number of rx packets to process",
    463 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    464 
    465 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    466 	    "max number of tx packets to process",
    467 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    468 
    469 	/* Do descriptor calc and sanity checks */
    470 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    471 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    472 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    473 		adapter->num_tx_desc = DEFAULT_TXD;
    474 	} else
    475 		adapter->num_tx_desc = ixv_txd;
    476 
    477 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    478 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    479 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    480 		adapter->num_rx_desc = DEFAULT_RXD;
    481 	} else
    482 		adapter->num_rx_desc = ixv_rxd;
    483 
    484 	/* Setup MSI-X */
    485 	error = ixv_configure_interrupts(adapter);
    486 	if (error)
    487 		goto err_out;
    488 
    489 	/* Allocate our TX/RX Queues */
    490 	if (ixgbe_allocate_queues(adapter)) {
    491 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    492 		error = ENOMEM;
    493 		goto err_out;
    494 	}
    495 
     496 	/* hw.ixv defaults init */
    497 	adapter->enable_aim = ixv_enable_aim;
    498 
    499 	/* Setup OS specific network interface */
    500 	ixv_setup_interface(dev, adapter);
    501 
    502 	error = ixv_allocate_msix(adapter, pa);
    503 	if (error) {
    504 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    505 		goto err_late;
    506 	}
    507 
    508 	/* Do the stats setup */
    509 	ixv_save_stats(adapter);
    510 	ixv_init_stats(adapter);
    511 	ixv_add_stats_sysctls(adapter);
    512 
    513 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    514 		ixgbe_netmap_attach(adapter);
    515 
    516 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    517 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    518 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    519 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    520 
    521 	INIT_DEBUGOUT("ixv_attach: end");
    522 	adapter->osdep.attached = true;
    523 
    524 	return;
    525 
    526 err_late:
    527 	ixgbe_free_transmit_structures(adapter);
    528 	ixgbe_free_receive_structures(adapter);
    529 	free(adapter->queues, M_DEVBUF);
    530 err_out:
    531 	ixv_free_pci_resources(adapter);
    532 	IXGBE_CORE_LOCK_DESTROY(adapter);
    533 
    534 	return;
    535 } /* ixv_attach */
    536 
    537 /************************************************************************
    538  * ixv_detach - Device removal routine
    539  *
    540  *   Called when the driver is being removed.
    541  *   Stops the adapter and deallocates all the resources
    542  *   that were allocated for driver operation.
    543  *
    544  *   return 0 on success, positive on failure
    545  ************************************************************************/
    546 static int
    547 ixv_detach(device_t dev, int flags)
    548 {
    549 	struct adapter  *adapter = device_private(dev);
    550 	struct ix_queue *que = adapter->queues;
    551 	struct tx_ring *txr = adapter->tx_rings;
    552 	struct rx_ring *rxr = adapter->rx_rings;
    553 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    554 
    555 	INIT_DEBUGOUT("ixv_detach: begin");
    556 	if (adapter->osdep.attached == false)
    557 		return 0;
    558 
    559 	/* Stop the interface. Callouts are stopped in it. */
    560 	ixv_ifstop(adapter->ifp, 1);
    561 
    562 #if NVLAN > 0
     563 	/* Make sure VLANs are not using the driver */
    564 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    565 		;	/* nothing to do: no VLANs */
    566 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    567 		vlan_ifdetach(adapter->ifp);
    568 	else {
    569 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    570 		return EBUSY;
    571 	}
    572 #endif
    573 
    574 	IXGBE_CORE_LOCK(adapter);
    575 	ixv_stop(adapter);
    576 	IXGBE_CORE_UNLOCK(adapter);
    577 
    578 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    579 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    580 			softint_disestablish(txr->txr_si);
    581 		softint_disestablish(que->que_si);
    582 	}
    583 
    584 	/* Drain the Mailbox(link) queue */
    585 	softint_disestablish(adapter->link_si);
    586 
    587 	/* Unregister VLAN events */
    588 #if 0 /* XXX msaitoh delete after write? */
    589 	if (adapter->vlan_attach != NULL)
    590 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    591 	if (adapter->vlan_detach != NULL)
    592 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    593 #endif
    594 
    595 	ether_ifdetach(adapter->ifp);
    596 	callout_halt(&adapter->timer, NULL);
    597 
    598 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    599 		netmap_detach(adapter->ifp);
    600 
    601 	ixv_free_pci_resources(adapter);
    602 #if 0 /* XXX the NetBSD port is probably missing something here */
    603 	bus_generic_detach(dev);
    604 #endif
    605 	if_detach(adapter->ifp);
    606 	if_percpuq_destroy(adapter->ipq);
    607 
    608 	sysctl_teardown(&adapter->sysctllog);
    609 	evcnt_detach(&adapter->handleq);
    610 	evcnt_detach(&adapter->req);
    611 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    612 	evcnt_detach(&adapter->mbuf_defrag_failed);
    613 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    614 	evcnt_detach(&adapter->einval_tx_dma_setup);
    615 	evcnt_detach(&adapter->other_tx_dma_setup);
    616 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    617 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    618 	evcnt_detach(&adapter->watchdog_events);
    619 	evcnt_detach(&adapter->tso_err);
    620 	evcnt_detach(&adapter->link_irq);
    621 
    622 	txr = adapter->tx_rings;
    623 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    624 		evcnt_detach(&adapter->queues[i].irqs);
    625 		evcnt_detach(&txr->no_desc_avail);
    626 		evcnt_detach(&txr->total_packets);
    627 		evcnt_detach(&txr->tso_tx);
    628 #ifndef IXGBE_LEGACY_TX
    629 		evcnt_detach(&txr->pcq_drops);
    630 #endif
    631 
    632 		evcnt_detach(&rxr->rx_packets);
    633 		evcnt_detach(&rxr->rx_bytes);
    634 		evcnt_detach(&rxr->rx_copies);
    635 		evcnt_detach(&rxr->no_jmbuf);
    636 		evcnt_detach(&rxr->rx_discarded);
    637 	}
    638 	evcnt_detach(&stats->ipcs);
    639 	evcnt_detach(&stats->l4cs);
    640 	evcnt_detach(&stats->ipcs_bad);
    641 	evcnt_detach(&stats->l4cs_bad);
    642 
    643 	/* Packet Reception Stats */
    644 	evcnt_detach(&stats->vfgorc);
    645 	evcnt_detach(&stats->vfgprc);
    646 	evcnt_detach(&stats->vfmprc);
    647 
    648 	/* Packet Transmission Stats */
    649 	evcnt_detach(&stats->vfgotc);
    650 	evcnt_detach(&stats->vfgptc);
    651 
    652 	ixgbe_free_transmit_structures(adapter);
    653 	ixgbe_free_receive_structures(adapter);
    654 	free(adapter->queues, M_DEVBUF);
    655 
    656 	IXGBE_CORE_LOCK_DESTROY(adapter);
    657 
    658 	return (0);
    659 } /* ixv_detach */
    660 
    661 /************************************************************************
    662  * ixv_init_locked - Init entry point
    663  *
     664  *   Used in two ways: it is called by the stack (via ixv_init())
     665  *   as the init entry point of the network interface, and by the
     666  *   driver as a hw/sw initialization routine to get back to a
     667  *   consistent state.
     668  *
     669  *   Must be called with the core lock held.
    670  ************************************************************************/
    671 static void
    672 ixv_init_locked(struct adapter *adapter)
    673 {
    674 	struct ifnet	*ifp = adapter->ifp;
    675 	device_t 	dev = adapter->dev;
    676 	struct ixgbe_hw *hw = &adapter->hw;
    677 	int             error = 0;
    678 
    679 	INIT_DEBUGOUT("ixv_init_locked: begin");
    680 	KASSERT(mutex_owned(&adapter->core_mtx));
    681 	hw->adapter_stopped = FALSE;
    682 	hw->mac.ops.stop_adapter(hw);
    683 	callout_stop(&adapter->timer);
    684 
    685 	/* reprogram the RAR[0] in case user changed it. */
    686 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    687 
     688 	/* Get the latest MAC address; the user may have set an LAA */
    689 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    690 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    691 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    692 
    693 	/* Prepare transmit descriptors and buffers */
    694 	if (ixgbe_setup_transmit_structures(adapter)) {
    695 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    696 		ixv_stop(adapter);
    697 		return;
    698 	}
    699 
    700 	/* Reset VF and renegotiate mailbox API version */
    701 	hw->mac.ops.reset_hw(hw);
    702 	error = ixv_negotiate_api(adapter);
    703 	if (error)
    704 		device_printf(dev,
    705 		    "Mailbox API negotiation failed in init_locked!\n");
    706 
    707 	ixv_initialize_transmit_units(adapter);
    708 
    709 	/* Setup Multicast table */
    710 	ixv_set_multi(adapter);
    711 
    712 	/*
    713 	 * Determine the correct mbuf pool
    714 	 * for doing jumbo/headersplit
    715 	 */
    716 	if (ifp->if_mtu > ETHERMTU)
    717 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    718 	else
    719 		adapter->rx_mbuf_sz = MCLBYTES;
    720 
    721 	/* Prepare receive descriptors and buffers */
    722 	if (ixgbe_setup_receive_structures(adapter)) {
    723 		device_printf(dev, "Could not setup receive structures\n");
    724 		ixv_stop(adapter);
    725 		return;
    726 	}
    727 
    728 	/* Configure RX settings */
    729 	ixv_initialize_receive_units(adapter);
    730 
    731 #if 0 /* XXX isn't it required? -- msaitoh  */
    732 	/* Set the various hardware offload abilities */
    733 	ifp->if_hwassist = 0;
    734 	if (ifp->if_capenable & IFCAP_TSO4)
    735 		ifp->if_hwassist |= CSUM_TSO;
    736 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    737 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    738 #if __FreeBSD_version >= 800000
    739 		ifp->if_hwassist |= CSUM_SCTP;
    740 #endif
    741 	}
    742 #endif
    743 
    744 	/* Set up VLAN offload and filter */
    745 	ixv_setup_vlan_support(adapter);
    746 
    747 	/* Set up MSI-X routing */
    748 	ixv_configure_ivars(adapter);
    749 
    750 	/* Set up auto-mask */
    751 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    752 
    753 	/* Set moderation on the Link interrupt */
    754 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    755 
    756 	/* Stats init */
    757 	ixv_init_stats(adapter);
    758 
    759 	/* Config/Enable Link */
    760 	hw->mac.get_link_status = TRUE;
    761 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    762 	    FALSE);
    763 
    764 	/* Start watchdog */
    765 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    766 
    767 	/* And now turn on interrupts */
    768 	ixv_enable_intr(adapter);
    769 
    770 	/* Now inform the stack we're ready */
    771 	ifp->if_flags |= IFF_RUNNING;
    772 	ifp->if_flags &= ~IFF_OACTIVE;
    773 
    774 	return;
    775 } /* ixv_init_locked */
    776 
    777 /*
    778  * MSI-X Interrupt Handlers and Tasklets
    779  */
    780 
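         /*
          * ixv_enable_queue/ixv_disable_queue: writing a queue's bit to
          * VTEIMS unmasks (enables) its interrupt; writing the same bit
          * to VTEIMC masks (disables) it.
          */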
    781 static inline void
    782 ixv_enable_queue(struct adapter *adapter, u32 vector)
    783 {
    784 	struct ixgbe_hw *hw = &adapter->hw;
    785 	u32             queue = 1 << vector;
    786 	u32             mask;
    787 
    788 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    789 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    790 } /* ixv_enable_queue */
    791 
    792 static inline void
    793 ixv_disable_queue(struct adapter *adapter, u32 vector)
    794 {
    795 	struct ixgbe_hw *hw = &adapter->hw;
     796 	u64             queue = (u64)1 << vector;
    797 	u32             mask;
    798 
    799 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    800 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    801 } /* ixv_disable_queue */
    802 
    803 static inline void
    804 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    805 {
    806 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    807 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    808 } /* ixv_rearm_queues */
    809 
    810 
    811 /************************************************************************
     812  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    813  ************************************************************************/
    814 static int
    815 ixv_msix_que(void *arg)
    816 {
    817 	struct ix_queue	*que = arg;
    818 	struct adapter  *adapter = que->adapter;
    819 	struct tx_ring	*txr = que->txr;
    820 	struct rx_ring	*rxr = que->rxr;
    821 	bool		more;
    822 	u32		newitr = 0;
    823 
    824 	ixv_disable_queue(adapter, que->msix);
    825 	++que->irqs.ev_count;
    826 
    827 #ifdef __NetBSD__
    828 	/* Don't run ixgbe_rxeof in interrupt context */
    829 	more = true;
    830 #else
    831 	more = ixgbe_rxeof(que);
    832 #endif
    833 
    834 	IXGBE_TX_LOCK(txr);
    835 	ixgbe_txeof(txr);
    836 	IXGBE_TX_UNLOCK(txr);
    837 
    838 	/* Do AIM now? */
    839 
    840 	if (adapter->enable_aim == false)
    841 		goto no_calc;
    842 	/*
    843 	 * Do Adaptive Interrupt Moderation:
    844 	 *  - Write out last calculated setting
    845 	 *  - Calculate based on average size over
    846 	 *    the last interval.
    847 	 */
    848 	if (que->eitr_setting)
    849 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    850 		    que->eitr_setting);
    851 
    852 	que->eitr_setting = 0;
    853 
    854 	/* Idle, do nothing */
    855 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    856 		goto no_calc;
    857 
    858 	if ((txr->bytes) && (txr->packets))
    859 		newitr = txr->bytes/txr->packets;
    860 	if ((rxr->bytes) && (rxr->packets))
    861 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    862 	newitr += 24; /* account for hardware frame, crc */
    863 
    864 	/* set an upper boundary */
    865 	newitr = min(newitr, 3000);
    866 
    867 	/* Be nice to the mid range */
    868 	if ((newitr > 300) && (newitr < 1200))
    869 		newitr = (newitr / 3);
    870 	else
    871 		newitr = (newitr / 2);
    872 
    873 	newitr |= newitr << 16;
    874 
    875 	/* save for next interrupt */
    876 	que->eitr_setting = newitr;
    877 
    878 	/* Reset state */
    879 	txr->bytes = 0;
    880 	txr->packets = 0;
    881 	rxr->bytes = 0;
    882 	rxr->packets = 0;
    883 
    884 no_calc:
    885 	if (more)
    886 		softint_schedule(que->que_si);
    887 	else /* Re-enable this interrupt */
    888 		ixv_enable_queue(adapter, que->msix);
    889 
    890 	return 1;
    891 } /* ixv_msix_que */
    892 
    893 /************************************************************************
    894  * ixv_msix_mbx
    895  ************************************************************************/
    896 static int
    897 ixv_msix_mbx(void *arg)
    898 {
    899 	struct adapter	*adapter = arg;
    900 	struct ixgbe_hw *hw = &adapter->hw;
    901 	u32		reg;
    902 
    903 	++adapter->link_irq.ev_count;
    904 
    905 	/* First get the cause */
    906 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    907 	/* Clear interrupt with write */
    908 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    909 
    910 	/* Link status change */
    911 	if (reg & IXGBE_EICR_LSC)
    912 		softint_schedule(adapter->link_si);
    913 
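         	/* Re-enable the mailbox ("other" cause) interrupt */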
    914 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    915 
    916 	return 1;
    917 } /* ixv_msix_mbx */
    918 
    919 /************************************************************************
    920  * ixv_media_status - Media Ioctl callback
    921  *
    922  *   Called whenever the user queries the status of
    923  *   the interface using ifconfig.
    924  ************************************************************************/
    925 static void
    926 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    927 {
    928 	struct adapter *adapter = ifp->if_softc;
    929 
    930 	INIT_DEBUGOUT("ixv_media_status: begin");
    931 	IXGBE_CORE_LOCK(adapter);
    932 	ixv_update_link_status(adapter);
    933 
    934 	ifmr->ifm_status = IFM_AVALID;
    935 	ifmr->ifm_active = IFM_ETHER;
    936 
    937 	if (!adapter->link_active) {
    938 		ifmr->ifm_active |= IFM_NONE;
    939 		IXGBE_CORE_UNLOCK(adapter);
    940 		return;
    941 	}
    942 
    943 	ifmr->ifm_status |= IFM_ACTIVE;
    944 
    945 	switch (adapter->link_speed) {
     946 	case IXGBE_LINK_SPEED_10GB_FULL:
     947 		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
     948 		break;
     949 	case IXGBE_LINK_SPEED_1GB_FULL:
     950 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
     951 		break;
     952 	case IXGBE_LINK_SPEED_100_FULL:
     953 		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
     954 		break;
     955 	case IXGBE_LINK_SPEED_10_FULL:
     956 		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
     957 		break;
    958 	}
    959 
    960 	IXGBE_CORE_UNLOCK(adapter);
    961 
    962 	return;
    963 } /* ixv_media_status */
    964 
    965 /************************************************************************
    966  * ixv_media_change - Media Ioctl callback
    967  *
    968  *   Called when the user changes speed/duplex using
     969  *   the media/mediaopt options of ifconfig.
    970  ************************************************************************/
    971 static int
    972 ixv_media_change(struct ifnet *ifp)
    973 {
    974 	struct adapter *adapter = ifp->if_softc;
    975 	struct ifmedia *ifm = &adapter->media;
    976 
    977 	INIT_DEBUGOUT("ixv_media_change: begin");
    978 
    979 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    980 		return (EINVAL);
    981 
    982 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    983 	case IFM_AUTO:
    984 		break;
    985 	default:
    986 		device_printf(adapter->dev, "Only auto media type\n");
    987 		return (EINVAL);
    988 	}
    989 
    990 	return (0);
    991 } /* ixv_media_change */
    992 
    993 
    994 /************************************************************************
    995  * ixv_negotiate_api
    996  *
    997  *   Negotiate the Mailbox API with the PF;
    998  *   start with the most featured API first.
    999  ************************************************************************/
   1000 static int
   1001 ixv_negotiate_api(struct adapter *adapter)
   1002 {
   1003 	struct ixgbe_hw *hw = &adapter->hw;
   1004 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1005 	                              ixgbe_mbox_api_10,
   1006 	                              ixgbe_mbox_api_unknown };
   1007 	int             i = 0;
   1008 
   1009 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1010 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1011 			return (0);
   1012 		i++;
   1013 	}
   1014 
   1015 	return (EINVAL);
   1016 } /* ixv_negotiate_api */
   1017 
   1018 
   1019 /************************************************************************
   1020  * ixv_set_multi - Multicast Update
   1021  *
   1022  *   Called whenever multicast address list is updated.
   1023  ************************************************************************/
   1024 static void
   1025 ixv_set_multi(struct adapter *adapter)
   1026 {
   1027 	struct ether_multi *enm;
   1028 	struct ether_multistep step;
   1029 	struct ethercom *ec = &adapter->osdep.ec;
   1030 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1031 	u8                 *update_ptr;
   1032 	int                mcnt = 0;
   1033 
   1034 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1035 
   1036 	ETHER_FIRST_MULTI(step, ec, enm);
   1037 	while (enm != NULL) {
   1038 		bcopy(enm->enm_addrlo,
   1039 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1040 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1041 		mcnt++;
   1042 		/* XXX This might be required --msaitoh */
   1043 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1044 			break;
   1045 		ETHER_NEXT_MULTI(step, enm);
   1046 	}
   1047 
   1048 	update_ptr = mta;
   1049 
   1050 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1051 	    ixv_mc_array_itr, TRUE);
   1052 
   1053 	return;
   1054 } /* ixv_set_multi */
   1055 
   1056 /************************************************************************
   1057  * ixv_mc_array_itr
   1058  *
   1059  *   An iterator function needed by the multicast shared code.
   1060  *   It feeds the shared code routine the addresses in the
   1061  *   array of ixv_set_multi() one by one.
   1062  ************************************************************************/
   1063 static u8 *
   1064 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1065 {
   1066 	u8 *addr = *update_ptr;
   1067 	u8 *newptr;
   1068 	*vmdq = 0;
   1069 
   1070 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1071 	*update_ptr = newptr;
   1072 
   1073 	return addr;
   1074 } /* ixv_mc_array_itr */
   1075 
   1076 /************************************************************************
   1077  * ixv_local_timer - Timer routine
   1078  *
   1079  *   Checks for link status, updates statistics,
   1080  *   and runs the watchdog check.
   1081  ************************************************************************/
   1082 static void
   1083 ixv_local_timer(void *arg)
   1084 {
   1085 	struct adapter *adapter = arg;
   1086 
   1087 	IXGBE_CORE_LOCK(adapter);
   1088 	ixv_local_timer_locked(adapter);
   1089 	IXGBE_CORE_UNLOCK(adapter);
   1090 }
   1091 
   1092 static void
   1093 ixv_local_timer_locked(void *arg)
   1094 {
   1095 	struct adapter	*adapter = arg;
   1096 	device_t	dev = adapter->dev;
   1097 	struct ix_queue	*que = adapter->queues;
   1098 	u64		queues = 0;
   1099 	int		hung = 0;
   1100 
   1101 	KASSERT(mutex_owned(&adapter->core_mtx));
   1102 
   1103 	ixv_check_link(adapter);
   1104 
   1105 	/* Stats Update */
   1106 	ixv_update_stats(adapter);
   1107 
   1108 	/*
   1109 	 * Check the TX queues status
   1110 	 *      - mark hung queues so we don't schedule on them
   1111 	 *      - watchdog only if all queues show hung
   1112 	 */
   1113 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1114 		/* Keep track of queues with work for soft irq */
   1115 		if (que->txr->busy)
   1116 			queues |= ((u64)1 << que->me);
   1117 		/*
    1118 		 * Each time txeof runs without cleaning while there
    1119 		 * are uncleaned descriptors, it increments busy. If
    1120 		 * busy reaches the MAX we declare the queue hung.
   1121 		 */
   1122 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1123 			++hung;
   1124 			/* Mark the queue as inactive */
   1125 			adapter->active_queues &= ~((u64)1 << que->me);
   1126 			continue;
   1127 		} else {
   1128 			/* Check if we've come back from hung */
   1129 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1130 				adapter->active_queues |= ((u64)1 << que->me);
   1131 		}
   1132 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1133 			device_printf(dev,
   1134 			    "Warning queue %d appears to be hung!\n", i);
   1135 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1136 			++hung;
   1137 		}
   1138 	}
   1139 
   1140 	/* Only truly watchdog if all queues show hung */
   1141 	if (hung == adapter->num_queues)
   1142 		goto watchdog;
   1143 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1144 		ixv_rearm_queues(adapter, queues);
   1145 	}
   1146 
   1147 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1148 
   1149 	return;
   1150 
   1151 watchdog:
   1152 
   1153 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1154 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1155 	adapter->watchdog_events.ev_count++;
   1156 	ixv_init_locked(adapter);
   1157 } /* ixv_local_timer */
   1158 
   1159 /************************************************************************
   1160  * ixv_update_link_status - Update OS on link state
   1161  *
   1162  * Note: Only updates the OS on the cached link state.
   1163  *       The real check of the hardware only happens with
   1164  *       a link interrupt.
   1165  ************************************************************************/
   1166 static void
   1167 ixv_update_link_status(struct adapter *adapter)
   1168 {
   1169 	struct ifnet *ifp = adapter->ifp;
   1170 	device_t     dev = adapter->dev;
   1171 
   1172 	if (adapter->link_up) {
   1173 		if (adapter->link_active == FALSE) {
   1174 			if (bootverbose) {
   1175 				const char *bpsmsg;
   1176 
   1177 				switch (adapter->link_speed) {
   1178 				case IXGBE_LINK_SPEED_10GB_FULL:
   1179 					bpsmsg = "10 Gbps";
   1180 					break;
   1181 				case IXGBE_LINK_SPEED_5GB_FULL:
   1182 					bpsmsg = "5 Gbps";
   1183 					break;
   1184 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1185 					bpsmsg = "2.5 Gbps";
   1186 					break;
   1187 				case IXGBE_LINK_SPEED_1GB_FULL:
   1188 					bpsmsg = "1 Gbps";
   1189 					break;
   1190 				case IXGBE_LINK_SPEED_100_FULL:
   1191 					bpsmsg = "100 Mbps";
   1192 					break;
   1193 				case IXGBE_LINK_SPEED_10_FULL:
   1194 					bpsmsg = "10 Mbps";
   1195 					break;
   1196 				default:
   1197 					bpsmsg = "unknown speed";
   1198 					break;
   1199 				}
   1200 				device_printf(dev, "Link is up %s %s \n",
   1201 				    bpsmsg, "Full Duplex");
   1202 			}
   1203 			adapter->link_active = TRUE;
   1204 			if_link_state_change(ifp, LINK_STATE_UP);
   1205 		}
   1206 	} else { /* Link down */
   1207 		if (adapter->link_active == TRUE) {
   1208 			if (bootverbose)
   1209 				device_printf(dev, "Link is Down\n");
   1210 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1211 			adapter->link_active = FALSE;
   1212 		}
   1213 	}
   1214 
   1215 	return;
   1216 } /* ixv_update_link_status */
   1217 
   1218 
   1219 /************************************************************************
   1220  * ixv_stop - Stop the hardware
   1221  *
   1222  *   Disables all traffic on the adapter by issuing a
    1223  *   global reset on the MAC.
   1224  ************************************************************************/
   1225 static void
   1226 ixv_ifstop(struct ifnet *ifp, int disable)
   1227 {
   1228 	struct adapter *adapter = ifp->if_softc;
   1229 
   1230 	IXGBE_CORE_LOCK(adapter);
   1231 	ixv_stop(adapter);
   1232 	IXGBE_CORE_UNLOCK(adapter);
   1233 }
   1234 
   1235 static void
   1236 ixv_stop(void *arg)
   1237 {
   1238 	struct ifnet    *ifp;
   1239 	struct adapter  *adapter = arg;
   1240 	struct ixgbe_hw *hw = &adapter->hw;
   1241 
   1242 	ifp = adapter->ifp;
   1243 
   1244 	KASSERT(mutex_owned(&adapter->core_mtx));
   1245 
   1246 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1247 	ixv_disable_intr(adapter);
   1248 
   1249 	/* Tell the stack that the interface is no longer active */
   1250 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1251 
   1252 	hw->mac.ops.reset_hw(hw);
   1253 	adapter->hw.adapter_stopped = FALSE;
   1254 	hw->mac.ops.stop_adapter(hw);
   1255 	callout_stop(&adapter->timer);
   1256 
   1257 	/* reprogram the RAR[0] in case user changed it. */
   1258 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1259 
   1260 	return;
   1261 } /* ixv_stop */
   1262 
   1263 
   1264 /************************************************************************
   1265  * ixv_allocate_pci_resources
   1266  ************************************************************************/
   1267 static int
   1268 ixv_allocate_pci_resources(struct adapter *adapter,
   1269     const struct pci_attach_args *pa)
   1270 {
   1271 	pcireg_t	memtype;
   1272 	device_t        dev = adapter->dev;
   1273 	bus_addr_t addr;
   1274 	int flags;
   1275 
   1276 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1277 	switch (memtype) {
   1278 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1279 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1280 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1281 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1282 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1283 			goto map_err;
   1284 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1285 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1286 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1287 		}
   1288 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1289 		     adapter->osdep.mem_size, flags,
   1290 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1291 map_err:
   1292 			adapter->osdep.mem_size = 0;
   1293 			aprint_error_dev(dev, "unable to map BAR0\n");
   1294 			return ENXIO;
   1295 		}
   1296 		break;
   1297 	default:
   1298 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1299 		return ENXIO;
   1300 	}
   1301 
   1302 	/* Pick up the tuneable queues */
   1303 	adapter->num_queues = ixv_num_queues;
   1304 
   1305 	return (0);
   1306 } /* ixv_allocate_pci_resources */
   1307 
   1308 /************************************************************************
   1309  * ixv_free_pci_resources
   1310  ************************************************************************/
   1311 static void
   1312 ixv_free_pci_resources(struct adapter * adapter)
   1313 {
   1314 	struct 		ix_queue *que = adapter->queues;
   1315 	int		rid;
   1316 
   1317 	/*
   1318 	 *  Release all msix queue resources:
   1319 	 */
   1320 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1321 		if (que->res != NULL)
   1322 			pci_intr_disestablish(adapter->osdep.pc,
   1323 			    adapter->osdep.ihs[i]);
   1324 	}
   1325 
   1326 
   1327 	/* Clean the Mailbox interrupt last */
   1328 	rid = adapter->vector;
   1329 
   1330 	if (adapter->osdep.ihs[rid] != NULL) {
   1331 		pci_intr_disestablish(adapter->osdep.pc,
   1332 		    adapter->osdep.ihs[rid]);
   1333 		adapter->osdep.ihs[rid] = NULL;
   1334 	}
   1335 
   1336 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1337 	    adapter->osdep.nintrs);
   1338 
   1339 	if (adapter->osdep.mem_size != 0) {
   1340 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1341 		    adapter->osdep.mem_bus_space_handle,
   1342 		    adapter->osdep.mem_size);
   1343 	}
   1344 
   1345 	return;
   1346 } /* ixv_free_pci_resources */
   1347 
   1348 /************************************************************************
   1349  * ixv_setup_interface
   1350  *
   1351  *   Setup networking device structure and register an interface.
   1352  ************************************************************************/
   1353 static void
   1354 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1355 {
   1356 	struct ethercom *ec = &adapter->osdep.ec;
   1357 	struct ifnet   *ifp;
   1358 
   1359 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1360 
   1361 	ifp = adapter->ifp = &ec->ec_if;
   1362 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1363 	ifp->if_baudrate = IF_Gbps(10);
   1364 	ifp->if_init = ixv_init;
   1365 	ifp->if_stop = ixv_ifstop;
   1366 	ifp->if_softc = adapter;
   1367 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1368 #ifdef IXGBE_MPSAFE
   1369 	ifp->if_extflags = IFEF_START_MPSAFE;
   1370 #endif
   1371 	ifp->if_ioctl = ixv_ioctl;
   1372 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1373 #if 0
   1374 		ixv_start_locked = ixgbe_legacy_start_locked;
   1375 #endif
   1376 	} else {
   1377 		ifp->if_transmit = ixgbe_mq_start;
   1378 #if 0
   1379 		ixv_start_locked = ixgbe_mq_start_locked;
   1380 #endif
   1381 	}
   1382 	ifp->if_start = ixgbe_legacy_start;
   1383 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1384 	IFQ_SET_READY(&ifp->if_snd);
   1385 
   1386 	if_initialize(ifp);
   1387 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1388 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1389 	/*
    1390 	 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
   1391 	 * used.
   1392 	 */
   1393 	if_register(ifp);
   1394 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1395 
   1396 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1397 
   1398 	/*
   1399 	 * Tell the upper layer(s) we support long frames.
   1400 	 */
   1401 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1402 
   1403 	/* Set capability flags */
   1404 	ifp->if_capabilities |= IFCAP_HWCSUM
   1405 	                     |  IFCAP_TSOv4
   1406 	                     |  IFCAP_TSOv6;
   1407 	ifp->if_capenable = 0;
   1408 
   1409 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1410 			    |  ETHERCAP_VLAN_HWCSUM
   1411 			    |  ETHERCAP_JUMBO_MTU
   1412 			    |  ETHERCAP_VLAN_MTU;
   1413 
   1414 	/* Enable the above capabilities by default */
   1415 	ec->ec_capenable = ec->ec_capabilities;
   1416 
    1417 	/* Advertise LRO capability, but don't enable it by default */
   1418 	ifp->if_capabilities |= IFCAP_LRO;
   1419 #if 0
   1420 	ifp->if_capenable = ifp->if_capabilities;
   1421 #endif
   1422 
   1423 	/*
   1424 	 * Specify the media types supported by this adapter and register
   1425 	 * callbacks to update media and link information
   1426 	 */
   1427 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1428 	    ixv_media_status);
   1429 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1430 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1431 
   1432 	return;
   1433 } /* ixv_setup_interface */
   1434 
   1435 
   1436 /************************************************************************
   1437  * ixv_initialize_transmit_units - Enable transmit unit.
   1438  ************************************************************************/
   1439 static void
   1440 ixv_initialize_transmit_units(struct adapter *adapter)
   1441 {
   1442 	struct tx_ring	*txr = adapter->tx_rings;
   1443 	struct ixgbe_hw	*hw = &adapter->hw;
   1444 
   1445 
   1446 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1447 		u64 tdba = txr->txdma.dma_paddr;
   1448 		u32 txctrl, txdctl;
   1449 
   1450 		/* Set WTHRESH to 8, burst writeback */
   1451 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1452 		txdctl |= (8 << 16);
   1453 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1454 
   1455 		/* Set the HW Tx Head and Tail indices */
   1456 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1457 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1458 
   1459 		/* Set Tx Tail register */
   1460 		txr->tail = IXGBE_VFTDT(i);
   1461 
   1462 		/* Set Ring parameters */
   1463 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1464 		    (tdba & 0x00000000ffffffffULL));
   1465 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1466 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1467 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1468 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1469 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1470 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1471 
   1472 		/* Now enable */
   1473 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1474 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1475 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1476 	}
   1477 
   1478 	return;
   1479 } /* ixv_initialize_transmit_units */
   1480 
   1481 
   1482 /************************************************************************
   1483  * ixv_initialize_rss_mapping
   1484  ************************************************************************/
   1485 static void
   1486 ixv_initialize_rss_mapping(struct adapter *adapter)
   1487 {
   1488 	struct ixgbe_hw *hw = &adapter->hw;
   1489 	u32             reta = 0, mrqc, rss_key[10];
   1490 	int             queue_id;
   1491 	int             i, j;
   1492 	u32             rss_hash_config;
   1493 
   1494 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1495 		/* Fetch the configured RSS key */
   1496 		rss_getkey((uint8_t *)&rss_key);
   1497 	} else {
   1498 		/* set up random bits */
   1499 		cprng_fast(&rss_key, sizeof(rss_key));
   1500 	}
   1501 
   1502 	/* Now fill out hash function seeds */
   1503 	for (i = 0; i < 10; i++)
   1504 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1505 
   1506 	/* Set up the redirection table */
   1507 	for (i = 0, j = 0; i < 64; i++, j++) {
   1508 		if (j == adapter->num_queues)
   1509 			j = 0;
   1510 
   1511 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1512 			/*
   1513 			 * Fetch the RSS bucket id for the given indirection
   1514 			 * entry. Cap it at the number of configured buckets
   1515 			 * (which is num_queues.)
   1516 			 */
   1517 			queue_id = rss_get_indirection_to_bucket(i);
   1518 			queue_id = queue_id % adapter->num_queues;
   1519 		} else
   1520 			queue_id = j;
   1521 
   1522 		/*
   1523 		 * The low 8 bits are for hash value (n+0);
   1524 		 * The next 8 bits are for hash value (n+1), etc.
   1525 		 */
   1526 		reta >>= 8;
   1527 		reta |= ((uint32_t)queue_id) << 24;
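         		/* Every fourth entry, flush the accumulated 32-bit word to VFRETA */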
   1528 		if ((i & 3) == 3) {
   1529 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1530 			reta = 0;
   1531 		}
   1532 	}
   1533 
   1534 	/* Perform hash on these packet types */
   1535 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1536 		rss_hash_config = rss_gethashconfig();
   1537 	else {
   1538 		/*
   1539 		 * Disable UDP - IP fragments aren't currently being handled
   1540 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1541 		 * traffic.
   1542 		 */
   1543 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1544 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1545 		                | RSS_HASHTYPE_RSS_IPV6
   1546 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1547 	}
   1548 
   1549 	mrqc = IXGBE_MRQC_RSSEN;
   1550 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1551 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1552 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1553 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1554 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1555 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1556 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1557 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1558 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1559 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1560 		    __func__);
   1561 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1562 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1563 		    __func__);
   1564 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1565 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1566 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1567 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1568 		    __func__);
   1569 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1570 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1571 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1572 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1573 		    __func__);
   1574 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1575 } /* ixv_initialize_rss_mapping */
   1576 
   1577 
   1578 /************************************************************************
   1579  * ixv_initialize_receive_units - Setup receive registers and features.
   1580  ************************************************************************/
   1581 static void
   1582 ixv_initialize_receive_units(struct adapter *adapter)
   1583 {
   1584 	struct	rx_ring	*rxr = adapter->rx_rings;
   1585 	struct ixgbe_hw	*hw = &adapter->hw;
   1586 	struct ifnet	*ifp = adapter->ifp;
   1587 	u32		bufsz, rxcsum, psrtype;
   1588 
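        	/*
        	 * The SRRCTL packet buffer size field is in 1 KB units (hence
        	 * IXGBE_SRRCTL_BSIZEPKT_SHIFT): 4 KB buffers for jumbo MTUs,
        	 * 2 KB otherwise.
        	 */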
   1589 	if (ifp->if_mtu > ETHERMTU)
   1590 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1591 	else
   1592 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1593 
   1594 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1595 	        | IXGBE_PSRTYPE_UDPHDR
   1596 	        | IXGBE_PSRTYPE_IPV4HDR
   1597 	        | IXGBE_PSRTYPE_IPV6HDR
   1598 	        | IXGBE_PSRTYPE_L2HDR;
   1599 
   1600 	if (adapter->num_queues > 1)
   1601 		psrtype |= 1 << 29;
   1602 
   1603 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1604 
   1605 	/* Tell PF our max_frame size */
   1606 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1607 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1608 	}
   1609 
   1610 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1611 		u64 rdba = rxr->rxdma.dma_paddr;
   1612 		u32 reg, rxdctl;
   1613 
   1614 		/* Disable the queue */
   1615 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1616 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1617 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
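        		/* Poll up to 10ms for the disable to take effect. */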
   1618 		for (int j = 0; j < 10; j++) {
   1619 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1620 			    IXGBE_RXDCTL_ENABLE)
   1621 				msec_delay(1);
   1622 			else
   1623 				break;
   1624 		}
   1625 		wmb();
   1626 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1627 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1628 		    (rdba & 0x00000000ffffffffULL));
   1629 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1630 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1631 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1632 
   1633 		/* Reset the ring indices */
   1634 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1635 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1636 
   1637 		/* Set up the SRRCTL register */
   1638 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1639 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1640 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1641 		reg |= bufsz;
   1642 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1643 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1644 
   1645 		/* Capture Rx Tail index */
   1646 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1647 
   1648 		/* Do the queue enabling last */
   1649 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1650 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
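        		/* Poll up to 10ms for the queue to report enabled. */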
   1651 		for (int k = 0; k < 10; k++) {
   1652 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1653 			    IXGBE_RXDCTL_ENABLE)
   1654 				break;
   1655 			msec_delay(1);
   1656 		}
   1657 		wmb();
   1658 
   1659 		/* Set the Tail Pointer */
   1660 		/*
   1661 		 * In netmap mode, we must preserve the buffers made
   1662 		 * available to userspace before the if_init()
   1663 		 * (this is true by default on the TX side, because
   1664 		 * init makes all buffers available to userspace).
   1665 		 *
   1666 		 * netmap_reset() and the device specific routines
   1667 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1668 		 * buffers at the end of the NIC ring, so here we
   1669 		 * must set the RDT (tail) register to make sure
   1670 		 * they are not overwritten.
   1671 		 *
   1672 		 * In this driver the NIC ring starts at RDH = 0,
   1673 		 * RDT points to the last slot available for reception (?),
   1674 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1675 		 */
   1676 #ifdef DEV_NETMAP
   1677 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1678 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1679 			struct netmap_adapter *na = NA(adapter->ifp);
   1680 			struct netmap_kring *kring = &na->rx_rings[i];
   1681 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1682 
   1683 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1684 		} else
   1685 #endif /* DEV_NETMAP */
   1686 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1687 			    adapter->num_rx_desc - 1);
   1688 	}
   1689 
   1690 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1691 
   1692 	ixv_initialize_rss_mapping(adapter);
   1693 
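        	/*
        	 * Setting PCSD makes the hardware report the RSS hash in the
        	 * Rx descriptor instead of the fragment checksum, so it is set
        	 * whenever RSS or Rx checksum offload is in use; IPPCSE (IP
        	 * payload checksum) only applies while PCSD remains clear.
        	 */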
   1694 	if (adapter->num_queues > 1) {
   1695 		/* RSS and RX IPP Checksum are mutually exclusive */
   1696 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1697 	}
   1698 
   1699 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1700 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1701 
   1702 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1703 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1704 
   1705 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1706 
   1707 	return;
   1708 } /* ixv_initialize_receive_units */
   1709 
   1710 /************************************************************************
   1711  * ixv_setup_vlan_support
   1712  ************************************************************************/
   1713 static void
   1714 ixv_setup_vlan_support(struct adapter *adapter)
   1715 {
   1716 	struct ixgbe_hw *hw = &adapter->hw;
   1717 	u32		ctrl, vid, vfta, retry;
   1718 
   1719 	/*
   1720 	 * We get here through init_locked, meaning a soft reset.
   1721 	 * That reset has already cleared the VFTA and other state,
   1722 	 * so if no VLANs have been registered there is nothing
   1723 	 * to do.
   1724 	 */
   1725 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
   1726 		return;
   1727 
   1728 	/* Enable the queues */
   1729 	for (int i = 0; i < adapter->num_queues; i++) {
   1730 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1731 		ctrl |= IXGBE_RXDCTL_VME;
   1732 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
   1733 		/*
   1734 		 * Let Rx path know that it needs to store VLAN tag
   1735 		 * as part of extra mbuf info.
   1736 		 */
   1737 		adapter->rx_rings[i].vtag_strip = TRUE;
   1738 	}
   1739 
   1740 	/*
   1741 	 * A soft reset zeroes out the VFTA, so
   1742 	 * we need to repopulate it now.
   1743 	 */
   1744 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1745 		if (ixv_shadow_vfta[i] == 0)
   1746 			continue;
   1747 		vfta = ixv_shadow_vfta[i];
   1748 		/*
   1749 		 * Reconstruct the VLAN IDs from the
   1750 		 * bits set in each 32-bit word of
   1751 		 * the shadow VFTA.
   1752 		 */
   1753 		for (int j = 0; j < 32; j++) {
   1754 			retry = 0;
   1755 			if ((vfta & (1 << j)) == 0)
   1756 				continue;
   1757 			vid = (i * 32) + j;
   1758 			/* Call the shared code mailbox routine */
   1759 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1760 				if (++retry > 5)
   1761 					break;
   1762 			}
   1763 		}
   1764 	}
   1765 } /* ixv_setup_vlan_support */
   1766 
   1767 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1768 /************************************************************************
   1769  * ixv_register_vlan
   1770  *
   1771  *   Run via a vlan config EVENT, it enables us to use the
   1772  *   HW Filter table since we can get the vlan id. This just
   1773  *   creates the entry in the soft version of the VFTA, init
   1774  *   will repopulate the real table.
   1775  ************************************************************************/
   1776 static void
   1777 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1778 {
   1779 	struct adapter	*adapter = ifp->if_softc;
   1780 	u16		index, bit;
   1781 
   1782 	if (ifp->if_softc != arg) /* Not our event */
   1783 		return;
   1784 
   1785 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1786 		return;
   1787 
   1788 	IXGBE_CORE_LOCK(adapter);
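        	/* The shadow VFTA maps VLAN ID to word = vid / 32, bit = vid % 32. */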
   1789 	index = (vtag >> 5) & 0x7F;
   1790 	bit = vtag & 0x1F;
   1791 	ixv_shadow_vfta[index] |= (1 << bit);
   1792 	/* Re-init to load the changes */
   1793 	ixv_init_locked(adapter);
   1794 	IXGBE_CORE_UNLOCK(adapter);
   1795 } /* ixv_register_vlan */
   1796 
   1797 /************************************************************************
   1798  * ixv_unregister_vlan
   1799  *
   1800  *   Run via a vlan unconfig EVENT, remove our entry
   1801  *   in the soft vfta.
   1802  ************************************************************************/
   1803 static void
   1804 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1805 {
   1806 	struct adapter	*adapter = ifp->if_softc;
   1807 	u16		index, bit;
   1808 
   1809 	if (ifp->if_softc !=  arg)
   1810 		return;
   1811 
   1812 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1813 		return;
   1814 
   1815 	IXGBE_CORE_LOCK(adapter);
   1816 	index = (vtag >> 5) & 0x7F;
   1817 	bit = vtag & 0x1F;
   1818 	ixv_shadow_vfta[index] &= ~(1 << bit);
   1819 	/* Re-init to load the changes */
   1820 	ixv_init_locked(adapter);
   1821 	IXGBE_CORE_UNLOCK(adapter);
   1822 } /* ixv_unregister_vlan */
   1823 #endif
   1824 
   1825 /************************************************************************
   1826  * ixv_enable_intr
   1827  ************************************************************************/
   1828 static void
   1829 ixv_enable_intr(struct adapter *adapter)
   1830 {
   1831 	struct ixgbe_hw *hw = &adapter->hw;
   1832 	struct ix_queue *que = adapter->queues;
   1833 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1834 
   1835 
   1836 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1837 
   1838 	mask = IXGBE_EIMS_ENABLE_MASK;
   1839 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1840 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1841 
   1842 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1843 		ixv_enable_queue(adapter, que->msix);
   1844 
   1845 	IXGBE_WRITE_FLUSH(hw);
   1846 
   1847 	return;
   1848 } /* ixv_enable_intr */
   1849 
   1850 /************************************************************************
   1851  * ixv_disable_intr
   1852  ************************************************************************/
   1853 static void
   1854 ixv_disable_intr(struct adapter *adapter)
   1855 {
   1856 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1857 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1858 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1859 
   1860 	return;
   1861 } /* ixv_disable_intr */
   1862 
   1863 /************************************************************************
   1864  * ixv_set_ivar
   1865  *
   1866  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1867  *    - entry is the register array entry
   1868  *    - vector is the MSI-X vector for this queue
   1869  *    - type is RX/TX/MISC
   1870  ************************************************************************/
   1871 static void
   1872 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1873 {
   1874 	struct ixgbe_hw *hw = &adapter->hw;
   1875 	u32             ivar, index;
   1876 
   1877 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1878 
   1879 	if (type == -1) { /* MISC IVAR */
   1880 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1881 		ivar &= ~0xFF;
   1882 		ivar |= vector;
   1883 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1884 	} else {          /* RX/TX IVARS */
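        		/*
        		 * Each VTIVAR register covers two queues: bits 7:0/15:8
        		 * carry the Rx/Tx vectors for the even-numbered queue and
        		 * bits 23:16/31:24 carry them for the odd-numbered queue.
        		 */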
   1885 		index = (16 * (entry & 1)) + (8 * type);
   1886 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1887 		ivar &= ~(0xFF << index);
   1888 		ivar |= (vector << index);
   1889 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1890 	}
   1891 } /* ixv_set_ivar */
   1892 
   1893 /************************************************************************
   1894  * ixv_configure_ivars
   1895  ************************************************************************/
   1896 static void
   1897 ixv_configure_ivars(struct adapter *adapter)
   1898 {
   1899 	struct ix_queue *que = adapter->queues;
   1900 
   1901 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1902 		/* First the RX queue entry */
   1903 		ixv_set_ivar(adapter, i, que->msix, 0);
   1904 		/* ... and the TX */
   1905 		ixv_set_ivar(adapter, i, que->msix, 1);
   1906 		/* Set an initial value in EITR */
   1907 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1908 		    IXGBE_EITR_DEFAULT);
   1909 	}
   1910 
   1911 	/* For the mailbox interrupt */
   1912 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1913 } /* ixv_configure_ivars */
   1914 
   1915 
   1916 /************************************************************************
   1917  * ixv_save_stats
   1918  *
   1919  *   The VF stats registers never have a truly virgin
   1920  *   starting point, so this routine tries to make an
   1921  *   artificial one, marking ground zero on attach as
   1922  *   it were.
   1923  ************************************************************************/
   1924 static void
   1925 ixv_save_stats(struct adapter *adapter)
   1926 {
   1927 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1928 
   1929 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1930 		stats->saved_reset_vfgprc +=
   1931 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1932 		stats->saved_reset_vfgptc +=
   1933 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1934 		stats->saved_reset_vfgorc +=
   1935 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1936 		stats->saved_reset_vfgotc +=
   1937 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1938 		stats->saved_reset_vfmprc +=
   1939 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1940 	}
   1941 } /* ixv_save_stats */
   1942 
   1943 /************************************************************************
   1944  * ixv_init_stats
   1945  ************************************************************************/
   1946 static void
   1947 ixv_init_stats(struct adapter *adapter)
   1948 {
   1949 	struct ixgbe_hw *hw = &adapter->hw;
   1950 
   1951 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1952 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1953 	adapter->stats.vf.last_vfgorc |=
   1954 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1955 
   1956 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1957 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1958 	adapter->stats.vf.last_vfgotc |=
   1959 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1960 
   1961 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1962 
   1963 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1964 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1965 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1966 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1967 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1968 } /* ixv_init_stats */
   1969 
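        /*
         * The VF statistics registers are 32 bits wide (36 bits for the octet
         * counters, split across the _LSB/_MSB register pairs).  These macros
         * detect hardware counter wrap-around, carry it into the upper bits of
         * the 64-bit event counter, and refresh the low bits with the current
         * hardware value.
         */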
   1970 #define UPDATE_STAT_32(reg, last, count)		\
   1971 {                                                       \
   1972 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   1973 	if (current < (last))				\
   1974 		count.ev_count += 0x100000000LL;	\
   1975 	(last) = current;				\
   1976 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   1977 	count.ev_count |= current;			\
   1978 }
   1979 
   1980 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   1981 {                                                       \
   1982 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   1983 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   1984 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   1985 	if (current < (last))				\
   1986 		count.ev_count += 0x1000000000LL;	\
   1987 	(last) = current;				\
   1988 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   1989 	count.ev_count |= current;			\
   1990 }
   1991 
   1992 /************************************************************************
   1993  * ixv_update_stats - Update the board statistics counters.
   1994  ************************************************************************/
   1995 void
   1996 ixv_update_stats(struct adapter *adapter)
   1997 {
   1998 	struct ixgbe_hw *hw = &adapter->hw;
   1999 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2000 
   2001 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   2002 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   2003 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   2004 	    stats->vfgorc);
   2005 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   2006 	    stats->vfgotc);
   2007 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2008 
   2009 	/* Fill out the OS statistics structure */
   2010 	/*
   2011 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   2012 	 * adapter->stats counters. It's required to make ifconfig -z
   2013 	 * (SOICZIFDATA) work.
   2014 	 */
   2015 } /* ixv_update_stats */
   2016 
   2017 const struct sysctlnode *
   2018 ixv_sysctl_instance(struct adapter *adapter)
   2019 {
   2020 	const char *dvname;
   2021 	struct sysctllog **log;
   2022 	int rc;
   2023 	const struct sysctlnode *rnode;
   2024 
   2025 	log = &adapter->sysctllog;
   2026 	dvname = device_xname(adapter->dev);
   2027 
   2028 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2029 	    0, CTLTYPE_NODE, dvname,
   2030 	    SYSCTL_DESCR("ixv information and settings"),
   2031 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2032 		goto err;
   2033 
   2034 	return rnode;
   2035 err:
   2036 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2037 	return NULL;
   2038 }
   2039 
   2040 static void
   2041 ixv_add_device_sysctls(struct adapter *adapter)
   2042 {
   2043 	struct sysctllog **log;
   2044 	const struct sysctlnode *rnode, *cnode;
   2045 	device_t dev;
   2046 
   2047 	dev = adapter->dev;
   2048 	log = &adapter->sysctllog;
   2049 
   2050 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2051 		aprint_error_dev(dev, "could not create sysctl root\n");
   2052 		return;
   2053 	}
   2054 
   2055 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2056 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2057 	    "debug", SYSCTL_DESCR("Debug Info"),
   2058 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2059 		aprint_error_dev(dev, "could not create sysctl\n");
   2060 
   2061 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2062 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2063 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2064 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2065 		aprint_error_dev(dev, "could not create sysctl\n");
   2066 }
   2067 
   2068 /************************************************************************
   2069  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2070  ************************************************************************/
   2071 static void
   2072 ixv_add_stats_sysctls(struct adapter *adapter)
   2073 {
   2074 	device_t                dev = adapter->dev;
   2075 	struct tx_ring          *txr = adapter->tx_rings;
   2076 	struct rx_ring          *rxr = adapter->rx_rings;
   2077 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2078 	const struct sysctlnode *rnode;
   2079 	struct sysctllog **log = &adapter->sysctllog;
   2080 	const char *xname = device_xname(dev);
   2081 
   2082 	/* Driver Statistics */
   2083 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2084 	    NULL, xname, "Handled queue in softint");
   2085 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2086 	    NULL, xname, "Requeued in softint");
   2087 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2088 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2089 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2090 	    NULL, xname, "m_defrag() failed");
   2091 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2092 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2093 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2094 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2095 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2096 	    NULL, xname, "Driver tx dma hard fail other");
   2097 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2098 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2099 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2100 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2101 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2102 	    NULL, xname, "Watchdog timeouts");
   2103 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2104 	    NULL, xname, "TSO errors");
   2105 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2106 	    NULL, xname, "Link MSI-X IRQ Handled");
   2107 
   2108 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2109 		snprintf(adapter->queues[i].evnamebuf,
   2110 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2111 		    xname, i);
   2112 		snprintf(adapter->queues[i].namebuf,
   2113 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2114 
   2115 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2116 			aprint_error_dev(dev, "could not create sysctl root\n");
   2117 			break;
   2118 		}
   2119 
   2120 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2121 		    0, CTLTYPE_NODE,
   2122 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2123 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2124 			break;
   2125 
   2126 #if 0 /* not yet */
   2127 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2128 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2129 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2130 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2131 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2132 			break;
   2133 
   2134 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2135 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2136 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2137 			NULL, 0, &(adapter->queues[i].irqs),
   2138 		    0, CTL_CREATE, CTL_EOL) != 0)
   2139 			break;
   2140 
   2141 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2142 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2143 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2144 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2145 		    0, CTL_CREATE, CTL_EOL) != 0)
   2146 			break;
   2147 
   2148 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2149 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2150 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2151 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2152 		    0, CTL_CREATE, CTL_EOL) != 0)
   2153 			break;
   2154 #endif
   2155 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2156 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2157 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2158 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2159 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2160 		    NULL, adapter->queues[i].evnamebuf,
   2161 		    "Queue No Descriptor Available");
   2162 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2163 		    NULL, adapter->queues[i].evnamebuf,
   2164 		    "Queue Packets Transmitted");
   2165 #ifndef IXGBE_LEGACY_TX
   2166 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2167 		    NULL, adapter->queues[i].evnamebuf,
   2168 		    "Packets dropped in pcq");
   2169 #endif
   2170 
   2171 #ifdef LRO
   2172 		struct lro_ctrl *lro = &rxr->lro;
   2173 #endif /* LRO */
   2174 
   2175 #if 0 /* not yet */
   2176 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2177 		    CTLFLAG_READONLY,
   2178 		    CTLTYPE_INT,
   2179 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2180 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2181 		    CTL_CREATE, CTL_EOL) != 0)
   2182 			break;
   2183 
   2184 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2185 		    CTLFLAG_READONLY,
   2186 		    CTLTYPE_INT,
   2187 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2188 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2189 		    CTL_CREATE, CTL_EOL) != 0)
   2190 			break;
   2191 #endif
   2192 
   2193 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2194 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2195 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2196 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2197 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2198 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2199 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2200 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2201 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2202 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2203 #ifdef LRO
   2204 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2205 				CTLFLAG_RD, &lro->lro_queued, 0,
   2206 				"LRO Queued");
   2207 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2208 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2209 				"LRO Flushed");
   2210 #endif /* LRO */
   2211 	}
   2212 
   2213 	/* MAC stats get their own sub node */
   2214 
   2215 	snprintf(stats->namebuf,
   2216 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2217 
   2218 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2219 	    stats->namebuf, "rx csum offload - IP");
   2220 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2221 	    stats->namebuf, "rx csum offload - L4");
   2222 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2223 	    stats->namebuf, "rx csum offload - IP bad");
   2224 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2225 	    stats->namebuf, "rx csum offload - L4 bad");
   2226 
   2227 	/* Packet Reception Stats */
   2228 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2229 	    xname, "Good Packets Received");
   2230 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2231 	    xname, "Good Octets Received");
   2232 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2233 	    xname, "Multicast Packets Received");
   2234 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2235 	    xname, "Good Packets Transmitted");
   2236 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2237 	    xname, "Good Octets Transmitted");
   2238 } /* ixv_add_stats_sysctls */
   2239 
   2240 /************************************************************************
   2241  * ixv_set_sysctl_value
   2242  ************************************************************************/
   2243 static void
   2244 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2245 	const char *description, int *limit, int value)
   2246 {
   2247 	device_t dev =  adapter->dev;
   2248 	struct sysctllog **log;
   2249 	const struct sysctlnode *rnode, *cnode;
   2250 
   2251 	log = &adapter->sysctllog;
   2252 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2253 		aprint_error_dev(dev, "could not create sysctl root\n");
   2254 		return;
   2255 	}
   2256 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2257 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2258 	    name, SYSCTL_DESCR(description),
   2259 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2260 		aprint_error_dev(dev, "could not create sysctl\n");
   2261 	*limit = value;
   2262 } /* ixv_set_sysctl_value */
   2263 
   2264 /************************************************************************
   2265  * ixv_print_debug_info
   2266  *
   2267  *   Called via the "debug" sysctl (ixv_sysctl_debug).
   2268  *   Provides a way to take a look at important statistics
   2269  *   maintained by the driver and hardware.
   2270  ************************************************************************/
   2271 static void
   2272 ixv_print_debug_info(struct adapter *adapter)
   2273 {
   2274 	device_t        dev = adapter->dev;
   2275 	struct ixgbe_hw *hw = &adapter->hw;
   2276 	struct ix_queue *que = adapter->queues;
   2277 	struct rx_ring  *rxr;
   2278 	struct tx_ring  *txr;
   2279 #ifdef LRO
   2280 	struct lro_ctrl *lro;
   2281 #endif /* LRO */
   2282 
   2283 	device_printf(dev, "Error Byte Count = %u \n",
   2284 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2285 
   2286 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2287 		txr = que->txr;
   2288 		rxr = que->rxr;
   2289 #ifdef LRO
   2290 		lro = &rxr->lro;
   2291 #endif /* LRO */
   2292 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2293 		    que->msix, (long)que->irqs.ev_count);
   2294 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2295 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2296 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2297 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2298 #ifdef LRO
   2299 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2300 		    rxr->me, (long long)lro->lro_queued);
   2301 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2302 		    rxr->me, (long long)lro->lro_flushed);
   2303 #endif /* LRO */
   2304 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2305 		    txr->me, (long)txr->total_packets.ev_count);
   2306 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2307 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2308 	}
   2309 
   2310 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2311 	    (long)adapter->link_irq.ev_count);
   2312 } /* ixv_print_debug_info */
   2313 
   2314 /************************************************************************
   2315  * ixv_sysctl_debug
   2316  ************************************************************************/
   2317 static int
   2318 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2319 {
   2320 	struct sysctlnode node;
   2321 	struct adapter *adapter;
   2322 	int            error, result;
   2323 	int            error, result = 0;
   2324 	node = *rnode;
   2325 	node.sysctl_data = &result;
   2326 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2327 
   2328 	if (error || newp == NULL)
   2329 		return error;
   2330 
   2331 	if (result == 1) {
   2332 		adapter = (struct adapter *)node.sysctl_data;
   2333 		adapter = (struct adapter *)rnode->sysctl_data;
   2334 	}
   2335 
   2336 	return 0;
   2337 } /* ixv_sysctl_debug */
   2338 
   2339 /************************************************************************
   2340  * ixv_init_device_features
   2341  ************************************************************************/
   2342 static void
   2343 ixv_init_device_features(struct adapter *adapter)
   2344 {
   2345 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2346 	                  | IXGBE_FEATURE_VF
   2347 	                  | IXGBE_FEATURE_RSS
   2348 	                  | IXGBE_FEATURE_LEGACY_TX;
   2349 
   2350 	/* A tad short on feature flags for VFs, atm. */
   2351 	switch (adapter->hw.mac.type) {
   2352 	case ixgbe_mac_82599_vf:
   2353 		break;
   2354 	case ixgbe_mac_X540_vf:
   2355 		break;
   2356 	case ixgbe_mac_X550_vf:
   2357 	case ixgbe_mac_X550EM_x_vf:
   2358 	case ixgbe_mac_X550EM_a_vf:
   2359 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2360 		break;
   2361 	default:
   2362 		break;
   2363 	}
   2364 
   2365 	/* Enabled by default... */
   2366 	/* Is a virtual function (VF) */
   2367 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2368 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2369 	/* Netmap */
   2370 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2371 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2372 	/* Receive-Side Scaling (RSS) */
   2373 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2374 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2375 	/* Needs advanced context descriptor regardless of offloads req'd */
   2376 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2377 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2378 
   2379 	/* Enabled via sysctl... */
   2380 	/* Legacy (single queue) transmit */
   2381 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2382 	    ixv_enable_legacy_tx)
   2383 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2384 } /* ixv_init_device_features */
   2385 
   2386 /************************************************************************
   2387  * ixv_shutdown - Shutdown entry point
   2388  ************************************************************************/
   2389 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2390 static int
   2391 ixv_shutdown(device_t dev)
   2392 {
   2393 	struct adapter *adapter = device_private(dev);
   2394 	IXGBE_CORE_LOCK(adapter);
   2395 	ixv_stop(adapter);
   2396 	IXGBE_CORE_UNLOCK(adapter);
   2397 
   2398 	return (0);
   2399 } /* ixv_shutdown */
   2400 #endif
   2401 
   2402 static int
   2403 ixv_ifflags_cb(struct ethercom *ec)
   2404 {
   2405 	struct ifnet *ifp = &ec->ec_if;
   2406 	struct adapter *adapter = ifp->if_softc;
   2407 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2408 
   2409 	IXGBE_CORE_LOCK(adapter);
   2410 
   2411 	if (change != 0)
   2412 		adapter->if_flags = ifp->if_flags;
   2413 
   2414 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2415 		rc = ENETRESET;
   2416 
   2417 	IXGBE_CORE_UNLOCK(adapter);
   2418 
   2419 	return rc;
   2420 }
   2421 
   2422 
   2423 /************************************************************************
   2424  * ixv_ioctl - Ioctl entry point
   2425  *
   2426  *   Called when the user wants to configure the interface.
   2427  *
   2428  *   return 0 on success, positive on failure
   2429  ************************************************************************/
   2430 static int
   2431 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2432 {
   2433 	struct adapter	*adapter = ifp->if_softc;
   2434 	struct ifcapreq *ifcr = data;
   2435 	struct ifreq	*ifr = data;
   2436 	int             error = 0;
   2437 	int l4csum_en;
   2438 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2439 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2440 
   2441 	switch (command) {
   2442 	case SIOCSIFFLAGS:
   2443 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2444 		break;
   2445 	case SIOCADDMULTI:
   2446 	case SIOCDELMULTI:
   2447 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2448 		break;
   2449 	case SIOCSIFMEDIA:
   2450 	case SIOCGIFMEDIA:
   2451 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2452 		break;
   2453 	case SIOCSIFCAP:
   2454 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2455 		break;
   2456 	case SIOCSIFMTU:
   2457 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2458 		break;
   2459 	default:
   2460 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2461 		break;
   2462 	}
   2463 
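        	/*
        	 * Media ioctls go straight to ifmedia_ioctl(); everything else
        	 * falls through to ether_ioctl(), and an ENETRESET return
        	 * triggers the hardware reprogramming appropriate to the request.
        	 */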
   2464 	switch (command) {
   2465 	case SIOCSIFMEDIA:
   2466 	case SIOCGIFMEDIA:
   2467 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2468 	case SIOCSIFCAP:
   2469 		/* Layer-4 Rx checksum offload has to be turned on and
   2470 		 * off as a unit.
   2471 		 */
   2472 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2473 		if (l4csum_en != l4csum && l4csum_en != 0)
   2474 			return EINVAL;
   2475 		/*FALLTHROUGH*/
   2476 	case SIOCADDMULTI:
   2477 	case SIOCDELMULTI:
   2478 	case SIOCSIFFLAGS:
   2479 	case SIOCSIFMTU:
   2480 	default:
   2481 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2482 			return error;
   2483 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2484 			;
   2485 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2486 			IXGBE_CORE_LOCK(adapter);
   2487 			ixv_init_locked(adapter);
   2488 			IXGBE_CORE_UNLOCK(adapter);
   2489 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2490 			/*
   2491 			 * Multicast list has changed; set the hardware filter
   2492 			 * accordingly.
   2493 			 */
   2494 			IXGBE_CORE_LOCK(adapter);
   2495 			ixv_disable_intr(adapter);
   2496 			ixv_set_multi(adapter);
   2497 			ixv_enable_intr(adapter);
   2498 			IXGBE_CORE_UNLOCK(adapter);
   2499 		}
   2500 		return 0;
   2501 	}
   2502 } /* ixv_ioctl */
   2503 
   2504 /************************************************************************
   2505  * ixv_init
   2506  ************************************************************************/
   2507 static int
   2508 ixv_init(struct ifnet *ifp)
   2509 {
   2510 	struct adapter *adapter = ifp->if_softc;
   2511 
   2512 	IXGBE_CORE_LOCK(adapter);
   2513 	ixv_init_locked(adapter);
   2514 	IXGBE_CORE_UNLOCK(adapter);
   2515 
   2516 	return 0;
   2517 } /* ixv_init */
   2518 
   2519 
   2520 /************************************************************************
   2521  * ixv_handle_que
   2522  ************************************************************************/
   2523 static void
   2524 ixv_handle_que(void *context)
   2525 {
   2526 	struct ix_queue *que = context;
   2527 	struct adapter  *adapter = que->adapter;
   2528 	struct tx_ring	*txr = que->txr;
   2529 	struct ifnet    *ifp = adapter->ifp;
   2530 	bool		more;
   2531 
   2532 	adapter->handleq.ev_count++;
   2533 
   2534 	if (ifp->if_flags & IFF_RUNNING) {
   2535 		more = ixgbe_rxeof(que);
   2536 		IXGBE_TX_LOCK(txr);
   2537 		ixgbe_txeof(txr);
   2538 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2539 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2540 				ixgbe_mq_start_locked(ifp, txr);
   2541 		/* Only for queue 0 */
   2542 		/* NetBSD still needs this for CBQ */
   2543 		if ((&adapter->queues[0] == que)
   2544 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2545 			ixgbe_legacy_start_locked(ifp, txr);
   2546 		IXGBE_TX_UNLOCK(txr);
   2547 		if (more) {
   2548 			adapter->req.ev_count++;
   2549 			softint_schedule(que->que_si);
   2550 			return;
   2551 		}
   2552 	}
   2553 
   2554 	/* Re-enable this interrupt */
   2555 	ixv_enable_queue(adapter, que->msix);
   2556 
   2557 	return;
   2558 } /* ixv_handle_que */
   2559 
   2560 /************************************************************************
   2561  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2562  ************************************************************************/
   2563 static int
   2564 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2565 {
   2566 	device_t	dev = adapter->dev;
   2567 	struct ix_queue *que = adapter->queues;
   2568 	struct		tx_ring *txr = adapter->tx_rings;
   2569 	int 		error, msix_ctrl, rid, vector = 0;
   2570 	pci_chipset_tag_t pc;
   2571 	pcitag_t	tag;
   2572 	char		intrbuf[PCI_INTRSTR_LEN];
   2573 	char		intr_xname[32];
   2574 	const char	*intrstr = NULL;
   2575 	kcpuset_t	*affinity;
   2576 	int		cpu_id = 0;
   2577 
   2578 	pc = adapter->osdep.pc;
   2579 	tag = adapter->osdep.tag;
   2580 
   2581 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2582 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2583 	    adapter->osdep.nintrs) != 0) {
   2584 		aprint_error_dev(dev,
   2585 		    "failed to allocate MSI-X interrupt\n");
   2586 		return (ENXIO);
   2587 	}
   2588 
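        	/*
        	 * Vectors 0 .. num_queues-1 service the Tx/Rx queue pairs; the
        	 * final vector (set up after this loop) handles the PF mailbox.
        	 */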
   2589 	kcpuset_create(&affinity, false);
   2590 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2591 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2592 		    device_xname(dev), i);
   2593 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2594 		    sizeof(intrbuf));
   2595 #ifdef IXGBE_MPSAFE
   2596 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2597 		    true);
   2598 #endif
   2599 		/* Set the handler function */
   2600 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2601 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2602 		    intr_xname);
   2603 		if (que->res == NULL) {
   2604 			pci_intr_release(pc, adapter->osdep.intrs,
   2605 			    adapter->osdep.nintrs);
   2606 			aprint_error_dev(dev,
   2607 			    "Failed to register QUE handler\n");
   2608 			kcpuset_destroy(affinity);
   2609 			return (ENXIO);
   2610 		}
   2611 		que->msix = vector;
   2612 		adapter->active_queues |= (u64)1 << que->msix;
   2613 
   2614 		cpu_id = i;
   2615 		/* Round-robin affinity */
   2616 		kcpuset_zero(affinity);
   2617 		kcpuset_set(affinity, cpu_id % ncpu);
   2618 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2619 		    NULL);
   2620 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2621 		    intrstr);
   2622 		if (error == 0)
   2623 			aprint_normal(", bound queue %d to cpu %d\n",
   2624 			    i, cpu_id % ncpu);
   2625 		else
   2626 			aprint_normal("\n");
   2627 
   2628 #ifndef IXGBE_LEGACY_TX
   2629 		txr->txr_si
   2630 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2631 			ixgbe_deferred_mq_start, txr);
   2632 #endif
   2633 		que->que_si
   2634 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2635 			ixv_handle_que, que);
   2636 		if (que->que_si == NULL) {
   2637 			aprint_error_dev(dev,
   2638 			    "could not establish software interrupt\n");
   2639 		}
   2640 	}
   2641 
   2642 	/* and Mailbox */
   2643 	cpu_id++;
   2644 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2645 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2646 	    sizeof(intrbuf));
   2647 #ifdef IXGBE_MPSAFE
   2648 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2649 	    true);
   2650 #endif
   2651 	/* Set the mbx handler function */
   2652 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2653 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2654 	    intr_xname);
   2655 	if (adapter->osdep.ihs[vector] == NULL) {
   2656 		adapter->res = NULL;
   2657 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2658 		kcpuset_destroy(affinity);
   2659 		return (ENXIO);
   2660 	}
   2661 	/* Round-robin affinity */
   2662 	kcpuset_zero(affinity);
   2663 	kcpuset_set(affinity, cpu_id % ncpu);
   2664 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   2665 
   2666 	aprint_normal_dev(dev,
   2667 	    "for link, interrupting at %s", intrstr);
   2668 	if (error == 0)
   2669 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2670 	else
   2671 		aprint_normal("\n");
   2672 
   2673 	adapter->vector = vector;
   2674 	/* Tasklets for Mailbox */
   2675 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2676 	    ixv_handle_link, adapter);
   2677 	/*
   2678 	 * Due to a broken design, QEMU will fail to properly
   2679 	 * enable the guest for MSI-X unless the vectors in
   2680 	 * the table are all set up, so we must rewrite the
   2681 	 * ENABLE in the MSI-X control register again at this
   2682 	 * point to cause it to successfully initialize us.
   2683 	 */
   2684 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2685 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2686 		rid += PCI_MSIX_CTL;
   2687 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2688 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2689 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2690 	}
   2691 
   2692 	kcpuset_destroy(affinity);
   2693 	return (0);
   2694 } /* ixv_allocate_msix */
   2695 
   2696 /************************************************************************
   2697  * ixv_configure_interrupts - Setup MSI-X resources
   2698  *
   2699  *   Note: The VF device MUST use MSI-X, there is no fallback.
   2700  ************************************************************************/
   2701 static int
   2702 ixv_configure_interrupts(struct adapter *adapter)
   2703 {
   2704 	device_t dev = adapter->dev;
   2705 	int want, queues, msgs;
   2706 
   2707 	/* Must have at least 2 MSI-X vectors */
   2708 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2709 	if (msgs < 2) {
   2710 		aprint_error_dev(dev, "MSIX config error\n");
   2711 		return (ENXIO);
   2712 	}
   2713 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2714 
   2715 	/* Figure out a reasonable auto config value */
   2716 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2717 
   2718 	if (ixv_num_queues != 0)
   2719 		queues = ixv_num_queues;
   2720 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   2721 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2722 
   2723 	/*
   2724 	 * Want vectors for the queues,
   2725 	 * plus an additional for mailbox.
   2726 	 */
   2727 	want = queues + 1;
   2728 	if (msgs >= want)
   2729 		msgs = want;
   2730 	else {
   2731 		aprint_error_dev(dev,
   2732 		    "MSI-X Configuration Problem, "
   2733 		    "%d vectors but %d queues wanted!\n",
   2734 		    msgs, want);
   2735 		return -1;
   2736 	}
   2737 
   2738 	adapter->msix_mem = (void *)1; /* XXX */
   2739 	aprint_normal_dev(dev,
   2740 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2741 	adapter->num_queues = queues;
   2742 
   2743 	return (0);
   2744 } /* ixv_configure_interrupts */
   2745 
   2746 
   2747 /************************************************************************
   2748  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2749  *
   2750  *   Done outside of interrupt context since the driver might sleep
   2751  ************************************************************************/
   2752 static void
   2753 ixv_handle_link(void *context)
   2754 {
   2755 	struct adapter *adapter = context;
   2756 
   2757 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2758 	    &adapter->link_up, FALSE);
   2759 	ixv_update_link_status(adapter);
   2760 } /* ixv_handle_link */
   2761 
   2762 /************************************************************************
   2763  * ixv_check_link - Used in the local timer to poll for link changes
   2764  ************************************************************************/
   2765 static void
   2766 ixv_check_link(struct adapter *adapter)
   2767 {
   2768 	adapter->hw.mac.get_link_status = TRUE;
   2769 
   2770 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2771 	    &adapter->link_up, FALSE);
   2772 	ixv_update_link_status(adapter);
   2773 } /* ixv_check_link */
   2774