      1 /*$NetBSD: ixv.c,v 1.65 2017/09/15 08:31:32 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 320688 2017-07-05 17:27:03Z erj $*/
     36 
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_inet.h"
     40 #include "opt_inet6.h"
     41 #include "opt_net_mpsafe.h"
     42 #endif
     43 
     44 #include "ixgbe.h"
     45 #include "vlan.h"
     46 
     47 /************************************************************************
     48  * Driver version
     49  ************************************************************************/
     50 char ixv_driver_version[] = "1.5.13-k";
     51 
     52 /************************************************************************
     53  * PCI Device ID Table
     54  *
     55  *   Used by probe to select devices to load on
     56  *   Last field stores an index into ixv_strings
     57  *   Last entry must be all 0s
     58  *
     59  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     60  ************************************************************************/
     61 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     62 {
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     67 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     68 	/* required last entry */
     69 	{0, 0, 0, 0, 0}
     70 };
     71 
     72 /************************************************************************
     73  * Table of branding strings
     74  ************************************************************************/
     75 static const char *ixv_strings[] = {
     76 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     77 };
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int      ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int      ixv_detach(device_t, int);
     85 #if 0
     86 static int      ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void     ixv_stop(void *);
     94 static void     ixv_init_device_features(struct adapter *);
     95 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int      ixv_media_change(struct ifnet *);
     97 static int      ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int      ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int      ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void     ixv_local_timer(void *);
    104 static void     ixv_local_timer_locked(void *);
    105 static void     ixv_setup_interface(device_t, struct adapter *);
    106 static int      ixv_negotiate_api(struct adapter *);
    107 
    108 static void     ixv_initialize_transmit_units(struct adapter *);
    109 static void     ixv_initialize_receive_units(struct adapter *);
    110 static void     ixv_initialize_rss_mapping(struct adapter *);
    111 static void     ixv_check_link(struct adapter *);
    112 
    113 static void     ixv_enable_intr(struct adapter *);
    114 static void     ixv_disable_intr(struct adapter *);
    115 static void     ixv_set_multi(struct adapter *);
    116 static void     ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    134 		    const char *, int *, int);
    135 
    136 /* The MSI-X Interrupt handlers */
    137 static int	ixv_msix_que(void *);
    138 static int	ixv_msix_mbx(void *);
    139 
    140 /* Deferred interrupt tasklets */
    141 static void	ixv_handle_que(void *);
    142 static void     ixv_handle_link(void *);
    143 
    144 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    145 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    146 
    147 /************************************************************************
    148  * FreeBSD Device Interface Entry Points
    149  ************************************************************************/
    150 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    151     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    152     DVF_DETACH_SHUTDOWN);
    153 
    154 #if 0
    155 static driver_t ixv_driver = {
    156 	"ixv", ixv_methods, sizeof(struct adapter),
    157 };
    158 
    159 devclass_t ixv_devclass;
    160 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    161 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    162 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    163 #endif
    164 
    165 /*
    166  * TUNEABLE PARAMETERS:
    167  */
    168 
    169 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    170 static int ixv_num_queues = 0;
    171 #define	TUNABLE_INT(__x, __y)
    172 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
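         /*
          * Note: on NetBSD the TUNABLE_INT() macro above expands to nothing,
          * so the variables below are compile-time defaults rather than
          * run-time tunables; some of them (e.g. the rx/tx processing
          * limits) are additionally exposed through sysctl in ixv_attach().
          */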
    173 
    174 /*
     175  * AIM: Adaptive Interrupt Moderation.
     176  * When enabled, the interrupt rate is varied
     177  * over time based on the traffic seen on
     178  * that interrupt vector.
    179  */
    180 static bool ixv_enable_aim = false;
    181 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    182 
    183 /* How many packets rxeof tries to clean at a time */
    184 static int ixv_rx_process_limit = 256;
    185 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    186 
    187 /* How many packets txeof tries to clean at a time */
    188 static int ixv_tx_process_limit = 256;
    189 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    190 
    191 /*
     192  * Number of TX descriptors per ring;
     193  * set higher than RX, as this seems to be
     194  * the better performing choice.
    195  */
    196 static int ixv_txd = PERFORM_TXD;
    197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    198 
    199 /* Number of RX descriptors per ring */
    200 static int ixv_rxd = PERFORM_RXD;
    201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    202 
    203 /* Legacy Transmit (single queue) */
    204 static int ixv_enable_legacy_tx = 0;
    205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    206 
    207 #ifdef NET_MPSAFE
    208 #define IXGBE_MPSAFE		1
    209 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    210 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    211 #else
    212 #define IXGBE_CALLOUT_FLAGS	0
    213 #define IXGBE_SOFTINFT_FLAGS	0
    214 #endif
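         /*
          * These flags are handed to callout_init() and softint_establish()
          * when the timer callout and the deferred-work softints are set up
          * (for example callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS)
          * in ixv_attach() below), so they run MP-safe when NET_MPSAFE is
          * defined.
          */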
    215 
    216 #if 0
    217 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    218 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    219 #endif
    220 
    221 /************************************************************************
    222  * ixv_probe - Device identification routine
    223  *
     224  *   Determines if the driver should be loaded on
     225  *   this adapter based on its PCI vendor/device ID.
     226  *
     227  *   return 1 on a match, 0 otherwise
    228  ************************************************************************/
    229 static int
    230 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    231 {
    232 #ifdef __HAVE_PCI_MSI_MSIX
    233 	const struct pci_attach_args *pa = aux;
    234 
    235 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    236 #else
    237 	return 0;
    238 #endif
    239 } /* ixv_probe */
    240 
    241 static ixgbe_vendor_info_t *
    242 ixv_lookup(const struct pci_attach_args *pa)
    243 {
    244 	ixgbe_vendor_info_t *ent;
    245 	pcireg_t subid;
    246 
    247 	INIT_DEBUGOUT("ixv_lookup: begin");
    248 
    249 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    250 		return NULL;
    251 
    252 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    253 
    254 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    255 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    256 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    257 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    258 		     (ent->subvendor_id == 0)) &&
    259 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    260 		     (ent->subdevice_id == 0))) {
    261 			return ent;
    262 		}
    263 	}
    264 
    265 	return NULL;
    266 }
    267 
    268 /************************************************************************
    269  * ixv_attach - Device initialization routine
    270  *
    271  *   Called when the driver is being loaded.
    272  *   Identifies the type of hardware, allocates all resources
    273  *   and initializes the hardware.
    274  *
     275  *   Returns nothing; attach failures are reported with aprint_error_dev().
    276  ************************************************************************/
    277 static void
    278 ixv_attach(device_t parent, device_t dev, void *aux)
    279 {
    280 	struct adapter *adapter;
    281 	struct ixgbe_hw *hw;
    282 	int             error = 0;
    283 	pcireg_t	id, subid;
    284 	ixgbe_vendor_info_t *ent;
    285 	const struct pci_attach_args *pa = aux;
    286 	const char *apivstr;
    287 	char buf[256];
    288 
    289 	INIT_DEBUGOUT("ixv_attach: begin");
    290 
    291 	/*
     292 	 * Make sure BUSMASTER is set; on a VM under
     293 	 * KVM it may not be, which breaks things.
    294 	 */
    295 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    296 
    297 	/* Allocate, clear, and link in our adapter structure */
    298 	adapter = device_private(dev);
    299 	adapter->dev = dev;
    300 	adapter->hw.back = adapter;
    301 	hw = &adapter->hw;
    302 
    303 	adapter->init_locked = ixv_init_locked;
    304 	adapter->stop_locked = ixv_stop;
    305 
    306 	adapter->osdep.pc = pa->pa_pc;
    307 	adapter->osdep.tag = pa->pa_tag;
    308 	if (pci_dma64_available(pa))
    309 		adapter->osdep.dmat = pa->pa_dmat64;
    310 	else
    311 		adapter->osdep.dmat = pa->pa_dmat;
    312 	adapter->osdep.attached = false;
    313 
    314 	ent = ixv_lookup(pa);
    315 
    316 	KASSERT(ent != NULL);
    317 
    318 	aprint_normal(": %s, Version - %s\n",
    319 	    ixv_strings[ent->index], ixv_driver_version);
    320 
    321 	/* Core Lock Init*/
    322 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    323 
    324 	/* Do base PCI setup - map BAR0 */
    325 	if (ixv_allocate_pci_resources(adapter, pa)) {
    326 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    327 		error = ENXIO;
    328 		goto err_out;
    329 	}
    330 
    331 	/* SYSCTL APIs */
    332 	ixv_add_device_sysctls(adapter);
    333 
    334 	/* Set up the timer callout */
    335 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    336 
    337 	/* Save off the information about this board */
    338 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    339 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    340 	hw->vendor_id = PCI_VENDOR(id);
    341 	hw->device_id = PCI_PRODUCT(id);
    342 	hw->revision_id =
    343 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    344 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    345 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    346 
    347 	/* A subset of set_mac_type */
    348 	switch (hw->device_id) {
    349 	case IXGBE_DEV_ID_82599_VF:
    350 		hw->mac.type = ixgbe_mac_82599_vf;
    351 		break;
    352 	case IXGBE_DEV_ID_X540_VF:
    353 		hw->mac.type = ixgbe_mac_X540_vf;
    354 		break;
    355 	case IXGBE_DEV_ID_X550_VF:
    356 		hw->mac.type = ixgbe_mac_X550_vf;
    357 		break;
    358 	case IXGBE_DEV_ID_X550EM_X_VF:
    359 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    360 		break;
    361 	case IXGBE_DEV_ID_X550EM_A_VF:
    362 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    363 		break;
    364 	default:
    365 		/* Shouldn't get here since probe succeeded */
    366 		aprint_error_dev(dev, "Unknown device ID!\n");
    367 		error = ENXIO;
    368 		goto err_out;
    369 		break;
    370 	}
    371 
    372 	ixv_init_device_features(adapter);
    373 
    374 	/* Initialize the shared code */
    375 	error = ixgbe_init_ops_vf(hw);
    376 	if (error) {
    377 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    378 		error = EIO;
    379 		goto err_out;
    380 	}
    381 
    382 	/* Setup the mailbox */
    383 	ixgbe_init_mbx_params_vf(hw);
    384 
    385 	/* Set the right number of segments */
    386 	adapter->num_segs = IXGBE_82599_SCATTER;
    387 
    388 	/* Reset mbox api to 1.0 */
    389 	error = hw->mac.ops.reset_hw(hw);
    390 	if (error == IXGBE_ERR_RESET_FAILED)
    391 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    392 	else if (error)
    393 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    394 		    error);
    395 	if (error) {
    396 		error = EIO;
    397 		goto err_out;
    398 	}
    399 
    400 	error = hw->mac.ops.init_hw(hw);
    401 	if (error) {
    402 		aprint_error_dev(dev, "...init_hw() failed!\n");
    403 		error = EIO;
    404 		goto err_out;
    405 	}
    406 
    407 	/* Negotiate mailbox API version */
    408 	error = ixv_negotiate_api(adapter);
    409 	if (error)
    410 		aprint_normal_dev(dev,
    411 		    "MBX API negotiation failed during attach!\n");
    412 	switch (hw->api_version) {
    413 	case ixgbe_mbox_api_10:
    414 		apivstr = "1.0";
    415 		break;
    416 	case ixgbe_mbox_api_20:
    417 		apivstr = "2.0";
    418 		break;
    419 	case ixgbe_mbox_api_11:
    420 		apivstr = "1.1";
    421 		break;
    422 	case ixgbe_mbox_api_12:
    423 		apivstr = "1.2";
    424 		break;
    425 	case ixgbe_mbox_api_13:
    426 		apivstr = "1.3";
    427 		break;
    428 	default:
    429 		apivstr = "unknown";
    430 		break;
    431 	}
    432 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    433 
    434 	/* If no mac address was assigned, make a random one */
    435 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    436 		u8 addr[ETHER_ADDR_LEN];
    437 		uint64_t rndval = cprng_strong64();
    438 
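         		/*
         		 * Turn the random value into a valid locally
         		 * administered unicast address: clear the multicast
         		 * (I/G) bit and set the locally administered (U/L)
         		 * bit in the first octet.
         		 */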
    439 		memcpy(addr, &rndval, sizeof(addr));
    440 		addr[0] &= 0xFE;
    441 		addr[0] |= 0x02;
    442 		bcopy(addr, hw->mac.addr, sizeof(addr));
    443 	}
    444 
    445 	/* Register for VLAN events */
    446 #if 0 /* XXX delete after write? */
    447 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    448 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    449 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    450 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    451 #endif
    452 
    453 	/* Sysctls for limiting the amount of work done in the taskqueues */
    454 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    455 	    "max number of rx packets to process",
    456 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    457 
    458 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    459 	    "max number of tx packets to process",
    460 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    461 
    462 	/* Do descriptor calc and sanity checks */
    463 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    464 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    465 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    466 		adapter->num_tx_desc = DEFAULT_TXD;
    467 	} else
    468 		adapter->num_tx_desc = ixv_txd;
    469 
    470 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    471 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    472 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    473 		adapter->num_rx_desc = DEFAULT_RXD;
    474 	} else
    475 		adapter->num_rx_desc = ixv_rxd;
    476 
    477 	/* Setup MSI-X */
    478 	error = ixv_configure_interrupts(adapter);
    479 	if (error)
    480 		goto err_out;
    481 
    482 	/* Allocate our TX/RX Queues */
    483 	if (ixgbe_allocate_queues(adapter)) {
    484 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    485 		error = ENOMEM;
    486 		goto err_out;
    487 	}
    488 
    489 	/* hw.ix defaults init */
    490 	adapter->enable_aim = ixv_enable_aim;
    491 
    492 	/* Setup OS specific network interface */
    493 	ixv_setup_interface(dev, adapter);
    494 
    495 	error = ixv_allocate_msix(adapter, pa);
    496 	if (error) {
    497 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    498 		goto err_late;
    499 	}
    500 
    501 	/* Do the stats setup */
    502 	ixv_save_stats(adapter);
    503 	ixv_init_stats(adapter);
    504 	ixv_add_stats_sysctls(adapter);
    505 
    506 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    507 		ixgbe_netmap_attach(adapter);
    508 
    509 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    510 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    511 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    512 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    513 
    514 	INIT_DEBUGOUT("ixv_attach: end");
    515 	adapter->osdep.attached = true;
    516 
    517 	return;
    518 
    519 err_late:
    520 	ixgbe_free_transmit_structures(adapter);
    521 	ixgbe_free_receive_structures(adapter);
    522 	free(adapter->queues, M_DEVBUF);
    523 err_out:
    524 	ixv_free_pci_resources(adapter);
    525 	IXGBE_CORE_LOCK_DESTROY(adapter);
    526 
    527 	return;
    528 } /* ixv_attach */
    529 
    530 /************************************************************************
    531  * ixv_detach - Device removal routine
    532  *
    533  *   Called when the driver is being removed.
    534  *   Stops the adapter and deallocates all the resources
    535  *   that were allocated for driver operation.
    536  *
    537  *   return 0 on success, positive on failure
    538  ************************************************************************/
    539 static int
    540 ixv_detach(device_t dev, int flags)
    541 {
    542 	struct adapter  *adapter = device_private(dev);
    543 	struct ix_queue *que = adapter->queues;
    544 	struct tx_ring *txr = adapter->tx_rings;
    545 	struct rx_ring *rxr = adapter->rx_rings;
    546 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    547 
    548 	INIT_DEBUGOUT("ixv_detach: begin");
    549 	if (adapter->osdep.attached == false)
    550 		return 0;
    551 
     552 	/* Stop the interface; the timer callout is stopped there as well. */
    553 	ixv_ifstop(adapter->ifp, 1);
    554 
    555 #if NVLAN > 0
     556 	/* Make sure VLANs are not using the driver */
    557 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    558 		;	/* nothing to do: no VLANs */
    559 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    560 		vlan_ifdetach(adapter->ifp);
    561 	else {
    562 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    563 		return EBUSY;
    564 	}
    565 #endif
    566 
    567 	IXGBE_CORE_LOCK(adapter);
    568 	ixv_stop(adapter);
    569 	IXGBE_CORE_UNLOCK(adapter);
    570 
    571 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    572 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    573 			softint_disestablish(txr->txr_si);
    574 		softint_disestablish(que->que_si);
    575 	}
    576 
    577 	/* Drain the Mailbox(link) queue */
    578 	softint_disestablish(adapter->link_si);
    579 
    580 	/* Unregister VLAN events */
    581 #if 0 /* XXX msaitoh delete after write? */
    582 	if (adapter->vlan_attach != NULL)
    583 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    584 	if (adapter->vlan_detach != NULL)
    585 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    586 #endif
    587 
    588 	ether_ifdetach(adapter->ifp);
    589 	callout_halt(&adapter->timer, NULL);
    590 
    591 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    592 		netmap_detach(adapter->ifp);
    593 
    594 	ixv_free_pci_resources(adapter);
    595 #if 0 /* XXX the NetBSD port is probably missing something here */
    596 	bus_generic_detach(dev);
    597 #endif
    598 	if_detach(adapter->ifp);
    599 	if_percpuq_destroy(adapter->ipq);
    600 
    601 	sysctl_teardown(&adapter->sysctllog);
    602 	evcnt_detach(&adapter->handleq);
    603 	evcnt_detach(&adapter->req);
    604 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    605 	evcnt_detach(&adapter->mbuf_defrag_failed);
    606 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    607 	evcnt_detach(&adapter->einval_tx_dma_setup);
    608 	evcnt_detach(&adapter->other_tx_dma_setup);
    609 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    610 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    611 	evcnt_detach(&adapter->watchdog_events);
    612 	evcnt_detach(&adapter->tso_err);
    613 	evcnt_detach(&adapter->link_irq);
    614 
    615 	txr = adapter->tx_rings;
    616 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    617 		evcnt_detach(&adapter->queues[i].irqs);
    618 		evcnt_detach(&txr->no_desc_avail);
    619 		evcnt_detach(&txr->total_packets);
    620 		evcnt_detach(&txr->tso_tx);
    621 #ifndef IXGBE_LEGACY_TX
    622 		evcnt_detach(&txr->pcq_drops);
    623 #endif
    624 
    625 		evcnt_detach(&rxr->rx_packets);
    626 		evcnt_detach(&rxr->rx_bytes);
    627 		evcnt_detach(&rxr->rx_copies);
    628 		evcnt_detach(&rxr->no_jmbuf);
    629 		evcnt_detach(&rxr->rx_discarded);
    630 	}
    631 	evcnt_detach(&stats->ipcs);
    632 	evcnt_detach(&stats->l4cs);
    633 	evcnt_detach(&stats->ipcs_bad);
    634 	evcnt_detach(&stats->l4cs_bad);
    635 
    636 	/* Packet Reception Stats */
    637 	evcnt_detach(&stats->vfgorc);
    638 	evcnt_detach(&stats->vfgprc);
    639 	evcnt_detach(&stats->vfmprc);
    640 
    641 	/* Packet Transmission Stats */
    642 	evcnt_detach(&stats->vfgotc);
    643 	evcnt_detach(&stats->vfgptc);
    644 
    645 	ixgbe_free_transmit_structures(adapter);
    646 	ixgbe_free_receive_structures(adapter);
    647 	free(adapter->queues, M_DEVBUF);
    648 
    649 	IXGBE_CORE_LOCK_DESTROY(adapter);
    650 
    651 	return (0);
    652 } /* ixv_detach */
    653 
    654 /************************************************************************
    655  * ixv_init_locked - Init entry point
    656  *
     657  *   Used in two ways: it is called by the stack (via ixv_init())
     658  *   as the init entry point of the network interface, and it is
     659  *   used by the driver itself as a hw/sw initialization routine
     660  *   to get back to a consistent state.
     661  *
     662  *   Called with the core lock held; returns nothing.
    663  ************************************************************************/
    664 static void
    665 ixv_init_locked(struct adapter *adapter)
    666 {
    667 	struct ifnet	*ifp = adapter->ifp;
    668 	device_t 	dev = adapter->dev;
    669 	struct ixgbe_hw *hw = &adapter->hw;
    670 	int             error = 0;
    671 
    672 	INIT_DEBUGOUT("ixv_init_locked: begin");
    673 	KASSERT(mutex_owned(&adapter->core_mtx));
    674 	hw->adapter_stopped = FALSE;
    675 	hw->mac.ops.stop_adapter(hw);
    676 	callout_stop(&adapter->timer);
    677 
    678 	/* reprogram the RAR[0] in case user changed it. */
    679 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    680 
    681 	/* Get the latest mac address, User can use a LAA */
    682 	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
    683 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    684 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    685 
    686 	/* Prepare transmit descriptors and buffers */
    687 	if (ixgbe_setup_transmit_structures(adapter)) {
    688 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    689 		ixv_stop(adapter);
    690 		return;
    691 	}
    692 
    693 	/* Reset VF and renegotiate mailbox API version */
    694 	hw->mac.ops.reset_hw(hw);
    695 	error = ixv_negotiate_api(adapter);
    696 	if (error)
    697 		device_printf(dev,
    698 		    "Mailbox API negotiation failed in init_locked!\n");
    699 
    700 	ixv_initialize_transmit_units(adapter);
    701 
    702 	/* Setup Multicast table */
    703 	ixv_set_multi(adapter);
    704 
    705 	/*
    706 	 * Determine the correct mbuf pool
    707 	 * for doing jumbo/headersplit
    708 	 */
    709 	if (ifp->if_mtu > ETHERMTU)
    710 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    711 	else
    712 		adapter->rx_mbuf_sz = MCLBYTES;
    713 
    714 	/* Prepare receive descriptors and buffers */
    715 	if (ixgbe_setup_receive_structures(adapter)) {
    716 		device_printf(dev, "Could not setup receive structures\n");
    717 		ixv_stop(adapter);
    718 		return;
    719 	}
    720 
    721 	/* Configure RX settings */
    722 	ixv_initialize_receive_units(adapter);
    723 
    724 #if 0 /* XXX isn't it required? -- msaitoh  */
    725 	/* Set the various hardware offload abilities */
    726 	ifp->if_hwassist = 0;
    727 	if (ifp->if_capenable & IFCAP_TSO4)
    728 		ifp->if_hwassist |= CSUM_TSO;
    729 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    730 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    731 #if __FreeBSD_version >= 800000
    732 		ifp->if_hwassist |= CSUM_SCTP;
    733 #endif
    734 	}
    735 #endif
    736 
    737 	/* Set up VLAN offload and filter */
    738 	ixv_setup_vlan_support(adapter);
    739 
    740 	/* Set up MSI-X routing */
    741 	ixv_configure_ivars(adapter);
    742 
    743 	/* Set up auto-mask */
    744 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
    745 
    746 	/* Set moderation on the Link interrupt */
    747 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
    748 
    749 	/* Stats init */
    750 	ixv_init_stats(adapter);
    751 
    752 	/* Config/Enable Link */
    753 	hw->mac.get_link_status = TRUE;
    754 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    755 	    FALSE);
    756 
    757 	/* Start watchdog */
    758 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    759 
    760 	/* And now turn on interrupts */
    761 	ixv_enable_intr(adapter);
    762 
    763 	/* Now inform the stack we're ready */
    764 	ifp->if_flags |= IFF_RUNNING;
    765 	ifp->if_flags &= ~IFF_OACTIVE;
    766 
    767 	return;
    768 } /* ixv_init_locked */
    769 
    770 /*
    771  * MSI-X Interrupt Handlers and Tasklets
    772  */
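         /*
          * The helpers below manipulate the VF interrupt mask registers:
          * setting a vector's bit in VTEIMS unmasks (enables) the vector,
          * setting it in VTEIMC masks (disables) it, and setting it in
          * VTEICS forces the interrupt, which is how queues are rearmed.
          */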
    773 
    774 static inline void
    775 ixv_enable_queue(struct adapter *adapter, u32 vector)
    776 {
    777 	struct ixgbe_hw *hw = &adapter->hw;
    778 	u32             queue = 1 << vector;
    779 	u32             mask;
    780 
    781 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    782 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    783 } /* ixv_enable_queue */
    784 
    785 static inline void
    786 ixv_disable_queue(struct adapter *adapter, u32 vector)
    787 {
    788 	struct ixgbe_hw *hw = &adapter->hw;
     789 	u64             queue = (u64)1 << vector;
    790 	u32             mask;
    791 
    792 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    793 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    794 } /* ixv_disable_queue */
    795 
    796 static inline void
    797 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    798 {
    799 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    800 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    801 } /* ixv_rearm_queues */
    802 
    803 
    804 /************************************************************************
     805  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    806  ************************************************************************/
    807 static int
    808 ixv_msix_que(void *arg)
    809 {
    810 	struct ix_queue	*que = arg;
    811 	struct adapter  *adapter = que->adapter;
    812 	struct tx_ring	*txr = que->txr;
    813 	struct rx_ring	*rxr = que->rxr;
    814 	bool		more;
    815 	u32		newitr = 0;
    816 
    817 	ixv_disable_queue(adapter, que->msix);
    818 	++que->irqs.ev_count;
    819 
    820 #ifdef __NetBSD__
    821 	/* Don't run ixgbe_rxeof in interrupt context */
    822 	more = true;
    823 #else
    824 	more = ixgbe_rxeof(que);
    825 #endif
    826 
    827 	IXGBE_TX_LOCK(txr);
    828 	ixgbe_txeof(txr);
    829 	IXGBE_TX_UNLOCK(txr);
    830 
    831 	/* Do AIM now? */
    832 
    833 	if (adapter->enable_aim == false)
    834 		goto no_calc;
    835 	/*
    836 	 * Do Adaptive Interrupt Moderation:
    837 	 *  - Write out last calculated setting
    838 	 *  - Calculate based on average size over
    839 	 *    the last interval.
    840 	 */
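         	/*
         	 * Rough example: an average of ~1500 bytes per packet over
         	 * the last interval gives newitr = min(1500 + 24, 3000) / 2
         	 * = 762, which is mirrored into the upper 16 bits and written
         	 * to VTEITR on the next interrupt via que->eitr_setting.
         	 */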
    841 	if (que->eitr_setting)
    842 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
    843 		    que->eitr_setting);
    844 
    845 	que->eitr_setting = 0;
    846 
    847 	/* Idle, do nothing */
    848 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    849 		goto no_calc;
    850 
    851 	if ((txr->bytes) && (txr->packets))
    852 		newitr = txr->bytes/txr->packets;
    853 	if ((rxr->bytes) && (rxr->packets))
    854 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    855 	newitr += 24; /* account for hardware frame, crc */
    856 
    857 	/* set an upper boundary */
    858 	newitr = min(newitr, 3000);
    859 
    860 	/* Be nice to the mid range */
    861 	if ((newitr > 300) && (newitr < 1200))
    862 		newitr = (newitr / 3);
    863 	else
    864 		newitr = (newitr / 2);
    865 
    866 	newitr |= newitr << 16;
    867 
    868 	/* save for next interrupt */
    869 	que->eitr_setting = newitr;
    870 
    871 	/* Reset state */
    872 	txr->bytes = 0;
    873 	txr->packets = 0;
    874 	rxr->bytes = 0;
    875 	rxr->packets = 0;
    876 
    877 no_calc:
    878 	if (more)
    879 		softint_schedule(que->que_si);
    880 	else /* Re-enable this interrupt */
    881 		ixv_enable_queue(adapter, que->msix);
    882 
    883 	return 1;
    884 } /* ixv_msix_que */
    885 
    886 /************************************************************************
    887  * ixv_msix_mbx
    888  ************************************************************************/
    889 static int
    890 ixv_msix_mbx(void *arg)
    891 {
    892 	struct adapter	*adapter = arg;
    893 	struct ixgbe_hw *hw = &adapter->hw;
    894 	u32		reg;
    895 
    896 	++adapter->link_irq.ev_count;
    897 
    898 	/* First get the cause */
    899 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    900 	/* Clear interrupt with write */
    901 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
    902 
    903 	/* Link status change */
    904 	if (reg & IXGBE_EICR_LSC)
    905 		softint_schedule(adapter->link_si);
    906 
    907 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    908 
    909 	return 1;
    910 } /* ixv_msix_mbx */
    911 
    912 /************************************************************************
    913  * ixv_media_status - Media Ioctl callback
    914  *
    915  *   Called whenever the user queries the status of
    916  *   the interface using ifconfig.
    917  ************************************************************************/
    918 static void
    919 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    920 {
    921 	struct adapter *adapter = ifp->if_softc;
    922 
    923 	INIT_DEBUGOUT("ixv_media_status: begin");
    924 	IXGBE_CORE_LOCK(adapter);
    925 	ixv_update_link_status(adapter);
    926 
    927 	ifmr->ifm_status = IFM_AVALID;
    928 	ifmr->ifm_active = IFM_ETHER;
    929 
    930 	if (!adapter->link_active) {
    931 		ifmr->ifm_active |= IFM_NONE;
    932 		IXGBE_CORE_UNLOCK(adapter);
    933 		return;
    934 	}
    935 
    936 	ifmr->ifm_status |= IFM_ACTIVE;
    937 
    938 	switch (adapter->link_speed) {
    939 		case IXGBE_LINK_SPEED_10GB_FULL:
    940 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
    941 			break;
    942 		case IXGBE_LINK_SPEED_1GB_FULL:
    943 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
    944 			break;
    945 		case IXGBE_LINK_SPEED_100_FULL:
    946 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
    947 			break;
    948 		case IXGBE_LINK_SPEED_10_FULL:
    949 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
    950 			break;
    951 	}
    952 
    953 	IXGBE_CORE_UNLOCK(adapter);
    954 
    955 	return;
    956 } /* ixv_media_status */
    957 
    958 /************************************************************************
    959  * ixv_media_change - Media Ioctl callback
    960  *
    961  *   Called when the user changes speed/duplex using
     962  *   media/mediaopt options with ifconfig.
    963  ************************************************************************/
    964 static int
    965 ixv_media_change(struct ifnet *ifp)
    966 {
    967 	struct adapter *adapter = ifp->if_softc;
    968 	struct ifmedia *ifm = &adapter->media;
    969 
    970 	INIT_DEBUGOUT("ixv_media_change: begin");
    971 
    972 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
    973 		return (EINVAL);
    974 
    975 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
    976 	case IFM_AUTO:
    977 		break;
    978 	default:
    979 		device_printf(adapter->dev, "Only auto media type\n");
    980 		return (EINVAL);
    981 	}
    982 
    983 	return (0);
    984 } /* ixv_media_change */
    985 
    986 
    987 /************************************************************************
    988  * ixv_negotiate_api
    989  *
     990  *   Negotiate the Mailbox API with the PF,
     991  *   starting with the most featured API.
    992  ************************************************************************/
    993 static int
    994 ixv_negotiate_api(struct adapter *adapter)
    995 {
    996 	struct ixgbe_hw *hw = &adapter->hw;
    997 	int             mbx_api[] = { ixgbe_mbox_api_11,
    998 	                              ixgbe_mbox_api_10,
    999 	                              ixgbe_mbox_api_unknown };
   1000 	int             i = 0;
   1001 
   1002 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1003 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1004 			return (0);
   1005 		i++;
   1006 	}
   1007 
   1008 	return (EINVAL);
   1009 } /* ixv_negotiate_api */
   1010 
   1011 
   1012 /************************************************************************
   1013  * ixv_set_multi - Multicast Update
   1014  *
   1015  *   Called whenever multicast address list is updated.
   1016  ************************************************************************/
   1017 static void
   1018 ixv_set_multi(struct adapter *adapter)
   1019 {
   1020 	struct ether_multi *enm;
   1021 	struct ether_multistep step;
   1022 	struct ethercom *ec = &adapter->osdep.ec;
   1023 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1024 	u8                 *update_ptr;
   1025 	int                mcnt = 0;
   1026 
   1027 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1028 
   1029 	ETHER_FIRST_MULTI(step, ec, enm);
   1030 	while (enm != NULL) {
   1031 		bcopy(enm->enm_addrlo,
   1032 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1033 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1034 		mcnt++;
   1035 		/* XXX This might be required --msaitoh */
   1036 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1037 			break;
   1038 		ETHER_NEXT_MULTI(step, enm);
   1039 	}
   1040 
   1041 	update_ptr = mta;
   1042 
   1043 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1044 	    ixv_mc_array_itr, TRUE);
   1045 
   1046 	return;
   1047 } /* ixv_set_multi */
   1048 
   1049 /************************************************************************
   1050  * ixv_mc_array_itr
   1051  *
   1052  *   An iterator function needed by the multicast shared code.
   1053  *   It feeds the shared code routine the addresses in the
   1054  *   array of ixv_set_multi() one by one.
   1055  ************************************************************************/
   1056 static u8 *
   1057 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1058 {
   1059 	u8 *addr = *update_ptr;
   1060 	u8 *newptr;
   1061 	*vmdq = 0;
   1062 
   1063 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1064 	*update_ptr = newptr;
   1065 
   1066 	return addr;
   1067 } /* ixv_mc_array_itr */
   1068 
   1069 /************************************************************************
   1070  * ixv_local_timer - Timer routine
   1071  *
   1072  *   Checks for link status, updates statistics,
   1073  *   and runs the watchdog check.
   1074  ************************************************************************/
   1075 static void
   1076 ixv_local_timer(void *arg)
   1077 {
   1078 	struct adapter *adapter = arg;
   1079 
   1080 	IXGBE_CORE_LOCK(adapter);
   1081 	ixv_local_timer_locked(adapter);
   1082 	IXGBE_CORE_UNLOCK(adapter);
   1083 }
   1084 
   1085 static void
   1086 ixv_local_timer_locked(void *arg)
   1087 {
   1088 	struct adapter	*adapter = arg;
   1089 	device_t	dev = adapter->dev;
   1090 	struct ix_queue	*que = adapter->queues;
   1091 	u64		queues = 0;
   1092 	int		hung = 0;
   1093 
   1094 	KASSERT(mutex_owned(&adapter->core_mtx));
   1095 
   1096 	ixv_check_link(adapter);
   1097 
   1098 	/* Stats Update */
   1099 	ixv_update_stats(adapter);
   1100 
   1101 	/*
   1102 	 * Check the TX queues status
   1103 	 *      - mark hung queues so we don't schedule on them
   1104 	 *      - watchdog only if all queues show hung
   1105 	 */
   1106 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1107 		/* Keep track of queues with work for soft irq */
   1108 		if (que->txr->busy)
   1109 			queues |= ((u64)1 << que->me);
   1110 		/*
    1111 		 * Each time txeof runs without cleaning while there
    1112 		 * are still uncleaned descriptors, it increments busy.
    1113 		 * If busy reaches the MAX we declare the queue hung.
   1114 		 */
   1115 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1116 			++hung;
   1117 			/* Mark the queue as inactive */
   1118 			adapter->active_queues &= ~((u64)1 << que->me);
   1119 			continue;
   1120 		} else {
   1121 			/* Check if we've come back from hung */
   1122 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1123 				adapter->active_queues |= ((u64)1 << que->me);
   1124 		}
   1125 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1126 			device_printf(dev,
   1127 			    "Warning queue %d appears to be hung!\n", i);
   1128 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1129 			++hung;
   1130 		}
   1131 	}
   1132 
   1133 	/* Only truly watchdog if all queues show hung */
   1134 	if (hung == adapter->num_queues)
   1135 		goto watchdog;
   1136 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1137 		ixv_rearm_queues(adapter, queues);
   1138 	}
   1139 
   1140 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1141 
   1142 	return;
   1143 
   1144 watchdog:
   1145 
   1146 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1147 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1148 	adapter->watchdog_events.ev_count++;
   1149 	ixv_init_locked(adapter);
   1150 } /* ixv_local_timer */
   1151 
   1152 /************************************************************************
   1153  * ixv_update_link_status - Update OS on link state
   1154  *
   1155  * Note: Only updates the OS on the cached link state.
   1156  *       The real check of the hardware only happens with
   1157  *       a link interrupt.
   1158  ************************************************************************/
   1159 static void
   1160 ixv_update_link_status(struct adapter *adapter)
   1161 {
   1162 	struct ifnet *ifp = adapter->ifp;
   1163 	device_t     dev = adapter->dev;
   1164 
   1165 	if (adapter->link_up) {
   1166 		if (adapter->link_active == FALSE) {
   1167 			if (bootverbose) {
   1168 				const char *bpsmsg;
   1169 
   1170 				switch (adapter->link_speed) {
   1171 				case IXGBE_LINK_SPEED_10GB_FULL:
   1172 					bpsmsg = "10 Gbps";
   1173 					break;
   1174 				case IXGBE_LINK_SPEED_5GB_FULL:
   1175 					bpsmsg = "5 Gbps";
   1176 					break;
   1177 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1178 					bpsmsg = "2.5 Gbps";
   1179 					break;
   1180 				case IXGBE_LINK_SPEED_1GB_FULL:
   1181 					bpsmsg = "1 Gbps";
   1182 					break;
   1183 				case IXGBE_LINK_SPEED_100_FULL:
   1184 					bpsmsg = "100 Mbps";
   1185 					break;
   1186 				case IXGBE_LINK_SPEED_10_FULL:
   1187 					bpsmsg = "10 Mbps";
   1188 					break;
   1189 				default:
   1190 					bpsmsg = "unknown speed";
   1191 					break;
   1192 				}
   1193 				device_printf(dev, "Link is up %s %s \n",
   1194 				    bpsmsg, "Full Duplex");
   1195 			}
   1196 			adapter->link_active = TRUE;
   1197 			if_link_state_change(ifp, LINK_STATE_UP);
   1198 		}
   1199 	} else { /* Link down */
   1200 		if (adapter->link_active == TRUE) {
   1201 			if (bootverbose)
   1202 				device_printf(dev, "Link is Down\n");
   1203 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1204 			adapter->link_active = FALSE;
   1205 		}
   1206 	}
   1207 
   1208 	return;
   1209 } /* ixv_update_link_status */
   1210 
   1211 
   1212 /************************************************************************
   1213  * ixv_stop - Stop the hardware
   1214  *
    1215  *   Disables all traffic on the adapter by disabling interrupts
    1216  *   and resetting the VF; TX/RX buffers remain allocated.
   1217  ************************************************************************/
   1218 static void
   1219 ixv_ifstop(struct ifnet *ifp, int disable)
   1220 {
   1221 	struct adapter *adapter = ifp->if_softc;
   1222 
   1223 	IXGBE_CORE_LOCK(adapter);
   1224 	ixv_stop(adapter);
   1225 	IXGBE_CORE_UNLOCK(adapter);
   1226 }
   1227 
   1228 static void
   1229 ixv_stop(void *arg)
   1230 {
   1231 	struct ifnet    *ifp;
   1232 	struct adapter  *adapter = arg;
   1233 	struct ixgbe_hw *hw = &adapter->hw;
   1234 
   1235 	ifp = adapter->ifp;
   1236 
   1237 	KASSERT(mutex_owned(&adapter->core_mtx));
   1238 
   1239 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1240 	ixv_disable_intr(adapter);
   1241 
   1242 	/* Tell the stack that the interface is no longer active */
   1243 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1244 
   1245 	hw->mac.ops.reset_hw(hw);
   1246 	adapter->hw.adapter_stopped = FALSE;
   1247 	hw->mac.ops.stop_adapter(hw);
   1248 	callout_stop(&adapter->timer);
   1249 
   1250 	/* reprogram the RAR[0] in case user changed it. */
   1251 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1252 
   1253 	return;
   1254 } /* ixv_stop */
   1255 
   1256 
   1257 /************************************************************************
   1258  * ixv_allocate_pci_resources
   1259  ************************************************************************/
   1260 static int
   1261 ixv_allocate_pci_resources(struct adapter *adapter,
   1262     const struct pci_attach_args *pa)
   1263 {
   1264 	pcireg_t	memtype;
   1265 	device_t        dev = adapter->dev;
   1266 	bus_addr_t addr;
   1267 	int flags;
   1268 
   1269 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1270 	switch (memtype) {
   1271 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1272 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1273 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1274 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1275 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1276 			goto map_err;
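         		/*
         		 * Register space must be mapped non-prefetchable:
         		 * prefetching or write-combining device registers
         		 * could reorder or merge accesses on some platforms.
         		 */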
   1277 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1278 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1279 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1280 		}
   1281 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1282 		     adapter->osdep.mem_size, flags,
   1283 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1284 map_err:
   1285 			adapter->osdep.mem_size = 0;
   1286 			aprint_error_dev(dev, "unable to map BAR0\n");
   1287 			return ENXIO;
   1288 		}
   1289 		break;
   1290 	default:
   1291 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1292 		return ENXIO;
   1293 	}
   1294 
   1295 	/* Pick up the tuneable queues */
   1296 	adapter->num_queues = ixv_num_queues;
   1297 
   1298 	return (0);
   1299 } /* ixv_allocate_pci_resources */
   1300 
   1301 /************************************************************************
   1302  * ixv_free_pci_resources
   1303  ************************************************************************/
   1304 static void
   1305 ixv_free_pci_resources(struct adapter * adapter)
   1306 {
   1307 	struct 		ix_queue *que = adapter->queues;
   1308 	int		rid;
   1309 
   1310 	/*
   1311 	 *  Release all msix queue resources:
   1312 	 */
   1313 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1314 		if (que->res != NULL)
   1315 			pci_intr_disestablish(adapter->osdep.pc,
   1316 			    adapter->osdep.ihs[i]);
   1317 	}
   1318 
   1319 
   1320 	/* Clean the Mailbox interrupt last */
   1321 	rid = adapter->vector;
   1322 
   1323 	if (adapter->osdep.ihs[rid] != NULL) {
   1324 		pci_intr_disestablish(adapter->osdep.pc,
   1325 		    adapter->osdep.ihs[rid]);
   1326 		adapter->osdep.ihs[rid] = NULL;
   1327 	}
   1328 
   1329 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1330 	    adapter->osdep.nintrs);
   1331 
   1332 	if (adapter->osdep.mem_size != 0) {
   1333 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1334 		    adapter->osdep.mem_bus_space_handle,
   1335 		    adapter->osdep.mem_size);
   1336 	}
   1337 
   1338 	return;
   1339 } /* ixv_free_pci_resources */
   1340 
   1341 /************************************************************************
   1342  * ixv_setup_interface
   1343  *
   1344  *   Setup networking device structure and register an interface.
   1345  ************************************************************************/
   1346 static void
   1347 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1348 {
   1349 	struct ethercom *ec = &adapter->osdep.ec;
   1350 	struct ifnet   *ifp;
   1351 
   1352 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1353 
   1354 	ifp = adapter->ifp = &ec->ec_if;
   1355 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1356 	ifp->if_baudrate = IF_Gbps(10);
   1357 	ifp->if_init = ixv_init;
   1358 	ifp->if_stop = ixv_ifstop;
   1359 	ifp->if_softc = adapter;
   1360 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1361 #ifdef IXGBE_MPSAFE
   1362 	ifp->if_extflags = IFEF_START_MPSAFE;
   1363 #endif
   1364 	ifp->if_ioctl = ixv_ioctl;
   1365 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1366 #if 0
   1367 		ixv_start_locked = ixgbe_legacy_start_locked;
   1368 #endif
   1369 	} else {
   1370 		ifp->if_transmit = ixgbe_mq_start;
   1371 #if 0
   1372 		ixv_start_locked = ixgbe_mq_start_locked;
   1373 #endif
   1374 	}
   1375 	ifp->if_start = ixgbe_legacy_start;
   1376 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1377 	IFQ_SET_READY(&ifp->if_snd);
   1378 
   1379 	if_initialize(ifp);
   1380 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1381 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1382 	/*
   1383 	 * We use per TX queue softint, so if_deferred_start_init() isn't
   1384 	 * used.
   1385 	 */
   1386 	if_register(ifp);
   1387 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1388 
   1389 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1390 
   1391 	/*
   1392 	 * Tell the upper layer(s) we support long frames.
   1393 	 */
   1394 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1395 
   1396 	/* Set capability flags */
   1397 	ifp->if_capabilities |= IFCAP_HWCSUM
   1398 	                     |  IFCAP_TSOv4
   1399 	                     |  IFCAP_TSOv6;
   1400 	ifp->if_capenable = 0;
   1401 
   1402 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1403 			    |  ETHERCAP_VLAN_HWCSUM
   1404 			    |  ETHERCAP_JUMBO_MTU
   1405 			    |  ETHERCAP_VLAN_MTU;
   1406 
   1407 	/* Enable the above capabilities by default */
   1408 	ec->ec_capenable = ec->ec_capabilities;
   1409 
   1410 	/* Don't enable LRO by default */
   1411 	ifp->if_capabilities |= IFCAP_LRO;
   1412 #if 0
   1413 	ifp->if_capenable = ifp->if_capabilities;
   1414 #endif
   1415 
   1416 	/*
   1417 	 * Specify the media types supported by this adapter and register
   1418 	 * callbacks to update media and link information
   1419 	 */
   1420 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1421 	    ixv_media_status);
   1422 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1423 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1424 
   1425 	return;
   1426 } /* ixv_setup_interface */
   1427 
   1428 
   1429 /************************************************************************
   1430  * ixv_initialize_transmit_units - Enable transmit unit.
   1431  ************************************************************************/
   1432 static void
   1433 ixv_initialize_transmit_units(struct adapter *adapter)
   1434 {
   1435 	struct tx_ring	*txr = adapter->tx_rings;
   1436 	struct ixgbe_hw	*hw = &adapter->hw;
   1437 
   1438 
   1439 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
   1440 		u64 tdba = txr->txdma.dma_paddr;
   1441 		u32 txctrl, txdctl;
   1442 
   1443 		/* Set WTHRESH to 8, burst writeback */
   1444 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1445 		txdctl |= (8 << 16);
   1446 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1447 
   1448 		/* Set the HW Tx Head and Tail indices */
   1449 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
   1450 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
   1451 
   1452 		/* Set Tx Tail register */
   1453 		txr->tail = IXGBE_VFTDT(i);
   1454 
   1455 		/* Set Ring parameters */
   1456 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
   1457 		    (tdba & 0x00000000ffffffffULL));
   1458 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
   1459 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
   1460 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1461 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
   1462 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1463 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
   1464 
   1465 		/* Now enable */
   1466 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
   1467 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1468 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
   1469 	}
   1470 
   1471 	return;
   1472 } /* ixv_initialize_transmit_units */
   1473 
   1474 
   1475 /************************************************************************
   1476  * ixv_initialize_rss_mapping
   1477  ************************************************************************/
   1478 static void
   1479 ixv_initialize_rss_mapping(struct adapter *adapter)
   1480 {
   1481 	struct ixgbe_hw *hw = &adapter->hw;
   1482 	u32             reta = 0, mrqc, rss_key[10];
   1483 	int             queue_id;
   1484 	int             i, j;
   1485 	u32             rss_hash_config;
   1486 
   1487 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1488 		/* Fetch the configured RSS key */
   1489 		rss_getkey((uint8_t *)&rss_key);
   1490 	} else {
   1491 		/* set up random bits */
   1492 		cprng_fast(&rss_key, sizeof(rss_key));
   1493 	}
   1494 
   1495 	/* Now fill out hash function seeds */
   1496 	for (i = 0; i < 10; i++)
   1497 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1498 
   1499 	/* Set up the redirection table */
   1500 	for (i = 0, j = 0; i < 64; i++, j++) {
   1501 		if (j == adapter->num_queues)
   1502 			j = 0;
   1503 
   1504 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1505 			/*
   1506 			 * Fetch the RSS bucket id for the given indirection
   1507 			 * entry. Cap it at the number of configured buckets
   1508 			 * (which is num_queues.)
   1509 			 */
   1510 			queue_id = rss_get_indirection_to_bucket(i);
   1511 			queue_id = queue_id % adapter->num_queues;
   1512 		} else
   1513 			queue_id = j;
   1514 
   1515 		/*
   1516 		 * The low 8 bits are for hash value (n+0);
   1517 		 * The next 8 bits are for hash value (n+1), etc.
   1518 		 */
   1519 		reta >>= 8;
   1520 		reta |= ((uint32_t)queue_id) << 24;
   1521 		if ((i & 3) == 3) {
   1522 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1523 			reta = 0;
   1524 		}
   1525 	}
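         	/*
         	 * Example: with two queues the 64-entry table alternates
         	 * 0,1,0,1,..., so each VFRETA register ends up as 0x01000100
         	 * (four 8-bit entries packed into one 32-bit register).
         	 */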
   1526 
   1527 	/* Perform hash on these packet types */
   1528 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1529 		rss_hash_config = rss_gethashconfig();
   1530 	else {
   1531 		/*
   1532 		 * Disable UDP - IP fragments aren't currently being handled
   1533 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1534 		 * traffic.
   1535 		 */
   1536 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1537 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1538 		                | RSS_HASHTYPE_RSS_IPV6
   1539 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1540 	}
   1541 
   1542 	mrqc = IXGBE_MRQC_RSSEN;
   1543 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1544 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1545 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1546 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1547 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1548 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1549 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1550 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1551 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1552 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1553 		    __func__);
   1554 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1555 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1556 		    __func__);
   1557 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1558 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1559 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
   1560 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
   1561 		    __func__);
   1562 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1563 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1564 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1565 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1566 		    __func__);
   1567 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1568 } /* ixv_initialize_rss_mapping */
   1569 
   1570 
   1571 /************************************************************************
   1572  * ixv_initialize_receive_units - Setup receive registers and features.
   1573  ************************************************************************/
   1574 static void
   1575 ixv_initialize_receive_units(struct adapter *adapter)
   1576 {
   1577 	struct	rx_ring	*rxr = adapter->rx_rings;
   1578 	struct ixgbe_hw	*hw = &adapter->hw;
   1579 	struct ifnet	*ifp = adapter->ifp;
   1580 	u32		bufsz, rxcsum, psrtype;
   1581 
   1582 	if (ifp->if_mtu > ETHERMTU)
   1583 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1584 	else
   1585 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
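	/*
	 * A minimal sketch of the encoding assumed here: BSIZEPKT is
	 * programmed in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10),
	 * so the values written below are 2 for 2 KB and 4 for 4 KB
	 * receive buffers.
	 */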
   1586 
   1587 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1588 	        | IXGBE_PSRTYPE_UDPHDR
   1589 	        | IXGBE_PSRTYPE_IPV4HDR
   1590 	        | IXGBE_PSRTYPE_IPV6HDR
   1591 	        | IXGBE_PSRTYPE_L2HDR;
   1592 
   1593 	if (adapter->num_queues > 1)
   1594 		psrtype |= 1 << 29;
   1595 
   1596 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1597 
   1598 	/* Tell PF our max_frame size */
   1599 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1600 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1601 	}
   1602 
   1603 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1604 		u64 rdba = rxr->rxdma.dma_paddr;
   1605 		u32 reg, rxdctl;
   1606 
   1607 		/* Disable the queue */
   1608 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
   1609 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1610 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1611 		for (int j = 0; j < 10; j++) {
   1612 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1613 			    IXGBE_RXDCTL_ENABLE)
   1614 				msec_delay(1);
   1615 			else
   1616 				break;
   1617 		}
   1618 		wmb();
   1619 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1620 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
   1621 		    (rdba & 0x00000000ffffffffULL));
   1622 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
   1623 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
   1624 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1625 
   1626 		/* Reset the ring indices */
   1627 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1628 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1629 
   1630 		/* Set up the SRRCTL register */
   1631 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
   1632 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1633 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1634 		reg |= bufsz;
   1635 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1636 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
   1637 
   1638 		/* Capture Rx Tail index */
   1639 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1640 
   1641 		/* Do the queue enabling last */
   1642 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1643 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
   1644 		for (int k = 0; k < 10; k++) {
   1645 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
   1646 			    IXGBE_RXDCTL_ENABLE)
   1647 				break;
   1648 			msec_delay(1);
   1649 		}
   1650 		wmb();
   1651 
   1652 		/* Set the Tail Pointer */
   1653 		/*
   1654 		 * In netmap mode, we must preserve the buffers made
   1655 		 * available to userspace before the if_init()
   1656 		 * (this is true by default on the TX side, because
   1657 		 * init makes all buffers available to userspace).
   1658 		 *
   1659 		 * netmap_reset() and the device specific routines
   1660 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1661 		 * buffers at the end of the NIC ring, so here we
   1662 		 * must set the RDT (tail) register to make sure
   1663 		 * they are not overwritten.
   1664 		 *
   1665 		 * In this driver the NIC ring starts at RDH = 0,
   1666 		 * RDT points to the last slot available for reception (?),
   1667 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1668 		 */
   1669 #ifdef DEV_NETMAP
   1670 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1671 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1672 			struct netmap_adapter *na = NA(adapter->ifp);
   1673 			struct netmap_kring *kring = &na->rx_rings[i];
   1674 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1675 
   1676 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1677 		} else
   1678 #endif /* DEV_NETMAP */
   1679 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1680 			    adapter->num_rx_desc - 1);
   1681 	}
   1682 
   1683 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1684 
   1685 	ixv_initialize_rss_mapping(adapter);
   1686 
   1687 	if (adapter->num_queues > 1) {
   1688 		/* RSS and RX IPP Checksum are mutually exclusive */
   1689 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1690 	}
   1691 
   1692 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1693 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1694 
   1695 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1696 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1697 
   1698 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1699 
   1700 	return;
   1701 } /* ixv_initialize_receive_units */
   1702 
   1703 /************************************************************************
   1704  * ixv_setup_vlan_support
   1705  ************************************************************************/
   1706 static void
   1707 ixv_setup_vlan_support(struct adapter *adapter)
   1708 {
   1709 	struct ethercom *ec = &adapter->osdep.ec;
   1710 	struct ixgbe_hw *hw = &adapter->hw;
   1711 	struct rx_ring  *rxr;
   1712 	u32		ctrl, vid, vfta, retry;
   1713 
   1714 	/*
    1715 	 * We get here through init_locked, meaning a
    1716 	 * soft reset has already cleared the VFTA and
    1717 	 * other state, so if no VLANs have been
    1718 	 * registered there is nothing to do.
   1719 	 */
   1720 	if (!VLAN_ATTACHED(ec))
   1721 		return;
   1722 
   1723 	/* Enable the queues */
   1724 	for (int i = 0; i < adapter->num_queues; i++) {
   1725 		rxr = &adapter->rx_rings[i];
   1726 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1727 		ctrl |= IXGBE_RXDCTL_VME;
   1728 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1729 		/*
   1730 		 * Let Rx path know that it needs to store VLAN tag
   1731 		 * as part of extra mbuf info.
   1732 		 */
   1733 		rxr->vtag_strip = TRUE;
   1734 	}
   1735 
   1736 #if 1
   1737 	/* XXX dirty hack. Enable all VIDs */
   1738 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
    1739 		adapter->shadow_vfta[i] = 0xffffffff;
   1740 #endif
   1741 	/*
    1742 	 * A soft reset zeroes out the VFTA, so
   1743 	 * we need to repopulate it now.
   1744 	 */
   1745 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1746 		if (adapter->shadow_vfta[i] == 0)
   1747 			continue;
   1748 		vfta = adapter->shadow_vfta[i];
   1749 		/*
    1750 		 * Reconstruct the VLAN IDs from the bits
    1751 		 * set in each 32-bit entry of the shadow
    1752 		 * VFTA.
   1753 		 */
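		/*
		 * Example: if bit 4 of shadow_vfta[3] is set, the inner
		 * loop below recovers vid = (3 * 32) + 4 = 100, i.e. the
		 * entry made when VLAN ID 100 was registered.
		 */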
   1754 		for (int j = 0; j < 32; j++) {
   1755 			retry = 0;
   1756 			if ((vfta & (1 << j)) == 0)
   1757 				continue;
   1758 			vid = (i * 32) + j;
   1759 			/* Call the shared code mailbox routine */
   1760 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   1761 				if (++retry > 5)
   1762 					break;
   1763 			}
   1764 		}
   1765 	}
   1766 } /* ixv_setup_vlan_support */
   1767 
   1768 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   1769 /************************************************************************
   1770  * ixv_register_vlan
   1771  *
   1772  *   Run via a vlan config EVENT, it enables us to use the
   1773  *   HW Filter table since we can get the vlan id. This just
   1774  *   creates the entry in the soft version of the VFTA, init
   1775  *   will repopulate the real table.
   1776  ************************************************************************/
   1777 static void
   1778 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1779 {
   1780 	struct adapter	*adapter = ifp->if_softc;
   1781 	u16		index, bit;
   1782 
   1783 	if (ifp->if_softc != arg) /* Not our event */
   1784 		return;
   1785 
   1786 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   1787 		return;
   1788 
   1789 	IXGBE_CORE_LOCK(adapter);
   1790 	index = (vtag >> 5) & 0x7F;
   1791 	bit = vtag & 0x1F;
   1792 	adapter->shadow_vfta[index] |= (1 << bit);
   1793 	/* Re-init to load the changes */
   1794 	ixv_init_locked(adapter);
   1795 	IXGBE_CORE_UNLOCK(adapter);
   1796 } /* ixv_register_vlan */
   1797 
   1798 /************************************************************************
   1799  * ixv_unregister_vlan
   1800  *
   1801  *   Run via a vlan unconfig EVENT, remove our entry
   1802  *   in the soft vfta.
   1803  ************************************************************************/
   1804 static void
   1805 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   1806 {
   1807 	struct adapter	*adapter = ifp->if_softc;
   1808 	u16		index, bit;
   1809 
   1810 	if (ifp->if_softc !=  arg)
   1811 		return;
   1812 
   1813 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   1814 		return;
   1815 
   1816 	IXGBE_CORE_LOCK(adapter);
   1817 	index = (vtag >> 5) & 0x7F;
   1818 	bit = vtag & 0x1F;
   1819 	adapter->shadow_vfta[index] &= ~(1 << bit);
   1820 	/* Re-init to load the changes */
   1821 	ixv_init_locked(adapter);
   1822 	IXGBE_CORE_UNLOCK(adapter);
   1823 } /* ixv_unregister_vlan */
   1824 #endif
   1825 
   1826 /************************************************************************
   1827  * ixv_enable_intr
   1828  ************************************************************************/
   1829 static void
   1830 ixv_enable_intr(struct adapter *adapter)
   1831 {
   1832 	struct ixgbe_hw *hw = &adapter->hw;
   1833 	struct ix_queue *que = adapter->queues;
   1834 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
   1835 
   1836 
   1837 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
   1838 
   1839 	mask = IXGBE_EIMS_ENABLE_MASK;
   1840 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
   1841 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   1842 
   1843 	for (int i = 0; i < adapter->num_queues; i++, que++)
   1844 		ixv_enable_queue(adapter, que->msix);
   1845 
   1846 	IXGBE_WRITE_FLUSH(hw);
   1847 
   1848 	return;
   1849 } /* ixv_enable_intr */
   1850 
   1851 /************************************************************************
   1852  * ixv_disable_intr
   1853  ************************************************************************/
   1854 static void
   1855 ixv_disable_intr(struct adapter *adapter)
   1856 {
   1857 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   1858 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
   1859 	IXGBE_WRITE_FLUSH(&adapter->hw);
   1860 
   1861 	return;
   1862 } /* ixv_disable_intr */
   1863 
   1864 /************************************************************************
   1865  * ixv_set_ivar
   1866  *
   1867  *   Setup the correct IVAR register for a particular MSI-X interrupt
   1868  *    - entry is the register array entry
   1869  *    - vector is the MSI-X vector for this queue
   1870  *    - type is RX/TX/MISC
   1871  ************************************************************************/
   1872 static void
   1873 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   1874 {
   1875 	struct ixgbe_hw *hw = &adapter->hw;
   1876 	u32             ivar, index;
   1877 
   1878 	vector |= IXGBE_IVAR_ALLOC_VAL;
   1879 
   1880 	if (type == -1) { /* MISC IVAR */
   1881 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   1882 		ivar &= ~0xFF;
   1883 		ivar |= vector;
   1884 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   1885 	} else {          /* RX/TX IVARS */
   1886 		index = (16 * (entry & 1)) + (8 * type);
   1887 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   1888 		ivar &= ~(0xFF << index);
   1889 		ivar |= (vector << index);
   1890 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   1891 	}
   1892 } /* ixv_set_ivar */
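/*
 * Illustrative note on the layout assumed above: each VTIVAR register
 * holds four 8-bit entries covering two queues, the low half for the
 * even-numbered queue and the high half for the odd one; the RX entry
 * sits in the lower byte and the TX entry in the upper byte of each
 * half.  For example, mapping RX queue 3 to MSI-X vector 1 yields
 * index = 16 * (3 & 1) + 8 * 0 = 16, so bits 23:16 of VTIVAR(1) are
 * written with (1 | IXGBE_IVAR_ALLOC_VAL).
 */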
   1893 
   1894 /************************************************************************
   1895  * ixv_configure_ivars
   1896  ************************************************************************/
   1897 static void
   1898 ixv_configure_ivars(struct adapter *adapter)
   1899 {
   1900 	struct ix_queue *que = adapter->queues;
   1901 
   1902 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1903 		/* First the RX queue entry */
   1904 		ixv_set_ivar(adapter, i, que->msix, 0);
   1905 		/* ... and the TX */
   1906 		ixv_set_ivar(adapter, i, que->msix, 1);
   1907 		/* Set an initial value in EITR */
   1908 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
   1909 		    IXGBE_EITR_DEFAULT);
   1910 	}
   1911 
   1912 	/* For the mailbox interrupt */
   1913 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   1914 } /* ixv_configure_ivars */
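/*
 * Resulting layout (sketch): both the RX and TX entries of queue i point
 * at MSI-X vector que->msix (normally vector i), while the mailbox/link
 * interrupt goes through the misc IVAR to adapter->vector, which
 * ixv_allocate_msix() assigns the vector following the last queue vector.
 */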
   1915 
   1916 
   1917 /************************************************************************
   1918  * ixv_save_stats
   1919  *
    1920  *   The VF stats registers never start from a clean
    1921  *   baseline, so this routine establishes an artificial
    1922  *   one by recording the counter values present at
    1923  *   attach time as the zero point.
   1924  ************************************************************************/
   1925 static void
   1926 ixv_save_stats(struct adapter *adapter)
   1927 {
   1928 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   1929 
   1930 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   1931 		stats->saved_reset_vfgprc +=
   1932 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   1933 		stats->saved_reset_vfgptc +=
   1934 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   1935 		stats->saved_reset_vfgorc +=
   1936 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   1937 		stats->saved_reset_vfgotc +=
   1938 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   1939 		stats->saved_reset_vfmprc +=
   1940 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   1941 	}
   1942 } /* ixv_save_stats */
   1943 
   1944 /************************************************************************
   1945  * ixv_init_stats
   1946  ************************************************************************/
   1947 static void
   1948 ixv_init_stats(struct adapter *adapter)
   1949 {
   1950 	struct ixgbe_hw *hw = &adapter->hw;
   1951 
   1952 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   1953 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   1954 	adapter->stats.vf.last_vfgorc |=
   1955 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   1956 
   1957 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   1958 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   1959 	adapter->stats.vf.last_vfgotc |=
   1960 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   1961 
   1962 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   1963 
   1964 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   1965 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   1966 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   1967 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   1968 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   1969 } /* ixv_init_stats */
   1970 
   1971 #define UPDATE_STAT_32(reg, last, count)		\
   1972 {                                                       \
   1973 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   1974 	if (current < (last))				\
   1975 		count.ev_count += 0x100000000LL;	\
   1976 	(last) = current;				\
   1977 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   1978 	count.ev_count |= current;			\
   1979 }
   1980 
   1981 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   1982 {                                                       \
   1983 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   1984 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   1985 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   1986 	if (current < (last))				\
   1987 		count.ev_count += 0x1000000000LL;	\
   1988 	(last) = current;				\
   1989 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   1990 	count.ev_count |= current;			\
   1991 }
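/*
 * Worked example (illustrative): the VF counters are 32- or 36-bit
 * hardware registers that wrap.  If the previous VFGPRC reading was
 * 0xFFFFFFF0 and the current one is 0x00000010, then current < last,
 * so 0x100000000 is added to the event count before its low 32 bits
 * are replaced, keeping the accumulated 64-bit value monotonic.
 */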
   1992 
   1993 /************************************************************************
   1994  * ixv_update_stats - Update the board statistics counters.
   1995  ************************************************************************/
   1996 void
   1997 ixv_update_stats(struct adapter *adapter)
   1998 {
   1999 	struct ixgbe_hw *hw = &adapter->hw;
   2000 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2001 
    2002 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
    2003 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
    2004 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
    2005 	    stats->vfgorc);
    2006 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
    2007 	    stats->vfgotc);
    2008 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2009 
   2010 	/* Fill out the OS statistics structure */
   2011 	/*
   2012 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   2013 	 * adapter->stats counters. It's required to make ifconfig -z
   2014 	 * (SOICZIFDATA) work.
    2015 	 * (SIOCZIFDATA) work.
   2016 } /* ixv_update_stats */
   2017 
   2018 const struct sysctlnode *
   2019 ixv_sysctl_instance(struct adapter *adapter)
   2020 {
   2021 	const char *dvname;
   2022 	struct sysctllog **log;
   2023 	int rc;
   2024 	const struct sysctlnode *rnode;
   2025 
   2026 	log = &adapter->sysctllog;
   2027 	dvname = device_xname(adapter->dev);
   2028 
   2029 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2030 	    0, CTLTYPE_NODE, dvname,
   2031 	    SYSCTL_DESCR("ixv information and settings"),
   2032 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2033 		goto err;
   2034 
   2035 	return rnode;
   2036 err:
   2037 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2038 	return NULL;
   2039 }
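/*
 * The node created above sits directly under CTL_HW and is named after
 * the device instance, so for a first instance (assumed to be "ixv0")
 * the per-device settings show up as the hw.ixv0 subtree in sysctl(8).
 */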
   2040 
   2041 static void
   2042 ixv_add_device_sysctls(struct adapter *adapter)
   2043 {
   2044 	struct sysctllog **log;
   2045 	const struct sysctlnode *rnode, *cnode;
   2046 	device_t dev;
   2047 
   2048 	dev = adapter->dev;
   2049 	log = &adapter->sysctllog;
   2050 
   2051 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2052 		aprint_error_dev(dev, "could not create sysctl root\n");
   2053 		return;
   2054 	}
   2055 
   2056 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2057 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2058 	    "debug", SYSCTL_DESCR("Debug Info"),
   2059 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2060 		aprint_error_dev(dev, "could not create sysctl\n");
   2061 
   2062 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2063 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2064 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2065 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2066 		aprint_error_dev(dev, "could not create sysctl\n");
   2067 }
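/*
 * Usage sketch (assuming an instance named ixv0): writing 1 to the
 * "debug" node, e.g. "sysctl -w hw.ixv0.debug=1", runs ixv_sysctl_debug()
 * which dumps per-queue counters via ixv_print_debug_info(), while
 * "enable_aim" simply toggles adapter->enable_aim.
 */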
   2068 
   2069 /************************************************************************
   2070  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2071  ************************************************************************/
   2072 static void
   2073 ixv_add_stats_sysctls(struct adapter *adapter)
   2074 {
   2075 	device_t                dev = adapter->dev;
   2076 	struct tx_ring          *txr = adapter->tx_rings;
   2077 	struct rx_ring          *rxr = adapter->rx_rings;
   2078 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2079 	const struct sysctlnode *rnode;
   2080 	struct sysctllog **log = &adapter->sysctllog;
   2081 	const char *xname = device_xname(dev);
   2082 
   2083 	/* Driver Statistics */
   2084 	evcnt_attach_dynamic(&adapter->handleq, EVCNT_TYPE_MISC,
   2085 	    NULL, xname, "Handled queue in softint");
   2086 	evcnt_attach_dynamic(&adapter->req, EVCNT_TYPE_MISC,
   2087 	    NULL, xname, "Requeued in softint");
   2088 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2089 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2090 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2091 	    NULL, xname, "m_defrag() failed");
   2092 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2093 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2094 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2095 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2096 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2097 	    NULL, xname, "Driver tx dma hard fail other");
   2098 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2099 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2100 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2101 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2102 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2103 	    NULL, xname, "Watchdog timeouts");
   2104 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2105 	    NULL, xname, "TSO errors");
   2106 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2107 	    NULL, xname, "Link MSI-X IRQ Handled");
   2108 
   2109 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2110 		snprintf(adapter->queues[i].evnamebuf,
   2111 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2112 		    xname, i);
   2113 		snprintf(adapter->queues[i].namebuf,
   2114 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2115 
   2116 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2117 			aprint_error_dev(dev, "could not create sysctl root\n");
   2118 			break;
   2119 		}
   2120 
   2121 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2122 		    0, CTLTYPE_NODE,
   2123 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2124 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2125 			break;
   2126 
   2127 #if 0 /* not yet */
   2128 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2129 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2130 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2131 		    ixgbe_sysctl_interrupt_rate_handler, 0,
   2132 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2133 			break;
   2134 
   2135 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2136 		    CTLFLAG_READONLY, CTLTYPE_QUAD,
   2137 		    "irqs", SYSCTL_DESCR("irqs on this queue"),
   2138 			NULL, 0, &(adapter->queues[i].irqs),
   2139 		    0, CTL_CREATE, CTL_EOL) != 0)
   2140 			break;
   2141 
   2142 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2143 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2144 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2145 		    ixgbe_sysctl_tdh_handler, 0, (void *)txr,
   2146 		    0, CTL_CREATE, CTL_EOL) != 0)
   2147 			break;
   2148 
   2149 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2150 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2151 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2152 		    ixgbe_sysctl_tdt_handler, 0, (void *)txr,
   2153 		    0, CTL_CREATE, CTL_EOL) != 0)
   2154 			break;
   2155 #endif
   2156 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2157 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2158 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2159 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2160 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2161 		    NULL, adapter->queues[i].evnamebuf,
   2162 		    "Queue No Descriptor Available");
   2163 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2164 		    NULL, adapter->queues[i].evnamebuf,
   2165 		    "Queue Packets Transmitted");
   2166 #ifndef IXGBE_LEGACY_TX
   2167 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2168 		    NULL, adapter->queues[i].evnamebuf,
   2169 		    "Packets dropped in pcq");
   2170 #endif
   2171 
   2172 #ifdef LRO
   2173 		struct lro_ctrl *lro = &rxr->lro;
   2174 #endif /* LRO */
   2175 
   2176 #if 0 /* not yet */
   2177 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2178 		    CTLFLAG_READONLY,
   2179 		    CTLTYPE_INT,
   2180 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2181 		    ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2182 		    CTL_CREATE, CTL_EOL) != 0)
   2183 			break;
   2184 
   2185 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2186 		    CTLFLAG_READONLY,
   2187 		    CTLTYPE_INT,
   2188 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2189 		    ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2190 		    CTL_CREATE, CTL_EOL) != 0)
   2191 			break;
   2192 #endif
   2193 
   2194 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2195 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2196 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2197 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2198 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2199 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2200 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2201 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2202 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2203 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2204 #ifdef LRO
   2205 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2206 				CTLFLAG_RD, &lro->lro_queued, 0,
   2207 				"LRO Queued");
   2208 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2209 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2210 				"LRO Flushed");
   2211 #endif /* LRO */
   2212 	}
   2213 
   2214 	/* MAC stats get their own sub node */
   2215 
   2216 	snprintf(stats->namebuf,
   2217 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2218 
   2219 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2220 	    stats->namebuf, "rx csum offload - IP");
   2221 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2222 	    stats->namebuf, "rx csum offload - L4");
   2223 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2224 	    stats->namebuf, "rx csum offload - IP bad");
   2225 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2226 	    stats->namebuf, "rx csum offload - L4 bad");
   2227 
   2228 	/* Packet Reception Stats */
   2229 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2230 	    xname, "Good Packets Received");
   2231 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2232 	    xname, "Good Octets Received");
   2233 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2234 	    xname, "Multicast Packets Received");
   2235 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2236 	    xname, "Good Packets Transmitted");
   2237 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2238 	    xname, "Good Octets Transmitted");
   2239 } /* ixv_add_stats_sysctls */
   2240 
   2241 /************************************************************************
   2242  * ixv_set_sysctl_value
   2243  ************************************************************************/
   2244 static void
   2245 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2246 	const char *description, int *limit, int value)
   2247 {
   2248 	device_t dev =  adapter->dev;
   2249 	struct sysctllog **log;
   2250 	const struct sysctlnode *rnode, *cnode;
   2251 
   2252 	log = &adapter->sysctllog;
   2253 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2254 		aprint_error_dev(dev, "could not create sysctl root\n");
   2255 		return;
   2256 	}
   2257 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2258 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2259 	    name, SYSCTL_DESCR(description),
   2260 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2261 		aprint_error_dev(dev, "could not create sysctl\n");
   2262 	*limit = value;
   2263 } /* ixv_set_sysctl_value */
   2264 
   2265 /************************************************************************
   2266  * ixv_print_debug_info
   2267  *
   2268  *   Called only when em_display_debug_stats is enabled.
    2269  *   Invoked from the "debug" sysctl handler (ixv_sysctl_debug).
    2270  *   Provides a way to take a look at important statistics
    2271  *   maintained by the driver and hardware.
   2272 static void
   2273 ixv_print_debug_info(struct adapter *adapter)
   2274 {
    2275 	device_t        dev = adapter->dev;
    2276 	struct ixgbe_hw *hw = &adapter->hw;
    2277 	struct ix_queue *que = adapter->queues;
    2278 	struct rx_ring  *rxr;
    2279 	struct tx_ring  *txr;
    2280 #ifdef LRO
    2281 	struct lro_ctrl *lro;
   2282 #endif /* LRO */
   2283 
   2284 	device_printf(dev, "Error Byte Count = %u \n",
   2285 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2286 
   2287 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2288 		txr = que->txr;
   2289 		rxr = que->rxr;
   2290 #ifdef LRO
   2291 		lro = &rxr->lro;
   2292 #endif /* LRO */
   2293 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2294 		    que->msix, (long)que->irqs.ev_count);
   2295 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2296 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2297 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2298 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2299 #ifdef LRO
   2300 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2301 		    rxr->me, (long long)lro->lro_queued);
   2302 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2303 		    rxr->me, (long long)lro->lro_flushed);
   2304 #endif /* LRO */
   2305 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2306 		    txr->me, (long)txr->total_packets.ev_count);
   2307 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2308 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2309 	}
   2310 
   2311 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2312 	    (long)adapter->link_irq.ev_count);
   2313 } /* ixv_print_debug_info */
   2314 
   2315 /************************************************************************
   2316  * ixv_sysctl_debug
   2317  ************************************************************************/
   2318 static int
   2319 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2320 {
    2321 	struct sysctlnode node;
    2322 	struct adapter *adapter = (struct adapter *)rnode->sysctl_data;
    2323 	int            error, result;
    2324 
    2325 	node = *rnode;
    2326 	node.sysctl_data = &result;
    2327 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2328 
   2329 	if (error || newp == NULL)
   2330 		return error;
   2331 
    2332 	if (result == 1) {
    2333 		/* adapter was captured from rnode->sysctl_data above */
    2334 		ixv_print_debug_info(adapter);
    2335 	}
   2336 
   2337 	return 0;
   2338 } /* ixv_sysctl_debug */
   2339 
   2340 /************************************************************************
   2341  * ixv_init_device_features
   2342  ************************************************************************/
   2343 static void
   2344 ixv_init_device_features(struct adapter *adapter)
   2345 {
   2346 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2347 	                  | IXGBE_FEATURE_VF
   2348 	                  | IXGBE_FEATURE_RSS
   2349 	                  | IXGBE_FEATURE_LEGACY_TX;
   2350 
   2351 	/* A tad short on feature flags for VFs, atm. */
   2352 	switch (adapter->hw.mac.type) {
   2353 	case ixgbe_mac_82599_vf:
   2354 		break;
   2355 	case ixgbe_mac_X540_vf:
   2356 		break;
   2357 	case ixgbe_mac_X550_vf:
   2358 	case ixgbe_mac_X550EM_x_vf:
   2359 	case ixgbe_mac_X550EM_a_vf:
   2360 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2361 		break;
   2362 	default:
   2363 		break;
   2364 	}
   2365 
   2366 	/* Enabled by default... */
   2367 	/* Is a virtual function (VF) */
   2368 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2369 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2370 	/* Netmap */
   2371 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2372 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2373 	/* Receive-Side Scaling (RSS) */
   2374 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2375 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2376 	/* Needs advanced context descriptor regardless of offloads req'd */
   2377 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2378 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2379 
   2380 	/* Enabled via sysctl... */
   2381 	/* Legacy (single queue) transmit */
   2382 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2383 	    ixv_enable_legacy_tx)
   2384 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2385 } /* ixv_init_device_features */
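/*
 * Example outcome (illustrative): on an X550-family VF the code above
 * leaves feat_en with VF, NETMAP, RSS and NEEDS_CTXD set, and adds
 * LEGACY_TX only when the ixv_enable_legacy_tx tunable is nonzero.
 */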
   2386 
   2387 /************************************************************************
   2388  * ixv_shutdown - Shutdown entry point
   2389  ************************************************************************/
   2390 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2391 static int
   2392 ixv_shutdown(device_t dev)
   2393 {
   2394 	struct adapter *adapter = device_private(dev);
   2395 	IXGBE_CORE_LOCK(adapter);
   2396 	ixv_stop(adapter);
   2397 	IXGBE_CORE_UNLOCK(adapter);
   2398 
   2399 	return (0);
   2400 } /* ixv_shutdown */
   2401 #endif
   2402 
   2403 static int
   2404 ixv_ifflags_cb(struct ethercom *ec)
   2405 {
   2406 	struct ifnet *ifp = &ec->ec_if;
   2407 	struct adapter *adapter = ifp->if_softc;
   2408 	int change = ifp->if_flags ^ adapter->if_flags, rc = 0;
   2409 
   2410 	IXGBE_CORE_LOCK(adapter);
   2411 
   2412 	if (change != 0)
   2413 		adapter->if_flags = ifp->if_flags;
   2414 
   2415 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2416 		rc = ENETRESET;
   2417 
   2418 	/* Set up VLAN support and filter */
   2419 	ixv_setup_vlan_support(adapter);
   2420 
   2421 	IXGBE_CORE_UNLOCK(adapter);
   2422 
   2423 	return rc;
   2424 }
   2425 
   2426 
   2427 /************************************************************************
   2428  * ixv_ioctl - Ioctl entry point
   2429  *
   2430  *   Called when the user wants to configure the interface.
   2431  *
   2432  *   return 0 on success, positive on failure
   2433  ************************************************************************/
   2434 static int
   2435 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2436 {
   2437 	struct adapter	*adapter = ifp->if_softc;
   2438 	struct ifcapreq *ifcr = data;
   2439 	struct ifreq	*ifr = data;
   2440 	int             error = 0;
   2441 	int l4csum_en;
   2442 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2443 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2444 
   2445 	switch (command) {
   2446 	case SIOCSIFFLAGS:
   2447 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2448 		break;
   2449 	case SIOCADDMULTI:
   2450 	case SIOCDELMULTI:
   2451 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2452 		break;
   2453 	case SIOCSIFMEDIA:
   2454 	case SIOCGIFMEDIA:
   2455 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2456 		break;
   2457 	case SIOCSIFCAP:
   2458 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2459 		break;
   2460 	case SIOCSIFMTU:
   2461 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2462 		break;
   2463 	default:
   2464 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2465 		break;
   2466 	}
   2467 
   2468 	switch (command) {
   2469 	case SIOCSIFMEDIA:
   2470 	case SIOCGIFMEDIA:
   2471 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2472 	case SIOCSIFCAP:
   2473 		/* Layer-4 Rx checksum offload has to be turned on and
   2474 		 * off as a unit.
   2475 		 */
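		/*
		 * For example, requesting IFCAP_CSUM_TCPv4_Rx alone,
		 * without the other three Rx L4 checksum capabilities,
		 * is rejected with EINVAL below.
		 */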
   2476 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2477 		if (l4csum_en != l4csum && l4csum_en != 0)
   2478 			return EINVAL;
   2479 		/*FALLTHROUGH*/
   2480 	case SIOCADDMULTI:
   2481 	case SIOCDELMULTI:
   2482 	case SIOCSIFFLAGS:
   2483 	case SIOCSIFMTU:
   2484 	default:
   2485 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2486 			return error;
   2487 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2488 			;
   2489 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2490 			IXGBE_CORE_LOCK(adapter);
   2491 			ixv_init_locked(adapter);
   2492 			IXGBE_CORE_UNLOCK(adapter);
   2493 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2494 			/*
   2495 			 * Multicast list has changed; set the hardware filter
   2496 			 * accordingly.
   2497 			 */
   2498 			IXGBE_CORE_LOCK(adapter);
   2499 			ixv_disable_intr(adapter);
   2500 			ixv_set_multi(adapter);
   2501 			ixv_enable_intr(adapter);
   2502 			IXGBE_CORE_UNLOCK(adapter);
   2503 		}
   2504 		return 0;
   2505 	}
   2506 } /* ixv_ioctl */
   2507 
   2508 /************************************************************************
   2509  * ixv_init
   2510  ************************************************************************/
   2511 static int
   2512 ixv_init(struct ifnet *ifp)
   2513 {
   2514 	struct adapter *adapter = ifp->if_softc;
   2515 
   2516 	IXGBE_CORE_LOCK(adapter);
   2517 	ixv_init_locked(adapter);
   2518 	IXGBE_CORE_UNLOCK(adapter);
   2519 
   2520 	return 0;
   2521 } /* ixv_init */
   2522 
   2523 
   2524 /************************************************************************
   2525  * ixv_handle_que
   2526  ************************************************************************/
   2527 static void
   2528 ixv_handle_que(void *context)
   2529 {
   2530 	struct ix_queue *que = context;
   2531 	struct adapter  *adapter = que->adapter;
   2532 	struct tx_ring	*txr = que->txr;
   2533 	struct ifnet    *ifp = adapter->ifp;
   2534 	bool		more;
   2535 
   2536 	adapter->handleq.ev_count++;
   2537 
   2538 	if (ifp->if_flags & IFF_RUNNING) {
   2539 		more = ixgbe_rxeof(que);
   2540 		IXGBE_TX_LOCK(txr);
   2541 		ixgbe_txeof(txr);
   2542 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2543 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2544 				ixgbe_mq_start_locked(ifp, txr);
   2545 		/* Only for queue 0 */
   2546 		/* NetBSD still needs this for CBQ */
   2547 		if ((&adapter->queues[0] == que)
   2548 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2549 			ixgbe_legacy_start_locked(ifp, txr);
   2550 		IXGBE_TX_UNLOCK(txr);
   2551 		if (more) {
   2552 			adapter->req.ev_count++;
   2553 			softint_schedule(que->que_si);
   2554 			return;
   2555 		}
   2556 	}
   2557 
   2558 	/* Re-enable this interrupt */
   2559 	ixv_enable_queue(adapter, que->msix);
   2560 
   2561 	return;
   2562 } /* ixv_handle_que */
   2563 
   2564 /************************************************************************
   2565  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2566  ************************************************************************/
   2567 static int
   2568 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2569 {
   2570 	device_t	dev = adapter->dev;
   2571 	struct ix_queue *que = adapter->queues;
   2572 	struct		tx_ring *txr = adapter->tx_rings;
   2573 	int 		error, msix_ctrl, rid, vector = 0;
   2574 	pci_chipset_tag_t pc;
   2575 	pcitag_t	tag;
   2576 	char		intrbuf[PCI_INTRSTR_LEN];
   2577 	char		intr_xname[32];
   2578 	const char	*intrstr = NULL;
   2579 	kcpuset_t	*affinity;
   2580 	int		cpu_id = 0;
   2581 
   2582 	pc = adapter->osdep.pc;
   2583 	tag = adapter->osdep.tag;
   2584 
   2585 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2586 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2587 	    adapter->osdep.nintrs) != 0) {
   2588 		aprint_error_dev(dev,
   2589 		    "failed to allocate MSI-X interrupt\n");
   2590 		return (ENXIO);
   2591 	}
   2592 
   2593 	kcpuset_create(&affinity, false);
   2594 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2595 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2596 		    device_xname(dev), i);
   2597 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2598 		    sizeof(intrbuf));
   2599 #ifdef IXGBE_MPSAFE
   2600 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2601 		    true);
   2602 #endif
   2603 		/* Set the handler function */
   2604 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2605 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2606 		    intr_xname);
   2607 		if (que->res == NULL) {
   2608 			pci_intr_release(pc, adapter->osdep.intrs,
   2609 			    adapter->osdep.nintrs);
   2610 			aprint_error_dev(dev,
   2611 			    "Failed to register QUE handler\n");
   2612 			kcpuset_destroy(affinity);
   2613 			return (ENXIO);
   2614 		}
   2615 		que->msix = vector;
    2616 		adapter->active_queues |= (u64)1 << que->msix;
   2617 
   2618 		cpu_id = i;
   2619 		/* Round-robin affinity */
   2620 		kcpuset_zero(affinity);
   2621 		kcpuset_set(affinity, cpu_id % ncpu);
   2622 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2623 		    NULL);
   2624 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2625 		    intrstr);
   2626 		if (error == 0)
   2627 			aprint_normal(", bound queue %d to cpu %d\n",
   2628 			    i, cpu_id % ncpu);
   2629 		else
   2630 			aprint_normal("\n");
   2631 
   2632 #ifndef IXGBE_LEGACY_TX
   2633 		txr->txr_si
   2634 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2635 			ixgbe_deferred_mq_start, txr);
   2636 #endif
   2637 		que->que_si
   2638 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2639 			ixv_handle_que, que);
   2640 		if (que->que_si == NULL) {
   2641 			aprint_error_dev(dev,
   2642 			    "could not establish software interrupt\n");
   2643 		}
   2644 	}
   2645 
   2646 	/* and Mailbox */
   2647 	cpu_id++;
   2648 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   2649 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   2650 	    sizeof(intrbuf));
   2651 #ifdef IXGBE_MPSAFE
   2652 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   2653 	    true);
   2654 #endif
   2655 	/* Set the mbx handler function */
   2656 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   2657 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   2658 	    intr_xname);
   2659 	if (adapter->osdep.ihs[vector] == NULL) {
   2660 		adapter->res = NULL;
   2661 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   2662 		kcpuset_destroy(affinity);
   2663 		return (ENXIO);
   2664 	}
   2665 	/* Round-robin affinity */
   2666 	kcpuset_zero(affinity);
   2667 	kcpuset_set(affinity, cpu_id % ncpu);
    2668 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, NULL);
   2669 
   2670 	aprint_normal_dev(dev,
   2671 	    "for link, interrupting at %s", intrstr);
   2672 	if (error == 0)
   2673 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   2674 	else
   2675 		aprint_normal("\n");
   2676 
   2677 	adapter->vector = vector;
   2678 	/* Tasklets for Mailbox */
   2679 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
   2680 	    ixv_handle_link, adapter);
   2681 	/*
   2682 	 * Due to a broken design QEMU will fail to properly
   2683 	 * enable the guest for MSI-X unless the vectors in
   2684 	 * the table are all set up, so we must rewrite the
   2685 	 * ENABLE in the MSI-X control register again at this
   2686 	 * point to cause it to successfully initialize us.
   2687 	 */
   2688 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   2689 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   2690 		rid += PCI_MSIX_CTL;
   2691 		msix_ctrl = pci_conf_read(pc, tag, rid);
   2692 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   2693 		pci_conf_write(pc, tag, rid, msix_ctrl);
   2694 	}
   2695 
   2696 	kcpuset_destroy(affinity);
   2697 	return (0);
   2698 } /* ixv_allocate_msix */
   2699 
   2700 /************************************************************************
   2701  * ixv_configure_interrupts - Setup MSI-X resources
   2702  *
   2703  *   Note: The VF device MUST use MSI-X, there is no fallback.
   2704  ************************************************************************/
   2705 static int
   2706 ixv_configure_interrupts(struct adapter *adapter)
   2707 {
   2708 	device_t dev = adapter->dev;
   2709 	int want, queues, msgs;
   2710 
   2711 	/* Must have at least 2 MSI-X vectors */
   2712 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   2713 	if (msgs < 2) {
   2714 		aprint_error_dev(dev, "MSIX config error\n");
   2715 		return (ENXIO);
   2716 	}
   2717 	msgs = MIN(msgs, IXG_MAX_NINTR);
   2718 
   2719 	/* Figure out a reasonable auto config value */
   2720 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   2721 
   2722 	if (ixv_num_queues != 0)
   2723 		queues = ixv_num_queues;
   2724 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   2725 		queues = IXGBE_VF_MAX_TX_QUEUES;
   2726 
   2727 	/*
   2728 	 * Want vectors for the queues,
   2729 	 * plus an additional for mailbox.
   2730 	 */
   2731 	want = queues + 1;
   2732 	if (msgs >= want)
   2733 		msgs = want;
   2734 	else {
    2735 		aprint_error_dev(dev,
   2736 		    "MSI-X Configuration Problem, "
   2737 		    "%d vectors but %d queues wanted!\n",
   2738 		    msgs, want);
   2739 		return -1;
   2740 	}
   2741 
   2742 	adapter->msix_mem = (void *)1; /* XXX */
   2743 	aprint_normal_dev(dev,
   2744 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   2745 	adapter->num_queues = queues;
   2746 
   2747 	return (0);
   2748 } /* ixv_configure_interrupts */
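/*
 * Worked example (illustrative): with 4 CPUs and a VF exposing 3 MSI-X
 * vectors, queues = 2 (unless the ixv_num_queues tunable overrides it),
 * want = 3, and the driver ends up with two queue vectors plus one
 * mailbox vector.
 */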
   2749 
   2750 
   2751 /************************************************************************
   2752  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   2753  *
   2754  *   Done outside of interrupt context since the driver might sleep
   2755  ************************************************************************/
   2756 static void
   2757 ixv_handle_link(void *context)
   2758 {
   2759 	struct adapter *adapter = context;
   2760 
   2761 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2762 	    &adapter->link_up, FALSE);
   2763 	ixv_update_link_status(adapter);
   2764 } /* ixv_handle_link */
   2765 
   2766 /************************************************************************
   2767  * ixv_check_link - Used in the local timer to poll for link changes
   2768  ************************************************************************/
   2769 static void
   2770 ixv_check_link(struct adapter *adapter)
   2771 {
   2772 	adapter->hw.mac.get_link_status = TRUE;
   2773 
   2774 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   2775 	    &adapter->link_up, FALSE);
   2776 	ixv_update_link_status(adapter);
   2777 } /* ixv_check_link */
   2778