Home | History | Annotate | Line # | Download | only in ixgbe
ixv.c revision 1.118
      1 /*$NetBSD: ixv.c,v 1.118 2019/07/04 09:02:24 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 #ifdef _KERNEL_OPT
     38 #include "opt_inet.h"
     39 #include "opt_inet6.h"
     40 #include "opt_net_mpsafe.h"
     41 #endif
     42 
     43 #include "ixgbe.h"
     44 #include "vlan.h"
     45 
/************************************************************************
 * Driver version
 *
 *   Reported to the user at attach time together with the branding
 *   string; "-k" marks the in-kernel (as opposed to out-of-tree) build.
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";
/* XXX NetBSD: + 1.5.17 */
     51 
/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *   A SubVendor/SubDevice ID of 0 acts as a wildcard: ixv_lookup()
 *   accepts any subsystem ID for such an entry.
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry: vendor_id == 0 terminates the lookup loop */
	{0, 0, 0, 0, 0}
};
     71 
/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last ("String Index") field of the entries in
 *   ixv_vendor_info_array; printed by ixv_attach().
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int	ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int	ixv_detach(device_t, int);
     85 #if 0
     86 static int	ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int	ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void	ixv_stop(void *);
     94 static void	ixv_init_device_features(struct adapter *);
     95 static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int	ixv_media_change(struct ifnet *);
     97 static int	ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static int	ixv_allocate_msix(struct adapter *,
    100 		    const struct pci_attach_args *);
    101 static int	ixv_configure_interrupts(struct adapter *);
    102 static void	ixv_free_pci_resources(struct adapter *);
    103 static void	ixv_local_timer(void *);
    104 static void	ixv_local_timer_locked(void *);
    105 static int	ixv_setup_interface(device_t, struct adapter *);
    106 static int	ixv_negotiate_api(struct adapter *);
    107 
    108 static void	ixv_initialize_transmit_units(struct adapter *);
    109 static void	ixv_initialize_receive_units(struct adapter *);
    110 static void	ixv_initialize_rss_mapping(struct adapter *);
    111 static s32	ixv_check_link(struct adapter *);
    112 
    113 static void	ixv_enable_intr(struct adapter *);
    114 static void	ixv_disable_intr(struct adapter *);
    115 static void	ixv_set_multi(struct adapter *);
    116 static void	ixv_update_link_status(struct adapter *);
    117 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    118 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    119 static void	ixv_configure_ivars(struct adapter *);
    120 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    121 static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
    122 
    123 static void	ixv_setup_vlan_support(struct adapter *);
    124 #if 0
    125 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    126 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    127 #endif
    128 
    129 static void	ixv_add_device_sysctls(struct adapter *);
    130 static void	ixv_save_stats(struct adapter *);
    131 static void	ixv_init_stats(struct adapter *);
    132 static void	ixv_update_stats(struct adapter *);
    133 static void	ixv_add_stats_sysctls(struct adapter *);
    134 
    135 
    136 /* Sysctl handlers */
    137 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    138 		    const char *, int *, int);
    139 static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    140 static int	ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    141 static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
    142 static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
    143 static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
    144 static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
    145 
    146 /* The MSI-X Interrupt handlers */
    147 static int	ixv_msix_que(void *);
    148 static int	ixv_msix_mbx(void *);
    149 
    150 /* Deferred interrupt tasklets */
    151 static void	ixv_handle_que(void *);
    152 static void	ixv_handle_link(void *);
    153 
    154 /* Workqueue handler for deferred work */
    155 static void	ixv_handle_que_work(struct work *, void *);
    156 
    157 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    158 static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    159 
/************************************************************************
 * Device Interface Entry Points
 *
 *   NetBSD autoconf glue: registers probe/attach/detach for the "ixv"
 *   driver with a per-device softc of sizeof(struct adapter).
 *   DVF_DETACH_SHUTDOWN allows detach to run at system shutdown.
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);
    166 
    167 #if 0
    168 static driver_t ixv_driver = {
    169 	"ixv", ixv_methods, sizeof(struct adapter),
    170 };
    171 
    172 devclass_t ixv_devclass;
    173 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    174 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    175 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    176 #endif
    177 
/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * NetBSD has no FreeBSD-style loader tunables: TUNABLE_INT is defined
 * away, so the TUNABLE_INT() lines below are no-ops kept to document
 * the corresponding FreeBSD knob names.
 */
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* Default interrupt-rate ceiling derived from the low-latency EITR value */
static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Which packet processing uses workqueue or softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * With NET_MPSAFE the callouts, softints and workqueues run without the
 * kernel lock; otherwise they take the traditional KERNEL_LOCK path.
 * NOTE(review): "SOFTINFT" looks like a misspelling of "SOFTINT" but is
 * presumably referenced elsewhere in this file — rename both or neither.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    237 
    238 #if 0
    239 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    240 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    241 #endif
    242 
    243 /************************************************************************
    244  * ixv_probe - Device identification routine
    245  *
    246  *   Determines if the driver should be loaded on
    247  *   adapter based on its PCI vendor/device ID.
    248  *
    249  *   return BUS_PROBE_DEFAULT on success, positive on failure
    250  ************************************************************************/
    251 static int
    252 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    253 {
    254 #ifdef __HAVE_PCI_MSI_MSIX
    255 	const struct pci_attach_args *pa = aux;
    256 
    257 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    258 #else
    259 	return 0;
    260 #endif
    261 } /* ixv_probe */
    262 
    263 static const ixgbe_vendor_info_t *
    264 ixv_lookup(const struct pci_attach_args *pa)
    265 {
    266 	const ixgbe_vendor_info_t *ent;
    267 	pcireg_t subid;
    268 
    269 	INIT_DEBUGOUT("ixv_lookup: begin");
    270 
    271 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    272 		return NULL;
    273 
    274 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    275 
    276 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    277 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    278 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    279 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    280 		     (ent->subvendor_id == 0)) &&
    281 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    282 		     (ent->subdevice_id == 0))) {
    283 			return ent;
    284 		}
    285 	}
    286 
    287 	return NULL;
    288 }
    289 
/************************************************************************
 * ixv_attach - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   On failure all partially acquired resources are released via the
 *   err_late/err_out labels and osdep.attached stays false, which makes
 *   ixv_detach() a no-op for this instance.
 ************************************************************************/
static void
ixv_attach(device_t parent, device_t dev, void *aux)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int		error = 0;
	pcireg_t	id, subid;
	const ixgbe_vendor_info_t *ent;
	const struct pci_attach_args *pa = aux;
	const char *apivstr;
	const char *str;
	char buf[256];

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_private(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	/* VF-specific init/stop entry points used by shared ixgbe code */
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	adapter->osdep.pc = pa->pa_pc;
	adapter->osdep.tag = pa->pa_tag;
	/* Prefer the 64-bit DMA tag when the platform provides one */
	if (pci_dma64_available(pa))
		adapter->osdep.dmat = pa->pa_dmat64;
	else
		adapter->osdep.dmat = pa->pa_dmat;
	adapter->osdep.attached = false;

	ent = ixv_lookup(pa);

	/* probe already matched, so lookup cannot fail here */
	KASSERT(ent != NULL);

	aprint_normal(": %s, Version - %s\n",
	    ixv_strings[ent->index], ixv_driver_version);

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter, pa)) {
		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	ixv_add_device_sysctls(adapter);

	/* Set up the timer callout */
	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);

	/* Save off the information about this board */
	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	hw->vendor_id = PCI_VENDOR(id);
	hw->device_id = PCI_PRODUCT(id);
	hw->revision_id =
	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		str = "82599 VF";
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		str = "X540 VF";
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		str = "X550 VF";
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		str = "X550EM X VF";
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		str = "X550EM A VF";
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		aprint_error_dev(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}
	aprint_normal_dev(dev, "device %s\n", str);

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset mbox api to 1.0 */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		aprint_error_dev(dev, "...init_hw() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version (non-fatal: we fall back to 1.0) */
	error = ixv_negotiate_api(adapter);
	if (error)
		aprint_normal_dev(dev,
		    "MBX API negotiation failed during attach!\n");
	switch (hw->api_version) {
	case ixgbe_mbox_api_10:
		apivstr = "1.0";
		break;
	case ixgbe_mbox_api_20:
		apivstr = "2.0";
		break;
	case ixgbe_mbox_api_11:
		apivstr = "1.1";
		break;
	case ixgbe_mbox_api_12:
		apivstr = "1.2";
		break;
	case ixgbe_mbox_api_13:
		apivstr = "1.3";
		break;
	default:
		apivstr = "unknown";
		break;
	}
	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		uint64_t rndval = cprng_strong64();

		memcpy(addr, &rndval, sizeof(addr));
		/* clear multicast bit, set locally-administered bit */
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Register for VLAN events */
#if 0 /* XXX delete after write? */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: counts must keep the ring
	 * DBA_ALIGN-aligned and stay within [MIN_TXD, MAX_TXD].
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		aprint_error_dev(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		aprint_error_dev(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* hw.ix defaults init */
	adapter->enable_aim = ixv_enable_aim;

	adapter->txrx_use_workqueue = ixv_txrx_workqueue;

	error = ixv_allocate_msix(adapter, pa);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Setup OS specific network interface */
	error = ixv_setup_interface(dev, adapter);
	if (error != 0) {
		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	/* Report capable vs. actually enabled feature flags */
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
	aprint_verbose_dev(dev, "feature cap %s\n", buf);
	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
	aprint_verbose_dev(dev, "feature ena %s\n", buf);

	INIT_DEBUGOUT("ixv_attach: end");
	adapter->osdep.attached = true;

	return;

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return;
} /* ixv_attach */
    564 
/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   The teardown order mirrors attach in reverse; the evcnt_detach()
 *   calls below must stay in sync with the counters attached in
 *   ixv_add_stats_sysctls().
 *
 *   return 0 on success, positive on failure (EBUSY when VLANs are
 *   still configured on the interface)
 ************************************************************************/
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter	*adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* Nothing to undo if attach bailed out early */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		;	/* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Tear down the deferred-processing softints for every queue */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
			softint_disestablish(txr->txr_si);
		softint_disestablish(que->que_si);
	}
	/* ... and the optional workqueue alternatives */
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Mailbox(link) queue */
	softint_disestablish(adapter->link_si);

	/* Unregister VLAN events */
#if 0 /* XXX msaitoh delete after write? */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

	ether_ifdetach(adapter->ifp);
	/* Wait for any in-flight watchdog callout to finish */
	callout_halt(&adapter->timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	sysctl_teardown(&adapter->sysctllog);
	/* Driver-global event counters */
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);

	/* Per-queue counters (txr was advanced above, so rewind it) */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	/* Checksum-offload counters */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (int i = 0; i < adapter->num_queues; i++) {
		struct ix_queue *lque = &adapter->queues[i];
		mutex_destroy(&lque->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
    706 
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Caller must hold the core lock (asserted below).  On failure to
 *   set up TX/RX structures the adapter is stopped and the function
 *   returns without marking the interface IFF_RUNNING.
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue	*que;
	int		error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);
	/* Restart the nested interrupt-disable accounting from zero */
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

#if 0 /* XXX isn't it required? -- msaitoh  */
	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}
#endif

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: link vector plus one bit per queue vector */
	mask = (1 << adapter->vector);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
    834 
    835 /************************************************************************
    836  * ixv_enable_queue
    837  ************************************************************************/
    838 static inline void
    839 ixv_enable_queue(struct adapter *adapter, u32 vector)
    840 {
    841 	struct ixgbe_hw *hw = &adapter->hw;
    842 	struct ix_queue *que = &adapter->queues[vector];
    843 	u32		queue = 1 << vector;
    844 	u32		mask;
    845 
    846 	mutex_enter(&que->dc_mtx);
    847 	if (que->disabled_count > 0 && --que->disabled_count > 0)
    848 		goto out;
    849 
    850 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    851 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    852 out:
    853 	mutex_exit(&que->dc_mtx);
    854 } /* ixv_enable_queue */
    855 
    856 /************************************************************************
    857  * ixv_disable_queue
    858  ************************************************************************/
    859 static inline void
    860 ixv_disable_queue(struct adapter *adapter, u32 vector)
    861 {
    862 	struct ixgbe_hw *hw = &adapter->hw;
    863 	struct ix_queue *que = &adapter->queues[vector];
    864 	u64		queue = (u64)(1 << vector);
    865 	u32		mask;
    866 
    867 	mutex_enter(&que->dc_mtx);
    868 	if (que->disabled_count++ > 0)
    869 		goto  out;
    870 
    871 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    872 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    873 out:
    874 	mutex_exit(&que->dc_mtx);
    875 } /* ixv_disable_queue */
    876 
#if 0
/*
 * ixv_rearm_queues - trigger an interrupt on the queues given by the
 * 'queues' bitmap by writing VTEICS (interrupt cause set).
 *
 * Disabled, but kept for the matching #if 0 path in
 * ixv_local_timer_locked() that would force an IRQ on queues that still
 * have work.
 */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif
    885 
    886 
    887 /************************************************************************
    888  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    889  ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/*
	 * Mask this queue's interrupt for the duration of the handler.
	 * It is re-enabled below, or by the softint when RX work is
	 * deferred (the NetBSD case).
	 */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Use the larger of the TX/RX average bytes-per-packet. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * If RX work remains (always true on NetBSD, see above), defer
	 * it to the softint, which is responsible for re-enabling the
	 * queue interrupt when done.
	 */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
    977 
    978 /************************************************************************
    979  * ixv_msix_mbx
    980  ************************************************************************/
    981 static int
    982 ixv_msix_mbx(void *arg)
    983 {
    984 	struct adapter	*adapter = arg;
    985 	struct ixgbe_hw *hw = &adapter->hw;
    986 
    987 	++adapter->link_irq.ev_count;
    988 	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    989 
    990 	/* Link status change */
    991 	hw->mac.get_link_status = TRUE;
    992 	softint_schedule(adapter->link_si);
    993 
    994 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
    995 
    996 	return 1;
    997 } /* ixv_msix_mbx */
    998 
    999 static void
   1000 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   1001 {
   1002 
   1003 	/*
   1004 	 * Newer devices than 82598 have VF function, so this function is
   1005 	 * simple.
   1006 	 */
   1007 	itr |= IXGBE_EITR_CNT_WDIS;
   1008 
   1009 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
   1010 }
   1011 
   1012 
   1013 /************************************************************************
   1014  * ixv_media_status - Media Ioctl callback
   1015  *
   1016  *   Called whenever the user queries the status of
   1017  *   the interface using ifconfig.
   1018  ************************************************************************/
   1019 static void
   1020 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   1021 {
   1022 	struct adapter *adapter = ifp->if_softc;
   1023 
   1024 	INIT_DEBUGOUT("ixv_media_status: begin");
   1025 	IXGBE_CORE_LOCK(adapter);
   1026 	ixv_update_link_status(adapter);
   1027 
   1028 	ifmr->ifm_status = IFM_AVALID;
   1029 	ifmr->ifm_active = IFM_ETHER;
   1030 
   1031 	if (adapter->link_active != LINK_STATE_UP) {
   1032 		ifmr->ifm_active |= IFM_NONE;
   1033 		IXGBE_CORE_UNLOCK(adapter);
   1034 		return;
   1035 	}
   1036 
   1037 	ifmr->ifm_status |= IFM_ACTIVE;
   1038 
   1039 	switch (adapter->link_speed) {
   1040 		case IXGBE_LINK_SPEED_10GB_FULL:
   1041 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1042 			break;
   1043 		case IXGBE_LINK_SPEED_5GB_FULL:
   1044 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   1045 			break;
   1046 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   1047 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   1048 			break;
   1049 		case IXGBE_LINK_SPEED_1GB_FULL:
   1050 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1051 			break;
   1052 		case IXGBE_LINK_SPEED_100_FULL:
   1053 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1054 			break;
   1055 		case IXGBE_LINK_SPEED_10_FULL:
   1056 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   1057 			break;
   1058 	}
   1059 
   1060 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   1061 
   1062 	IXGBE_CORE_UNLOCK(adapter);
   1063 } /* ixv_media_status */
   1064 
   1065 /************************************************************************
   1066  * ixv_media_change - Media Ioctl callback
   1067  *
   1068  *   Called when the user changes speed/duplex using
   1069  *   media/mediopt option with ifconfig.
   1070  ************************************************************************/
   1071 static int
   1072 ixv_media_change(struct ifnet *ifp)
   1073 {
   1074 	struct adapter *adapter = ifp->if_softc;
   1075 	struct ifmedia *ifm = &adapter->media;
   1076 
   1077 	INIT_DEBUGOUT("ixv_media_change: begin");
   1078 
   1079 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1080 		return (EINVAL);
   1081 
   1082 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1083 	case IFM_AUTO:
   1084 		break;
   1085 	default:
   1086 		device_printf(adapter->dev, "Only auto media type\n");
   1087 		return (EINVAL);
   1088 	}
   1089 
   1090 	return (0);
   1091 } /* ixv_media_change */
   1092 
   1093 
   1094 /************************************************************************
   1095  * ixv_negotiate_api
   1096  *
   1097  *   Negotiate the Mailbox API with the PF;
   1098  *   start with the most featured API first.
   1099  ************************************************************************/
   1100 static int
   1101 ixv_negotiate_api(struct adapter *adapter)
   1102 {
   1103 	struct ixgbe_hw *hw = &adapter->hw;
   1104 	int		mbx_api[] = { ixgbe_mbox_api_11,
   1105 				      ixgbe_mbox_api_10,
   1106 				      ixgbe_mbox_api_unknown };
   1107 	int		i = 0;
   1108 
   1109 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1110 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1111 			return (0);
   1112 		i++;
   1113 	}
   1114 
   1115 	return (EINVAL);
   1116 } /* ixv_negotiate_api */
   1117 
   1118 
   1119 /************************************************************************
   1120  * ixv_set_multi - Multicast Update
   1121  *
   1122  *   Called whenever multicast address list is updated.
   1123  ************************************************************************/
   1124 static void
   1125 ixv_set_multi(struct adapter *adapter)
   1126 {
   1127 	struct ether_multi *enm;
   1128 	struct ether_multistep step;
   1129 	struct ethercom *ec = &adapter->osdep.ec;
   1130 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1131 	u8		   *update_ptr;
   1132 	int		   mcnt = 0;
   1133 
   1134 	KASSERT(mutex_owned(&adapter->core_mtx));
   1135 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1136 
   1137 	ETHER_LOCK(ec);
   1138 	ETHER_FIRST_MULTI(step, ec, enm);
   1139 	while (enm != NULL) {
   1140 		bcopy(enm->enm_addrlo,
   1141 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1142 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1143 		mcnt++;
   1144 		/* XXX This might be required --msaitoh */
   1145 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1146 			break;
   1147 		ETHER_NEXT_MULTI(step, enm);
   1148 	}
   1149 	ETHER_UNLOCK(ec);
   1150 
   1151 	update_ptr = mta;
   1152 
   1153 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1154 	    ixv_mc_array_itr, TRUE);
   1155 } /* ixv_set_multi */
   1156 
   1157 /************************************************************************
   1158  * ixv_mc_array_itr
   1159  *
   1160  *   An iterator function needed by the multicast shared code.
   1161  *   It feeds the shared code routine the addresses in the
   1162  *   array of ixv_set_multi() one by one.
   1163  ************************************************************************/
   1164 static u8 *
   1165 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1166 {
   1167 	u8 *addr = *update_ptr;
   1168 	u8 *newptr;
   1169 
   1170 	*vmdq = 0;
   1171 
   1172 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1173 	*update_ptr = newptr;
   1174 
   1175 	return addr;
   1176 } /* ixv_mc_array_itr */
   1177 
   1178 /************************************************************************
   1179  * ixv_local_timer - Timer routine
   1180  *
   1181  *   Checks for link status, updates statistics,
   1182  *   and runs the watchdog check.
   1183  ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Take the core lock and run the real timer body. */
	IXGBE_CORE_LOCK(sc);
	ixv_local_timer_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
   1193 
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* On a link problem report, reinitialize and start over. */
	if (ixv_check_link(adapter)) {
		ixv_init_locked(adapter);
		return;
	}

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Update some event counters: sum the per-TX-ring error
	 * counters into the adapter-wide evcnts.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 *
		 * NOTE(review): this tests que->busy while the code
		 * below sets que->txr->busy = IXGBE_QUEUE_HUNG --
		 * confirm these two fields are kept in sync by txeof.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}
#endif

	/* Re-arm ourselves for the next tick. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* All queues hung: reset the interface. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
   1292 
   1293 /************************************************************************
   1294  * ixv_update_link_status - Update OS on link state
   1295  *
   1296  * Note: Only updates the OS on the cached link state.
   1297  *	 The real check of the hardware only happens with
   1298  *	 a link interrupt.
   1299  ************************************************************************/
   1300 static void
   1301 ixv_update_link_status(struct adapter *adapter)
   1302 {
   1303 	struct ifnet *ifp = adapter->ifp;
   1304 	device_t     dev = adapter->dev;
   1305 
   1306 	KASSERT(mutex_owned(&adapter->core_mtx));
   1307 
   1308 	if (adapter->link_up) {
   1309 		if (adapter->link_active != LINK_STATE_UP) {
   1310 			if (bootverbose) {
   1311 				const char *bpsmsg;
   1312 
   1313 				switch (adapter->link_speed) {
   1314 				case IXGBE_LINK_SPEED_10GB_FULL:
   1315 					bpsmsg = "10 Gbps";
   1316 					break;
   1317 				case IXGBE_LINK_SPEED_5GB_FULL:
   1318 					bpsmsg = "5 Gbps";
   1319 					break;
   1320 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1321 					bpsmsg = "2.5 Gbps";
   1322 					break;
   1323 				case IXGBE_LINK_SPEED_1GB_FULL:
   1324 					bpsmsg = "1 Gbps";
   1325 					break;
   1326 				case IXGBE_LINK_SPEED_100_FULL:
   1327 					bpsmsg = "100 Mbps";
   1328 					break;
   1329 				case IXGBE_LINK_SPEED_10_FULL:
   1330 					bpsmsg = "10 Mbps";
   1331 					break;
   1332 				default:
   1333 					bpsmsg = "unknown speed";
   1334 					break;
   1335 				}
   1336 				device_printf(dev, "Link is up %s %s \n",
   1337 				    bpsmsg, "Full Duplex");
   1338 			}
   1339 			adapter->link_active = LINK_STATE_UP;
   1340 			if_link_state_change(ifp, LINK_STATE_UP);
   1341 		}
   1342 	} else {
   1343 		/*
   1344 		 * Do it when link active changes to DOWN. i.e.
   1345 		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
   1346 		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
   1347 		 */
   1348 		if (adapter->link_active != LINK_STATE_DOWN) {
   1349 			if (bootverbose)
   1350 				device_printf(dev, "Link is Down\n");
   1351 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1352 			adapter->link_active = LINK_STATE_DOWN;
   1353 		}
   1354 	}
   1355 } /* ixv_update_link_status */
   1356 
   1357 
   1358 /************************************************************************
   1359  * ixv_stop - Stop the hardware
   1360  *
   1361  *   Disables all traffic on the adapter by issuing a
   1362  *   global reset on the MAC and deallocates TX/RX buffers.
   1363  ************************************************************************/
   1364 static void
   1365 ixv_ifstop(struct ifnet *ifp, int disable)
   1366 {
   1367 	struct adapter *adapter = ifp->if_softc;
   1368 
   1369 	IXGBE_CORE_LOCK(adapter);
   1370 	ixv_stop(adapter);
   1371 	IXGBE_CORE_UNLOCK(adapter);
   1372 }
   1373 
static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared between reset_hw and
	 * stop_adapter -- presumably so stop_adapter does its work even
	 * after the reset marked the adapter stopped; confirm against
	 * the shared-code implementations.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* Stop the watchdog/stats timer. */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
   1401 
   1402 
   1403 /************************************************************************
   1404  * ixv_allocate_pci_resources
   1405  ************************************************************************/
/*
 * Map BAR0 (the device register window) and pick up the tunable queue
 * count.  Returns 0 on success or ENXIO if the BAR cannot be mapped.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype, csr;
	device_t	dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 tells ixv_free_pci_resources
			 * there is nothing to unmap. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
   1455 
   1456 /************************************************************************
   1457  * ixv_free_pci_resources
   1458  ************************************************************************/
/*
 * Tear down what ixv_allocate_pci_resources() and the MSI-X setup
 * established: queue interrupt handlers, the mailbox handler, the
 * interrupt vectors, and the BAR0 mapping.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct		ix_queue *que = adapter->queues;
	int		rid;

	/*
	 *  Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/*
		 * NOTE(review): the guard tests que->res but the
		 * disestablish uses osdep.ihs[i]; confirm the two are
		 * set/cleared together at establish time.
		 */
		if (que->res != NULL)
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
	}


	/* Clean the Mailbox interrupt last */
	rid = adapter->vector;

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
	    adapter->osdep.nintrs);

	/* mem_size is zero when BAR0 mapping failed or never happened. */
	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

	return;
} /* ixv_free_pci_resources */
   1495 
   1496 /************************************************************************
   1497  * ixv_setup_interface
   1498  *
   1499  *   Setup networking device structure and register an interface.
   1500  ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* Fill in the ifnet embedded in our ethercom. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		/* Multiqueue transmit entry point. */
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach order matters: if_initialize, then ether_ifattach,
	 * then if_register at the very end. */
	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |	IFCAP_TSOv4
			     |	IFCAP_TSOv6;
	/* Checksum/TSO offload is opt-in. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
#if 0
	/* NetBSD doesn't support LRO yet */
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return 0;
} /* ixv_setup_interface */
   1589 
   1590 
   1591 /************************************************************************
   1592  * ixv_initialize_transmit_units - Enable transmit unit.
   1593  ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	int i;

	/* Program each TX queue's registers, then enable it. */
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->txr_no_space = false;

		/* Set Ring parameters: base address (split low/high)
		 * and length in bytes. */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed ordering of descriptor writes. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
   1638 
   1639 
   1640 /************************************************************************
   1641  * ixv_initialize_rss_mapping
   1642  ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD always takes the stack-provided key. */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table: 64 entries, each a queue id,
	 * packed four 8-bit entries per VFRETA register. */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush the register. */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the hash config into MRQC field bits; warn about
	 * hash types this hardware does not support. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
   1736 
   1737 
   1738 /************************************************************************
   1739  * ixv_initialize_receive_units - Setup receive registers and features.
   1740  ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, psrtype;

	/*
	 * Pick the receive buffer size (encoded in SRRCTL BSIZEPKT units):
	 * 4KB clusters for jumbo MTU, 2KB otherwise.
	 */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Header split/replication packet types recognized by the VF. */
	psrtype = IXGBE_PSRTYPE_TCPHDR
		| IXGBE_PSRTYPE_UDPHDR
		| IXGBE_PSRTYPE_IPV4HDR
		| IXGBE_PSRTYPE_IPV6HDR
		| IXGBE_PSRTYPE_L2HDR;

	/*
	 * With multiple queues also set bit 29 of VFPSRTYPE
	 * (NOTE(review): presumably the RQPL "queues per pool" field
	 * used for RSS — confirm against the 82599 datasheet).
	 */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	/* Program each RX queue: disable, configure, then re-enable. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll up to 10ms for the disable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register: buffer size + advanced one-buffer descriptors. */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail register index for the RX-refill path. */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Poll up to 10ms for the enable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Finally program the RSS redirection table and hash config. */
	ixv_initialize_rss_mapping(adapter);
} /* ixv_initialize_receive_units */
   1853 
   1854 /************************************************************************
   1855  * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
   1856  *
   1857  *   Retrieves the TDH value from the hardware
   1858  ************************************************************************/
   1859 static int
   1860 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
   1861 {
   1862 	struct sysctlnode node = *rnode;
   1863 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1864 	uint32_t val;
   1865 
   1866 	if (!txr)
   1867 		return (0);
   1868 
   1869 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
   1870 	node.sysctl_data = &val;
   1871 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1872 } /* ixv_sysctl_tdh_handler */
   1873 
   1874 /************************************************************************
   1875  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   1876  *
   1877  *   Retrieves the TDT value from the hardware
   1878  ************************************************************************/
   1879 static int
   1880 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
   1881 {
   1882 	struct sysctlnode node = *rnode;
   1883 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1884 	uint32_t val;
   1885 
   1886 	if (!txr)
   1887 		return (0);
   1888 
   1889 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
   1890 	node.sysctl_data = &val;
   1891 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1892 } /* ixv_sysctl_tdt_handler */
   1893 
   1894 /************************************************************************
   1895  * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
   1896  * handler function
   1897  *
   1898  *   Retrieves the next_to_check value
   1899  ************************************************************************/
   1900 static int
   1901 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   1902 {
   1903 	struct sysctlnode node = *rnode;
   1904 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1905 	uint32_t val;
   1906 
   1907 	if (!rxr)
   1908 		return (0);
   1909 
   1910 	val = rxr->next_to_check;
   1911 	node.sysctl_data = &val;
   1912 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1913 } /* ixv_sysctl_next_to_check_handler */
   1914 
   1915 /************************************************************************
   1916  * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
   1917  *
   1918  *   Retrieves the RDH value from the hardware
   1919  ************************************************************************/
   1920 static int
   1921 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
   1922 {
   1923 	struct sysctlnode node = *rnode;
   1924 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1925 	uint32_t val;
   1926 
   1927 	if (!rxr)
   1928 		return (0);
   1929 
   1930 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
   1931 	node.sysctl_data = &val;
   1932 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1933 } /* ixv_sysctl_rdh_handler */
   1934 
   1935 /************************************************************************
   1936  * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
   1937  *
   1938  *   Retrieves the RDT value from the hardware
   1939  ************************************************************************/
   1940 static int
   1941 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
   1942 {
   1943 	struct sysctlnode node = *rnode;
   1944 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1945 	uint32_t val;
   1946 
   1947 	if (!rxr)
   1948 		return (0);
   1949 
   1950 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
   1951 	node.sysctl_data = &val;
   1952 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1953 } /* ixv_sysctl_rdt_handler */
   1954 
   1955 /************************************************************************
   1956  * ixv_setup_vlan_support
   1957  ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		ctrl, vid, vfta, retry;
	bool		hwtagging;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD, so it must be safe to run repeatedly.
	 */

	/* Enable HW tagging only if any vlan is attached */
	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
	    && VLAN_ATTACHED(ec);

	/*
	 * Toggle the VME (VLAN strip) bit on every RX queue to match the
	 * current hardware-tagging state.
	 */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
		if (hwtagging)
			ctrl |= IXGBE_RXDCTL_VME;
		else
			ctrl &= ~IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
	}

	/* XXX dirty hack. Enable all VIDs if any VLAN is attached */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i]
		    = VLAN_ATTACHED(&adapter->osdep.ec) ? 0xffffffff : 0;

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints: word i, bit j => vid i*32+j.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			 * Call the shared code mailbox routine; the PF may
			 * be busy, so retry a bounded number of times.
			 */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
   2023 
   2024 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2025 /************************************************************************
   2026  * ixv_register_vlan
   2027  *
   2028  *   Run via a vlan config EVENT, it enables us to use the
   2029  *   HW Filter table since we can get the vlan id. This just
   2030  *   creates the entry in the soft version of the VFTA, init
   2031  *   will repopulate the real table.
   2032  ************************************************************************/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Map the 12-bit VID to a 32-bit word index and bit position. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_register_vlan */
   2053 
   2054 /************************************************************************
   2055  * ixv_unregister_vlan
   2056  *
   2057  *   Run via a vlan unconfig EVENT, remove our entry
   2058  *   in the soft vfta.
   2059  ************************************************************************/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc !=  arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	/* Map the 12-bit VID to a 32-bit word index and bit position. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_unregister_vlan */
   2080 #endif
   2081 
   2082 /************************************************************************
   2083  * ixv_enable_intr
   2084  ************************************************************************/
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32		mask;
	int i;

	/*
	 * VTEIAC: auto-clear the mailbox vector and every queue vector
	 * on interrupt.  Each vector contributes one bit to the mask.
	 */
	mask = (1 << adapter->vector);
	for (i = 0; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	/* VTEIMS: unmask the mailbox vector, then each queue vector. */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixv_enable_intr */
   2107 
   2108 /************************************************************************
   2109  * ixv_disable_intr
   2110  ************************************************************************/
   2111 static void
   2112 ixv_disable_intr(struct adapter *adapter)
   2113 {
   2114 	struct ix_queue	*que = adapter->queues;
   2115 
   2116 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2117 
   2118 	/* disable interrupts other than queues */
   2119 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
   2120 
   2121 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2122 		ixv_disable_queue(adapter, que->msix);
   2123 
   2124 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2125 } /* ixv_disable_intr */
   2126 
   2127 /************************************************************************
   2128  * ixv_set_ivar
   2129  *
   2130  *   Setup the correct IVAR register for a particular MSI-X interrupt
   2131  *    - entry is the register array entry
   2132  *    - vector is the MSI-X vector for this queue
   2133  *    - type is RX/TX/MISC
   2134  ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ivar, index;

	/* Mark the entry valid so the hardware honors the mapping. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR (mailbox/link) */
		/* The misc vector lives in the low byte of VTIVAR_MISC. */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	  /* RX/TX IVARS */
		/*
		 * Each VTIVAR register packs four 8-bit fields covering two
		 * queues: RX at bits 0/16, TX at bits 8/24.  entry>>1 picks
		 * the register; (entry & 1) and type pick the byte lane.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */
   2156 
   2157 /************************************************************************
   2158  * ixv_configure_ivars
   2159  ************************************************************************/
   2160 static void
   2161 ixv_configure_ivars(struct adapter *adapter)
   2162 {
   2163 	struct ix_queue *que = adapter->queues;
   2164 
   2165 	/* XXX We should sync EITR value calculation with ixgbe.c? */
   2166 
   2167 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2168 		/* First the RX queue entry */
   2169 		ixv_set_ivar(adapter, i, que->msix, 0);
   2170 		/* ... and the TX */
   2171 		ixv_set_ivar(adapter, i, que->msix, 1);
   2172 		/* Set an initial value in EITR */
   2173 		ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
   2174 	}
   2175 
   2176 	/* For the mailbox interrupt */
   2177 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2178 } /* ixv_configure_ivars */
   2179 
   2180 
   2181 /************************************************************************
   2182  * ixv_save_stats
   2183  *
   2184  *   The VF stats registers never have a truly virgin
   2185  *   starting point, so this routine tries to make an
   2186  *   artificial one, marking ground zero on attach as
   2187  *   it were.
   2188  ************************************************************************/
   2189 static void
   2190 ixv_save_stats(struct adapter *adapter)
   2191 {
   2192 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2193 
   2194 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2195 		stats->saved_reset_vfgprc +=
   2196 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2197 		stats->saved_reset_vfgptc +=
   2198 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2199 		stats->saved_reset_vfgorc +=
   2200 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2201 		stats->saved_reset_vfgotc +=
   2202 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2203 		stats->saved_reset_vfmprc +=
   2204 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2205 	}
   2206 } /* ixv_save_stats */
   2207 
   2208 /************************************************************************
   2209  * ixv_init_stats
   2210  ************************************************************************/
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/*
	 * Snapshot the current hardware counter values as both the
	 * "last seen" values (for wrap detection in ixv_update_stats())
	 * and the baseline values (for ixv_save_stats()).
	 */
	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	/* The octet counters are 36-bit, split across LSB/MSB registers. */
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
} /* ixv_init_stats */
   2234 
/*
 * UPDATE_STAT_32 — fold a 32-bit hardware counter into a 64-bit evcnt.
 * A read smaller than the last one means the hardware counter wrapped,
 * so carry 2^32 into the accumulator; then splice the fresh low 32 bits
 * over the accumulator's low word.
 */
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, (reg));	\
	if (current < (last))				\
		count.ev_count += 0x100000000LL;	\
	(last) = current;				\
	count.ev_count &= 0xFFFFFFFF00000000LL;		\
	count.ev_count |= current;			\
}

/*
 * UPDATE_STAT_36 — same wrap handling for the 36-bit octet counters,
 * which are split across LSB/MSB registers; the carry unit is 2^36.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < (last))				\
		count.ev_count += 0x1000000000LL;	\
	(last) = current;				\
	count.ev_count &= 0xFFFFFFF000000000LL;		\
	count.ev_count |= current;			\
}
   2256 
   2257 /************************************************************************
   2258  * ixv_update_stats - Update the board statistics counters.
   2259  ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* Fold the 32-bit packet counters into the 64-bit evcnt's. */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* The octet counters are 36-bit, split across two registers. */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	/*
	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
	 * adapter->stats counters. It's required to make ifconfig -z
	 * (SOICZIFDATA) work.
	 */
} /* ixv_update_stats */
   2281 
   2282 /************************************************************************
   2283  * ixv_sysctl_interrupt_rate_handler
   2284  ************************************************************************/
   2285 static int
   2286 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2287 {
   2288 	struct sysctlnode node = *rnode;
   2289 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2290 	struct adapter	*adapter = que->adapter;
   2291 	uint32_t reg, usec, rate;
   2292 	int error;
   2293 
   2294 	if (que == NULL)
   2295 		return 0;
   2296 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
   2297 	usec = ((reg & 0x0FF8) >> 3);
   2298 	if (usec > 0)
   2299 		rate = 500000 / usec;
   2300 	else
   2301 		rate = 0;
   2302 	node.sysctl_data = &rate;
   2303 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2304 	if (error || newp == NULL)
   2305 		return error;
   2306 	reg &= ~0xfff; /* default, no limitation */
   2307 	if (rate > 0 && rate < 500000) {
   2308 		if (rate < 1000)
   2309 			rate = 1000;
   2310 		reg |= ((4000000/rate) & 0xff8);
   2311 		/*
   2312 		 * When RSC is used, ITR interval must be larger than
   2313 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   2314 		 * The minimum value is always greater than 2us on 100M
   2315 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   2316 		 */
   2317 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2318 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2319 			if ((adapter->num_queues > 1)
   2320 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   2321 				return EINVAL;
   2322 		}
   2323 		ixv_max_interrupt_rate = rate;
   2324 	} else
   2325 		ixv_max_interrupt_rate = 0;
   2326 	ixv_eitr_write(adapter, que->msix, reg);
   2327 
   2328 	return (0);
   2329 } /* ixv_sysctl_interrupt_rate_handler */
   2330 
   2331 const struct sysctlnode *
   2332 ixv_sysctl_instance(struct adapter *adapter)
   2333 {
   2334 	const char *dvname;
   2335 	struct sysctllog **log;
   2336 	int rc;
   2337 	const struct sysctlnode *rnode;
   2338 
   2339 	log = &adapter->sysctllog;
   2340 	dvname = device_xname(adapter->dev);
   2341 
   2342 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2343 	    0, CTLTYPE_NODE, dvname,
   2344 	    SYSCTL_DESCR("ixv information and settings"),
   2345 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2346 		goto err;
   2347 
   2348 	return rnode;
   2349 err:
   2350 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2351 	return NULL;
   2352 }
   2353 
   2354 static void
   2355 ixv_add_device_sysctls(struct adapter *adapter)
   2356 {
   2357 	struct sysctllog **log;
   2358 	const struct sysctlnode *rnode, *cnode;
   2359 	device_t dev;
   2360 
   2361 	dev = adapter->dev;
   2362 	log = &adapter->sysctllog;
   2363 
   2364 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2365 		aprint_error_dev(dev, "could not create sysctl root\n");
   2366 		return;
   2367 	}
   2368 
   2369 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2370 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2371 	    "debug", SYSCTL_DESCR("Debug Info"),
   2372 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2373 		aprint_error_dev(dev, "could not create sysctl\n");
   2374 
   2375 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2376 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2377 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2378 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2379 		aprint_error_dev(dev, "could not create sysctl\n");
   2380 
   2381 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2382 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2383 	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   2384 		NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   2385 		aprint_error_dev(dev, "could not create sysctl\n");
   2386 }
   2387 
   2388 /************************************************************************
   2389  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2390  ************************************************************************/
   2391 static void
   2392 ixv_add_stats_sysctls(struct adapter *adapter)
   2393 {
   2394 	device_t		dev = adapter->dev;
   2395 	struct tx_ring		*txr = adapter->tx_rings;
   2396 	struct rx_ring		*rxr = adapter->rx_rings;
   2397 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2398 	struct ixgbe_hw *hw = &adapter->hw;
   2399 	const struct sysctlnode *rnode, *cnode;
   2400 	struct sysctllog **log = &adapter->sysctllog;
   2401 	const char *xname = device_xname(dev);
   2402 
   2403 	/* Driver Statistics */
   2404 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2405 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2406 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2407 	    NULL, xname, "m_defrag() failed");
   2408 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2409 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2410 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2411 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2412 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2413 	    NULL, xname, "Driver tx dma hard fail other");
   2414 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2415 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2416 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2417 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2418 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2419 	    NULL, xname, "Watchdog timeouts");
   2420 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2421 	    NULL, xname, "TSO errors");
   2422 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2423 	    NULL, xname, "Link MSI-X IRQ Handled");
   2424 
   2425 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2426 		snprintf(adapter->queues[i].evnamebuf,
   2427 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2428 		    xname, i);
   2429 		snprintf(adapter->queues[i].namebuf,
   2430 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2431 
   2432 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2433 			aprint_error_dev(dev, "could not create sysctl root\n");
   2434 			break;
   2435 		}
   2436 
   2437 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2438 		    0, CTLTYPE_NODE,
   2439 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2440 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2441 			break;
   2442 
   2443 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2444 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2445 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2446 		    ixv_sysctl_interrupt_rate_handler, 0,
   2447 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2448 			break;
   2449 
   2450 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2451 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2452 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2453 		    ixv_sysctl_tdh_handler, 0, (void *)txr,
   2454 		    0, CTL_CREATE, CTL_EOL) != 0)
   2455 			break;
   2456 
   2457 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2458 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2459 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2460 		    ixv_sysctl_tdt_handler, 0, (void *)txr,
   2461 		    0, CTL_CREATE, CTL_EOL) != 0)
   2462 			break;
   2463 
   2464 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2465 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2466 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   2467 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   2468 		    "Handled queue in softint");
   2469 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   2470 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   2471 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2472 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2473 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2474 		    NULL, adapter->queues[i].evnamebuf,
   2475 		    "Queue No Descriptor Available");
   2476 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2477 		    NULL, adapter->queues[i].evnamebuf,
   2478 		    "Queue Packets Transmitted");
   2479 #ifndef IXGBE_LEGACY_TX
   2480 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2481 		    NULL, adapter->queues[i].evnamebuf,
   2482 		    "Packets dropped in pcq");
   2483 #endif
   2484 
   2485 #ifdef LRO
   2486 		struct lro_ctrl *lro = &rxr->lro;
   2487 #endif /* LRO */
   2488 
   2489 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2490 		    CTLFLAG_READONLY,
   2491 		    CTLTYPE_INT,
   2492 		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
   2493 			ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   2494 		    CTL_CREATE, CTL_EOL) != 0)
   2495 			break;
   2496 
   2497 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2498 		    CTLFLAG_READONLY,
   2499 		    CTLTYPE_INT,
   2500 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2501 		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2502 		    CTL_CREATE, CTL_EOL) != 0)
   2503 			break;
   2504 
   2505 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2506 		    CTLFLAG_READONLY,
   2507 		    CTLTYPE_INT,
   2508 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2509 		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2510 		    CTL_CREATE, CTL_EOL) != 0)
   2511 			break;
   2512 
   2513 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2514 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2515 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2516 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2517 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2518 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2519 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2520 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2521 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2522 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2523 #ifdef LRO
   2524 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2525 				CTLFLAG_RD, &lro->lro_queued, 0,
   2526 				"LRO Queued");
   2527 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2528 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2529 				"LRO Flushed");
   2530 #endif /* LRO */
   2531 	}
   2532 
   2533 	/* MAC stats get their own sub node */
   2534 
   2535 	snprintf(stats->namebuf,
   2536 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2537 
   2538 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2539 	    stats->namebuf, "rx csum offload - IP");
   2540 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2541 	    stats->namebuf, "rx csum offload - L4");
   2542 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2543 	    stats->namebuf, "rx csum offload - IP bad");
   2544 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2545 	    stats->namebuf, "rx csum offload - L4 bad");
   2546 
   2547 	/* Packet Reception Stats */
   2548 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2549 	    xname, "Good Packets Received");
   2550 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2551 	    xname, "Good Octets Received");
   2552 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2553 	    xname, "Multicast Packets Received");
   2554 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2555 	    xname, "Good Packets Transmitted");
   2556 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2557 	    xname, "Good Octets Transmitted");
   2558 
   2559 	/* Mailbox Stats */
   2560 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2561 	    xname, "message TXs");
   2562 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2563 	    xname, "message RXs");
   2564 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2565 	    xname, "ACKs");
   2566 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2567 	    xname, "REQs");
   2568 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2569 	    xname, "RSTs");
   2570 
   2571 } /* ixv_add_stats_sysctls */
   2572 
   2573 /************************************************************************
   2574  * ixv_set_sysctl_value
   2575  ************************************************************************/
    2576 static void
    2577 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
    2578 	const char *description, int *limit, int value)
    2579 {
         	/*
         	 * Create a read/write integer sysctl leaf under this device's
         	 * sysctl instance node, backed directly by *limit, and then
         	 * initialize *limit to 'value'.
         	 */
    2580 	device_t dev =	adapter->dev;
    2581 	struct sysctllog **log;
    2582 	const struct sysctlnode *rnode, *cnode;
    2583 
    2584 	log = &adapter->sysctllog;
    2585 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
    2586 		aprint_error_dev(dev, "could not create sysctl root\n");
    2587 		return;
    2588 	}
    2589 	if (sysctl_createv(log, 0, &rnode, &cnode,
    2590 	    CTLFLAG_READWRITE, CTLTYPE_INT,
    2591 	    name, SYSCTL_DESCR(description),
    2592 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
    2593 		aprint_error_dev(dev, "could not create sysctl\n");
         	/*
         	 * *limit is set even when sysctl creation fails, so the driver
         	 * still runs with the default tunable value.
         	 */
    2594 	*limit = value;
    2595 } /* ixv_set_sysctl_value */
   2596 
   2597 /************************************************************************
   2598  * ixv_print_debug_info
   2599  *
   2600  *   Called only when em_display_debug_stats is enabled.
   2601  *   Provides a way to take a look at important statistics
   2602  *   maintained by the driver and hardware.
   2603  ************************************************************************/
    2604 static void
    2605 ixv_print_debug_info(struct adapter *adapter)
    2606 {
         	/*
         	 * Dump per-queue interrupt/RX/TX event counters and the mailbox
         	 * interrupt count to the console.  Only reached via the "debug"
         	 * sysctl handler (ixv_sysctl_debug).
         	 */
    2607 	device_t	dev = adapter->dev;
    2608 	struct ix_queue *que = adapter->queues;
    2609 	struct rx_ring	*rxr;
    2610 	struct tx_ring	*txr;
    2611 #ifdef LRO
    2612 	struct lro_ctrl *lro;
    2613 #endif /* LRO */
    2614 
    2615 	for (int i = 0; i < adapter->num_queues; i++, que++) {
    2616 		txr = que->txr;
    2617 		rxr = que->rxr;
    2618 #ifdef LRO
    2619 		lro = &rxr->lro;
    2620 #endif /* LRO */
         		/*
         		 * NOTE(review): ev_count is a 64-bit counter; the casts to
         		 * (long) below truncate on LP32 platforms.  Harmless for a
         		 * debug dump, but %ju/uintmax_t would be exact.
         		 */
    2621 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
    2622 		    que->msix, (long)que->irqs.ev_count);
    2623 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
    2624 		    rxr->me, (long long)rxr->rx_packets.ev_count);
    2625 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
    2626 		    rxr->me, (long)rxr->rx_bytes.ev_count);
    2627 #ifdef LRO
    2628 		device_printf(dev, "RX(%d) LRO Queued= %ju\n",
    2629 		    rxr->me, (uintmax_t)lro->lro_queued);
    2630 		device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
    2631 		    rxr->me, (uintmax_t)lro->lro_flushed);
    2632 #endif /* LRO */
    2633 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
    2634 		    txr->me, (long)txr->total_packets.ev_count);
    2635 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
    2636 		    txr->me, (long)txr->no_desc_avail.ev_count);
    2637 	}
    2638 
    2639 	device_printf(dev, "MBX IRQ Handled: %lu\n",
    2640 	    (long)adapter->link_irq.ev_count);
    2641 } /* ixv_print_debug_info */
   2642 
   2643 /************************************************************************
   2644  * ixv_sysctl_debug
   2645  ************************************************************************/
    2646 static int
    2647 ixv_sysctl_debug(SYSCTLFN_ARGS)
    2648 {
         	/*
         	 * Sysctl handler: writing the value 1 to the node triggers a
         	 * one-shot dump of driver statistics via ixv_print_debug_info().
         	 * Reads return the transient 'result' scratch value.
         	 */
    2649 	struct sysctlnode node = *rnode;
    2650 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
    2651 	int	       error, result;
    2652 
    2653 	node.sysctl_data = &result;
    2654 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    2655 
         	/* newp == NULL means this was a read, not a write: nothing to do. */
    2656 	if (error || newp == NULL)
    2657 		return error;
    2658 
    2659 	if (result == 1)
    2660 		ixv_print_debug_info(adapter);
    2661 
    2662 	return 0;
    2663 } /* ixv_sysctl_debug */
   2664 
   2665 /************************************************************************
   2666  * ixv_init_device_features
   2667  ************************************************************************/
    2668 static void
    2669 ixv_init_device_features(struct adapter *adapter)
    2670 {
         	/*
         	 * Populate feat_cap (what the hardware/driver can do) and feat_en
         	 * (what is actually enabled).  feat_en is always a subset of
         	 * feat_cap; LEGACY_TX additionally requires the ixv_enable_legacy_tx
         	 * tunable.
         	 */
    2669 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
    2672 			  | IXGBE_FEATURE_VF
    2673 			  | IXGBE_FEATURE_RSS
    2674 			  | IXGBE_FEATURE_LEGACY_TX;
    2675 
    2676 	/* A tad short on feature flags for VFs, atm. */
    2677 	switch (adapter->hw.mac.type) {
    2678 	case ixgbe_mac_82599_vf:
    2679 		break;
    2680 	case ixgbe_mac_X540_vf:
    2681 		break;
    2682 	case ixgbe_mac_X550_vf:
    2683 	case ixgbe_mac_X550EM_x_vf:
    2684 	case ixgbe_mac_X550EM_a_vf:
         		/* X550-family VFs always need an advanced context descriptor. */
    2685 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
    2686 		break;
    2687 	default:
    2688 		break;
    2689 	}
    2690 
    2691 	/* Enabled by default... */
    2692 	/* Is a virtual function (VF) */
    2693 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
    2694 		adapter->feat_en |= IXGBE_FEATURE_VF;
    2695 	/* Netmap */
    2696 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
    2697 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
    2698 	/* Receive-Side Scaling (RSS) */
    2699 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
    2700 		adapter->feat_en |= IXGBE_FEATURE_RSS;
    2701 	/* Needs advanced context descriptor regardless of offloads req'd */
    2702 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
    2703 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
    2704 
    2705 	/* Enabled via sysctl... */
    2706 	/* Legacy (single queue) transmit */
    2707 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
    2708 	    ixv_enable_legacy_tx)
    2709 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
    2710 } /* ixv_init_device_features */
   2711 
   2712 /************************************************************************
   2713  * ixv_shutdown - Shutdown entry point
   2714  ************************************************************************/
    2715 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
         /*
          * Disabled FreeBSD-style shutdown method: stops the adapter under the
          * core lock.  Retained for reference until a pmf(9) hook replaces it.
          */
    2716 static int
    2717 ixv_shutdown(device_t dev)
    2718 {
    2719 	struct adapter *adapter = device_private(dev);
    2720 	IXGBE_CORE_LOCK(adapter);
    2721 	ixv_stop(adapter);
    2722 	IXGBE_CORE_UNLOCK(adapter);
    2723 
    2724 	return (0);
    2725 } /* ixv_shutdown */
    2726 #endif
   2727 
    2728 static int
    2729 ixv_ifflags_cb(struct ethercom *ec)
    2730 {
         	/*
         	 * ether_ioctl(9) if_flags-change callback.  Caches the new flags,
         	 * returns ENETRESET (forcing a full reinit) when anything beyond
         	 * IFF_CANTCHANGE/IFF_DEBUG changed, and otherwise just refreshes
         	 * the VLAN filter setup.
         	 */
    2731 	struct ifnet *ifp = &ec->ec_if;
    2732 	struct adapter *adapter = ifp->if_softc;
    2733 	int change, rv = 0;
    2734 
    2735 	IXGBE_CORE_LOCK(adapter);
    2736 
    2737 	change = ifp->if_flags ^ adapter->if_flags;
    2738 	if (change != 0)
    2739 		adapter->if_flags = ifp->if_flags;
    2740 
    2741 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
    2742 		rv = ENETRESET;
    2743 		goto out;
    2744 	}
    2745 
    2746 	/* Set up VLAN support and filter */
    2747 	ixv_setup_vlan_support(adapter);
    2748 
    2749 out:
    2750 	IXGBE_CORE_UNLOCK(adapter);
    2751 
    2752 	return rv;
    2753 }
   2754 
   2755 
   2756 /************************************************************************
   2757  * ixv_ioctl - Ioctl entry point
   2758  *
   2759  *   Called when the user wants to configure the interface.
   2760  *
   2761  *   return 0 on success, positive on failure
   2762  ************************************************************************/
    2763 static int
    2764 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
    2765 {
    2766 	struct adapter	*adapter = ifp->if_softc;
    2767 	struct ifcapreq *ifcr = data;
    2768 	int		error = 0;
    2769 	int l4csum_en;
         	/* The full set of L4 Rx checksum capabilities; see SIOCSIFCAP below. */
    2770 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
    2771 	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
    2772 
         	/* First switch: debug tracing only, no side effects. */
    2773 	switch (command) {
    2774 	case SIOCSIFFLAGS:
    2775 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
    2776 		break;
    2777 	case SIOCADDMULTI:
    2778 	case SIOCDELMULTI:
    2779 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
    2780 		break;
    2781 	case SIOCSIFMEDIA:
    2782 	case SIOCGIFMEDIA:
    2783 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
    2784 		break;
    2785 	case SIOCSIFCAP:
    2786 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
    2787 		break;
    2788 	case SIOCSIFMTU:
    2789 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    2790 		break;
    2791 	default:
    2792 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
    2793 		break;
    2794 	}
    2795 
         	/* Second switch: actual handling, delegating to ether_ioctl(9). */
    2796 	switch (command) {
    2797 	case SIOCSIFCAP:
    2798 		/* Layer-4 Rx checksum offload has to be turned on and
    2799 		 * off as a unit.
    2800 		 */
    2801 		l4csum_en = ifcr->ifcr_capenable & l4csum;
    2802 		if (l4csum_en != l4csum && l4csum_en != 0)
    2803 			return EINVAL;
    2804 		/*FALLTHROUGH*/
    2805 	case SIOCADDMULTI:
    2806 	case SIOCDELMULTI:
    2807 	case SIOCSIFFLAGS:
    2808 	case SIOCSIFMTU:
    2809 	default:
         		/* ENETRESET from ether_ioctl means "reprogram the hardware". */
    2810 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
    2811 			return error;
         		/* If not running, the reinit happens on the next if_init. */
    2812 		if ((ifp->if_flags & IFF_RUNNING) == 0)
    2813 			;
    2814 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
    2815 			IXGBE_CORE_LOCK(adapter);
    2816 			ixv_init_locked(adapter);
    2817 			IXGBE_CORE_UNLOCK(adapter);
    2818 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
    2819 			/*
    2820 			 * Multicast list has changed; set the hardware filter
    2821 			 * accordingly.
    2822 			 */
    2823 			IXGBE_CORE_LOCK(adapter);
    2824 			ixv_disable_intr(adapter);
    2825 			ixv_set_multi(adapter);
    2826 			ixv_enable_intr(adapter);
    2827 			IXGBE_CORE_UNLOCK(adapter);
    2828 		}
    2829 		return 0;
    2830 	}
    2831 } /* ixv_ioctl */
   2832 
   2833 /************************************************************************
   2834  * ixv_init
   2835  ************************************************************************/
    2836 static int
    2837 ixv_init(struct ifnet *ifp)
    2838 {
         	/* if_init entry point: run the locked initializer under the core lock. */
    2839 	struct adapter *adapter = ifp->if_softc;
    2840 
    2841 	IXGBE_CORE_LOCK(adapter);
    2842 	ixv_init_locked(adapter);
    2843 	IXGBE_CORE_UNLOCK(adapter);
    2844 
    2845 	return 0;
    2846 } /* ixv_init */
   2847 
   2848 /************************************************************************
   2849  * ixv_handle_que
   2850  ************************************************************************/
    2851 static void
    2852 ixv_handle_que(void *context)
    2853 {
         	/*
         	 * Deferred (softint/workqueue) service routine for one TX/RX queue
         	 * pair.  Drains RX and TX completions, kicks pending transmits, and
         	 * either reschedules itself (if more work remains) or re-enables the
         	 * queue's MSI-X interrupt.
         	 */
    2854 	struct ix_queue *que = context;
    2855 	struct adapter	*adapter = que->adapter;
    2856 	struct tx_ring	*txr = que->txr;
    2857 	struct ifnet	*ifp = adapter->ifp;
    2858 	bool		more;
    2859 
    2860 	que->handleq.ev_count++;
    2861 
    2862 	if (ifp->if_flags & IFF_RUNNING) {
    2863 		more = ixgbe_rxeof(que);
    2864 		IXGBE_TX_LOCK(txr);
    2865 		more |= ixgbe_txeof(txr);
         		/* Multiqueue path: restart any transmits queued in the pcq. */
    2866 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    2867 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
    2868 				ixgbe_mq_start_locked(ifp, txr);
    2869 		/* Only for queue 0 */
    2870 		/* NetBSD still needs this for CBQ */
    2871 		if ((&adapter->queues[0] == que)
    2872 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
    2873 			ixgbe_legacy_start_locked(ifp, txr);
    2874 		IXGBE_TX_UNLOCK(txr);
    2875 		if (more) {
    2876 			que->req.ev_count++;
    2877 			if (adapter->txrx_use_workqueue) {
    2878 				/*
    2879 				 * "enqueued flag" is not required here
    2880 				 * the same as ixg(4). See ixgbe_msix_que().
    2881 				 */
    2882 				workqueue_enqueue(adapter->que_wq,
    2883 				    &que->wq_cookie, curcpu());
    2884 			} else
    2885 				  softint_schedule(que->que_si);
         			/* Interrupt stays masked until the rescheduled pass. */
    2886 			return;
    2887 		}
    2888 	}
    2889 
    2890 	/* Re-enable this interrupt */
    2891 	ixv_enable_queue(adapter, que->msix);
    2892 
    2893 	return;
    2894 } /* ixv_handle_que */
   2895 
   2896 /************************************************************************
   2897  * ixv_handle_que_work
   2898  ************************************************************************/
    2899 static void
    2900 ixv_handle_que_work(struct work *wk, void *context)
    2901 {
         	/* workqueue(9) adapter: recover the queue from its wq_cookie member. */
    2902 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
    2903 
    2904 	/*
    2905 	 * "enqueued flag" is not required here the same as ixg(4).
    2906 	 * See ixgbe_msix_que().
    2907 	 */
    2908 	ixv_handle_que(que);
    2909 }
   2910 
   2911 /************************************************************************
   2912  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2913  ************************************************************************/
    2914 static int
    2915 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
    2916 {
         	/*
         	 * Allocate and wire up all MSI-X vectors: one per TX/RX queue pair
         	 * plus one for the mailbox (link) interrupt.  Also establishes the
         	 * deferred-processing softints and workqueues and distributes vector
         	 * affinity round-robin across CPUs.
         	 *
         	 * Returns 0 on success, ENXIO on allocation/establish failure.
         	 */
    2917 	device_t	dev = adapter->dev;
    2918 	struct ix_queue *que = adapter->queues;
    2919 	struct		tx_ring *txr = adapter->tx_rings;
    2920 	int		error, msix_ctrl, rid, vector = 0;
    2921 	pci_chipset_tag_t pc;
    2922 	pcitag_t	tag;
    2923 	char		intrbuf[PCI_INTRSTR_LEN];
    2924 	char		wqname[MAXCOMLEN];
    2925 	char		intr_xname[32];
    2926 	const char	*intrstr = NULL;
    2927 	kcpuset_t	*affinity;
    2928 	int		cpu_id = 0;
    2929 
    2930 	pc = adapter->osdep.pc;
    2931 	tag = adapter->osdep.tag;
    2932 
         	/* num_queues TX/RX vectors + 1 mailbox vector. */
    2933 	adapter->osdep.nintrs = adapter->num_queues + 1;
    2934 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
    2935 	    adapter->osdep.nintrs) != 0) {
    2936 		aprint_error_dev(dev,
    2937 		    "failed to allocate MSI-X interrupt\n");
    2938 		return (ENXIO);
    2939 	}
    2940 
    2941 	kcpuset_create(&affinity, false);
    2942 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
    2943 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
    2944 		    device_xname(dev), i);
    2945 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
    2946 		    sizeof(intrbuf));
    2947 #ifdef IXGBE_MPSAFE
    2948 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
    2949 		    true);
    2950 #endif
    2951 		/* Set the handler function */
    2952 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
    2953 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
    2954 		    intr_xname);
    2955 		if (que->res == NULL) {
         			/*
         			 * NOTE(review): handlers and softints established on
         			 * earlier loop iterations are not disestablished on
         			 * this error path — verify against the detach path.
         			 */
    2956 			pci_intr_release(pc, adapter->osdep.intrs,
    2957 			    adapter->osdep.nintrs);
    2958 			aprint_error_dev(dev,
    2959 			    "Failed to register QUE handler\n");
    2960 			kcpuset_destroy(affinity);
    2961 			return (ENXIO);
    2962 		}
    2963 		que->msix = vector;
    2964 		adapter->active_queues |= (u64)(1 << que->msix);
    2965 
    2966 		cpu_id = i;
    2967 		/* Round-robin affinity */
    2968 		kcpuset_zero(affinity);
    2969 		kcpuset_set(affinity, cpu_id % ncpu);
         		/* Affinity is advisory; failure only changes the message below. */
    2970 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
    2971 		    NULL);
    2972 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
    2973 		    intrstr);
    2974 		if (error == 0)
    2975 			aprint_normal(", bound queue %d to cpu %d\n",
    2976 			    i, cpu_id % ncpu);
    2977 		else
    2978 			aprint_normal("\n");
    2979 
    2980 #ifndef IXGBE_LEGACY_TX
         		/* NOTE(review): txr_si result is not NULL-checked, unlike que_si. */
    2981 		txr->txr_si
    2982 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
    2983 			ixgbe_deferred_mq_start, txr);
    2984 #endif
    2985 		que->que_si
    2986 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
    2987 			ixv_handle_que, que);
    2988 		if (que->que_si == NULL) {
    2989 			aprint_error_dev(dev,
    2990 			    "could not establish software interrupt\n");
    2991 		}
    2992 	}
         	/* Workqueue for deferred multiqueue transmit starts. */
    2993 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
    2994 	error = workqueue_create(&adapter->txr_wq, wqname,
    2995 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
    2996 	    IXGBE_WORKQUEUE_FLAGS);
    2997 	if (error) {
    2998 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
    2999 	}
    3000 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
    3001 
         	/* Workqueue alternative to the per-queue softint (see ixv_handle_que). */
    3002 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
    3003 	error = workqueue_create(&adapter->que_wq, wqname,
    3004 	    ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
    3005 	    IXGBE_WORKQUEUE_FLAGS);
    3006 	if (error) {
    3007 		aprint_error_dev(dev,
    3008 		    "couldn't create workqueue\n");
    3009 	}
    3010 
    3011 	/* and Mailbox */
    3012 	cpu_id++;
    3013 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
    3014 	adapter->vector = vector;
    3015 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
    3016 	    sizeof(intrbuf));
    3017 #ifdef IXGBE_MPSAFE
    3018 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
    3019 	    true);
    3020 #endif
    3021 	/* Set the mbx handler function */
    3022 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
    3023 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
    3024 	    intr_xname);
    3025 	if (adapter->osdep.ihs[vector] == NULL) {
    3026 		aprint_error_dev(dev, "Failed to register LINK handler\n");
    3027 		kcpuset_destroy(affinity);
    3028 		return (ENXIO);
    3029 	}
    3030 	/* Round-robin affinity */
    3031 	kcpuset_zero(affinity);
    3032 	kcpuset_set(affinity, cpu_id % ncpu);
    3033 	error = interrupt_distribute(adapter->osdep.ihs[vector],
    3034 	    affinity, NULL);
    3035 
    3036 	aprint_normal_dev(dev,
    3037 	    "for link, interrupting at %s", intrstr);
    3038 	if (error == 0)
    3039 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
    3040 	else
    3041 		aprint_normal("\n");
    3042 
    3043 	/* Tasklets for Mailbox */
    3044 	adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
    3045 	    ixv_handle_link, adapter);
    3046 	/*
    3047 	 * Due to a broken design QEMU will fail to properly
    3048 	 * enable the guest for MSI-X unless the vectors in
    3049 	 * the table are all set up, so we must rewrite the
    3050 	 * ENABLE in the MSI-X control register again at this
    3051 	 * point to cause it to successfully initialize us.
    3052 	 */
    3053 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
    3054 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
    3055 		rid += PCI_MSIX_CTL;
    3056 		msix_ctrl = pci_conf_read(pc, tag, rid);
    3057 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
    3058 		pci_conf_write(pc, tag, rid, msix_ctrl);
    3059 	}
    3060 
    3061 	kcpuset_destroy(affinity);
    3062 	return (0);
    3063 } /* ixv_allocate_msix */
   3064 
   3065 /************************************************************************
   3066  * ixv_configure_interrupts - Setup MSI-X resources
   3067  *
   3068  *   Note: The VF device MUST use MSI-X, there is no fallback.
   3069  ************************************************************************/
    3070 static int
    3071 ixv_configure_interrupts(struct adapter *adapter)
    3072 {
         	/*
         	 * Decide how many MSI-X vectors/queues to use.  The VF has no
         	 * INTx/MSI fallback, so at least two vectors (one queue + mailbox)
         	 * are mandatory.  Sets adapter->num_queues on success.
         	 */
    3073 	device_t dev = adapter->dev;
    3074 	int want, queues, msgs;
    3075 
    3076 	/* Must have at least 2 MSI-X vectors */
    3077 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
    3078 	if (msgs < 2) {
    3079 		aprint_error_dev(dev, "MSIX config error\n");
    3080 		return (ENXIO);
    3081 	}
    3082 	msgs = MIN(msgs, IXG_MAX_NINTR);
    3083 
    3084 	/* Figure out a reasonable auto config value */
    3085 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
    3086 
         	/* An explicit ixv_num_queues tunable overrides the auto value. */
    3087 	if (ixv_num_queues != 0)
    3088 		queues = ixv_num_queues;
    3089 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
    3090 		queues = IXGBE_VF_MAX_TX_QUEUES;
    3091 
    3092 	/*
    3093 	 * Want vectors for the queues,
    3094 	 * plus an additional for mailbox.
    3095 	 */
    3096 	want = queues + 1;
    3097 	if (msgs >= want)
    3098 		msgs = want;
    3099 	else {
    3100 		aprint_error_dev(dev,
    3101 		    "MSI-X Configuration Problem, "
    3102 		    "%d vectors but %d queues wanted!\n",
    3103 		    msgs, want);
         		/* NOTE(review): returns -1 here but ENXIO above — callers only
         		 * test for nonzero, yet an errno would be more consistent. */
    3104 		return -1;
    3105 	}
    3106 
    3107 	adapter->msix_mem = (void *)1; /* XXX */
    3108 	aprint_normal_dev(dev,
    3109 	    "Using MSI-X interrupts with %d vectors\n", msgs);
    3110 	adapter->num_queues = queues;
    3111 
    3112 	return (0);
    3113 } /* ixv_configure_interrupts */
   3114 
   3115 
   3116 /************************************************************************
   3117  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   3118  *
   3119  *   Done outside of interrupt context since the driver might sleep
   3120  ************************************************************************/
    3121 static void
    3122 ixv_handle_link(void *context)
    3123 {
         	/*
         	 * Softint handler for the MSI-X mailbox vector: query link state
         	 * under the core lock (check_link may sleep, hence the deferral
         	 * out of hard interrupt context) and propagate it to the ifnet.
         	 */
    3124 	struct adapter *adapter = context;
    3125 
    3126 	IXGBE_CORE_LOCK(adapter);
    3127 
    3128 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
    3129 	    &adapter->link_up, FALSE);
    3130 	ixv_update_link_status(adapter);
    3131 
    3132 	IXGBE_CORE_UNLOCK(adapter);
    3133 } /* ixv_handle_link */
   3134 
   3135 /************************************************************************
   3136  * ixv_check_link - Used in the local timer to poll for link changes
   3137  ************************************************************************/
    3138 static s32
    3139 ixv_check_link(struct adapter *adapter)
    3140 {
         	/*
         	 * Poll link state from the local timer.  Caller must hold the core
         	 * mutex (asserted below).  Forces get_link_status so the mailbox is
         	 * actually queried, then updates the ifnet-visible link status.
         	 * Returns the status code from the MAC's check_link op.
         	 */
    3141 	s32 error;
    3142 
    3143 	KASSERT(mutex_owned(&adapter->core_mtx));
    3144 
    3145 	adapter->hw.mac.get_link_status = TRUE;
    3146 
    3147 	error = adapter->hw.mac.ops.check_link(&adapter->hw,
    3148 	    &adapter->link_speed, &adapter->link_up, FALSE);
    3149 	ixv_update_link_status(adapter);
    3150 
    3151 	return error;
    3152 } /* ixv_check_link */
   3153