ixv.c revision 1.97
      1 /*$NetBSD: ixv.c,v 1.97 2018/05/14 09:21:36 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 #ifdef _KERNEL_OPT
     38 #include "opt_inet.h"
     39 #include "opt_inet6.h"
     40 #include "opt_net_mpsafe.h"
     41 #endif
     42 
     43 #include "ixgbe.h"
     44 #include "vlan.h"
     45 
     46 /************************************************************************
     47  * Driver version
     48  ************************************************************************/
     49 char ixv_driver_version[] = "2.0.1-k";
     50 
     51 /************************************************************************
     52  * PCI Device ID Table
     53  *
     54  *   Used by probe to select devices to load on
     55  *   Last field stores an index into ixv_strings
     56  *   Last entry must be all 0s
     57  *
     58  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     59  ************************************************************************/
     60 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
     61 {
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     67 	/* required last entry */
     68 	{0, 0, 0, 0, 0}
     69 };
     70 
     71 /************************************************************************
     72  * Table of branding strings
     73  ************************************************************************/
     74 static const char *ixv_strings[] = {
     75 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     76 };
     77 
     78 /*********************************************************************
     79  *  Function prototypes
     80  *********************************************************************/
     81 static int      ixv_probe(device_t, cfdata_t, void *);
     82 static void	ixv_attach(device_t, device_t, void *);
     83 static int      ixv_detach(device_t, int);
     84 #if 0
     85 static int      ixv_shutdown(device_t);
     86 #endif
     87 static int	ixv_ifflags_cb(struct ethercom *);
     88 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     89 static int	ixv_init(struct ifnet *);
     90 static void	ixv_init_locked(struct adapter *);
     91 static void	ixv_ifstop(struct ifnet *, int);
     92 static void     ixv_stop(void *);
     93 static void     ixv_init_device_features(struct adapter *);
     94 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     95 static int      ixv_media_change(struct ifnet *);
     96 static int      ixv_allocate_pci_resources(struct adapter *,
     97 		    const struct pci_attach_args *);
     98 static int      ixv_allocate_msix(struct adapter *,
     99 		    const struct pci_attach_args *);
    100 static int      ixv_configure_interrupts(struct adapter *);
    101 static void	ixv_free_pci_resources(struct adapter *);
    102 static void     ixv_local_timer(void *);
    103 static void     ixv_local_timer_locked(void *);
    104 static void	ixv_watchdog(struct ifnet *);
    105 static bool	ixv_watchdog_txq(struct ifnet *, struct tx_ring *, bool *);
    106 static int      ixv_setup_interface(device_t, struct adapter *);
    107 static int      ixv_negotiate_api(struct adapter *);
    108 
    109 static void     ixv_initialize_transmit_units(struct adapter *);
    110 static void     ixv_initialize_receive_units(struct adapter *);
    111 static void     ixv_initialize_rss_mapping(struct adapter *);
    112 static void     ixv_check_link(struct adapter *);
    113 
    114 static void     ixv_enable_intr(struct adapter *);
    115 static void     ixv_disable_intr(struct adapter *);
    116 static void     ixv_set_multi(struct adapter *);
    117 static void     ixv_update_link_status(struct adapter *);
    118 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    119 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    120 static void	ixv_configure_ivars(struct adapter *);
    121 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    122 static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
    123 
    124 static void	ixv_setup_vlan_support(struct adapter *);
    125 #if 0
    126 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    127 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    128 #endif
    129 
    130 static void	ixv_add_device_sysctls(struct adapter *);
    131 static void	ixv_save_stats(struct adapter *);
    132 static void	ixv_init_stats(struct adapter *);
    133 static void	ixv_update_stats(struct adapter *);
    134 static void	ixv_add_stats_sysctls(struct adapter *);
    135 
    136 
    137 /* Sysctl handlers */
    138 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    139 		    const char *, int *, int);
    140 static int      ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    141 static int      ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
    142 static int      ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
    143 static int      ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
    144 static int      ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
    145 
    146 /* The MSI-X Interrupt handlers */
    147 static int	ixv_msix_que(void *);
    148 static int	ixv_msix_mbx(void *);
    149 
    150 /* Deferred interrupt tasklets */
    151 static void	ixv_handle_que(void *);
    152 static void     ixv_handle_link(void *);
    153 
    154 /* Workqueue handler for deferred work */
    155 static void	ixv_handle_que_work(struct work *, void *);
    156 
    157 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    158 static ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    159 
    160 /************************************************************************
    161  * FreeBSD Device Interface Entry Points
    162  ************************************************************************/
    163 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    164     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    165     DVF_DETACH_SHUTDOWN);
    166 
    167 #if 0
    168 static driver_t ixv_driver = {
    169 	"ixv", ixv_methods, sizeof(struct adapter),
    170 };
    171 
    172 devclass_t ixv_devclass;
    173 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    174 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    175 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    176 #endif
    177 
    178 /*
    179  * TUNEABLE PARAMETERS:
    180  */
    181 
    182 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    183 static int ixv_num_queues = 0;
    184 #define	TUNABLE_INT(__x, __y)
    185 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
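
/*
 * Note: TUNABLE_INT() is stubbed out to nothing just above, so on NetBSD
 * the hw.ixv.* names below are informational only; the defaults take
 * effect through the static initializers, and run-time knobs, where they
 * exist, are provided by the sysctl handlers declared earlier.
 */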
    186 
    187 /*
    188  * AIM: Adaptive Interrupt Moderation
    189  * which means that the interrupt rate
    190  * is varied over time based on the
    191  * traffic for that interrupt vector
    192  */
    193 static bool ixv_enable_aim = false;
    194 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    195 
    196 static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    197 TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
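
/*
 * Assuming IXGBE_LOW_LATENCY is 128, as in the ixgbe.h this driver is
 * derived from, the default above works out to 4000000 / 128 = 31250
 * interrupts per second per vector.
 */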
    198 
    199 /* How many packets rxeof tries to clean at a time */
    200 static int ixv_rx_process_limit = 256;
    201 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    202 
    203 /* How many packets txeof tries to clean at a time */
    204 static int ixv_tx_process_limit = 256;
    205 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    206 
     207 /* Whether packet processing uses a workqueue or a softint */
    208 static bool ixv_txrx_workqueue = false;
    209 
    210 /*
    211  * Number of TX descriptors per ring,
    212  * setting higher than RX as this seems
    213  * the better performing choice.
    214  */
    215 static int ixv_txd = PERFORM_TXD;
    216 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    217 
    218 /* Number of RX descriptors per ring */
    219 static int ixv_rxd = PERFORM_RXD;
    220 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    221 
    222 /* Legacy Transmit (single queue) */
    223 static int ixv_enable_legacy_tx = 0;
    224 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    225 
    226 #ifdef NET_MPSAFE
    227 #define IXGBE_MPSAFE		1
    228 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    229 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    230 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    231 #else
    232 #define IXGBE_CALLOUT_FLAGS	0
    233 #define IXGBE_SOFTINFT_FLAGS	0
    234 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    235 #endif
    236 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    237 
    238 #if 0
    239 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    240 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    241 #endif
    242 
    243 /************************************************************************
    244  * ixv_probe - Device identification routine
    245  *
    246  *   Determines if the driver should be loaded on
    247  *   adapter based on its PCI vendor/device ID.
    248  *
     249  *   return 1 on a device match, 0 otherwise
    250  ************************************************************************/
    251 static int
    252 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    253 {
    254 #ifdef __HAVE_PCI_MSI_MSIX
    255 	const struct pci_attach_args *pa = aux;
    256 
    257 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    258 #else
    259 	return 0;
    260 #endif
    261 } /* ixv_probe */
    262 
    263 static ixgbe_vendor_info_t *
    264 ixv_lookup(const struct pci_attach_args *pa)
    265 {
    266 	ixgbe_vendor_info_t *ent;
    267 	pcireg_t subid;
    268 
    269 	INIT_DEBUGOUT("ixv_lookup: begin");
    270 
    271 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    272 		return NULL;
    273 
    274 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    275 
    276 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    277 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    278 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    279 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    280 		     (ent->subvendor_id == 0)) &&
    281 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    282 		     (ent->subdevice_id == 0))) {
    283 			return ent;
    284 		}
    285 	}
    286 
    287 	return NULL;
    288 }
    289 
    290 /************************************************************************
    291  * ixv_attach - Device initialization routine
    292  *
    293  *   Called when the driver is being loaded.
    294  *   Identifies the type of hardware, allocates all resources
    295  *   and initializes the hardware.
    296  *
    297  *   return 0 on success, positive on failure
    298  ************************************************************************/
    299 static void
    300 ixv_attach(device_t parent, device_t dev, void *aux)
    301 {
    302 	struct adapter *adapter;
    303 	struct ixgbe_hw *hw;
    304 	int             error = 0;
    305 	pcireg_t	id, subid;
    306 	ixgbe_vendor_info_t *ent;
    307 	const struct pci_attach_args *pa = aux;
    308 	const char *apivstr;
    309 	const char *str;
    310 	char buf[256];
    311 
    312 	INIT_DEBUGOUT("ixv_attach: begin");
    313 
    314 	/*
     315 	 * Make sure BUSMASTER is set; on a VM under
     316 	 * KVM it may not be, which would break things.
    317 	 */
    318 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    319 
    320 	/* Allocate, clear, and link in our adapter structure */
    321 	adapter = device_private(dev);
    322 	adapter->dev = dev;
    323 	adapter->hw.back = adapter;
    324 	hw = &adapter->hw;
    325 
    326 	adapter->init_locked = ixv_init_locked;
    327 	adapter->stop_locked = ixv_stop;
    328 
    329 	adapter->osdep.pc = pa->pa_pc;
    330 	adapter->osdep.tag = pa->pa_tag;
    331 	if (pci_dma64_available(pa))
    332 		adapter->osdep.dmat = pa->pa_dmat64;
    333 	else
    334 		adapter->osdep.dmat = pa->pa_dmat;
    335 	adapter->osdep.attached = false;
    336 
    337 	ent = ixv_lookup(pa);
    338 
    339 	KASSERT(ent != NULL);
    340 
    341 	aprint_normal(": %s, Version - %s\n",
    342 	    ixv_strings[ent->index], ixv_driver_version);
    343 
    344 	/* Core Lock Init*/
    345 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    346 
    347 	/* Do base PCI setup - map BAR0 */
    348 	if (ixv_allocate_pci_resources(adapter, pa)) {
    349 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    350 		error = ENXIO;
    351 		goto err_out;
    352 	}
    353 
    354 	/* SYSCTL APIs */
    355 	ixv_add_device_sysctls(adapter);
    356 
    357 	/* Set up the timer callout */
    358 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    359 
    360 	/* Save off the information about this board */
    361 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    362 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    363 	hw->vendor_id = PCI_VENDOR(id);
    364 	hw->device_id = PCI_PRODUCT(id);
    365 	hw->revision_id =
    366 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    367 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    368 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    369 
    370 	/* A subset of set_mac_type */
    371 	switch (hw->device_id) {
    372 	case IXGBE_DEV_ID_82599_VF:
    373 		hw->mac.type = ixgbe_mac_82599_vf;
    374 		str = "82599 VF";
    375 		break;
    376 	case IXGBE_DEV_ID_X540_VF:
    377 		hw->mac.type = ixgbe_mac_X540_vf;
    378 		str = "X540 VF";
    379 		break;
    380 	case IXGBE_DEV_ID_X550_VF:
    381 		hw->mac.type = ixgbe_mac_X550_vf;
    382 		str = "X550 VF";
    383 		break;
    384 	case IXGBE_DEV_ID_X550EM_X_VF:
    385 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    386 		str = "X550EM X VF";
    387 		break;
    388 	case IXGBE_DEV_ID_X550EM_A_VF:
    389 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    390 		str = "X550EM A VF";
    391 		break;
    392 	default:
    393 		/* Shouldn't get here since probe succeeded */
    394 		aprint_error_dev(dev, "Unknown device ID!\n");
    395 		error = ENXIO;
    396 		goto err_out;
    397 		break;
    398 	}
    399 	aprint_normal_dev(dev, "device %s\n", str);
    400 
    401 	ixv_init_device_features(adapter);
    402 
    403 	/* Initialize the shared code */
    404 	error = ixgbe_init_ops_vf(hw);
    405 	if (error) {
    406 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    407 		error = EIO;
    408 		goto err_out;
    409 	}
    410 
    411 	/* Setup the mailbox */
    412 	ixgbe_init_mbx_params_vf(hw);
    413 
    414 	/* Set the right number of segments */
    415 	adapter->num_segs = IXGBE_82599_SCATTER;
    416 
    417 	/* Reset mbox api to 1.0 */
    418 	error = hw->mac.ops.reset_hw(hw);
    419 	if (error == IXGBE_ERR_RESET_FAILED)
    420 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    421 	else if (error)
    422 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    423 		    error);
    424 	if (error) {
    425 		error = EIO;
    426 		goto err_out;
    427 	}
    428 
    429 	error = hw->mac.ops.init_hw(hw);
    430 	if (error) {
    431 		aprint_error_dev(dev, "...init_hw() failed!\n");
    432 		error = EIO;
    433 		goto err_out;
    434 	}
    435 
    436 	/* Negotiate mailbox API version */
    437 	error = ixv_negotiate_api(adapter);
    438 	if (error)
    439 		aprint_normal_dev(dev,
    440 		    "MBX API negotiation failed during attach!\n");
    441 	switch (hw->api_version) {
    442 	case ixgbe_mbox_api_10:
    443 		apivstr = "1.0";
    444 		break;
    445 	case ixgbe_mbox_api_20:
    446 		apivstr = "2.0";
    447 		break;
    448 	case ixgbe_mbox_api_11:
    449 		apivstr = "1.1";
    450 		break;
    451 	case ixgbe_mbox_api_12:
    452 		apivstr = "1.2";
    453 		break;
    454 	case ixgbe_mbox_api_13:
    455 		apivstr = "1.3";
    456 		break;
    457 	default:
    458 		apivstr = "unknown";
    459 		break;
    460 	}
    461 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    462 
    463 	/* If no mac address was assigned, make a random one */
    464 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    465 		u8 addr[ETHER_ADDR_LEN];
    466 		uint64_t rndval = cprng_strong64();
    467 
    468 		memcpy(addr, &rndval, sizeof(addr));
     469 		addr[0] &= 0xFE;	/* clear the multicast bit */
     470 		addr[0] |= 0x02;	/* set the locally administered bit */
    471 		bcopy(addr, hw->mac.addr, sizeof(addr));
    472 	}
    473 
    474 	/* Register for VLAN events */
    475 #if 0 /* XXX delete after write? */
    476 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    477 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    478 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    479 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    480 #endif
    481 
    482 	/* Sysctls for limiting the amount of work done in the taskqueues */
    483 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    484 	    "max number of rx packets to process",
    485 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    486 
    487 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    488 	    "max number of tx packets to process",
    489 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    490 
    491 	/* Do descriptor calc and sanity checks */
    492 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    493 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    494 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    495 		adapter->num_tx_desc = DEFAULT_TXD;
    496 	} else
    497 		adapter->num_tx_desc = ixv_txd;
    498 
    499 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    500 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    501 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    502 		adapter->num_rx_desc = DEFAULT_RXD;
    503 	} else
    504 		adapter->num_rx_desc = ixv_rxd;
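
	/*
	 * With 16-byte advanced descriptors and DBA_ALIGN at 128 (the
	 * usual ixgbe.h values), the alignment checks above simply require
	 * the ring sizes to be multiples of 8 descriptors.
	 */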
    505 
    506 	/* Setup MSI-X */
    507 	error = ixv_configure_interrupts(adapter);
    508 	if (error)
    509 		goto err_out;
    510 
    511 	/* Allocate our TX/RX Queues */
    512 	if (ixgbe_allocate_queues(adapter)) {
    513 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    514 		error = ENOMEM;
    515 		goto err_out;
    516 	}
    517 
     518 	/* hw.ixv defaults init */
    519 	adapter->enable_aim = ixv_enable_aim;
    520 
    521 	adapter->txrx_use_workqueue = ixv_txrx_workqueue;
    522 
    523 	error = ixv_allocate_msix(adapter, pa);
    524 	if (error) {
    525 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    526 		goto err_late;
    527 	}
    528 
    529 	/* Setup OS specific network interface */
    530 	error = ixv_setup_interface(dev, adapter);
    531 	if (error != 0) {
    532 		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
    533 		goto err_late;
    534 	}
    535 
    536 	/* Do the stats setup */
    537 	ixv_save_stats(adapter);
    538 	ixv_init_stats(adapter);
    539 	ixv_add_stats_sysctls(adapter);
    540 
    541 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    542 		ixgbe_netmap_attach(adapter);
    543 
    544 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    545 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    546 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    547 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    548 
    549 	INIT_DEBUGOUT("ixv_attach: end");
    550 	adapter->osdep.attached = true;
    551 
    552 	return;
    553 
    554 err_late:
    555 	ixgbe_free_transmit_structures(adapter);
    556 	ixgbe_free_receive_structures(adapter);
    557 	free(adapter->queues, M_DEVBUF);
    558 err_out:
    559 	ixv_free_pci_resources(adapter);
    560 	IXGBE_CORE_LOCK_DESTROY(adapter);
    561 
    562 	return;
    563 } /* ixv_attach */
    564 
    565 /************************************************************************
    566  * ixv_detach - Device removal routine
    567  *
    568  *   Called when the driver is being removed.
    569  *   Stops the adapter and deallocates all the resources
    570  *   that were allocated for driver operation.
    571  *
    572  *   return 0 on success, positive on failure
    573  ************************************************************************/
    574 static int
    575 ixv_detach(device_t dev, int flags)
    576 {
    577 	struct adapter  *adapter = device_private(dev);
    578 	struct ixgbe_hw *hw = &adapter->hw;
    579 	struct ix_queue *que = adapter->queues;
    580 	struct tx_ring *txr = adapter->tx_rings;
    581 	struct rx_ring *rxr = adapter->rx_rings;
    582 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    583 
    584 	INIT_DEBUGOUT("ixv_detach: begin");
    585 	if (adapter->osdep.attached == false)
    586 		return 0;
    587 
    588 	/* Stop the interface. Callouts are stopped in it. */
    589 	ixv_ifstop(adapter->ifp, 1);
    590 
    591 #if NVLAN > 0
    592 	/* Make sure VLANs are not using driver */
    593 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    594 		;	/* nothing to do: no VLANs */
    595 	else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0)
    596 		vlan_ifdetach(adapter->ifp);
    597 	else {
    598 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    599 		return EBUSY;
    600 	}
    601 #endif
    602 
    603 	IXGBE_CORE_LOCK(adapter);
    604 	ixv_stop(adapter);
    605 	IXGBE_CORE_UNLOCK(adapter);
    606 
    607 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    608 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    609 			softint_disestablish(txr->txr_si);
    610 		softint_disestablish(que->que_si);
    611 	}
    612 	if (adapter->txr_wq != NULL)
    613 		workqueue_destroy(adapter->txr_wq);
    614 	if (adapter->txr_wq_enqueued != NULL)
    615 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
    616 	if (adapter->que_wq != NULL)
    617 		workqueue_destroy(adapter->que_wq);
    618 
    619 	/* Drain the Mailbox(link) queue */
    620 	softint_disestablish(adapter->link_si);
    621 
    622 	/* Unregister VLAN events */
    623 #if 0 /* XXX msaitoh delete after write? */
    624 	if (adapter->vlan_attach != NULL)
    625 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    626 	if (adapter->vlan_detach != NULL)
    627 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    628 #endif
    629 
    630 	ether_ifdetach(adapter->ifp);
    631 	callout_halt(&adapter->timer, NULL);
    632 
    633 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    634 		netmap_detach(adapter->ifp);
    635 
    636 	ixv_free_pci_resources(adapter);
    637 #if 0 /* XXX the NetBSD port is probably missing something here */
    638 	bus_generic_detach(dev);
    639 #endif
    640 	if_detach(adapter->ifp);
    641 	if_percpuq_destroy(adapter->ipq);
    642 
    643 	sysctl_teardown(&adapter->sysctllog);
    644 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    645 	evcnt_detach(&adapter->mbuf_defrag_failed);
    646 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    647 	evcnt_detach(&adapter->einval_tx_dma_setup);
    648 	evcnt_detach(&adapter->other_tx_dma_setup);
    649 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    650 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    651 	evcnt_detach(&adapter->watchdog_events);
    652 	evcnt_detach(&adapter->tso_err);
    653 	evcnt_detach(&adapter->link_irq);
    654 
    655 	txr = adapter->tx_rings;
    656 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    657 		evcnt_detach(&adapter->queues[i].irqs);
    658 		evcnt_detach(&adapter->queues[i].handleq);
    659 		evcnt_detach(&adapter->queues[i].req);
    660 		evcnt_detach(&txr->no_desc_avail);
    661 		evcnt_detach(&txr->total_packets);
    662 		evcnt_detach(&txr->tso_tx);
    663 #ifndef IXGBE_LEGACY_TX
    664 		evcnt_detach(&txr->pcq_drops);
    665 #endif
    666 
    667 		evcnt_detach(&rxr->rx_packets);
    668 		evcnt_detach(&rxr->rx_bytes);
    669 		evcnt_detach(&rxr->rx_copies);
    670 		evcnt_detach(&rxr->no_jmbuf);
    671 		evcnt_detach(&rxr->rx_discarded);
    672 	}
    673 	evcnt_detach(&stats->ipcs);
    674 	evcnt_detach(&stats->l4cs);
    675 	evcnt_detach(&stats->ipcs_bad);
    676 	evcnt_detach(&stats->l4cs_bad);
    677 
    678 	/* Packet Reception Stats */
    679 	evcnt_detach(&stats->vfgorc);
    680 	evcnt_detach(&stats->vfgprc);
    681 	evcnt_detach(&stats->vfmprc);
    682 
    683 	/* Packet Transmission Stats */
    684 	evcnt_detach(&stats->vfgotc);
    685 	evcnt_detach(&stats->vfgptc);
    686 
    687 	/* Mailbox Stats */
    688 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    689 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    690 	evcnt_detach(&hw->mbx.stats.acks);
    691 	evcnt_detach(&hw->mbx.stats.reqs);
    692 	evcnt_detach(&hw->mbx.stats.rsts);
    693 
    694 	ixgbe_free_transmit_structures(adapter);
    695 	ixgbe_free_receive_structures(adapter);
    696 	for (int i = 0; i < adapter->num_queues; i++) {
    697 		struct ix_queue *lque = &adapter->queues[i];
    698 		mutex_destroy(&lque->dc_mtx);
    699 	}
    700 	free(adapter->queues, M_DEVBUF);
    701 
    702 	IXGBE_CORE_LOCK_DESTROY(adapter);
    703 
    704 	return (0);
    705 } /* ixv_detach */
    706 
    707 /************************************************************************
    708  * ixv_init_locked - Init entry point
    709  *
    710  *   Used in two ways: It is used by the stack as an init entry
     711  *   point in the network interface structure. It is also used
    712  *   by the driver as a hw/sw initialization routine to get
    713  *   to a consistent state.
    714  *
    715  *   return 0 on success, positive on failure
    716  ************************************************************************/
    717 static void
    718 ixv_init_locked(struct adapter *adapter)
    719 {
    720 	struct ifnet	*ifp = adapter->ifp;
    721 	device_t 	dev = adapter->dev;
    722 	struct ixgbe_hw *hw = &adapter->hw;
    723 	struct ix_queue	*que = adapter->queues;
    724 	int             error = 0;
    725 	uint32_t mask;
    726 	int i;
    727 
    728 	INIT_DEBUGOUT("ixv_init_locked: begin");
    729 	KASSERT(mutex_owned(&adapter->core_mtx));
    730 	hw->adapter_stopped = FALSE;
    731 	hw->mac.ops.stop_adapter(hw);
    732 	callout_stop(&adapter->timer);
    733 
    734 	/* reprogram the RAR[0] in case user changed it. */
    735 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    736 
    737 	/* Get the latest mac address, User can use a LAA */
    738 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
    739 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    740 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    741 
    742 	/* Prepare transmit descriptors and buffers */
    743 	if (ixgbe_setup_transmit_structures(adapter)) {
    744 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    745 		ixv_stop(adapter);
    746 		return;
    747 	}
    748 
    749 	/* Reset VF and renegotiate mailbox API version */
    750 	hw->mac.ops.reset_hw(hw);
    751 	hw->mac.ops.start_hw(hw);
    752 	error = ixv_negotiate_api(adapter);
    753 	if (error)
    754 		device_printf(dev,
    755 		    "Mailbox API negotiation failed in init_locked!\n");
    756 
    757 	ixv_initialize_transmit_units(adapter);
    758 
    759 	/* Setup Multicast table */
    760 	ixv_set_multi(adapter);
    761 
    762 	/*
    763 	 * Determine the correct mbuf pool
    764 	 * for doing jumbo/headersplit
    765 	 */
    766 	if (ifp->if_mtu > ETHERMTU)
    767 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    768 	else
    769 		adapter->rx_mbuf_sz = MCLBYTES;
    770 
    771 	/* Prepare receive descriptors and buffers */
    772 	if (ixgbe_setup_receive_structures(adapter)) {
    773 		device_printf(dev, "Could not setup receive structures\n");
    774 		ixv_stop(adapter);
    775 		return;
    776 	}
    777 
    778 	/* Configure RX settings */
    779 	ixv_initialize_receive_units(adapter);
    780 
    781 #if 0 /* XXX isn't it required? -- msaitoh  */
    782 	/* Set the various hardware offload abilities */
    783 	ifp->if_hwassist = 0;
    784 	if (ifp->if_capenable & IFCAP_TSO4)
    785 		ifp->if_hwassist |= CSUM_TSO;
    786 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    787 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    788 #if __FreeBSD_version >= 800000
    789 		ifp->if_hwassist |= CSUM_SCTP;
    790 #endif
    791 	}
    792 #endif
    793 
    794 	/* Set up VLAN offload and filter */
    795 	ixv_setup_vlan_support(adapter);
    796 
    797 	/* Set up MSI-X routing */
    798 	ixv_configure_ivars(adapter);
    799 
    800 	/* Set up auto-mask */
    801 	mask = (1 << adapter->vector);
    802 	for (i = 0; i < adapter->num_queues; i++, que++)
    803 		mask |= (1 << que->msix);
    804 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
    805 
    806 	/* Set moderation on the Link interrupt */
    807 	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
    808 
    809 	/* Stats init */
    810 	ixv_init_stats(adapter);
    811 
    812 	/* Config/Enable Link */
    813 	hw->mac.get_link_status = TRUE;
    814 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    815 	    FALSE);
    816 
    817 	/* Start watchdog */
    818 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    819 
    820 	/* And now turn on interrupts */
    821 	ixv_enable_intr(adapter);
    822 
     823 	/* Update saved flags. See ixv_ifflags_cb() */
    824 	adapter->if_flags = ifp->if_flags;
    825 
    826 	/* Now inform the stack we're ready */
    827 	ifp->if_flags |= IFF_RUNNING;
    828 	ifp->if_flags &= ~IFF_OACTIVE;
    829 
    830 	return;
    831 } /* ixv_init_locked */
    832 
    833 /************************************************************************
    834  * ixv_enable_queue
    835  ************************************************************************/
    836 static inline void
    837 ixv_enable_queue(struct adapter *adapter, u32 vector)
    838 {
    839 	struct ixgbe_hw *hw = &adapter->hw;
    840 	struct ix_queue *que = &adapter->queues[vector];
    841 	u32             queue = 1 << vector;
    842 	u32             mask;
    843 
    844 	mutex_enter(&que->dc_mtx);
    845 	if (que->disabled_count > 0 && --que->disabled_count > 0)
    846 		goto out;
    847 
    848 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    849 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    850 out:
    851 	mutex_exit(&que->dc_mtx);
    852 } /* ixv_enable_queue */
    853 
    854 /************************************************************************
    855  * ixv_disable_queue
    856  ************************************************************************/
    857 static inline void
    858 ixv_disable_queue(struct adapter *adapter, u32 vector)
    859 {
    860 	struct ixgbe_hw *hw = &adapter->hw;
    861 	struct ix_queue *que = &adapter->queues[vector];
    862 	u64             queue = (u64)(1 << vector);
    863 	u32             mask;
    864 
    865 	mutex_enter(&que->dc_mtx);
    866 	if (que->disabled_count++ > 0)
    867 		goto  out;
    868 
    869 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    870 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    871 out:
    872 	mutex_exit(&que->dc_mtx);
    873 } /* ixv_disable_queue */
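
/*
 * ixv_enable_queue() and ixv_disable_queue() nest: disabled_count is
 * bumped on every disable and the vector is actually masked in VTEIMC
 * only on the first disable; it is unmasked again in VTEIMS only once
 * the count has dropped back to zero.
 */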
    874 
    875 static inline void
    876 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    877 {
    878 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    879 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    880 } /* ixv_rearm_queues */
    881 
    882 
    883 /************************************************************************
    884  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    885  ************************************************************************/
    886 static int
    887 ixv_msix_que(void *arg)
    888 {
    889 	struct ix_queue	*que = arg;
    890 	struct adapter  *adapter = que->adapter;
    891 	struct tx_ring	*txr = que->txr;
    892 	struct rx_ring	*rxr = que->rxr;
    893 	bool		more;
    894 	u32		newitr = 0;
    895 
    896 	ixv_disable_queue(adapter, que->msix);
    897 	++que->irqs.ev_count;
    898 
    899 #ifdef __NetBSD__
    900 	/* Don't run ixgbe_rxeof in interrupt context */
    901 	more = true;
    902 #else
    903 	more = ixgbe_rxeof(que);
    904 #endif
    905 
    906 	IXGBE_TX_LOCK(txr);
    907 	ixgbe_txeof(txr);
    908 	IXGBE_TX_UNLOCK(txr);
    909 
    910 	/* Do AIM now? */
    911 
    912 	if (adapter->enable_aim == false)
    913 		goto no_calc;
    914 	/*
    915 	 * Do Adaptive Interrupt Moderation:
    916 	 *  - Write out last calculated setting
    917 	 *  - Calculate based on average size over
    918 	 *    the last interval.
    919 	 */
    920 	if (que->eitr_setting)
    921 		ixv_eitr_write(adapter, que->msix, que->eitr_setting);
    922 
    923 	que->eitr_setting = 0;
    924 
    925 	/* Idle, do nothing */
    926 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    927 		goto no_calc;
    928 
    929 	if ((txr->bytes) && (txr->packets))
    930 		newitr = txr->bytes/txr->packets;
    931 	if ((rxr->bytes) && (rxr->packets))
    932 		newitr = max(newitr, (rxr->bytes / rxr->packets));
    933 	newitr += 24; /* account for hardware frame, crc */
    934 
    935 	/* set an upper boundary */
    936 	newitr = min(newitr, 3000);
    937 
    938 	/* Be nice to the mid range */
    939 	if ((newitr > 300) && (newitr < 1200))
    940 		newitr = (newitr / 3);
    941 	else
    942 		newitr = (newitr / 2);
    943 
    944 	/*
     945 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
     946 	 * Currently we use 2us for RSC_DELAY.  On 100M (and presumably 10M,
     947 	 * though it is not documented) the minimum value is always greater
     948 	 * than 2us, but on 1G and faster it is not, so enforce a floor here.
    949 	 */
    950 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
    951 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
    952 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
    953 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
    954 	}
    955 
    956 	/* save for next interrupt */
    957 	que->eitr_setting = newitr;
    958 
    959 	/* Reset state */
    960 	txr->bytes = 0;
    961 	txr->packets = 0;
    962 	rxr->bytes = 0;
    963 	rxr->packets = 0;
    964 
    965 no_calc:
    966 	if (more)
    967 		softint_schedule(que->que_si);
    968 	else /* Re-enable this interrupt */
    969 		ixv_enable_queue(adapter, que->msix);
    970 
    971 	return 1;
    972 } /* ixv_msix_que */
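
/*
 * A worked example of the AIM arithmetic above, with illustrative
 * numbers: if the last interval moved 300000 TX bytes in 200 packets
 * and 90000 RX bytes in 100 packets, then
 *
 *	newitr = max(300000 / 200, 90000 / 100)	= 1500
 *	newitr += 24				= 1524	(frame + CRC)
 *	newitr = min(1524, 3000)		= 1524	(upper bound)
 *	1524 is outside the 300..1200 mid range, so newitr /= 2 = 762
 *
 * and 762 is saved in que->eitr_setting to be written to VTEITR by
 * ixv_eitr_write() on the next interrupt.
 */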
    973 
    974 /************************************************************************
    975  * ixv_msix_mbx
    976  ************************************************************************/
    977 static int
    978 ixv_msix_mbx(void *arg)
    979 {
    980 	struct adapter	*adapter = arg;
    981 	struct ixgbe_hw *hw = &adapter->hw;
    982 
    983 	++adapter->link_irq.ev_count;
    984 	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    985 
    986 	/* Link status change */
    987 	hw->mac.get_link_status = TRUE;
    988 	softint_schedule(adapter->link_si);
    989 
    990 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
    991 
    992 	return 1;
    993 } /* ixv_msix_mbx */
    994 
    995 static void
    996 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
    997 {
    998 
    999 	/*
    1000 	 * Devices newer than the 82598 all have VF functions, so this
    1001 	 * function is simple.
   1002 	 */
   1003 	itr |= IXGBE_EITR_CNT_WDIS;
   1004 
   1005 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
   1006 }
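
/*
 * Setting IXGBE_EITR_CNT_WDIS on the write is intended to keep the
 * hardware from clearing its internal ITR counter when only the
 * interval is being updated; see the EITR register description in the
 * 82599 (and later) datasheets.
 */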
   1007 
   1008 
   1009 /************************************************************************
   1010  * ixv_media_status - Media Ioctl callback
   1011  *
   1012  *   Called whenever the user queries the status of
   1013  *   the interface using ifconfig.
   1014  ************************************************************************/
   1015 static void
   1016 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   1017 {
   1018 	struct adapter *adapter = ifp->if_softc;
   1019 
   1020 	INIT_DEBUGOUT("ixv_media_status: begin");
   1021 	IXGBE_CORE_LOCK(adapter);
   1022 	ixv_update_link_status(adapter);
   1023 
   1024 	ifmr->ifm_status = IFM_AVALID;
   1025 	ifmr->ifm_active = IFM_ETHER;
   1026 
   1027 	if (!adapter->link_active) {
   1028 		ifmr->ifm_active |= IFM_NONE;
   1029 		IXGBE_CORE_UNLOCK(adapter);
   1030 		return;
   1031 	}
   1032 
   1033 	ifmr->ifm_status |= IFM_ACTIVE;
   1034 
   1035 	switch (adapter->link_speed) {
   1036 		case IXGBE_LINK_SPEED_10GB_FULL:
   1037 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1038 			break;
   1039 		case IXGBE_LINK_SPEED_5GB_FULL:
   1040 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   1041 			break;
   1042 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   1043 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   1044 			break;
   1045 		case IXGBE_LINK_SPEED_1GB_FULL:
   1046 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1047 			break;
   1048 		case IXGBE_LINK_SPEED_100_FULL:
   1049 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1050 			break;
   1051 		case IXGBE_LINK_SPEED_10_FULL:
   1052 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   1053 			break;
   1054 	}
   1055 
   1056 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   1057 
   1058 	IXGBE_CORE_UNLOCK(adapter);
   1059 } /* ixv_media_status */
   1060 
   1061 /************************************************************************
   1062  * ixv_media_change - Media Ioctl callback
   1063  *
   1064  *   Called when the user changes speed/duplex using
    1065  *   the media/mediaopt options with ifconfig.
   1066  ************************************************************************/
   1067 static int
   1068 ixv_media_change(struct ifnet *ifp)
   1069 {
   1070 	struct adapter *adapter = ifp->if_softc;
   1071 	struct ifmedia *ifm = &adapter->media;
   1072 
   1073 	INIT_DEBUGOUT("ixv_media_change: begin");
   1074 
   1075 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1076 		return (EINVAL);
   1077 
   1078 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1079 	case IFM_AUTO:
   1080 		break;
   1081 	default:
   1082 		device_printf(adapter->dev, "Only auto media type\n");
   1083 		return (EINVAL);
   1084 	}
   1085 
   1086 	return (0);
   1087 } /* ixv_media_change */
   1088 
   1089 
   1090 /************************************************************************
   1091  * ixv_negotiate_api
   1092  *
   1093  *   Negotiate the Mailbox API with the PF;
   1094  *   start with the most featured API first.
   1095  ************************************************************************/
   1096 static int
   1097 ixv_negotiate_api(struct adapter *adapter)
   1098 {
   1099 	struct ixgbe_hw *hw = &adapter->hw;
   1100 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1101 	                              ixgbe_mbox_api_10,
   1102 	                              ixgbe_mbox_api_unknown };
   1103 	int             i = 0;
   1104 
   1105 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1106 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1107 			return (0);
   1108 		i++;
   1109 	}
   1110 
   1111 	return (EINVAL);
   1112 } /* ixv_negotiate_api */
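
/*
 * Note that only API versions 1.1 and 1.0 are attempted above, so the
 * 1.2/1.3 cases reported for in ixv_attach() are not negotiated by this
 * function.
 */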
   1113 
   1114 
   1115 /************************************************************************
   1116  * ixv_set_multi - Multicast Update
   1117  *
   1118  *   Called whenever multicast address list is updated.
   1119  ************************************************************************/
   1120 static void
   1121 ixv_set_multi(struct adapter *adapter)
   1122 {
   1123 	struct ether_multi *enm;
   1124 	struct ether_multistep step;
   1125 	struct ethercom *ec = &adapter->osdep.ec;
   1126 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1127 	u8                 *update_ptr;
   1128 	int                mcnt = 0;
   1129 
   1130 	KASSERT(mutex_owned(&adapter->core_mtx));
   1131 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1132 
   1133 	ETHER_LOCK(ec);
   1134 	ETHER_FIRST_MULTI(step, ec, enm);
   1135 	while (enm != NULL) {
   1136 		bcopy(enm->enm_addrlo,
   1137 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1138 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1139 		mcnt++;
   1140 		/* XXX This might be required --msaitoh */
   1141 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1142 			break;
   1143 		ETHER_NEXT_MULTI(step, enm);
   1144 	}
   1145 	ETHER_UNLOCK(ec);
   1146 
   1147 	update_ptr = mta;
   1148 
   1149 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1150 	    ixv_mc_array_itr, TRUE);
   1151 } /* ixv_set_multi */
   1152 
   1153 /************************************************************************
   1154  * ixv_mc_array_itr
   1155  *
   1156  *   An iterator function needed by the multicast shared code.
   1157  *   It feeds the shared code routine the addresses in the
   1158  *   array of ixv_set_multi() one by one.
   1159  ************************************************************************/
   1160 static u8 *
   1161 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1162 {
   1163 	u8 *addr = *update_ptr;
   1164 	u8 *newptr;
   1165 
   1166 	*vmdq = 0;
   1167 
   1168 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1169 	*update_ptr = newptr;
   1170 
   1171 	return addr;
   1172 } /* ixv_mc_array_itr */
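
/*
 * Conceptually, the shared code consumes this iterator roughly as in
 * the following sketch (simplified; not the actual shared-code source):
 *
 *	u8 *p = mta;
 *	u32 vmdq;
 *	for (int i = 0; i < mcnt; i++) {
 *		u8 *mac = ixv_mc_array_itr(hw, &p, &vmdq);
 *		(send "mac" to the PF in a mailbox message)
 *	}
 *
 * Each call returns the current address and advances the cursor by
 * IXGBE_ETH_LENGTH_OF_ADDRESS bytes.
 */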
   1173 
   1174 /************************************************************************
   1175  * ixv_local_timer - Timer routine
   1176  *
   1177  *   Checks for link status, updates statistics,
   1178  *   and runs the watchdog check.
   1179  ************************************************************************/
   1180 static void
   1181 ixv_local_timer(void *arg)
   1182 {
   1183 	struct adapter *adapter = arg;
   1184 
   1185 	IXGBE_CORE_LOCK(adapter);
   1186 	ixv_local_timer_locked(adapter);
   1187 	IXGBE_CORE_UNLOCK(adapter);
   1188 }
   1189 
   1190 static void
   1191 ixv_local_timer_locked(void *arg)
   1192 {
   1193 	struct adapter	*adapter = arg;
   1194 	struct ix_queue	*que = adapter->queues;
   1195 	u64		v0, v1, v2, v3, v4, v5, v6, v7;
   1196 	int		i;
   1197 
   1198 	KASSERT(mutex_owned(&adapter->core_mtx));
   1199 
   1200 	ixv_check_link(adapter);
   1201 
   1202 	/* Stats Update */
   1203 	ixv_update_stats(adapter);
   1204 
   1205 	/* Update some event counters */
   1206 	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
   1207 	que = adapter->queues;
   1208 	for (i = 0; i < adapter->num_queues; i++, que++) {
   1209 		struct tx_ring  *txr = que->txr;
   1210 
   1211 		v0 += txr->q_efbig_tx_dma_setup;
   1212 		v1 += txr->q_mbuf_defrag_failed;
   1213 		v2 += txr->q_efbig2_tx_dma_setup;
   1214 		v3 += txr->q_einval_tx_dma_setup;
   1215 		v4 += txr->q_other_tx_dma_setup;
   1216 		v5 += txr->q_eagain_tx_dma_setup;
   1217 		v6 += txr->q_enomem_tx_dma_setup;
   1218 		v7 += txr->q_tso_err;
   1219 	}
   1220 	adapter->efbig_tx_dma_setup.ev_count = v0;
   1221 	adapter->mbuf_defrag_failed.ev_count = v1;
   1222 	adapter->efbig2_tx_dma_setup.ev_count = v2;
   1223 	adapter->einval_tx_dma_setup.ev_count = v3;
   1224 	adapter->other_tx_dma_setup.ev_count = v4;
   1225 	adapter->eagain_tx_dma_setup.ev_count = v5;
   1226 	adapter->enomem_tx_dma_setup.ev_count = v6;
   1227 	adapter->tso_err.ev_count = v7;
   1228 
   1229 	ixv_watchdog(adapter->ifp);
   1230 
   1231 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1232 
   1233 	return;
   1234 } /* ixv_local_timer */
   1235 
   1236 static void
   1237 ixv_watchdog(struct ifnet *ifp)
   1238 {
   1239 	struct adapter  *adapter = ifp->if_softc;
   1240 	struct ix_queue *que;
   1241 	struct tx_ring  *txr;
   1242 	u64		queues = 0;
   1243 	bool		hung = false;
   1244 	bool		sending = false;
   1245 	int		i;
   1246 
   1247 	txr = adapter->tx_rings;
   1248 	for (i = 0; i < adapter->num_queues; i++, txr++) {
   1249 		hung = ixv_watchdog_txq(ifp, txr, &sending);
   1250 		if (hung)
   1251 			break;
   1252 		else if (sending)
   1253 			queues |= ((u64)1 << txr->me);
   1254 	}
   1255 
   1256 	if (hung) {
   1257 		ifp->if_flags &= ~IFF_RUNNING;
   1258 		ifp->if_oerrors++;
   1259 		adapter->watchdog_events.ev_count++;
   1260 		ixv_init_locked(adapter);
   1261 	} else if (queues != 0) {
   1262 		/*
   1263 		 * Force an IRQ on queues with work
   1264 		 *
    1265 		 * Calling ixv_rearm_queues() might not be required; I've never
   1266 		 * seen any device timeout on ixv(4).
   1267 		 */
   1268 		que = adapter->queues;
   1269 		for (i = 0; i < adapter->num_queues; i++, que++) {
   1270 			u64 index = queues & ((u64)1 << i);
   1271 
   1272 			mutex_enter(&que->dc_mtx);
   1273 			if ((index != 0) && (que->disabled_count == 0))
   1274 				ixv_rearm_queues(adapter, index);
   1275 			mutex_exit(&que->dc_mtx);
   1276 		}
   1277 	}
   1278 }
   1279 
   1280 static bool
   1281 ixv_watchdog_txq(struct ifnet *ifp, struct tx_ring *txr, bool *sending)
   1282 {
   1283 	struct adapter *adapter = ifp->if_softc;
   1284 	device_t dev = adapter->dev;
   1285 	bool hung = false;
   1286 	bool more = false;
   1287 
   1288 	IXGBE_TX_LOCK(txr);
   1289 	*sending = txr->sending;
   1290 	if (*sending && ((time_uptime - txr->lastsent) > IXGBE_TX_TIMEOUT)) {
   1291 		/*
   1292 		 * Since we're using delayed interrupts, sweep up before we
   1293 		 * report an error.
   1294 		 */
   1295 		do {
   1296 			more = ixgbe_txeof(txr);
   1297 		} while (more);
   1298 		hung = true;
   1299 		device_printf(dev,
   1300 		    "Watchdog timeout (queue %d%s)-- resetting\n", txr->me,
   1301 		    (txr->tx_avail == txr->num_desc)
   1302 		    ? ", lost interrupt?" : "");
   1303 	}
   1304 	IXGBE_TX_UNLOCK(txr);
   1305 
   1306 	return hung;
   1307 }
   1308 
   1309 /************************************************************************
   1310  * ixv_update_link_status - Update OS on link state
   1311  *
   1312  * Note: Only updates the OS on the cached link state.
   1313  *       The real check of the hardware only happens with
   1314  *       a link interrupt.
   1315  ************************************************************************/
   1316 static void
   1317 ixv_update_link_status(struct adapter *adapter)
   1318 {
   1319 	struct ifnet *ifp = adapter->ifp;
   1320 	device_t     dev = adapter->dev;
   1321 
   1322 	KASSERT(mutex_owned(&adapter->core_mtx));
   1323 
   1324 	if (adapter->link_up) {
   1325 		if (adapter->link_active == FALSE) {
   1326 			if (bootverbose) {
   1327 				const char *bpsmsg;
   1328 
   1329 				switch (adapter->link_speed) {
   1330 				case IXGBE_LINK_SPEED_10GB_FULL:
   1331 					bpsmsg = "10 Gbps";
   1332 					break;
   1333 				case IXGBE_LINK_SPEED_5GB_FULL:
   1334 					bpsmsg = "5 Gbps";
   1335 					break;
   1336 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1337 					bpsmsg = "2.5 Gbps";
   1338 					break;
   1339 				case IXGBE_LINK_SPEED_1GB_FULL:
   1340 					bpsmsg = "1 Gbps";
   1341 					break;
   1342 				case IXGBE_LINK_SPEED_100_FULL:
   1343 					bpsmsg = "100 Mbps";
   1344 					break;
   1345 				case IXGBE_LINK_SPEED_10_FULL:
   1346 					bpsmsg = "10 Mbps";
   1347 					break;
   1348 				default:
   1349 					bpsmsg = "unknown speed";
   1350 					break;
   1351 				}
   1352 				device_printf(dev, "Link is up %s %s \n",
   1353 				    bpsmsg, "Full Duplex");
   1354 			}
   1355 			adapter->link_active = TRUE;
   1356 			if_link_state_change(ifp, LINK_STATE_UP);
   1357 		}
   1358 	} else { /* Link down */
   1359 		if (adapter->link_active == TRUE) {
   1360 			if (bootverbose)
   1361 				device_printf(dev, "Link is Down\n");
   1362 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1363 			adapter->link_active = FALSE;
   1364 		}
   1365 	}
   1366 } /* ixv_update_link_status */
   1367 
   1368 
   1369 /************************************************************************
   1370  * ixv_stop - Stop the hardware
   1371  *
   1372  *   Disables all traffic on the adapter by issuing a
    1373  *   global reset on the MAC.
   1374  ************************************************************************/
   1375 static void
   1376 ixv_ifstop(struct ifnet *ifp, int disable)
   1377 {
   1378 	struct adapter *adapter = ifp->if_softc;
   1379 
   1380 	IXGBE_CORE_LOCK(adapter);
   1381 	ixv_stop(adapter);
   1382 	IXGBE_CORE_UNLOCK(adapter);
   1383 }
   1384 
   1385 static void
   1386 ixv_stop(void *arg)
   1387 {
   1388 	struct ifnet    *ifp;
   1389 	struct adapter  *adapter = arg;
   1390 	struct ixgbe_hw *hw = &adapter->hw;
   1391 	struct tx_ring  *txr;
   1392 	int i;
   1393 
   1394 	ifp = adapter->ifp;
   1395 
   1396 	KASSERT(mutex_owned(&adapter->core_mtx));
   1397 
   1398 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1399 	ixv_disable_intr(adapter);
   1400 	callout_stop(&adapter->timer);
   1401 
   1402 	txr = adapter->tx_rings;
   1403 	for (i = 0; i < adapter->num_queues; i++, txr++) {
   1404 		IXGBE_TX_LOCK(txr);
   1405 		txr->sending = false;
   1406 		IXGBE_TX_UNLOCK(txr);
   1407 	}
   1408 
   1409 	/* Tell the stack that the interface is no longer active */
   1410 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1411 
   1412 	hw->mac.ops.reset_hw(hw);
   1413 	adapter->hw.adapter_stopped = FALSE;
   1414 	hw->mac.ops.stop_adapter(hw);
   1415 
   1416 	/* reprogram the RAR[0] in case user changed it. */
   1417 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1418 
   1419 	return;
   1420 } /* ixv_stop */
   1421 
   1422 
   1423 /************************************************************************
   1424  * ixv_allocate_pci_resources
   1425  ************************************************************************/
   1426 static int
   1427 ixv_allocate_pci_resources(struct adapter *adapter,
   1428     const struct pci_attach_args *pa)
   1429 {
   1430 	pcireg_t	memtype;
   1431 	device_t        dev = adapter->dev;
   1432 	bus_addr_t addr;
   1433 	int flags;
   1434 
   1435 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1436 	switch (memtype) {
   1437 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1438 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1439 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1440 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1441 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1442 			goto map_err;
   1443 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1444 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1445 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1446 		}
   1447 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1448 		     adapter->osdep.mem_size, flags,
   1449 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1450 map_err:
   1451 			adapter->osdep.mem_size = 0;
   1452 			aprint_error_dev(dev, "unable to map BAR0\n");
   1453 			return ENXIO;
   1454 		}
   1455 		break;
   1456 	default:
   1457 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1458 		return ENXIO;
   1459 	}
   1460 
   1461 	/* Pick up the tuneable queues */
   1462 	adapter->num_queues = ixv_num_queues;
   1463 
   1464 	return (0);
   1465 } /* ixv_allocate_pci_resources */
   1466 
   1467 /************************************************************************
   1468  * ixv_free_pci_resources
   1469  ************************************************************************/
   1470 static void
   1471 ixv_free_pci_resources(struct adapter * adapter)
   1472 {
   1473 	struct 		ix_queue *que = adapter->queues;
   1474 	int		rid;
   1475 
   1476 	/*
   1477 	 *  Release all msix queue resources:
   1478 	 */
   1479 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1480 		if (que->res != NULL)
   1481 			pci_intr_disestablish(adapter->osdep.pc,
   1482 			    adapter->osdep.ihs[i]);
   1483 	}
   1484 
   1485 
   1486 	/* Clean the Mailbox interrupt last */
   1487 	rid = adapter->vector;
   1488 
   1489 	if (adapter->osdep.ihs[rid] != NULL) {
   1490 		pci_intr_disestablish(adapter->osdep.pc,
   1491 		    adapter->osdep.ihs[rid]);
   1492 		adapter->osdep.ihs[rid] = NULL;
   1493 	}
   1494 
   1495 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1496 	    adapter->osdep.nintrs);
   1497 
   1498 	if (adapter->osdep.mem_size != 0) {
   1499 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1500 		    adapter->osdep.mem_bus_space_handle,
   1501 		    adapter->osdep.mem_size);
   1502 	}
   1503 
   1504 	return;
   1505 } /* ixv_free_pci_resources */
   1506 
   1507 /************************************************************************
   1508  * ixv_setup_interface
   1509  *
   1510  *   Setup networking device structure and register an interface.
   1511  ************************************************************************/
   1512 static int
   1513 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1514 {
   1515 	struct ethercom *ec = &adapter->osdep.ec;
   1516 	struct ifnet   *ifp;
   1517 	int rv;
   1518 
   1519 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1520 
   1521 	ifp = adapter->ifp = &ec->ec_if;
   1522 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1523 	ifp->if_baudrate = IF_Gbps(10);
   1524 	ifp->if_init = ixv_init;
   1525 	ifp->if_stop = ixv_ifstop;
   1526 	ifp->if_softc = adapter;
   1527 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1528 #ifdef IXGBE_MPSAFE
   1529 	ifp->if_extflags = IFEF_MPSAFE;
   1530 #endif
   1531 	ifp->if_ioctl = ixv_ioctl;
   1532 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1533 #if 0
   1534 		ixv_start_locked = ixgbe_legacy_start_locked;
   1535 #endif
   1536 	} else {
   1537 		ifp->if_transmit = ixgbe_mq_start;
   1538 #if 0
   1539 		ixv_start_locked = ixgbe_mq_start_locked;
   1540 #endif
   1541 	}
   1542 	ifp->if_start = ixgbe_legacy_start;
   1543 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1544 	IFQ_SET_READY(&ifp->if_snd);
   1545 
   1546 	rv = if_initialize(ifp);
   1547 	if (rv != 0) {
   1548 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1549 		return rv;
   1550 	}
   1551 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1552 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1553 	/*
    1554 	 * We use a per-TX-queue softint, so if_deferred_start_init()
    1555 	 * isn't used.
   1556 	 */
   1557 	if_register(ifp);
   1558 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1559 
   1560 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1561 
   1562 	/*
   1563 	 * Tell the upper layer(s) we support long frames.
   1564 	 */
   1565 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1566 
   1567 	/* Set capability flags */
   1568 	ifp->if_capabilities |= IFCAP_HWCSUM
   1569 	                     |  IFCAP_TSOv4
   1570 	                     |  IFCAP_TSOv6;
   1571 	ifp->if_capenable = 0;
   1572 
   1573 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1574 			    |  ETHERCAP_VLAN_HWCSUM
   1575 			    |  ETHERCAP_JUMBO_MTU
   1576 			    |  ETHERCAP_VLAN_MTU;
   1577 
   1578 	/* Enable the above capabilities by default */
   1579 	ec->ec_capenable = ec->ec_capabilities;
   1580 
   1581 	/* Don't enable LRO by default */
   1582 	ifp->if_capabilities |= IFCAP_LRO;
   1583 #if 0
   1584 	ifp->if_capenable = ifp->if_capabilities;
   1585 #endif
   1586 
   1587 	/*
   1588 	 * Specify the media types supported by this adapter and register
   1589 	 * callbacks to update media and link information
   1590 	 */
   1591 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1592 	    ixv_media_status);
   1593 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1594 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1595 
   1596 	return 0;
   1597 } /* ixv_setup_interface */
   1598 
   1599 
   1600 /************************************************************************
   1601  * ixv_initialize_transmit_units - Enable transmit unit.
   1602  ************************************************************************/
   1603 static void
   1604 ixv_initialize_transmit_units(struct adapter *adapter)
   1605 {
   1606 	struct tx_ring	*txr = adapter->tx_rings;
   1607 	struct ixgbe_hw	*hw = &adapter->hw;
   1608 	int i;
   1609 
   1610 	for (i = 0; i < adapter->num_queues; i++, txr++) {
   1611 		u64 tdba = txr->txdma.dma_paddr;
   1612 		u32 txctrl, txdctl;
   1613 		int j = txr->me;
   1614 
   1615 		/* Set WTHRESH to 8, burst writeback */
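         		/*
         		 * Note: (8 << 16) puts the value 8 in TXDCTL's WTHRESH
         		 * field (bits 22:16), so descriptor write-backs are
         		 * batched roughly eight descriptors at a time.
         		 */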
   1616 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1617 		txdctl |= (8 << 16);
   1618 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1619 
   1620 		/* Set the HW Tx Head and Tail indices */
   1621 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
   1622 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
   1623 
   1624 		/* Set Tx Tail register */
   1625 		txr->tail = IXGBE_VFTDT(j);
   1626 
   1627 		/* Set Ring parameters */
   1628 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
   1629 		    (tdba & 0x00000000ffffffffULL));
   1630 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
   1631 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
   1632 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1633 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
   1634 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1635 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
   1636 
   1637 		/* Now enable */
   1638 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1639 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1640 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1641 	}
   1642 
   1643 	return;
   1644 } /* ixv_initialize_transmit_units */
   1645 
   1646 
   1647 /************************************************************************
   1648  * ixv_initialize_rss_mapping
   1649  ************************************************************************/
   1650 static void
   1651 ixv_initialize_rss_mapping(struct adapter *adapter)
   1652 {
   1653 	struct ixgbe_hw *hw = &adapter->hw;
   1654 	u32             reta = 0, mrqc, rss_key[10];
   1655 	int             queue_id;
   1656 	int             i, j;
   1657 	u32             rss_hash_config;
   1658 
    1659 	/* Force use of the default RSS key. */
   1660 #ifdef __NetBSD__
   1661 	rss_getkey((uint8_t *) &rss_key);
   1662 #else
   1663 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1664 		/* Fetch the configured RSS key */
   1665 		rss_getkey((uint8_t *)&rss_key);
   1666 	} else {
   1667 		/* set up random bits */
   1668 		cprng_fast(&rss_key, sizeof(rss_key));
   1669 	}
   1670 #endif
   1671 
   1672 	/* Now fill out hash function seeds */
   1673 	for (i = 0; i < 10; i++)
   1674 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1675 
   1676 	/* Set up the redirection table */
   1677 	for (i = 0, j = 0; i < 64; i++, j++) {
   1678 		if (j == adapter->num_queues)
   1679 			j = 0;
   1680 
   1681 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1682 			/*
   1683 			 * Fetch the RSS bucket id for the given indirection
   1684 			 * entry. Cap it at the number of configured buckets
   1685 			 * (which is num_queues.)
   1686 			 */
   1687 			queue_id = rss_get_indirection_to_bucket(i);
   1688 			queue_id = queue_id % adapter->num_queues;
   1689 		} else
   1690 			queue_id = j;
   1691 
   1692 		/*
   1693 		 * The low 8 bits are for hash value (n+0);
   1694 		 * The next 8 bits are for hash value (n+1), etc.
   1695 		 */
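         		/*
         		 * Each 32-bit VFRETA register packs four 8-bit entries;
         		 * the shift-then-OR below fills it least-significant byte
         		 * first.  For example, with two queues the first register
         		 * written is 0x01000100 (queues 0,1,0,1 for entries 0-3).
         		 */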
   1696 		reta >>= 8;
   1697 		reta |= ((uint32_t)queue_id) << 24;
   1698 		if ((i & 3) == 3) {
   1699 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1700 			reta = 0;
   1701 		}
   1702 	}
   1703 
   1704 	/* Perform hash on these packet types */
   1705 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1706 		rss_hash_config = rss_gethashconfig();
   1707 	else {
   1708 		/*
   1709 		 * Disable UDP - IP fragments aren't currently being handled
   1710 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1711 		 * traffic.
   1712 		 */
   1713 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1714 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1715 		                | RSS_HASHTYPE_RSS_IPV6
   1716 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1717 	}
   1718 
   1719 	mrqc = IXGBE_MRQC_RSSEN;
   1720 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1721 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1722 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1723 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1724 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1725 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1726 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1727 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1728 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1729 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1730 		    __func__);
   1731 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1732 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1733 		    __func__);
   1734 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1735 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1736 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1737 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1738 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1739 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1740 		    __func__);
   1741 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1742 } /* ixv_initialize_rss_mapping */
   1743 
   1744 
   1745 /************************************************************************
   1746  * ixv_initialize_receive_units - Setup receive registers and features.
   1747  ************************************************************************/
   1748 static void
   1749 ixv_initialize_receive_units(struct adapter *adapter)
   1750 {
   1751 	struct	rx_ring	*rxr = adapter->rx_rings;
   1752 	struct ixgbe_hw	*hw = &adapter->hw;
   1753 	struct ifnet	*ifp = adapter->ifp;
   1754 	u32		bufsz, rxcsum, psrtype;
   1755 
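         	/*
         	 * SRRCTL's BSIZEPKT field is in 1 KB units
         	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so 4096 >> 10 = 4
         	 * selects 4 KB receive buffers for jumbo MTUs and
         	 * 2048 >> 10 = 2 selects 2 KB buffers otherwise.
         	 */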
   1756 	if (ifp->if_mtu > ETHERMTU)
   1757 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1758 	else
   1759 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1760 
   1761 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1762 	        | IXGBE_PSRTYPE_UDPHDR
   1763 	        | IXGBE_PSRTYPE_IPV4HDR
   1764 	        | IXGBE_PSRTYPE_IPV6HDR
   1765 	        | IXGBE_PSRTYPE_L2HDR;
   1766 
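         	/*
         	 * Bits 30:29 of PSRTYPE appear to encode the number of RSS
         	 * queues per pool as a power of two; setting bit 29 tells
         	 * the hardware to spread received packets over more than
         	 * one queue.
         	 */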
   1767 	if (adapter->num_queues > 1)
   1768 		psrtype |= 1 << 29;
   1769 
   1770 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1771 
   1772 	/* Tell PF our max_frame size */
   1773 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1774 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1775 	}
   1776 
   1777 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1778 		u64 rdba = rxr->rxdma.dma_paddr;
   1779 		u32 reg, rxdctl;
   1780 		int j = rxr->me;
   1781 
   1782 		/* Disable the queue */
   1783 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
   1784 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1785 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
   1786 		for (int k = 0; k < 10; k++) {
   1787 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
   1788 			    IXGBE_RXDCTL_ENABLE)
   1789 				msec_delay(1);
   1790 			else
   1791 				break;
   1792 		}
   1793 		wmb();
   1794 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1795 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
   1796 		    (rdba & 0x00000000ffffffffULL));
   1797 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
   1798 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
   1799 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1800 
   1801 		/* Reset the ring indices */
   1802 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1803 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1804 
   1805 		/* Set up the SRRCTL register */
   1806 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
   1807 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1808 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1809 		reg |= bufsz;
   1810 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1811 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
   1812 
   1813 		/* Capture Rx Tail index */
   1814 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1815 
   1816 		/* Do the queue enabling last */
   1817 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1818 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
   1819 		for (int k = 0; k < 10; k++) {
   1820 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
   1821 			    IXGBE_RXDCTL_ENABLE)
   1822 				break;
   1823 			msec_delay(1);
   1824 		}
   1825 		wmb();
   1826 
   1827 		/* Set the Tail Pointer */
   1828 #ifdef DEV_NETMAP
   1829 		/*
   1830 		 * In netmap mode, we must preserve the buffers made
   1831 		 * available to userspace before the if_init()
   1832 		 * (this is true by default on the TX side, because
   1833 		 * init makes all buffers available to userspace).
   1834 		 *
   1835 		 * netmap_reset() and the device specific routines
   1836 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1837 		 * buffers at the end of the NIC ring, so here we
   1838 		 * must set the RDT (tail) register to make sure
   1839 		 * they are not overwritten.
   1840 		 *
   1841 		 * In this driver the NIC ring starts at RDH = 0,
   1842 		 * RDT points to the last slot available for reception (?),
   1843 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1844 		 */
   1845 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1846 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1847 			struct netmap_adapter *na = NA(adapter->ifp);
   1848 			struct netmap_kring *kring = &na->rx_rings[i];
   1849 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1850 
   1851 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1852 		} else
   1853 #endif /* DEV_NETMAP */
   1854 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1855 			    adapter->num_rx_desc - 1);
   1856 	}
   1857 
   1858 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1859 
   1860 	ixv_initialize_rss_mapping(adapter);
   1861 
   1862 	if (adapter->num_queues > 1) {
   1863 		/* RSS and RX IPP Checksum are mutually exclusive */
   1864 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1865 	}
   1866 
   1867 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1868 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1869 
   1870 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1871 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1872 
   1873 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1874 } /* ixv_initialize_receive_units */
   1875 
   1876 /************************************************************************
   1877  * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
   1878  *
   1879  *   Retrieves the TDH value from the hardware
   1880  ************************************************************************/
   1881 static int
   1882 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
   1883 {
   1884 	struct sysctlnode node = *rnode;
   1885 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1886 	uint32_t val;
   1887 
   1888 	if (!txr)
   1889 		return (0);
   1890 
   1891 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
   1892 	node.sysctl_data = &val;
   1893 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1894 } /* ixv_sysctl_tdh_handler */
   1895 
   1896 /************************************************************************
    1897  * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   1898  *
   1899  *   Retrieves the TDT value from the hardware
   1900  ************************************************************************/
   1901 static int
   1902 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
   1903 {
   1904 	struct sysctlnode node = *rnode;
   1905 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1906 	uint32_t val;
   1907 
   1908 	if (!txr)
   1909 		return (0);
   1910 
   1911 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
   1912 	node.sysctl_data = &val;
   1913 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1914 } /* ixv_sysctl_tdt_handler */
   1915 
   1916 /************************************************************************
   1917  * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
   1918  *
   1919  *   Retrieves the RDH value from the hardware
   1920  ************************************************************************/
   1921 static int
   1922 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
   1923 {
   1924 	struct sysctlnode node = *rnode;
   1925 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1926 	uint32_t val;
   1927 
   1928 	if (!rxr)
   1929 		return (0);
   1930 
   1931 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
   1932 	node.sysctl_data = &val;
   1933 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1934 } /* ixv_sysctl_rdh_handler */
   1935 
   1936 /************************************************************************
   1937  * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
   1938  *
   1939  *   Retrieves the RDT value from the hardware
   1940  ************************************************************************/
   1941 static int
   1942 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
   1943 {
   1944 	struct sysctlnode node = *rnode;
   1945 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1946 	uint32_t val;
   1947 
   1948 	if (!rxr)
   1949 		return (0);
   1950 
   1951 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
   1952 	node.sysctl_data = &val;
   1953 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1954 } /* ixv_sysctl_rdt_handler */
   1955 
   1956 /************************************************************************
   1957  * ixv_setup_vlan_support
   1958  ************************************************************************/
   1959 static void
   1960 ixv_setup_vlan_support(struct adapter *adapter)
   1961 {
   1962 	struct ethercom *ec = &adapter->osdep.ec;
   1963 	struct ixgbe_hw *hw = &adapter->hw;
   1964 	struct rx_ring  *rxr;
   1965 	u32		ctrl, vid, vfta, retry;
   1966 
    1967 	/*
    1968 	 * We get here through init_locked, i.e. after a soft
    1969 	 * reset that has already cleared the VFTA and other
    1970 	 * state, so if no VLANs have been registered there is
    1971 	 * nothing to do.
    1972 	 */
   1973 	if (!VLAN_ATTACHED(ec))
   1974 		return;
   1975 
   1976 	/* Enable the queues */
   1977 	for (int i = 0; i < adapter->num_queues; i++) {
   1978 		rxr = &adapter->rx_rings[i];
   1979 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1980 		ctrl |= IXGBE_RXDCTL_VME;
   1981 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1982 		/*
   1983 		 * Let Rx path know that it needs to store VLAN tag
   1984 		 * as part of extra mbuf info.
   1985 		 */
   1986 		rxr->vtag_strip = TRUE;
   1987 	}
   1988 
   1989 #if 1
   1990 	/* XXX dirty hack. Enable all VIDs */
   1991 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
    1992 		adapter->shadow_vfta[i] = 0xffffffff;
   1993 #endif
   1994 	/*
    1995 	 * A soft reset zeroes out the VFTA, so
   1996 	 * we need to repopulate it now.
   1997 	 */
   1998 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   1999 		if (adapter->shadow_vfta[i] == 0)
   2000 			continue;
   2001 		vfta = adapter->shadow_vfta[i];
   2002 		/*
    2003 		 * Reconstruct the VLAN IDs from the bits
    2004 		 * set in each 32-bit word of the
    2005 		 * shadow VFTA array.
   2006 		 */
   2007 		for (int j = 0; j < 32; j++) {
   2008 			retry = 0;
   2009 			if ((vfta & (1 << j)) == 0)
   2010 				continue;
   2011 			vid = (i * 32) + j;
   2012 			/* Call the shared code mailbox routine */
   2013 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   2014 				if (++retry > 5)
   2015 					break;
   2016 			}
   2017 		}
   2018 	}
   2019 } /* ixv_setup_vlan_support */
   2020 
   2021 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2022 /************************************************************************
   2023  * ixv_register_vlan
   2024  *
   2025  *   Run via a vlan config EVENT, it enables us to use the
   2026  *   HW Filter table since we can get the vlan id. This just
   2027  *   creates the entry in the soft version of the VFTA, init
   2028  *   will repopulate the real table.
   2029  ************************************************************************/
   2030 static void
   2031 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2032 {
   2033 	struct adapter	*adapter = ifp->if_softc;
   2034 	u16		index, bit;
   2035 
   2036 	if (ifp->if_softc != arg) /* Not our event */
   2037 		return;
   2038 
   2039 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   2040 		return;
   2041 
   2042 	IXGBE_CORE_LOCK(adapter);
   2043 	index = (vtag >> 5) & 0x7F;
   2044 	bit = vtag & 0x1F;
   2045 	adapter->shadow_vfta[index] |= (1 << bit);
   2046 	/* Re-init to load the changes */
   2047 	ixv_init_locked(adapter);
   2048 	IXGBE_CORE_UNLOCK(adapter);
   2049 } /* ixv_register_vlan */
   2050 
   2051 /************************************************************************
   2052  * ixv_unregister_vlan
   2053  *
   2054  *   Run via a vlan unconfig EVENT, remove our entry
   2055  *   in the soft vfta.
   2056  ************************************************************************/
   2057 static void
   2058 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2059 {
   2060 	struct adapter	*adapter = ifp->if_softc;
   2061 	u16		index, bit;
   2062 
   2063 	if (ifp->if_softc !=  arg)
   2064 		return;
   2065 
   2066 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   2067 		return;
   2068 
   2069 	IXGBE_CORE_LOCK(adapter);
   2070 	index = (vtag >> 5) & 0x7F;
   2071 	bit = vtag & 0x1F;
   2072 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2073 	/* Re-init to load the changes */
   2074 	ixv_init_locked(adapter);
   2075 	IXGBE_CORE_UNLOCK(adapter);
   2076 } /* ixv_unregister_vlan */
   2077 #endif
   2078 
   2079 /************************************************************************
   2080  * ixv_enable_intr
   2081  ************************************************************************/
   2082 static void
   2083 ixv_enable_intr(struct adapter *adapter)
   2084 {
   2085 	struct ixgbe_hw *hw = &adapter->hw;
   2086 	struct ix_queue *que = adapter->queues;
   2087 	u32             mask;
   2088 	int i;
   2089 
   2090 	/* For VTEIAC */
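         	/*
         	 * Build a mask with one bit per MSI-X vector (the mailbox
         	 * vector plus one per queue), enable auto-clear for them in
         	 * VTEIAC, then unmask them via VTEIMS below.
         	 */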
   2091 	mask = (1 << adapter->vector);
   2092 	for (i = 0; i < adapter->num_queues; i++, que++)
   2093 		mask |= (1 << que->msix);
   2094 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2095 
   2096 	/* For VTEIMS */
   2097 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
   2098 	que = adapter->queues;
   2099 	for (i = 0; i < adapter->num_queues; i++, que++)
   2100 		ixv_enable_queue(adapter, que->msix);
   2101 
   2102 	IXGBE_WRITE_FLUSH(hw);
   2103 } /* ixv_enable_intr */
   2104 
   2105 /************************************************************************
   2106  * ixv_disable_intr
   2107  ************************************************************************/
   2108 static void
   2109 ixv_disable_intr(struct adapter *adapter)
   2110 {
   2111 	struct ix_queue	*que = adapter->queues;
   2112 
   2113 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2114 
   2115 	/* disable interrupts other than queues */
   2116 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
   2117 
   2118 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2119 		ixv_disable_queue(adapter, que->msix);
   2120 
   2121 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2122 } /* ixv_disable_intr */
   2123 
   2124 /************************************************************************
   2125  * ixv_set_ivar
   2126  *
   2127  *   Setup the correct IVAR register for a particular MSI-X interrupt
   2128  *    - entry is the register array entry
   2129  *    - vector is the MSI-X vector for this queue
   2130  *    - type is RX/TX/MISC
   2131  ************************************************************************/
   2132 static void
   2133 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2134 {
   2135 	struct ixgbe_hw *hw = &adapter->hw;
   2136 	u32             ivar, index;
   2137 
   2138 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2139 
   2140 	if (type == -1) { /* MISC IVAR */
   2141 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2142 		ivar &= ~0xFF;
   2143 		ivar |= vector;
   2144 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2145 	} else {          /* RX/TX IVARS */
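         		/*
         		 * Each VTIVAR register holds four 8-bit entries covering
         		 * two queues: the even queue's RX vector in bits 7:0 and
         		 * TX vector in bits 15:8, the odd queue's in bits 23:16
         		 * and 31:24.  'index' is the bit offset of the entry being
         		 * updated; the register itself is selected by entry >> 1.
         		 */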
   2146 		index = (16 * (entry & 1)) + (8 * type);
   2147 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2148 		ivar &= ~(0xFF << index);
   2149 		ivar |= (vector << index);
   2150 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2151 	}
   2152 } /* ixv_set_ivar */
   2153 
   2154 /************************************************************************
   2155  * ixv_configure_ivars
   2156  ************************************************************************/
   2157 static void
   2158 ixv_configure_ivars(struct adapter *adapter)
   2159 {
   2160 	struct ix_queue *que = adapter->queues;
   2161 
   2162 	/* XXX We should sync EITR value calculation with ixgbe.c? */
   2163 
   2164 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2165 		/* First the RX queue entry */
   2166 		ixv_set_ivar(adapter, i, que->msix, 0);
   2167 		/* ... and the TX */
   2168 		ixv_set_ivar(adapter, i, que->msix, 1);
   2169 		/* Set an initial value in EITR */
   2170 		ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
   2171 	}
   2172 
   2173 	/* For the mailbox interrupt */
   2174 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2175 } /* ixv_configure_ivars */
   2176 
   2177 
   2178 /************************************************************************
   2179  * ixv_save_stats
   2180  *
   2181  *   The VF stats registers never have a truly virgin
   2182  *   starting point, so this routine tries to make an
   2183  *   artificial one, marking ground zero on attach as
   2184  *   it were.
   2185  ************************************************************************/
   2186 static void
   2187 ixv_save_stats(struct adapter *adapter)
   2188 {
   2189 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2190 
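         	/*
         	 * Accumulate whatever the counters have advanced since the
         	 * last base snapshot, so the totals survive a VF reset.
         	 */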
   2191 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2192 		stats->saved_reset_vfgprc +=
   2193 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2194 		stats->saved_reset_vfgptc +=
   2195 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2196 		stats->saved_reset_vfgorc +=
   2197 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2198 		stats->saved_reset_vfgotc +=
   2199 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2200 		stats->saved_reset_vfmprc +=
   2201 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2202 	}
   2203 } /* ixv_save_stats */
   2204 
   2205 /************************************************************************
   2206  * ixv_init_stats
   2207  ************************************************************************/
   2208 static void
   2209 ixv_init_stats(struct adapter *adapter)
   2210 {
   2211 	struct ixgbe_hw *hw = &adapter->hw;
   2212 
   2213 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2214 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2215 	adapter->stats.vf.last_vfgorc |=
   2216 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2217 
   2218 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2219 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2220 	adapter->stats.vf.last_vfgotc |=
   2221 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2222 
   2223 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2224 
   2225 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2226 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2227 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2228 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2229 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2230 } /* ixv_init_stats */
   2231 
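         /*
          * The VF statistics registers are free-running counters that the
          * driver cannot reset: UPDATE_STAT_32 folds a 32-bit register and
          * UPDATE_STAT_36 a 36-bit register pair into a 64-bit event count,
          * treating "current < last" as a wrap and adding 2^32 (or 2^36)
          * before splicing the new low-order bits back in.
          */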
   2232 #define UPDATE_STAT_32(reg, last, count)		\
   2233 {                                                       \
   2234 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   2235 	if (current < (last))				\
   2236 		count.ev_count += 0x100000000LL;	\
   2237 	(last) = current;				\
   2238 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2239 	count.ev_count |= current;			\
   2240 }
   2241 
   2242 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   2243 {                                                       \
   2244 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   2245 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   2246 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   2247 	if (current < (last))				\
   2248 		count.ev_count += 0x1000000000LL;	\
   2249 	(last) = current;				\
   2250 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2251 	count.ev_count |= current;			\
   2252 }
   2253 
   2254 /************************************************************************
   2255  * ixv_update_stats - Update the board statistics counters.
   2256  ************************************************************************/
   2257 void
   2258 ixv_update_stats(struct adapter *adapter)
   2259 {
   2260 	struct ixgbe_hw *hw = &adapter->hw;
   2261 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2262 
   2263 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   2264 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   2265 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   2266 	    stats->vfgorc);
   2267 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   2268 	    stats->vfgotc);
   2269 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2270 
   2271 	/* Fill out the OS statistics structure */
   2272 	/*
   2273 	 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
   2274 	 * adapter->stats counters. It's required to make ifconfig -z
   2275 	 * (SOICZIFDATA) work.
   2276 	 */
   2277 } /* ixv_update_stats */
   2278 
   2279 /************************************************************************
   2280  * ixv_sysctl_interrupt_rate_handler
   2281  ************************************************************************/
   2282 static int
   2283 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2284 {
   2285 	struct sysctlnode node = *rnode;
   2286 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2287 	struct adapter  *adapter = que->adapter;
   2288 	uint32_t reg, usec, rate;
   2289 	int error;
   2290 
   2291 	if (que == NULL)
   2292 		return 0;
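         	/*
         	 * The VTEITR interval lives in bits 11:3 and is in units of
         	 * 2us, so the interrupt rate is 1e6 / (2 * field), i.e.
         	 * 500000 / field irqs/s.  Converting back, field << 3 equals
         	 * 4000000 / rate, which is what gets written further down.
         	 */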
   2293 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
   2294 	usec = ((reg & 0x0FF8) >> 3);
   2295 	if (usec > 0)
   2296 		rate = 500000 / usec;
   2297 	else
   2298 		rate = 0;
   2299 	node.sysctl_data = &rate;
   2300 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2301 	if (error || newp == NULL)
   2302 		return error;
   2303 	reg &= ~0xfff; /* default, no limitation */
   2304 	if (rate > 0 && rate < 500000) {
   2305 		if (rate < 1000)
   2306 			rate = 1000;
   2307 		reg |= ((4000000/rate) & 0xff8);
   2308 		/*
    2309 		 * When RSC is used, the ITR interval must be larger than
    2310 		 * RSC_DELAY. Currently we use 2us for RSC_DELAY.
    2311 		 * The minimum value is always greater than 2us on 100M (and
    2312 		 * probably 10M, though not documented), but not on 1G and higher.
   2313 		 */
   2314 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2315 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2316 			if ((adapter->num_queues > 1)
   2317 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   2318 				return EINVAL;
   2319 		}
   2320 		ixv_max_interrupt_rate = rate;
   2321 	} else
   2322 		ixv_max_interrupt_rate = 0;
   2323 	ixv_eitr_write(adapter, que->msix, reg);
   2324 
   2325 	return (0);
   2326 } /* ixv_sysctl_interrupt_rate_handler */
   2327 
   2328 const struct sysctlnode *
   2329 ixv_sysctl_instance(struct adapter *adapter)
   2330 {
   2331 	const char *dvname;
   2332 	struct sysctllog **log;
   2333 	int rc;
   2334 	const struct sysctlnode *rnode;
   2335 
   2336 	log = &adapter->sysctllog;
   2337 	dvname = device_xname(adapter->dev);
   2338 
   2339 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2340 	    0, CTLTYPE_NODE, dvname,
   2341 	    SYSCTL_DESCR("ixv information and settings"),
   2342 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2343 		goto err;
   2344 
   2345 	return rnode;
   2346 err:
   2347 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2348 	return NULL;
   2349 }
   2350 
   2351 static void
   2352 ixv_add_device_sysctls(struct adapter *adapter)
   2353 {
   2354 	struct sysctllog **log;
   2355 	const struct sysctlnode *rnode, *cnode;
   2356 	device_t dev;
   2357 
   2358 	dev = adapter->dev;
   2359 	log = &adapter->sysctllog;
   2360 
   2361 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2362 		aprint_error_dev(dev, "could not create sysctl root\n");
   2363 		return;
   2364 	}
   2365 
   2366 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2367 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2368 	    "debug", SYSCTL_DESCR("Debug Info"),
   2369 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2370 		aprint_error_dev(dev, "could not create sysctl\n");
   2371 
   2372 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2373 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2374 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2375 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2376 		aprint_error_dev(dev, "could not create sysctl\n");
   2377 
   2378 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2379 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2380 	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   2381 		NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   2382 		aprint_error_dev(dev, "could not create sysctl\n");
   2383 }
   2384 
   2385 /************************************************************************
   2386  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2387  ************************************************************************/
   2388 static void
   2389 ixv_add_stats_sysctls(struct adapter *adapter)
   2390 {
   2391 	device_t                dev = adapter->dev;
   2392 	struct tx_ring          *txr = adapter->tx_rings;
   2393 	struct rx_ring          *rxr = adapter->rx_rings;
   2394 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2395 	struct ixgbe_hw *hw = &adapter->hw;
   2396 	const struct sysctlnode *rnode, *cnode;
   2397 	struct sysctllog **log = &adapter->sysctllog;
   2398 	const char *xname = device_xname(dev);
   2399 
   2400 	/* Driver Statistics */
   2401 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2402 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2403 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2404 	    NULL, xname, "m_defrag() failed");
   2405 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2406 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2407 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2408 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2409 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2410 	    NULL, xname, "Driver tx dma hard fail other");
   2411 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2412 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2413 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2414 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2415 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2416 	    NULL, xname, "Watchdog timeouts");
   2417 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2418 	    NULL, xname, "TSO errors");
   2419 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2420 	    NULL, xname, "Link MSI-X IRQ Handled");
   2421 
   2422 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2423 		snprintf(adapter->queues[i].evnamebuf,
   2424 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2425 		    xname, i);
   2426 		snprintf(adapter->queues[i].namebuf,
   2427 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2428 
   2429 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2430 			aprint_error_dev(dev, "could not create sysctl root\n");
   2431 			break;
   2432 		}
   2433 
   2434 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2435 		    0, CTLTYPE_NODE,
   2436 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2437 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2438 			break;
   2439 
   2440 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2441 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2442 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2443 		    ixv_sysctl_interrupt_rate_handler, 0,
   2444 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2445 			break;
   2446 
   2447 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2448 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2449 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2450 		    ixv_sysctl_tdh_handler, 0, (void *)txr,
   2451 		    0, CTL_CREATE, CTL_EOL) != 0)
   2452 			break;
   2453 
   2454 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2455 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2456 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2457 		    ixv_sysctl_tdt_handler, 0, (void *)txr,
   2458 		    0, CTL_CREATE, CTL_EOL) != 0)
   2459 			break;
   2460 
   2461 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2462 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2463 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   2464 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   2465 		    "Handled queue in softint");
   2466 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   2467 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   2468 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2469 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2470 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2471 		    NULL, adapter->queues[i].evnamebuf,
   2472 		    "Queue No Descriptor Available");
   2473 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2474 		    NULL, adapter->queues[i].evnamebuf,
   2475 		    "Queue Packets Transmitted");
   2476 #ifndef IXGBE_LEGACY_TX
   2477 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2478 		    NULL, adapter->queues[i].evnamebuf,
   2479 		    "Packets dropped in pcq");
   2480 #endif
   2481 
   2482 #ifdef LRO
   2483 		struct lro_ctrl *lro = &rxr->lro;
   2484 #endif /* LRO */
   2485 
   2486 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2487 		    CTLFLAG_READONLY,
   2488 		    CTLTYPE_INT,
   2489 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2490 		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2491 		    CTL_CREATE, CTL_EOL) != 0)
   2492 			break;
   2493 
   2494 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2495 		    CTLFLAG_READONLY,
   2496 		    CTLTYPE_INT,
   2497 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2498 		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2499 		    CTL_CREATE, CTL_EOL) != 0)
   2500 			break;
   2501 
   2502 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2503 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2504 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2505 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2506 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2507 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2508 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2509 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2510 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2511 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2512 #ifdef LRO
   2513 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2514 				CTLFLAG_RD, &lro->lro_queued, 0,
   2515 				"LRO Queued");
   2516 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2517 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2518 				"LRO Flushed");
   2519 #endif /* LRO */
   2520 	}
   2521 
   2522 	/* MAC stats get their own sub node */
   2523 
   2524 	snprintf(stats->namebuf,
   2525 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2526 
   2527 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2528 	    stats->namebuf, "rx csum offload - IP");
   2529 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2530 	    stats->namebuf, "rx csum offload - L4");
   2531 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2532 	    stats->namebuf, "rx csum offload - IP bad");
   2533 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2534 	    stats->namebuf, "rx csum offload - L4 bad");
   2535 
   2536 	/* Packet Reception Stats */
   2537 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2538 	    xname, "Good Packets Received");
   2539 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2540 	    xname, "Good Octets Received");
   2541 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2542 	    xname, "Multicast Packets Received");
   2543 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2544 	    xname, "Good Packets Transmitted");
   2545 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2546 	    xname, "Good Octets Transmitted");
   2547 
   2548 	/* Mailbox Stats */
   2549 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2550 	    xname, "message TXs");
   2551 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2552 	    xname, "message RXs");
   2553 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2554 	    xname, "ACKs");
   2555 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2556 	    xname, "REQs");
   2557 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2558 	    xname, "RSTs");
   2559 
   2560 } /* ixv_add_stats_sysctls */
   2561 
   2562 /************************************************************************
   2563  * ixv_set_sysctl_value
   2564  ************************************************************************/
   2565 static void
   2566 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2567 	const char *description, int *limit, int value)
   2568 {
   2569 	device_t dev =  adapter->dev;
   2570 	struct sysctllog **log;
   2571 	const struct sysctlnode *rnode, *cnode;
   2572 
   2573 	log = &adapter->sysctllog;
   2574 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2575 		aprint_error_dev(dev, "could not create sysctl root\n");
   2576 		return;
   2577 	}
   2578 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2579 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2580 	    name, SYSCTL_DESCR(description),
   2581 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2582 		aprint_error_dev(dev, "could not create sysctl\n");
   2583 	*limit = value;
   2584 } /* ixv_set_sysctl_value */
   2585 
   2586 /************************************************************************
   2587  * ixv_print_debug_info
   2588  *
    2589  *   Called only when the 'debug' sysctl is set (see ixv_sysctl_debug).
   2590  *   Provides a way to take a look at important statistics
   2591  *   maintained by the driver and hardware.
   2592  ************************************************************************/
   2593 static void
   2594 ixv_print_debug_info(struct adapter *adapter)
   2595 {
    2596 	device_t        dev = adapter->dev;
    2597 	struct ixgbe_hw *hw = &adapter->hw;
    2598 	struct ix_queue *que = adapter->queues;
    2599 	struct rx_ring  *rxr;
    2600 	struct tx_ring  *txr;
    2601 #ifdef LRO
    2602 	struct lro_ctrl *lro;
    2603 #endif /* LRO */
   2604 
   2605 	device_printf(dev, "Error Byte Count = %u \n",
   2606 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2607 
   2608 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2609 		txr = que->txr;
   2610 		rxr = que->rxr;
   2611 #ifdef LRO
   2612 		lro = &rxr->lro;
   2613 #endif /* LRO */
   2614 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2615 		    que->msix, (long)que->irqs.ev_count);
   2616 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2617 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2618 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2619 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2620 #ifdef LRO
   2621 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2622 		    rxr->me, (long long)lro->lro_queued);
   2623 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2624 		    rxr->me, (long long)lro->lro_flushed);
   2625 #endif /* LRO */
   2626 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2627 		    txr->me, (long)txr->total_packets.ev_count);
   2628 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2629 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2630 	}
   2631 
   2632 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2633 	    (long)adapter->link_irq.ev_count);
   2634 } /* ixv_print_debug_info */
   2635 
   2636 /************************************************************************
   2637  * ixv_sysctl_debug
   2638  ************************************************************************/
   2639 static int
   2640 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2641 {
   2642 	struct sysctlnode node = *rnode;
   2643 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   2644 	int            error, result;
   2645 
   2646 	node.sysctl_data = &result;
   2647 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2648 
   2649 	if (error || newp == NULL)
   2650 		return error;
   2651 
   2652 	if (result == 1)
   2653 		ixv_print_debug_info(adapter);
   2654 
   2655 	return 0;
   2656 } /* ixv_sysctl_debug */
   2657 
   2658 /************************************************************************
   2659  * ixv_init_device_features
   2660  ************************************************************************/
   2661 static void
   2662 ixv_init_device_features(struct adapter *adapter)
   2663 {
   2664 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2665 	                  | IXGBE_FEATURE_VF
   2666 	                  | IXGBE_FEATURE_RSS
   2667 	                  | IXGBE_FEATURE_LEGACY_TX;
   2668 
   2669 	/* A tad short on feature flags for VFs, atm. */
   2670 	switch (adapter->hw.mac.type) {
   2671 	case ixgbe_mac_82599_vf:
   2672 		break;
   2673 	case ixgbe_mac_X540_vf:
   2674 		break;
   2675 	case ixgbe_mac_X550_vf:
   2676 	case ixgbe_mac_X550EM_x_vf:
   2677 	case ixgbe_mac_X550EM_a_vf:
   2678 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2679 		break;
   2680 	default:
   2681 		break;
   2682 	}
   2683 
   2684 	/* Enabled by default... */
   2685 	/* Is a virtual function (VF) */
   2686 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2687 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2688 	/* Netmap */
   2689 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2690 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2691 	/* Receive-Side Scaling (RSS) */
   2692 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2693 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2694 	/* Needs advanced context descriptor regardless of offloads req'd */
   2695 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2696 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2697 
   2698 	/* Enabled via sysctl... */
   2699 	/* Legacy (single queue) transmit */
   2700 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2701 	    ixv_enable_legacy_tx)
   2702 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2703 } /* ixv_init_device_features */
   2704 
   2705 /************************************************************************
   2706  * ixv_shutdown - Shutdown entry point
   2707  ************************************************************************/
   2708 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2709 static int
   2710 ixv_shutdown(device_t dev)
   2711 {
   2712 	struct adapter *adapter = device_private(dev);
   2713 	IXGBE_CORE_LOCK(adapter);
   2714 	ixv_stop(adapter);
   2715 	IXGBE_CORE_UNLOCK(adapter);
   2716 
   2717 	return (0);
   2718 } /* ixv_shutdown */
   2719 #endif
   2720 
   2721 static int
   2722 ixv_ifflags_cb(struct ethercom *ec)
   2723 {
   2724 	struct ifnet *ifp = &ec->ec_if;
   2725 	struct adapter *adapter = ifp->if_softc;
   2726 	int change, rc = 0;
   2727 
   2728 	IXGBE_CORE_LOCK(adapter);
   2729 
   2730 	change = ifp->if_flags ^ adapter->if_flags;
   2731 	if (change != 0)
   2732 		adapter->if_flags = ifp->if_flags;
   2733 
   2734 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2735 		rc = ENETRESET;
   2736 
   2737 	/* Set up VLAN support and filter */
   2738 	ixv_setup_vlan_support(adapter);
   2739 
   2740 	IXGBE_CORE_UNLOCK(adapter);
   2741 
   2742 	return rc;
   2743 }
   2744 
   2745 
   2746 /************************************************************************
   2747  * ixv_ioctl - Ioctl entry point
   2748  *
   2749  *   Called when the user wants to configure the interface.
   2750  *
   2751  *   return 0 on success, positive on failure
   2752  ************************************************************************/
   2753 static int
   2754 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2755 {
   2756 	struct adapter	*adapter = ifp->if_softc;
   2757 	struct ifcapreq *ifcr = data;
   2758 	struct ifreq	*ifr = data;
   2759 	int             error = 0;
   2760 	int l4csum_en;
   2761 	const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|
   2762 	     IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx;
   2763 
   2764 	switch (command) {
   2765 	case SIOCSIFFLAGS:
   2766 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2767 		break;
   2768 	case SIOCADDMULTI:
   2769 	case SIOCDELMULTI:
   2770 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2771 		break;
   2772 	case SIOCSIFMEDIA:
   2773 	case SIOCGIFMEDIA:
   2774 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2775 		break;
   2776 	case SIOCSIFCAP:
   2777 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2778 		break;
   2779 	case SIOCSIFMTU:
   2780 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2781 		break;
   2782 	default:
   2783 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2784 		break;
   2785 	}
   2786 
   2787 	switch (command) {
   2788 	case SIOCSIFMEDIA:
   2789 	case SIOCGIFMEDIA:
   2790 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2791 	case SIOCSIFCAP:
   2792 		/* Layer-4 Rx checksum offload has to be turned on and
   2793 		 * off as a unit.
   2794 		 */
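         		/*
         		 * i.e. the TCPv4/UDPv4/TCPv6/UDPv6 Rx checksum bits must
         		 * all be set or all be clear; a partial selection such as
         		 * enabling only IFCAP_CSUM_TCPv4_Rx is rejected below.
         		 */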
   2795 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2796 		if (l4csum_en != l4csum && l4csum_en != 0)
   2797 			return EINVAL;
   2798 		/*FALLTHROUGH*/
   2799 	case SIOCADDMULTI:
   2800 	case SIOCDELMULTI:
   2801 	case SIOCSIFFLAGS:
   2802 	case SIOCSIFMTU:
   2803 	default:
   2804 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2805 			return error;
   2806 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2807 			;
   2808 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2809 			IXGBE_CORE_LOCK(adapter);
   2810 			ixv_init_locked(adapter);
   2811 			IXGBE_CORE_UNLOCK(adapter);
   2812 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2813 			/*
   2814 			 * Multicast list has changed; set the hardware filter
   2815 			 * accordingly.
   2816 			 */
   2817 			IXGBE_CORE_LOCK(adapter);
   2818 			ixv_disable_intr(adapter);
   2819 			ixv_set_multi(adapter);
   2820 			ixv_enable_intr(adapter);
   2821 			IXGBE_CORE_UNLOCK(adapter);
   2822 		}
   2823 		return 0;
   2824 	}
   2825 } /* ixv_ioctl */
   2826 
   2827 /************************************************************************
   2828  * ixv_init
   2829  ************************************************************************/
   2830 static int
   2831 ixv_init(struct ifnet *ifp)
   2832 {
   2833 	struct adapter *adapter = ifp->if_softc;
   2834 
   2835 	IXGBE_CORE_LOCK(adapter);
   2836 	ixv_init_locked(adapter);
   2837 	IXGBE_CORE_UNLOCK(adapter);
   2838 
   2839 	return 0;
   2840 } /* ixv_init */
   2841 
   2842 /************************************************************************
   2843  * ixv_handle_que
   2844  ************************************************************************/
   2845 static void
   2846 ixv_handle_que(void *context)
   2847 {
   2848 	struct ix_queue *que = context;
   2849 	struct adapter  *adapter = que->adapter;
   2850 	struct tx_ring	*txr = que->txr;
   2851 	struct ifnet    *ifp = adapter->ifp;
   2852 	bool		more;
   2853 
   2854 	que->handleq.ev_count++;
   2855 
   2856 	if (ifp->if_flags & IFF_RUNNING) {
   2857 		more = ixgbe_rxeof(que);
   2858 		IXGBE_TX_LOCK(txr);
   2859 		more |= ixgbe_txeof(txr);
   2860 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2861 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2862 				ixgbe_mq_start_locked(ifp, txr);
   2863 		/* Only for queue 0 */
   2864 		/* NetBSD still needs this for CBQ */
   2865 		if ((&adapter->queues[0] == que)
   2866 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2867 			ixgbe_legacy_start_locked(ifp, txr);
   2868 		IXGBE_TX_UNLOCK(txr);
   2869 		if (more) {
   2870 			que->req.ev_count++;
   2871 			if (adapter->txrx_use_workqueue) {
   2872 				/*
   2873 				 * "enqueued flag" is not required here
   2874 				 * the same as ixg(4). See ixgbe_msix_que().
   2875 				 */
   2876 				workqueue_enqueue(adapter->que_wq,
   2877 				    &que->wq_cookie, curcpu());
   2878 			} else
   2879 				  softint_schedule(que->que_si);
   2880 			return;
   2881 		}
   2882 	}
   2883 
   2884 	/* Re-enable this interrupt */
   2885 	ixv_enable_queue(adapter, que->msix);
   2886 
   2887 	return;
   2888 } /* ixv_handle_que */
   2889 
   2890 /************************************************************************
   2891  * ixv_handle_que_work
   2892  ************************************************************************/
   2893 static void
   2894 ixv_handle_que_work(struct work *wk, void *context)
   2895 {
   2896 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   2897 
   2898 	/*
   2899 	 * "enqueued flag" is not required here the same as ixg(4).
   2900 	 * See ixgbe_msix_que().
   2901 	 */
   2902 	ixv_handle_que(que);
   2903 }
   2904 
   2905 /************************************************************************
   2906  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2907  ************************************************************************/
   2908 static int
   2909 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2910 {
   2911 	device_t	dev = adapter->dev;
   2912 	struct ix_queue *que = adapter->queues;
   2913 	struct tx_ring	*txr = adapter->tx_rings;
   2914 	int 		error, msix_ctrl, rid, vector = 0;
   2915 	pci_chipset_tag_t pc;
   2916 	pcitag_t	tag;
   2917 	char		intrbuf[PCI_INTRSTR_LEN];
   2918 	char		wqname[MAXCOMLEN];
   2919 	char		intr_xname[32];
   2920 	const char	*intrstr = NULL;
   2921 	kcpuset_t	*affinity;
   2922 	int		cpu_id = 0;
   2923 
   2924 	pc = adapter->osdep.pc;
   2925 	tag = adapter->osdep.tag;
   2926 
   2927 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2928 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2929 	    adapter->osdep.nintrs) != 0) {
   2930 		aprint_error_dev(dev,
   2931 		    "failed to allocate MSI-X interrupt\n");
   2932 		return (ENXIO);
   2933 	}
   2934 
   2935 	kcpuset_create(&affinity, false);
   2936 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2937 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2938 		    device_xname(dev), i);
   2939 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2940 		    sizeof(intrbuf));
   2941 #ifdef IXGBE_MPSAFE
   2942 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2943 		    true);
   2944 #endif
   2945 		/* Set the handler function */
   2946 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2947 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2948 		    intr_xname);
   2949 		if (que->res == NULL) {
   2950 			pci_intr_release(pc, adapter->osdep.intrs,
   2951 			    adapter->osdep.nintrs);
   2952 			aprint_error_dev(dev,
   2953 			    "Failed to register QUE handler\n");
   2954 			kcpuset_destroy(affinity);
   2955 			return (ENXIO);
   2956 		}
   2957 		que->msix = vector;
    2958 		adapter->active_queues |= ((u64)1 << que->msix);
   2959 
   2960 		cpu_id = i;
   2961 		/* Round-robin affinity */
   2962 		kcpuset_zero(affinity);
   2963 		kcpuset_set(affinity, cpu_id % ncpu);
   2964 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2965 		    NULL);
   2966 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2967 		    intrstr);
   2968 		if (error == 0)
   2969 			aprint_normal(", bound queue %d to cpu %d\n",
   2970 			    i, cpu_id % ncpu);
   2971 		else
   2972 			aprint_normal("\n");
   2973 
   2974 #ifndef IXGBE_LEGACY_TX
   2975 		txr->txr_si
   2976 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2977 			ixgbe_deferred_mq_start, txr);
   2978 #endif
   2979 		que->que_si
   2980 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2981 			ixv_handle_que, que);
   2982 		if (que->que_si == NULL) {
   2983 			aprint_error_dev(dev,
   2984 			    "could not establish software interrupt\n");
   2985 		}
   2986 	}
   2987 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   2988 	error = workqueue_create(&adapter->txr_wq, wqname,
   2989 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   2990 	    IXGBE_WORKQUEUE_FLAGS);
   2991 	if (error) {
   2992 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   2993 	}
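         	/*
         	 * Per-CPU counter used to avoid enqueueing the deferred Tx
         	 * work more than once per CPU.
         	 */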
   2994 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   2995 
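         	/*
         	 * Queue interrupts may be serviced either by the softints
         	 * established above or by this workqueue (via
         	 * ixv_handle_que_work()), depending on the driver's txrx
         	 * workqueue setting.
         	 */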
   2996 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   2997 	error = workqueue_create(&adapter->que_wq, wqname,
   2998 	    ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   2999 	    IXGBE_WORKQUEUE_FLAGS);
   3000 	if (error) {
   3001 		aprint_error_dev(dev,
   3002 		    "couldn't create workqueue\n");
   3003 	}
   3004 
   3005 	/* and Mailbox */
   3006 	cpu_id++;
   3007 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   3008 	adapter->vector = vector;
   3009 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   3010 	    sizeof(intrbuf));
   3011 #ifdef IXGBE_MPSAFE
   3012 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   3013 	    true);
   3014 #endif
   3015 	/* Set the mbx handler function */
   3016 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   3017 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   3018 	    intr_xname);
   3019 	if (adapter->osdep.ihs[vector] == NULL) {
   3020 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   3021 		kcpuset_destroy(affinity);
   3022 		return (ENXIO);
   3023 	}
   3024 	/* Round-robin affinity */
   3025 	kcpuset_zero(affinity);
   3026 	kcpuset_set(affinity, cpu_id % ncpu);
    3027 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
         	    NULL);
   3028 
   3029 	aprint_normal_dev(dev,
   3030 	    "for link, interrupting at %s", intrstr);
   3031 	if (error == 0)
   3032 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   3033 	else
   3034 		aprint_normal("\n");
   3035 
   3036 	/* Tasklets for Mailbox */
    3037 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   3038 	    ixv_handle_link, adapter);
   3039 	/*
    3040 	 * Due to a broken design, QEMU will fail to properly
   3041 	 * enable the guest for MSI-X unless the vectors in
   3042 	 * the table are all set up, so we must rewrite the
   3043 	 * ENABLE in the MSI-X control register again at this
   3044 	 * point to cause it to successfully initialize us.
   3045 	 */
   3046 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   3047 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   3048 		rid += PCI_MSIX_CTL;
   3049 		msix_ctrl = pci_conf_read(pc, tag, rid);
   3050 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   3051 		pci_conf_write(pc, tag, rid, msix_ctrl);
   3052 	}
   3053 
   3054 	kcpuset_destroy(affinity);
   3055 	return (0);
   3056 } /* ixv_allocate_msix */
   3057 
   3058 /************************************************************************
   3059  * ixv_configure_interrupts - Setup MSI-X resources
   3060  *
    3061  *   Note: The VF device MUST use MSI-X; there is no fallback.
   3062  ************************************************************************/
   3063 static int
   3064 ixv_configure_interrupts(struct adapter *adapter)
   3065 {
   3066 	device_t dev = adapter->dev;
   3067 	int want, queues, msgs;
   3068 
   3069 	/* Must have at least 2 MSI-X vectors */
   3070 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   3071 	if (msgs < 2) {
   3072 		aprint_error_dev(dev, "MSIX config error\n");
   3073 		return (ENXIO);
   3074 	}
   3075 	msgs = MIN(msgs, IXG_MAX_NINTR);
   3076 
    3077 	/* Auto config: one queue per CPU, capped at available vectors - 1 */
   3078 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   3079 
   3080 	if (ixv_num_queues != 0)
   3081 		queues = ixv_num_queues;
    3082 	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
   3083 		queues = IXGBE_VF_MAX_TX_QUEUES;
   3084 
   3085 	/*
   3086 	 * Want vectors for the queues,
   3087 	 * plus an additional for mailbox.
   3088 	 */
   3089 	want = queues + 1;
   3090 	if (msgs >= want)
   3091 		msgs = want;
   3092 	else {
    3093 		aprint_error_dev(dev,
   3094 		    "MSI-X Configuration Problem, "
   3095 		    "%d vectors but %d queues wanted!\n",
   3096 		    msgs, want);
    3097 		return (ENXIO);
   3098 	}
   3099 
   3100 	adapter->msix_mem = (void *)1; /* XXX */
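         	/*
         	 * XXX: presumably just a non-NULL marker meaning "MSI-X is in
         	 * use"; the FreeBSD driver keeps the mapped MSI-X table BAR
         	 * here instead.
         	 */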
   3101 	aprint_normal_dev(dev,
   3102 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   3103 	adapter->num_queues = queues;
   3104 
   3105 	return (0);
   3106 } /* ixv_configure_interrupts */
   3107 
   3108 
   3109 /************************************************************************
   3110  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   3111  *
   3112  *   Done outside of interrupt context since the driver might sleep
   3113  ************************************************************************/
   3114 static void
   3115 ixv_handle_link(void *context)
   3116 {
   3117 	struct adapter *adapter = context;
   3118 
   3119 	IXGBE_CORE_LOCK(adapter);
   3120 
   3121 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   3122 	    &adapter->link_up, FALSE);
   3123 	ixv_update_link_status(adapter);
   3124 
   3125 	IXGBE_CORE_UNLOCK(adapter);
   3126 } /* ixv_handle_link */
   3127 
   3128 /************************************************************************
   3129  * ixv_check_link - Used in the local timer to poll for link changes
   3130  ************************************************************************/
   3131 static void
   3132 ixv_check_link(struct adapter *adapter)
   3133 {
   3134 
   3135 	KASSERT(mutex_owned(&adapter->core_mtx));
   3136 
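         	/*
         	 * Force check_link() below to re-read the link state from the
         	 * hardware instead of returning the cached status.
         	 */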
   3137 	adapter->hw.mac.get_link_status = TRUE;
   3138 
   3139 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   3140 	    &adapter->link_up, FALSE);
   3141 	ixv_update_link_status(adapter);
   3142 } /* ixv_check_link */
   3143