      1 /*$NetBSD: ixv.c,v 1.113 2019/05/23 10:57:28 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 #ifdef _KERNEL_OPT
     38 #include "opt_inet.h"
     39 #include "opt_inet6.h"
     40 #include "opt_net_mpsafe.h"
     41 #endif
     42 
     43 #include "ixgbe.h"
     44 #include "vlan.h"
     45 
     46 /************************************************************************
     47  * Driver version
     48  ************************************************************************/
     49 static const char ixv_driver_version[] = "2.0.1-k";
     50 
     51 /************************************************************************
     52  * PCI Device ID Table
     53  *
     54  *   Used by probe to select devices to load on
     55  *   Last field stores an index into ixv_strings
     56  *   Last entry must be all 0s
     57  *
     58  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
     59  ************************************************************************/
     60 static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
     61 {
     62 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
     63 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
     64 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
     65 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
     66 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
     67 	/* required last entry */
     68 	{0, 0, 0, 0, 0}
     69 };
     70 
     71 /************************************************************************
     72  * Table of branding strings
     73  ************************************************************************/
     74 static const char *ixv_strings[] = {
     75 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
     76 };
     77 
     78 /*********************************************************************
     79  *  Function prototypes
     80  *********************************************************************/
     81 static int      ixv_probe(device_t, cfdata_t, void *);
     82 static void	ixv_attach(device_t, device_t, void *);
     83 static int      ixv_detach(device_t, int);
     84 #if 0
     85 static int      ixv_shutdown(device_t);
     86 #endif
     87 static int	ixv_ifflags_cb(struct ethercom *);
     88 static int      ixv_ioctl(struct ifnet *, u_long, void *);
     89 static int	ixv_init(struct ifnet *);
     90 static void	ixv_init_locked(struct adapter *);
     91 static void	ixv_ifstop(struct ifnet *, int);
     92 static void     ixv_stop(void *);
     93 static void     ixv_init_device_features(struct adapter *);
     94 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
     95 static int      ixv_media_change(struct ifnet *);
     96 static int      ixv_allocate_pci_resources(struct adapter *,
     97 		    const struct pci_attach_args *);
     98 static int      ixv_allocate_msix(struct adapter *,
     99 		    const struct pci_attach_args *);
    100 static int      ixv_configure_interrupts(struct adapter *);
    101 static void	ixv_free_pci_resources(struct adapter *);
    102 static void     ixv_local_timer(void *);
    103 static void     ixv_local_timer_locked(void *);
    104 static int      ixv_setup_interface(device_t, struct adapter *);
    105 static int      ixv_negotiate_api(struct adapter *);
    106 
    107 static void     ixv_initialize_transmit_units(struct adapter *);
    108 static void     ixv_initialize_receive_units(struct adapter *);
    109 static void     ixv_initialize_rss_mapping(struct adapter *);
    110 static void     ixv_check_link(struct adapter *);
    111 
    112 static void     ixv_enable_intr(struct adapter *);
    113 static void     ixv_disable_intr(struct adapter *);
    114 static void     ixv_set_multi(struct adapter *);
    115 static void     ixv_update_link_status(struct adapter *);
    116 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    117 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    118 static void	ixv_configure_ivars(struct adapter *);
    119 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    120 static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
    121 
    122 static void	ixv_setup_vlan_support(struct adapter *);
    123 #if 0
    124 static void	ixv_register_vlan(void *, struct ifnet *, u16);
    125 static void	ixv_unregister_vlan(void *, struct ifnet *, u16);
    126 #endif
    127 
    128 static void	ixv_add_device_sysctls(struct adapter *);
    129 static void	ixv_save_stats(struct adapter *);
    130 static void	ixv_init_stats(struct adapter *);
    131 static void	ixv_update_stats(struct adapter *);
    132 static void	ixv_add_stats_sysctls(struct adapter *);
    133 
    134 
    135 /* Sysctl handlers */
    136 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    137 		    const char *, int *, int);
    138 static int      ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    139 static int      ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    140 static int      ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
    141 static int      ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
    142 static int      ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
    143 static int      ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
    144 
    145 /* The MSI-X Interrupt handlers */
    146 static int	ixv_msix_que(void *);
    147 static int	ixv_msix_mbx(void *);
    148 
    149 /* Deferred interrupt tasklets */
    150 static void	ixv_handle_que(void *);
    151 static void     ixv_handle_link(void *);
    152 
    153 /* Workqueue handler for deferred work */
    154 static void	ixv_handle_que_work(struct work *, void *);
    155 
    156 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    157 static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    158 
    159 /************************************************************************
    160  * Device Interface Entry Points (NetBSD autoconf)
    161  ************************************************************************/
    162 CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    163     ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    164     DVF_DETACH_SHUTDOWN);
    165 
    166 #if 0
    167 static driver_t ixv_driver = {
    168 	"ixv", ixv_methods, sizeof(struct adapter),
    169 };
    170 
    171 devclass_t ixv_devclass;
    172 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
    173 MODULE_DEPEND(ixv, pci, 1, 1, 1);
    174 MODULE_DEPEND(ixv, ether, 1, 1, 1);
    175 #endif
    176 
    177 /*
    178  * TUNEABLE PARAMETERS:
    179  */
    180 
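/*
 * Note: on this NetBSD port TUNABLE_INT() is defined as an empty macro
 * below, so the hw.ixv.* tunable lines are no-ops; the static
 * initializers provide the compile-time defaults.
 */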
    181 /* Number of Queues - do not exceed MSI-X vectors - 1 */
    182 static int ixv_num_queues = 0;
    183 #define	TUNABLE_INT(__x, __y)
    184 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
    185 
    186 /*
    187  * AIM: Adaptive Interrupt Moderation.
    188  * The interrupt rate is varied over time
    189  * based on the traffic seen on each
    190  * interrupt vector.
    191  */
    192 static bool ixv_enable_aim = false;
    193 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
    194 
    195 static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
    196 TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
    197 
    198 /* How many packets rxeof tries to clean at a time */
    199 static int ixv_rx_process_limit = 256;
    200 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
    201 
    202 /* How many packets txeof tries to clean at a time */
    203 static int ixv_tx_process_limit = 256;
    204 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
    205 
    206 /* Which packet processing uses workqueue or softint */
    207 static bool ixv_txrx_workqueue = false;
    208 
    209 /*
    210  * Number of TX descriptors per ring,
    211  * setting higher than RX as this seems
    212  * the better performing choice.
    213  */
    214 static int ixv_txd = PERFORM_TXD;
    215 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
    216 
    217 /* Number of RX descriptors per ring */
    218 static int ixv_rxd = PERFORM_RXD;
    219 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
    220 
    221 /* Legacy Transmit (single queue) */
    222 static int ixv_enable_legacy_tx = 0;
    223 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
    224 
    225 #ifdef NET_MPSAFE
    226 #define IXGBE_MPSAFE		1
    227 #define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
    228 #define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
    229 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
    230 #else
    231 #define IXGBE_CALLOUT_FLAGS	0
    232 #define IXGBE_SOFTINFT_FLAGS	0
    233 #define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
    234 #endif
    235 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
    236 
    237 #if 0
    238 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
    239 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
    240 #endif
    241 
    242 /************************************************************************
    243  * ixv_probe - Device identification routine
    244  *
    245  *   Determines if the driver should be loaded on
    246  *   the adapter based on its PCI vendor/device ID.
    247  *
    248  *   return 1 on a device match, 0 otherwise
    249  ************************************************************************/
    250 static int
    251 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    252 {
    253 #ifdef __HAVE_PCI_MSI_MSIX
    254 	const struct pci_attach_args *pa = aux;
    255 
    256 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    257 #else
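	/* The VF driver needs MSI-X; without MSI/MSI-X support, never match. */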
    258 	return 0;
    259 #endif
    260 } /* ixv_probe */
    261 
    262 static const ixgbe_vendor_info_t *
    263 ixv_lookup(const struct pci_attach_args *pa)
    264 {
    265 	const ixgbe_vendor_info_t *ent;
    266 	pcireg_t subid;
    267 
    268 	INIT_DEBUGOUT("ixv_lookup: begin");
    269 
    270 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    271 		return NULL;
    272 
    273 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    274 
    275 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    276 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    277 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    278 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    279 		     (ent->subvendor_id == 0)) &&
    280 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    281 		     (ent->subdevice_id == 0))) {
    282 			return ent;
    283 		}
    284 	}
    285 
    286 	return NULL;
    287 }
    288 
    289 /************************************************************************
    290  * ixv_attach - Device initialization routine
    291  *
    292  *   Called when the driver is being loaded.
    293  *   Identifies the type of hardware, allocates all resources
    294  *   and initializes the hardware.
    295  *
    296  *   Returns nothing; on failure, resources are released and the error logged.
    297  ************************************************************************/
    298 static void
    299 ixv_attach(device_t parent, device_t dev, void *aux)
    300 {
    301 	struct adapter *adapter;
    302 	struct ixgbe_hw *hw;
    303 	int             error = 0;
    304 	pcireg_t	id, subid;
    305 	const ixgbe_vendor_info_t *ent;
    306 	const struct pci_attach_args *pa = aux;
    307 	const char *apivstr;
    308 	const char *str;
    309 	char buf[256];
    310 
    311 	INIT_DEBUGOUT("ixv_attach: begin");
    312 
    313 	/*
    314 	 * Make sure BUSMASTER is set; on a VM under
    315 	 * KVM it may not be, and that will break things.
    316 	 */
    317 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    318 
    319 	/* Allocate, clear, and link in our adapter structure */
    320 	adapter = device_private(dev);
    321 	adapter->dev = dev;
    322 	adapter->hw.back = adapter;
    323 	hw = &adapter->hw;
    324 
    325 	adapter->init_locked = ixv_init_locked;
    326 	adapter->stop_locked = ixv_stop;
    327 
    328 	adapter->osdep.pc = pa->pa_pc;
    329 	adapter->osdep.tag = pa->pa_tag;
    330 	if (pci_dma64_available(pa))
    331 		adapter->osdep.dmat = pa->pa_dmat64;
    332 	else
    333 		adapter->osdep.dmat = pa->pa_dmat;
    334 	adapter->osdep.attached = false;
    335 
    336 	ent = ixv_lookup(pa);
    337 
    338 	KASSERT(ent != NULL);
    339 
    340 	aprint_normal(": %s, Version - %s\n",
    341 	    ixv_strings[ent->index], ixv_driver_version);
    342 
    343 	/* Core Lock Init */
    344 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    345 
    346 	/* Do base PCI setup - map BAR0 */
    347 	if (ixv_allocate_pci_resources(adapter, pa)) {
    348 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    349 		error = ENXIO;
    350 		goto err_out;
    351 	}
    352 
    353 	/* SYSCTL APIs */
    354 	ixv_add_device_sysctls(adapter);
    355 
    356 	/* Set up the timer callout */
    357 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    358 
    359 	/* Save off the information about this board */
    360 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    361 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    362 	hw->vendor_id = PCI_VENDOR(id);
    363 	hw->device_id = PCI_PRODUCT(id);
    364 	hw->revision_id =
    365 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    366 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    367 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    368 
    369 	/* A subset of set_mac_type */
    370 	switch (hw->device_id) {
    371 	case IXGBE_DEV_ID_82599_VF:
    372 		hw->mac.type = ixgbe_mac_82599_vf;
    373 		str = "82599 VF";
    374 		break;
    375 	case IXGBE_DEV_ID_X540_VF:
    376 		hw->mac.type = ixgbe_mac_X540_vf;
    377 		str = "X540 VF";
    378 		break;
    379 	case IXGBE_DEV_ID_X550_VF:
    380 		hw->mac.type = ixgbe_mac_X550_vf;
    381 		str = "X550 VF";
    382 		break;
    383 	case IXGBE_DEV_ID_X550EM_X_VF:
    384 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    385 		str = "X550EM X VF";
    386 		break;
    387 	case IXGBE_DEV_ID_X550EM_A_VF:
    388 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    389 		str = "X550EM A VF";
    390 		break;
    391 	default:
    392 		/* Shouldn't get here since probe succeeded */
    393 		aprint_error_dev(dev, "Unknown device ID!\n");
    394 		error = ENXIO;
    395 		goto err_out;
    396 		break;
    397 	}
    398 	aprint_normal_dev(dev, "device %s\n", str);
    399 
    400 	ixv_init_device_features(adapter);
    401 
    402 	/* Initialize the shared code */
    403 	error = ixgbe_init_ops_vf(hw);
    404 	if (error) {
    405 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    406 		error = EIO;
    407 		goto err_out;
    408 	}
    409 
    410 	/* Setup the mailbox */
    411 	ixgbe_init_mbx_params_vf(hw);
    412 
    413 	/* Set the right number of segments */
    414 	adapter->num_segs = IXGBE_82599_SCATTER;
    415 
    416 	/* Reset mbox api to 1.0 */
    417 	error = hw->mac.ops.reset_hw(hw);
    418 	if (error == IXGBE_ERR_RESET_FAILED)
    419 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    420 	else if (error)
    421 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    422 		    error);
    423 	if (error) {
    424 		error = EIO;
    425 		goto err_out;
    426 	}
    427 
    428 	error = hw->mac.ops.init_hw(hw);
    429 	if (error) {
    430 		aprint_error_dev(dev, "...init_hw() failed!\n");
    431 		error = EIO;
    432 		goto err_out;
    433 	}
    434 
    435 	/* Negotiate mailbox API version */
    436 	error = ixv_negotiate_api(adapter);
    437 	if (error)
    438 		aprint_normal_dev(dev,
    439 		    "MBX API negotiation failed during attach!\n");
    440 	switch (hw->api_version) {
    441 	case ixgbe_mbox_api_10:
    442 		apivstr = "1.0";
    443 		break;
    444 	case ixgbe_mbox_api_20:
    445 		apivstr = "2.0";
    446 		break;
    447 	case ixgbe_mbox_api_11:
    448 		apivstr = "1.1";
    449 		break;
    450 	case ixgbe_mbox_api_12:
    451 		apivstr = "1.2";
    452 		break;
    453 	case ixgbe_mbox_api_13:
    454 		apivstr = "1.3";
    455 		break;
    456 	default:
    457 		apivstr = "unknown";
    458 		break;
    459 	}
    460 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    461 
    462 	/* If no mac address was assigned, make a random one */
    463 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    464 		u8 addr[ETHER_ADDR_LEN];
    465 		uint64_t rndval = cprng_strong64();
    466 
    467 		memcpy(addr, &rndval, sizeof(addr));
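		/* Clear the multicast bit and set the locally-administered bit. */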
    468 		addr[0] &= 0xFE;
    469 		addr[0] |= 0x02;
    470 		bcopy(addr, hw->mac.addr, sizeof(addr));
    471 	}
    472 
    473 	/* Register for VLAN events */
    474 #if 0 /* XXX delete after write? */
    475 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    476 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    477 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    478 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    479 #endif
    480 
    481 	/* Sysctls for limiting the amount of work done in the taskqueues */
    482 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    483 	    "max number of rx packets to process",
    484 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    485 
    486 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    487 	    "max number of tx packets to process",
    488 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    489 
    490 	/* Do descriptor calc and sanity checks */
    491 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    492 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    493 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    494 		adapter->num_tx_desc = DEFAULT_TXD;
    495 	} else
    496 		adapter->num_tx_desc = ixv_txd;
    497 
    498 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    499 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    500 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    501 		adapter->num_rx_desc = DEFAULT_RXD;
    502 	} else
    503 		adapter->num_rx_desc = ixv_rxd;
    504 
    505 	/* Setup MSI-X */
    506 	error = ixv_configure_interrupts(adapter);
    507 	if (error)
    508 		goto err_out;
    509 
    510 	/* Allocate our TX/RX Queues */
    511 	if (ixgbe_allocate_queues(adapter)) {
    512 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    513 		error = ENOMEM;
    514 		goto err_out;
    515 	}
    516 
    517 	/* hw.ixv defaults init */
    518 	adapter->enable_aim = ixv_enable_aim;
    519 
    520 	adapter->txrx_use_workqueue = ixv_txrx_workqueue;
    521 
    522 	error = ixv_allocate_msix(adapter, pa);
    523 	if (error) {
    524 		device_printf(dev, "ixv_allocate_msix() failed!\n");
    525 		goto err_late;
    526 	}
    527 
    528 	/* Setup OS specific network interface */
    529 	error = ixv_setup_interface(dev, adapter);
    530 	if (error != 0) {
    531 		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
    532 		goto err_late;
    533 	}
    534 
    535 	/* Do the stats setup */
    536 	ixv_save_stats(adapter);
    537 	ixv_init_stats(adapter);
    538 	ixv_add_stats_sysctls(adapter);
    539 
    540 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    541 		ixgbe_netmap_attach(adapter);
    542 
    543 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    544 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    545 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    546 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    547 
    548 	INIT_DEBUGOUT("ixv_attach: end");
    549 	adapter->osdep.attached = true;
    550 
    551 	return;
    552 
    553 err_late:
    554 	ixgbe_free_transmit_structures(adapter);
    555 	ixgbe_free_receive_structures(adapter);
    556 	free(adapter->queues, M_DEVBUF);
    557 err_out:
    558 	ixv_free_pci_resources(adapter);
    559 	IXGBE_CORE_LOCK_DESTROY(adapter);
    560 
    561 	return;
    562 } /* ixv_attach */
    563 
    564 /************************************************************************
    565  * ixv_detach - Device removal routine
    566  *
    567  *   Called when the driver is being removed.
    568  *   Stops the adapter and deallocates all the resources
    569  *   that were allocated for driver operation.
    570  *
    571  *   return 0 on success, positive on failure
    572  ************************************************************************/
    573 static int
    574 ixv_detach(device_t dev, int flags)
    575 {
    576 	struct adapter  *adapter = device_private(dev);
    577 	struct ixgbe_hw *hw = &adapter->hw;
    578 	struct ix_queue *que = adapter->queues;
    579 	struct tx_ring *txr = adapter->tx_rings;
    580 	struct rx_ring *rxr = adapter->rx_rings;
    581 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    582 
    583 	INIT_DEBUGOUT("ixv_detach: begin");
    584 	if (adapter->osdep.attached == false)
    585 		return 0;
    586 
    587 	/* Stop the interface; its callouts are stopped as part of this. */
    588 	ixv_ifstop(adapter->ifp, 1);
    589 
    590 #if NVLAN > 0
    591 	/* Make sure VLANs are not using driver */
    592 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    593 		;	/* nothing to do: no VLANs */
    594 	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
    595 		vlan_ifdetach(adapter->ifp);
    596 	else {
    597 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    598 		return EBUSY;
    599 	}
    600 #endif
    601 
    602 	IXGBE_CORE_LOCK(adapter);
    603 	ixv_stop(adapter);
    604 	IXGBE_CORE_UNLOCK(adapter);
    605 
    606 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
    607 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
    608 			softint_disestablish(txr->txr_si);
    609 		softint_disestablish(que->que_si);
    610 	}
    611 	if (adapter->txr_wq != NULL)
    612 		workqueue_destroy(adapter->txr_wq);
    613 	if (adapter->txr_wq_enqueued != NULL)
    614 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
    615 	if (adapter->que_wq != NULL)
    616 		workqueue_destroy(adapter->que_wq);
    617 
    618 	/* Drain the Mailbox(link) queue */
    619 	softint_disestablish(adapter->link_si);
    620 
    621 	/* Unregister VLAN events */
    622 #if 0 /* XXX msaitoh delete after write? */
    623 	if (adapter->vlan_attach != NULL)
    624 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    625 	if (adapter->vlan_detach != NULL)
    626 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
    627 #endif
    628 
    629 	ether_ifdetach(adapter->ifp);
    630 	callout_halt(&adapter->timer, NULL);
    631 
    632 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    633 		netmap_detach(adapter->ifp);
    634 
    635 	ixv_free_pci_resources(adapter);
    636 #if 0 /* XXX the NetBSD port is probably missing something here */
    637 	bus_generic_detach(dev);
    638 #endif
    639 	if_detach(adapter->ifp);
    640 	if_percpuq_destroy(adapter->ipq);
    641 
    642 	sysctl_teardown(&adapter->sysctllog);
    643 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    644 	evcnt_detach(&adapter->mbuf_defrag_failed);
    645 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    646 	evcnt_detach(&adapter->einval_tx_dma_setup);
    647 	evcnt_detach(&adapter->other_tx_dma_setup);
    648 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    649 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    650 	evcnt_detach(&adapter->watchdog_events);
    651 	evcnt_detach(&adapter->tso_err);
    652 	evcnt_detach(&adapter->link_irq);
    653 
    654 	txr = adapter->tx_rings;
    655 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    656 		evcnt_detach(&adapter->queues[i].irqs);
    657 		evcnt_detach(&adapter->queues[i].handleq);
    658 		evcnt_detach(&adapter->queues[i].req);
    659 		evcnt_detach(&txr->no_desc_avail);
    660 		evcnt_detach(&txr->total_packets);
    661 		evcnt_detach(&txr->tso_tx);
    662 #ifndef IXGBE_LEGACY_TX
    663 		evcnt_detach(&txr->pcq_drops);
    664 #endif
    665 
    666 		evcnt_detach(&rxr->rx_packets);
    667 		evcnt_detach(&rxr->rx_bytes);
    668 		evcnt_detach(&rxr->rx_copies);
    669 		evcnt_detach(&rxr->no_jmbuf);
    670 		evcnt_detach(&rxr->rx_discarded);
    671 	}
    672 	evcnt_detach(&stats->ipcs);
    673 	evcnt_detach(&stats->l4cs);
    674 	evcnt_detach(&stats->ipcs_bad);
    675 	evcnt_detach(&stats->l4cs_bad);
    676 
    677 	/* Packet Reception Stats */
    678 	evcnt_detach(&stats->vfgorc);
    679 	evcnt_detach(&stats->vfgprc);
    680 	evcnt_detach(&stats->vfmprc);
    681 
    682 	/* Packet Transmission Stats */
    683 	evcnt_detach(&stats->vfgotc);
    684 	evcnt_detach(&stats->vfgptc);
    685 
    686 	/* Mailbox Stats */
    687 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    688 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    689 	evcnt_detach(&hw->mbx.stats.acks);
    690 	evcnt_detach(&hw->mbx.stats.reqs);
    691 	evcnt_detach(&hw->mbx.stats.rsts);
    692 
    693 	ixgbe_free_transmit_structures(adapter);
    694 	ixgbe_free_receive_structures(adapter);
    695 	for (int i = 0; i < adapter->num_queues; i++) {
    696 		struct ix_queue *lque = &adapter->queues[i];
    697 		mutex_destroy(&lque->dc_mtx);
    698 	}
    699 	free(adapter->queues, M_DEVBUF);
    700 
    701 	IXGBE_CORE_LOCK_DESTROY(adapter);
    702 
    703 	return (0);
    704 } /* ixv_detach */
    705 
    706 /************************************************************************
    707  * ixv_init_locked - Init entry point
    708  *
    709  *   Used in two ways: it is reached from the stack through the
    710  *   network interface's init entry point, and it is also used by
    711  *   the driver itself as a hw/sw initialization routine to get to
    712  *   a consistent state.
    713  *
    714  *   Returns nothing; on failure the adapter is stopped.
    715  ************************************************************************/
    716 static void
    717 ixv_init_locked(struct adapter *adapter)
    718 {
    719 	struct ifnet	*ifp = adapter->ifp;
    720 	device_t 	dev = adapter->dev;
    721 	struct ixgbe_hw *hw = &adapter->hw;
    722 	struct ix_queue	*que;
    723 	int             error = 0;
    724 	uint32_t mask;
    725 	int i;
    726 
    727 	INIT_DEBUGOUT("ixv_init_locked: begin");
    728 	KASSERT(mutex_owned(&adapter->core_mtx));
    729 	hw->adapter_stopped = FALSE;
    730 	hw->mac.ops.stop_adapter(hw);
    731 	callout_stop(&adapter->timer);
    732 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
    733 		que->disabled_count = 0;
    734 
    735 	/* reprogram the RAR[0] in case user changed it. */
    736 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
    737 
    738 	/* Get the latest mac address, User can use a LAA */
    739 	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
    740 	     IXGBE_ETH_LENGTH_OF_ADDRESS);
    741 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
    742 
    743 	/* Prepare transmit descriptors and buffers */
    744 	if (ixgbe_setup_transmit_structures(adapter)) {
    745 		aprint_error_dev(dev, "Could not setup transmit structures\n");
    746 		ixv_stop(adapter);
    747 		return;
    748 	}
    749 
    750 	/* Reset VF and renegotiate mailbox API version */
    751 	hw->mac.ops.reset_hw(hw);
    752 	hw->mac.ops.start_hw(hw);
    753 	error = ixv_negotiate_api(adapter);
    754 	if (error)
    755 		device_printf(dev,
    756 		    "Mailbox API negotiation failed in init_locked!\n");
    757 
    758 	ixv_initialize_transmit_units(adapter);
    759 
    760 	/* Setup Multicast table */
    761 	ixv_set_multi(adapter);
    762 
    763 	/*
    764 	 * Determine the correct mbuf pool
    765 	 * for doing jumbo/headersplit
    766 	 */
    767 	if (ifp->if_mtu > ETHERMTU)
    768 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
    769 	else
    770 		adapter->rx_mbuf_sz = MCLBYTES;
    771 
    772 	/* Prepare receive descriptors and buffers */
    773 	if (ixgbe_setup_receive_structures(adapter)) {
    774 		device_printf(dev, "Could not setup receive structures\n");
    775 		ixv_stop(adapter);
    776 		return;
    777 	}
    778 
    779 	/* Configure RX settings */
    780 	ixv_initialize_receive_units(adapter);
    781 
    782 #if 0 /* XXX isn't it required? -- msaitoh  */
    783 	/* Set the various hardware offload abilities */
    784 	ifp->if_hwassist = 0;
    785 	if (ifp->if_capenable & IFCAP_TSO4)
    786 		ifp->if_hwassist |= CSUM_TSO;
    787 	if (ifp->if_capenable & IFCAP_TXCSUM) {
    788 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
    789 #if __FreeBSD_version >= 800000
    790 		ifp->if_hwassist |= CSUM_SCTP;
    791 #endif
    792 	}
    793 #endif
    794 
    795 	/* Set up VLAN offload and filter */
    796 	ixv_setup_vlan_support(adapter);
    797 
    798 	/* Set up MSI-X routing */
    799 	ixv_configure_ivars(adapter);
    800 
    801 	/* Set up auto-mask */
    802 	mask = (1 << adapter->vector);
    803 	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
    804 		mask |= (1 << que->msix);
    805 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
    806 
    807 	/* Set moderation on the Link interrupt */
    808 	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
    809 
    810 	/* Stats init */
    811 	ixv_init_stats(adapter);
    812 
    813 	/* Config/Enable Link */
    814 	hw->mac.get_link_status = TRUE;
    815 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
    816 	    FALSE);
    817 
    818 	/* Start watchdog */
    819 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    820 
    821 	/* And now turn on interrupts */
    822 	ixv_enable_intr(adapter);
    823 
    824 	/* Update saved flags. See ixv_ifflags_cb() */
    825 	adapter->if_flags = ifp->if_flags;
    826 
    827 	/* Now inform the stack we're ready */
    828 	ifp->if_flags |= IFF_RUNNING;
    829 	ifp->if_flags &= ~IFF_OACTIVE;
    830 
    831 	return;
    832 } /* ixv_init_locked */
    833 
    834 /************************************************************************
    835  * ixv_enable_queue
    836  ************************************************************************/
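/*
 * ixv_enable_queue()/ixv_disable_queue() keep a per-queue disabled_count:
 * disable masks the vector in VTEIMC only on the 0 -> 1 transition, and
 * enable unmasks it in VTEIMS only when the count drops back to 0, so
 * nested disable/enable pairs balance out.
 */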
    837 static inline void
    838 ixv_enable_queue(struct adapter *adapter, u32 vector)
    839 {
    840 	struct ixgbe_hw *hw = &adapter->hw;
    841 	struct ix_queue *que = &adapter->queues[vector];
    842 	u32             queue = 1 << vector;
    843 	u32             mask;
    844 
    845 	mutex_enter(&que->dc_mtx);
    846 	if (que->disabled_count > 0 && --que->disabled_count > 0)
    847 		goto out;
    848 
    849 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    850 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    851 out:
    852 	mutex_exit(&que->dc_mtx);
    853 } /* ixv_enable_queue */
    854 
    855 /************************************************************************
    856  * ixv_disable_queue
    857  ************************************************************************/
    858 static inline void
    859 ixv_disable_queue(struct adapter *adapter, u32 vector)
    860 {
    861 	struct ixgbe_hw *hw = &adapter->hw;
    862 	struct ix_queue *que = &adapter->queues[vector];
    863 	u64             queue = (u64)(1 << vector);
    864 	u32             mask;
    865 
    866 	mutex_enter(&que->dc_mtx);
    867 	if (que->disabled_count++ > 0)
    868 		goto  out;
    869 
    870 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    871 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    872 out:
    873 	mutex_exit(&que->dc_mtx);
    874 } /* ixv_disable_queue */
    875 
    876 #if 0
    877 static inline void
    878 ixv_rearm_queues(struct adapter *adapter, u64 queues)
    879 {
    880 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    881 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
    882 } /* ixv_rearm_queues */
    883 #endif
    884 
    885 
    886 /************************************************************************
    887  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    888  ************************************************************************/
    889 static int
    890 ixv_msix_que(void *arg)
    891 {
    892 	struct ix_queue	*que = arg;
    893 	struct adapter  *adapter = que->adapter;
    894 	struct tx_ring	*txr = que->txr;
    895 	struct rx_ring	*rxr = que->rxr;
    896 	bool		more;
    897 	u32		newitr = 0;
    898 
    899 	ixv_disable_queue(adapter, que->msix);
    900 	++que->irqs.ev_count;
    901 
    902 #ifdef __NetBSD__
    903 	/* Don't run ixgbe_rxeof in interrupt context */
    904 	more = true;
    905 #else
    906 	more = ixgbe_rxeof(que);
    907 #endif
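	/* With 'more' forced true, all RX work is deferred to the ixv_handle_que softint. */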
    908 
    909 	IXGBE_TX_LOCK(txr);
    910 	ixgbe_txeof(txr);
    911 	IXGBE_TX_UNLOCK(txr);
    912 
    913 	/* Do AIM now? */
    914 
    915 	if (adapter->enable_aim == false)
    916 		goto no_calc;
    917 	/*
    918 	 * Do Adaptive Interrupt Moderation:
    919 	 *  - Write out last calculated setting
    920 	 *  - Calculate based on average size over
    921 	 *    the last interval.
    922 	 */
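	/*
	 * Rough example of the math below: with ~1500-byte average frames,
	 * newitr = 1500 + 24 = 1524, clamped to at most 3000; since it is
	 * above the 300-1200 mid range it is halved to 762, saved in
	 * que->eitr_setting and written to VTEITR on the next interrupt.
	 */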
    923 	if (que->eitr_setting)
    924 		ixv_eitr_write(adapter, que->msix, que->eitr_setting);
    925 
    926 	que->eitr_setting = 0;
    927 
    928 	/* Idle, do nothing */
    929 	if ((txr->bytes == 0) && (rxr->bytes == 0))
    930 		goto no_calc;
    931 
    932 	if ((txr->bytes) && (txr->packets))
    933 		newitr = txr->bytes/txr->packets;
    934 	if ((rxr->bytes) && (rxr->packets))
    935 		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
    936 	newitr += 24; /* account for hardware frame, crc */
    937 
    938 	/* set an upper boundary */
    939 	newitr = uimin(newitr, 3000);
    940 
    941 	/* Be nice to the mid range */
    942 	if ((newitr > 300) && (newitr < 1200))
    943 		newitr = (newitr / 3);
    944 	else
    945 		newitr = (newitr / 2);
    946 
    947 	/*
    948 	 * When RSC is used, the ITR interval must be larger than RSC_DELAY.
    949 	 * We currently use 2us for RSC_DELAY.  At 100M (and presumably 10M,
    950 	 * though that is not documented) the minimum interval is already
    951 	 * greater than 2us, but at 1G and above it is not, so enforce a floor.
    952 	 */
    953 	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
    954 	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
    955 		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
    956 			newitr = IXGBE_MIN_RSC_EITR_10G1G;
    957 	}
    958 
    959 	/* save for next interrupt */
    960 	que->eitr_setting = newitr;
    961 
    962 	/* Reset state */
    963 	txr->bytes = 0;
    964 	txr->packets = 0;
    965 	rxr->bytes = 0;
    966 	rxr->packets = 0;
    967 
    968 no_calc:
    969 	if (more)
    970 		softint_schedule(que->que_si);
    971 	else /* Re-enable this interrupt */
    972 		ixv_enable_queue(adapter, que->msix);
    973 
    974 	return 1;
    975 } /* ixv_msix_que */
    976 
    977 /************************************************************************
    978  * ixv_msix_mbx
    979  ************************************************************************/
    980 static int
    981 ixv_msix_mbx(void *arg)
    982 {
    983 	struct adapter	*adapter = arg;
    984 	struct ixgbe_hw *hw = &adapter->hw;
    985 
    986 	++adapter->link_irq.ev_count;
    987 	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    988 
    989 	/* Link status change */
    990 	hw->mac.get_link_status = TRUE;
    991 	softint_schedule(adapter->link_si);
    992 
    993 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
    994 
    995 	return 1;
    996 } /* ixv_msix_mbx */
    997 
    998 static void
    999 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
   1000 {
   1001 
   1002 	/*
   1003 	 * Only devices newer than the 82598 support a VF function, so this
   1004 	 * function is simple: no 82598 special case is needed.
   1005 	 */
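	/* CNT_WDIS should keep this write from also resetting the internal ITR counter. */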
   1006 	itr |= IXGBE_EITR_CNT_WDIS;
   1007 
   1008 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
   1009 }
   1010 
   1011 
   1012 /************************************************************************
   1013  * ixv_media_status - Media Ioctl callback
   1014  *
   1015  *   Called whenever the user queries the status of
   1016  *   the interface using ifconfig.
   1017  ************************************************************************/
   1018 static void
   1019 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
   1020 {
   1021 	struct adapter *adapter = ifp->if_softc;
   1022 
   1023 	INIT_DEBUGOUT("ixv_media_status: begin");
   1024 	IXGBE_CORE_LOCK(adapter);
   1025 	ixv_update_link_status(adapter);
   1026 
   1027 	ifmr->ifm_status = IFM_AVALID;
   1028 	ifmr->ifm_active = IFM_ETHER;
   1029 
   1030 	if (adapter->link_active != LINK_STATE_UP) {
   1031 		ifmr->ifm_active |= IFM_NONE;
   1032 		IXGBE_CORE_UNLOCK(adapter);
   1033 		return;
   1034 	}
   1035 
   1036 	ifmr->ifm_status |= IFM_ACTIVE;
   1037 
   1038 	switch (adapter->link_speed) {
   1039 		case IXGBE_LINK_SPEED_10GB_FULL:
   1040 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1041 			break;
   1042 		case IXGBE_LINK_SPEED_5GB_FULL:
   1043 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   1044 			break;
   1045 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   1046 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   1047 			break;
   1048 		case IXGBE_LINK_SPEED_1GB_FULL:
   1049 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1050 			break;
   1051 		case IXGBE_LINK_SPEED_100_FULL:
   1052 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1053 			break;
   1054 		case IXGBE_LINK_SPEED_10_FULL:
   1055 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   1056 			break;
   1057 	}
   1058 
   1059 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   1060 
   1061 	IXGBE_CORE_UNLOCK(adapter);
   1062 } /* ixv_media_status */
   1063 
   1064 /************************************************************************
   1065  * ixv_media_change - Media Ioctl callback
   1066  *
   1067  *   Called when the user changes speed/duplex using
   1068  *   the media/mediaopt options with ifconfig.
   1069  ************************************************************************/
   1070 static int
   1071 ixv_media_change(struct ifnet *ifp)
   1072 {
   1073 	struct adapter *adapter = ifp->if_softc;
   1074 	struct ifmedia *ifm = &adapter->media;
   1075 
   1076 	INIT_DEBUGOUT("ixv_media_change: begin");
   1077 
   1078 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1079 		return (EINVAL);
   1080 
   1081 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1082 	case IFM_AUTO:
   1083 		break;
   1084 	default:
   1085 		device_printf(adapter->dev, "Only auto media type\n");
   1086 		return (EINVAL);
   1087 	}
   1088 
   1089 	return (0);
   1090 } /* ixv_media_change */
   1091 
   1092 
   1093 /************************************************************************
   1094  * ixv_negotiate_api
   1095  *
   1096  *   Negotiate the Mailbox API with the PF;
   1097  *   starting with the most featured API.
   1098  ************************************************************************/
   1099 static int
   1100 ixv_negotiate_api(struct adapter *adapter)
   1101 {
   1102 	struct ixgbe_hw *hw = &adapter->hw;
   1103 	int             mbx_api[] = { ixgbe_mbox_api_11,
   1104 	                              ixgbe_mbox_api_10,
   1105 	                              ixgbe_mbox_api_unknown };
   1106 	int             i = 0;
   1107 
   1108 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1109 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1110 			return (0);
   1111 		i++;
   1112 	}
   1113 
   1114 	return (EINVAL);
   1115 } /* ixv_negotiate_api */
   1116 
   1117 
   1118 /************************************************************************
   1119  * ixv_set_multi - Multicast Update
   1120  *
   1121  *   Called whenever multicast address list is updated.
   1122  ************************************************************************/
   1123 static void
   1124 ixv_set_multi(struct adapter *adapter)
   1125 {
   1126 	struct ether_multi *enm;
   1127 	struct ether_multistep step;
   1128 	struct ethercom *ec = &adapter->osdep.ec;
   1129 	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
   1130 	u8                 *update_ptr;
   1131 	int                mcnt = 0;
   1132 
   1133 	KASSERT(mutex_owned(&adapter->core_mtx));
   1134 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
   1135 
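	/*
	 * Copy the multicast list into the flat mta[] array (6 bytes per
	 * entry); update_mc_addr_list() then walks it through the
	 * ixv_mc_array_itr() iterator.
	 */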
   1136 	ETHER_LOCK(ec);
   1137 	ETHER_FIRST_MULTI(step, ec, enm);
   1138 	while (enm != NULL) {
   1139 		bcopy(enm->enm_addrlo,
   1140 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
   1141 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
   1142 		mcnt++;
   1143 		/* XXX This might be required --msaitoh */
   1144 		if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
   1145 			break;
   1146 		ETHER_NEXT_MULTI(step, enm);
   1147 	}
   1148 	ETHER_UNLOCK(ec);
   1149 
   1150 	update_ptr = mta;
   1151 
   1152 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
   1153 	    ixv_mc_array_itr, TRUE);
   1154 } /* ixv_set_multi */
   1155 
   1156 /************************************************************************
   1157  * ixv_mc_array_itr
   1158  *
   1159  *   An iterator function needed by the multicast shared code.
   1160  *   It hands the shared code routine the addresses collected in
   1161  *   the ixv_set_multi() array, one at a time.
   1162  ************************************************************************/
   1163 static u8 *
   1164 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1165 {
   1166 	u8 *addr = *update_ptr;
   1167 	u8 *newptr;
   1168 
   1169 	*vmdq = 0;
   1170 
   1171 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1172 	*update_ptr = newptr;
   1173 
   1174 	return addr;
   1175 } /* ixv_mc_array_itr */
   1176 
   1177 /************************************************************************
   1178  * ixv_local_timer - Timer routine
   1179  *
   1180  *   Checks for link status, updates statistics,
   1181  *   and runs the watchdog check.
   1182  ************************************************************************/
   1183 static void
   1184 ixv_local_timer(void *arg)
   1185 {
   1186 	struct adapter *adapter = arg;
   1187 
   1188 	IXGBE_CORE_LOCK(adapter);
   1189 	ixv_local_timer_locked(adapter);
   1190 	IXGBE_CORE_UNLOCK(adapter);
   1191 }
   1192 
   1193 static void
   1194 ixv_local_timer_locked(void *arg)
   1195 {
   1196 	struct adapter	*adapter = arg;
   1197 	device_t	dev = adapter->dev;
   1198 	struct ix_queue	*que = adapter->queues;
   1199 	u64		queues = 0;
   1200 	u64		v0, v1, v2, v3, v4, v5, v6, v7;
   1201 	int		hung = 0;
   1202 	int		i;
   1203 
   1204 	KASSERT(mutex_owned(&adapter->core_mtx));
   1205 
   1206 	ixv_check_link(adapter);
   1207 
   1208 	/* Stats Update */
   1209 	ixv_update_stats(adapter);
   1210 
   1211 	/* Update some event counters */
   1212 	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
   1213 	que = adapter->queues;
   1214 	for (i = 0; i < adapter->num_queues; i++, que++) {
   1215 		struct tx_ring  *txr = que->txr;
   1216 
   1217 		v0 += txr->q_efbig_tx_dma_setup;
   1218 		v1 += txr->q_mbuf_defrag_failed;
   1219 		v2 += txr->q_efbig2_tx_dma_setup;
   1220 		v3 += txr->q_einval_tx_dma_setup;
   1221 		v4 += txr->q_other_tx_dma_setup;
   1222 		v5 += txr->q_eagain_tx_dma_setup;
   1223 		v6 += txr->q_enomem_tx_dma_setup;
   1224 		v7 += txr->q_tso_err;
   1225 	}
   1226 	adapter->efbig_tx_dma_setup.ev_count = v0;
   1227 	adapter->mbuf_defrag_failed.ev_count = v1;
   1228 	adapter->efbig2_tx_dma_setup.ev_count = v2;
   1229 	adapter->einval_tx_dma_setup.ev_count = v3;
   1230 	adapter->other_tx_dma_setup.ev_count = v4;
   1231 	adapter->eagain_tx_dma_setup.ev_count = v5;
   1232 	adapter->enomem_tx_dma_setup.ev_count = v6;
   1233 	adapter->tso_err.ev_count = v7;
   1234 
   1235 	/*
   1236 	 * Check the TX queues status
   1237 	 *      - mark hung queues so we don't schedule on them
   1238 	 *      - watchdog only if all queues show hung
   1239 	 */
   1240 	que = adapter->queues;
   1241 	for (i = 0; i < adapter->num_queues; i++, que++) {
   1242 		/* Keep track of queues with work for soft irq */
   1243 		if (que->txr->busy)
   1244 			queues |= ((u64)1 << que->me);
   1245 		/*
   1246 		 * Each time txeof runs without cleaning while there
   1247 		 * are uncleaned descriptors, it increments busy.  If
   1248 		 * we reach the MAX we declare the queue hung.
   1249 		 */
   1250 		if (que->busy == IXGBE_QUEUE_HUNG) {
   1251 			++hung;
   1252 			/* Mark the queue as inactive */
   1253 			adapter->active_queues &= ~((u64)1 << que->me);
   1254 			continue;
   1255 		} else {
   1256 			/* Check if we've come back from hung */
   1257 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
   1258 				adapter->active_queues |= ((u64)1 << que->me);
   1259 		}
   1260 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
   1261 			device_printf(dev,
   1262 			    "Warning queue %d appears to be hung!\n", i);
   1263 			que->txr->busy = IXGBE_QUEUE_HUNG;
   1264 			++hung;
   1265 		}
   1266 	}
   1267 
   1268 	/* Only truly watchdog if all queues show hung */
   1269 	if (hung == adapter->num_queues)
   1270 		goto watchdog;
   1271 #if 0
   1272 	else if (queues != 0) { /* Force an IRQ on queues with work */
   1273 		ixv_rearm_queues(adapter, queues);
   1274 	}
   1275 #endif
   1276 
   1277 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
   1278 
   1279 	return;
   1280 
   1281 watchdog:
   1282 
   1283 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
   1284 	adapter->ifp->if_flags &= ~IFF_RUNNING;
   1285 	adapter->watchdog_events.ev_count++;
   1286 	ixv_init_locked(adapter);
   1287 } /* ixv_local_timer */
   1288 
   1289 /************************************************************************
   1290  * ixv_update_link_status - Update OS on link state
   1291  *
   1292  * Note: Only updates the OS on the cached link state.
   1293  *       The real check of the hardware only happens with
   1294  *       a link interrupt.
   1295  ************************************************************************/
   1296 static void
   1297 ixv_update_link_status(struct adapter *adapter)
   1298 {
   1299 	struct ifnet *ifp = adapter->ifp;
   1300 	device_t     dev = adapter->dev;
   1301 
   1302 	KASSERT(mutex_owned(&adapter->core_mtx));
   1303 
   1304 	if (adapter->link_up) {
   1305 		if (adapter->link_active != LINK_STATE_UP) {
   1306 			if (bootverbose) {
   1307 				const char *bpsmsg;
   1308 
   1309 				switch (adapter->link_speed) {
   1310 				case IXGBE_LINK_SPEED_10GB_FULL:
   1311 					bpsmsg = "10 Gbps";
   1312 					break;
   1313 				case IXGBE_LINK_SPEED_5GB_FULL:
   1314 					bpsmsg = "5 Gbps";
   1315 					break;
   1316 				case IXGBE_LINK_SPEED_2_5GB_FULL:
   1317 					bpsmsg = "2.5 Gbps";
   1318 					break;
   1319 				case IXGBE_LINK_SPEED_1GB_FULL:
   1320 					bpsmsg = "1 Gbps";
   1321 					break;
   1322 				case IXGBE_LINK_SPEED_100_FULL:
   1323 					bpsmsg = "100 Mbps";
   1324 					break;
   1325 				case IXGBE_LINK_SPEED_10_FULL:
   1326 					bpsmsg = "10 Mbps";
   1327 					break;
   1328 				default:
   1329 					bpsmsg = "unknown speed";
   1330 					break;
   1331 				}
   1332 				device_printf(dev, "Link is up %s %s \n",
   1333 				    bpsmsg, "Full Duplex");
   1334 			}
   1335 			adapter->link_active = LINK_STATE_UP;
   1336 			if_link_state_change(ifp, LINK_STATE_UP);
   1337 		}
   1338 	} else {
   1339 		/*
   1340 		 * Do it when link active changes to DOWN. i.e.
   1341 		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
   1342 		 * b) LINK_STATE_UP      -> LINK_STATE_DOWN
   1343 		 */
   1344 		if (adapter->link_active != LINK_STATE_DOWN) {
   1345 			if (bootverbose)
   1346 				device_printf(dev, "Link is Down\n");
   1347 			if_link_state_change(ifp, LINK_STATE_DOWN);
   1348 			adapter->link_active = LINK_STATE_DOWN;
   1349 		}
   1350 	}
   1351 } /* ixv_update_link_status */
   1352 
   1353 
   1354 /************************************************************************
   1355  * ixv_stop - Stop the hardware
   1356  *
   1357  *   Disables all traffic on the adapter by issuing a
   1358  *   global reset on the MAC.
   1359  ************************************************************************/
   1360 static void
   1361 ixv_ifstop(struct ifnet *ifp, int disable)
   1362 {
   1363 	struct adapter *adapter = ifp->if_softc;
   1364 
   1365 	IXGBE_CORE_LOCK(adapter);
   1366 	ixv_stop(adapter);
   1367 	IXGBE_CORE_UNLOCK(adapter);
   1368 }
   1369 
   1370 static void
   1371 ixv_stop(void *arg)
   1372 {
   1373 	struct ifnet    *ifp;
   1374 	struct adapter  *adapter = arg;
   1375 	struct ixgbe_hw *hw = &adapter->hw;
   1376 
   1377 	ifp = adapter->ifp;
   1378 
   1379 	KASSERT(mutex_owned(&adapter->core_mtx));
   1380 
   1381 	INIT_DEBUGOUT("ixv_stop: begin\n");
   1382 	ixv_disable_intr(adapter);
   1383 
   1384 	/* Tell the stack that the interface is no longer active */
   1385 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   1386 
   1387 	hw->mac.ops.reset_hw(hw);
   1388 	adapter->hw.adapter_stopped = FALSE;
   1389 	hw->mac.ops.stop_adapter(hw);
   1390 	callout_stop(&adapter->timer);
   1391 
   1392 	/* reprogram the RAR[0] in case user changed it. */
   1393 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
   1394 
   1395 	return;
   1396 } /* ixv_stop */
   1397 
   1398 
   1399 /************************************************************************
   1400  * ixv_allocate_pci_resources
   1401  ************************************************************************/
   1402 static int
   1403 ixv_allocate_pci_resources(struct adapter *adapter,
   1404     const struct pci_attach_args *pa)
   1405 {
   1406 	pcireg_t	memtype, csr;
   1407 	device_t        dev = adapter->dev;
   1408 	bus_addr_t addr;
   1409 	int flags;
   1410 
   1411 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
   1412 	switch (memtype) {
   1413 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1414 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1415 		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
   1416 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
   1417 	              memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
   1418 			goto map_err;
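		/*
		 * BAR0 is the register window, so map it non-prefetchable;
		 * register reads/writes need to keep their side effects and
		 * ordering.
		 */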
   1419 		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
   1420 			aprint_normal_dev(dev, "clearing prefetchable bit\n");
   1421 			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
   1422 		}
   1423 		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
   1424 		     adapter->osdep.mem_size, flags,
   1425 		     &adapter->osdep.mem_bus_space_handle) != 0) {
   1426 map_err:
   1427 			adapter->osdep.mem_size = 0;
   1428 			aprint_error_dev(dev, "unable to map BAR0\n");
   1429 			return ENXIO;
   1430 		}
   1431 		/*
   1432 		 * Enable address decoding for memory range in case it's not
   1433 		 * set.
   1434 		 */
   1435 		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1436 		    PCI_COMMAND_STATUS_REG);
   1437 		csr |= PCI_COMMAND_MEM_ENABLE;
   1438 		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
   1439 		    csr);
   1440 		break;
   1441 	default:
   1442 		aprint_error_dev(dev, "unexpected type on BAR0\n");
   1443 		return ENXIO;
   1444 	}
   1445 
   1446 	/* Pick up the tuneable queues */
   1447 	adapter->num_queues = ixv_num_queues;
   1448 
   1449 	return (0);
   1450 } /* ixv_allocate_pci_resources */
   1451 
   1452 /************************************************************************
   1453  * ixv_free_pci_resources
   1454  ************************************************************************/
   1455 static void
   1456 ixv_free_pci_resources(struct adapter * adapter)
   1457 {
   1458 	struct 		ix_queue *que = adapter->queues;
   1459 	int		rid;
   1460 
   1461 	/*
   1462 	 *  Release all msix queue resources:
   1463 	 */
   1464 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1465 		if (que->res != NULL)
   1466 			pci_intr_disestablish(adapter->osdep.pc,
   1467 			    adapter->osdep.ihs[i]);
   1468 	}
   1469 
   1470 
   1471 	/* Clean the Mailbox interrupt last */
   1472 	rid = adapter->vector;
   1473 
   1474 	if (adapter->osdep.ihs[rid] != NULL) {
   1475 		pci_intr_disestablish(adapter->osdep.pc,
   1476 		    adapter->osdep.ihs[rid]);
   1477 		adapter->osdep.ihs[rid] = NULL;
   1478 	}
   1479 
   1480 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1481 	    adapter->osdep.nintrs);
   1482 
   1483 	if (adapter->osdep.mem_size != 0) {
   1484 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1485 		    adapter->osdep.mem_bus_space_handle,
   1486 		    adapter->osdep.mem_size);
   1487 	}
   1488 
   1489 	return;
   1490 } /* ixv_free_pci_resources */
   1491 
   1492 /************************************************************************
   1493  * ixv_setup_interface
   1494  *
   1495  *   Setup networking device structure and register an interface.
   1496  ************************************************************************/
   1497 static int
   1498 ixv_setup_interface(device_t dev, struct adapter *adapter)
   1499 {
   1500 	struct ethercom *ec = &adapter->osdep.ec;
   1501 	struct ifnet   *ifp;
   1502 	int rv;
   1503 
   1504 	INIT_DEBUGOUT("ixv_setup_interface: begin");
   1505 
   1506 	ifp = adapter->ifp = &ec->ec_if;
   1507 	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
   1508 	ifp->if_baudrate = IF_Gbps(10);
   1509 	ifp->if_init = ixv_init;
   1510 	ifp->if_stop = ixv_ifstop;
   1511 	ifp->if_softc = adapter;
   1512 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   1513 #ifdef IXGBE_MPSAFE
   1514 	ifp->if_extflags = IFEF_MPSAFE;
   1515 #endif
   1516 	ifp->if_ioctl = ixv_ioctl;
   1517 	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
   1518 #if 0
   1519 		ixv_start_locked = ixgbe_legacy_start_locked;
   1520 #endif
   1521 	} else {
   1522 		ifp->if_transmit = ixgbe_mq_start;
   1523 #if 0
   1524 		ixv_start_locked = ixgbe_mq_start_locked;
   1525 #endif
   1526 	}
   1527 	ifp->if_start = ixgbe_legacy_start;
   1528 	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
   1529 	IFQ_SET_READY(&ifp->if_snd);
   1530 
   1531 	rv = if_initialize(ifp);
   1532 	if (rv != 0) {
   1533 		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
   1534 		return rv;
   1535 	}
   1536 	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
   1537 	ether_ifattach(ifp, adapter->hw.mac.addr);
   1538 	/*
    1539 	 * We use per-TX-queue softints, so if_deferred_start_init() isn't
    1540 	 * used.
   1541 	 */
   1542 	ether_set_ifflags_cb(ec, ixv_ifflags_cb);
   1543 
   1544 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
   1545 
   1546 	/*
   1547 	 * Tell the upper layer(s) we support long frames.
   1548 	 */
   1549 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
   1550 
   1551 	/* Set capability flags */
   1552 	ifp->if_capabilities |= IFCAP_HWCSUM
   1553 	                     |  IFCAP_TSOv4
   1554 	                     |  IFCAP_TSOv6;
   1555 	ifp->if_capenable = 0;
   1556 
   1557 	ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
   1558 			    |  ETHERCAP_VLAN_HWCSUM
   1559 			    |  ETHERCAP_JUMBO_MTU
   1560 			    |  ETHERCAP_VLAN_MTU;
   1561 
   1562 	/* Enable the above capabilities by default */
   1563 	ec->ec_capenable = ec->ec_capabilities;
   1564 
   1565 	/* Don't enable LRO by default */
   1566 #if 0
   1567 	/* NetBSD doesn't support LRO yet */
   1568 	ifp->if_capabilities |= IFCAP_LRO;
   1569 #endif
   1570 
   1571 	/*
   1572 	 * Specify the media types supported by this adapter and register
   1573 	 * callbacks to update media and link information
   1574 	 */
   1575 	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
   1576 	    ixv_media_status);
   1577 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
   1578 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
   1579 
   1580 	if_register(ifp);
   1581 
   1582 	return 0;
   1583 } /* ixv_setup_interface */
   1584 
   1585 
   1586 /************************************************************************
   1587  * ixv_initialize_transmit_units - Enable transmit unit.
   1588  ************************************************************************/
   1589 static void
   1590 ixv_initialize_transmit_units(struct adapter *adapter)
   1591 {
   1592 	struct tx_ring	*txr = adapter->tx_rings;
   1593 	struct ixgbe_hw	*hw = &adapter->hw;
   1594 	int i;
   1595 
   1596 	for (i = 0; i < adapter->num_queues; i++, txr++) {
   1597 		u64 tdba = txr->txdma.dma_paddr;
   1598 		u32 txctrl, txdctl;
   1599 		int j = txr->me;
   1600 
   1601 		/* Set WTHRESH to 8, burst writeback */
   1602 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1603 		txdctl |= (8 << 16);
   1604 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1605 
   1606 		/* Set the HW Tx Head and Tail indices */
   1607 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
   1608 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
   1609 
   1610 		/* Set Tx Tail register */
   1611 		txr->tail = IXGBE_VFTDT(j);
   1612 
   1613 		txr->txr_no_space = false;
   1614 
   1615 		/* Set Ring parameters */
   1616 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
   1617 		    (tdba & 0x00000000ffffffffULL));
   1618 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
   1619 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
   1620 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1621 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
   1622 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1623 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
   1624 
   1625 		/* Now enable */
   1626 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1627 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1628 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1629 	}
   1630 
   1631 	return;
   1632 } /* ixv_initialize_transmit_units */
   1633 
   1634 
   1635 /************************************************************************
   1636  * ixv_initialize_rss_mapping
   1637  ************************************************************************/
   1638 static void
   1639 ixv_initialize_rss_mapping(struct adapter *adapter)
   1640 {
   1641 	struct ixgbe_hw *hw = &adapter->hw;
   1642 	u32             reta = 0, mrqc, rss_key[10];
   1643 	int             queue_id;
   1644 	int             i, j;
   1645 	u32             rss_hash_config;
   1646 
    1647 	/* On NetBSD, always fetch the key from the kernel RSS implementation. */
   1648 #ifdef __NetBSD__
   1649 	rss_getkey((uint8_t *) &rss_key);
   1650 #else
   1651 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1652 		/* Fetch the configured RSS key */
   1653 		rss_getkey((uint8_t *)&rss_key);
   1654 	} else {
   1655 		/* set up random bits */
   1656 		cprng_fast(&rss_key, sizeof(rss_key));
   1657 	}
   1658 #endif
   1659 
   1660 	/* Now fill out hash function seeds */
   1661 	for (i = 0; i < 10; i++)
   1662 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
   1663 
   1664 	/* Set up the redirection table */
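         	/* 64 entries; four 8-bit queue indices are packed into each VFRETA register */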
   1665 	for (i = 0, j = 0; i < 64; i++, j++) {
   1666 		if (j == adapter->num_queues)
   1667 			j = 0;
   1668 
   1669 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
   1670 			/*
   1671 			 * Fetch the RSS bucket id for the given indirection
   1672 			 * entry. Cap it at the number of configured buckets
   1673 			 * (which is num_queues.)
   1674 			 */
   1675 			queue_id = rss_get_indirection_to_bucket(i);
   1676 			queue_id = queue_id % adapter->num_queues;
   1677 		} else
   1678 			queue_id = j;
   1679 
   1680 		/*
   1681 		 * The low 8 bits are for hash value (n+0);
   1682 		 * The next 8 bits are for hash value (n+1), etc.
   1683 		 */
   1684 		reta >>= 8;
   1685 		reta |= ((uint32_t)queue_id) << 24;
   1686 		if ((i & 3) == 3) {
   1687 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
   1688 			reta = 0;
   1689 		}
   1690 	}
   1691 
   1692 	/* Perform hash on these packet types */
   1693 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
   1694 		rss_hash_config = rss_gethashconfig();
   1695 	else {
   1696 		/*
   1697 		 * Disable UDP - IP fragments aren't currently being handled
   1698 		 * and so we end up with a mix of 2-tuple and 4-tuple
   1699 		 * traffic.
   1700 		 */
   1701 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
   1702 		                | RSS_HASHTYPE_RSS_TCP_IPV4
   1703 		                | RSS_HASHTYPE_RSS_IPV6
   1704 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
   1705 	}
   1706 
   1707 	mrqc = IXGBE_MRQC_RSSEN;
   1708 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
   1709 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
   1710 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
   1711 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
   1712 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
   1713 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
   1714 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
   1715 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
   1716 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
   1717 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
   1718 		    __func__);
   1719 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
   1720 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
   1721 		    __func__);
   1722 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
   1723 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
   1724 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
   1725 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
   1726 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
   1727 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
   1728 		    __func__);
   1729 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
   1730 } /* ixv_initialize_rss_mapping */
   1731 
   1732 
   1733 /************************************************************************
   1734  * ixv_initialize_receive_units - Setup receive registers and features.
   1735  ************************************************************************/
   1736 static void
   1737 ixv_initialize_receive_units(struct adapter *adapter)
   1738 {
   1739 	struct	rx_ring	*rxr = adapter->rx_rings;
   1740 	struct ixgbe_hw	*hw = &adapter->hw;
   1741 	struct ifnet	*ifp = adapter->ifp;
   1742 	u32		bufsz, rxcsum, psrtype;
   1743 
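         	/* SRRCTL.BSIZEPKT is in 1KB units: 4KB buffers for jumbo MTU, 2KB otherwise */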
   1744 	if (ifp->if_mtu > ETHERMTU)
   1745 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1746 	else
   1747 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
   1748 
   1749 	psrtype = IXGBE_PSRTYPE_TCPHDR
   1750 	        | IXGBE_PSRTYPE_UDPHDR
   1751 	        | IXGBE_PSRTYPE_IPV4HDR
   1752 	        | IXGBE_PSRTYPE_IPV6HDR
   1753 	        | IXGBE_PSRTYPE_L2HDR;
   1754 
   1755 	if (adapter->num_queues > 1)
   1756 		psrtype |= 1 << 29;
   1757 
   1758 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
   1759 
   1760 	/* Tell PF our max_frame size */
   1761 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
   1762 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
   1763 	}
   1764 
   1765 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
   1766 		u64 rdba = rxr->rxdma.dma_paddr;
   1767 		u32 reg, rxdctl;
   1768 		int j = rxr->me;
   1769 
   1770 		/* Disable the queue */
   1771 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
   1772 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
   1773 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
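         		/*
         		 * Poll up to ~10ms for the disable to take effect
         		 * before reprogramming the ring.
         		 */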
   1774 		for (int k = 0; k < 10; k++) {
   1775 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
   1776 			    IXGBE_RXDCTL_ENABLE)
   1777 				msec_delay(1);
   1778 			else
   1779 				break;
   1780 		}
   1781 		wmb();
   1782 		/* Setup the Base and Length of the Rx Descriptor Ring */
   1783 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
   1784 		    (rdba & 0x00000000ffffffffULL));
   1785 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
   1786 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
   1787 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
   1788 
   1789 		/* Reset the ring indices */
   1790 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
   1791 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
   1792 
   1793 		/* Set up the SRRCTL register */
   1794 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
   1795 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
   1796 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
   1797 		reg |= bufsz;
   1798 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
   1799 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
   1800 
   1801 		/* Capture Rx Tail index */
   1802 		rxr->tail = IXGBE_VFRDT(rxr->me);
   1803 
   1804 		/* Do the queue enabling last */
   1805 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
   1806 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
   1807 		for (int k = 0; k < 10; k++) {
   1808 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
   1809 			    IXGBE_RXDCTL_ENABLE)
   1810 				break;
   1811 			msec_delay(1);
   1812 		}
   1813 		wmb();
   1814 
   1815 		/* Set the Tail Pointer */
   1816 #ifdef DEV_NETMAP
   1817 		/*
   1818 		 * In netmap mode, we must preserve the buffers made
   1819 		 * available to userspace before the if_init()
   1820 		 * (this is true by default on the TX side, because
   1821 		 * init makes all buffers available to userspace).
   1822 		 *
   1823 		 * netmap_reset() and the device specific routines
   1824 		 * (e.g. ixgbe_setup_receive_rings()) map these
   1825 		 * buffers at the end of the NIC ring, so here we
   1826 		 * must set the RDT (tail) register to make sure
   1827 		 * they are not overwritten.
   1828 		 *
   1829 		 * In this driver the NIC ring starts at RDH = 0,
   1830 		 * RDT points to the last slot available for reception (?),
   1831 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
   1832 		 */
   1833 		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
   1834 		    (ifp->if_capenable & IFCAP_NETMAP)) {
   1835 			struct netmap_adapter *na = NA(adapter->ifp);
   1836 			struct netmap_kring *kring = &na->rx_rings[i];
   1837 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
   1838 
   1839 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
   1840 		} else
   1841 #endif /* DEV_NETMAP */
   1842 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
   1843 			    adapter->num_rx_desc - 1);
   1844 	}
   1845 
   1846 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
   1847 
   1848 	ixv_initialize_rss_mapping(adapter);
   1849 
   1850 	if (adapter->num_queues > 1) {
   1851 		/* RSS and RX IPP Checksum are mutually exclusive */
   1852 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1853 	}
   1854 
   1855 	if (ifp->if_capenable & IFCAP_RXCSUM)
   1856 		rxcsum |= IXGBE_RXCSUM_PCSD;
   1857 
   1858 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
   1859 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
   1860 
   1861 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
   1862 } /* ixv_initialize_receive_units */
   1863 
   1864 /************************************************************************
   1865  * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
   1866  *
   1867  *   Retrieves the TDH value from the hardware
   1868  ************************************************************************/
   1869 static int
   1870 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
   1871 {
   1872 	struct sysctlnode node = *rnode;
   1873 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1874 	uint32_t val;
   1875 
   1876 	if (!txr)
   1877 		return (0);
   1878 
   1879 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
   1880 	node.sysctl_data = &val;
   1881 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1882 } /* ixv_sysctl_tdh_handler */
   1883 
   1884 /************************************************************************
    1885  * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   1886  *
   1887  *   Retrieves the TDT value from the hardware
   1888  ************************************************************************/
   1889 static int
   1890 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
   1891 {
   1892 	struct sysctlnode node = *rnode;
   1893 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1894 	uint32_t val;
   1895 
   1896 	if (!txr)
   1897 		return (0);
   1898 
   1899 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
   1900 	node.sysctl_data = &val;
   1901 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1902 } /* ixv_sysctl_tdt_handler */
   1903 
   1904 /************************************************************************
   1905  * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
   1906  * handler function
   1907  *
   1908  *   Retrieves the next_to_check value
   1909  ************************************************************************/
   1910 static int
   1911 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   1912 {
   1913 	struct sysctlnode node = *rnode;
   1914 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1915 	uint32_t val;
   1916 
   1917 	if (!rxr)
   1918 		return (0);
   1919 
   1920 	val = rxr->next_to_check;
   1921 	node.sysctl_data = &val;
   1922 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1923 } /* ixv_sysctl_next_to_check_handler */
   1924 
   1925 /************************************************************************
   1926  * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
   1927  *
   1928  *   Retrieves the RDH value from the hardware
   1929  ************************************************************************/
   1930 static int
   1931 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
   1932 {
   1933 	struct sysctlnode node = *rnode;
   1934 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1935 	uint32_t val;
   1936 
   1937 	if (!rxr)
   1938 		return (0);
   1939 
   1940 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
   1941 	node.sysctl_data = &val;
   1942 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1943 } /* ixv_sysctl_rdh_handler */
   1944 
   1945 /************************************************************************
   1946  * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
   1947  *
   1948  *   Retrieves the RDT value from the hardware
   1949  ************************************************************************/
   1950 static int
   1951 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
   1952 {
   1953 	struct sysctlnode node = *rnode;
   1954 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   1955 	uint32_t val;
   1956 
   1957 	if (!rxr)
   1958 		return (0);
   1959 
   1960 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
   1961 	node.sysctl_data = &val;
   1962 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1963 } /* ixv_sysctl_rdt_handler */
   1964 
   1965 /************************************************************************
   1966  * ixv_setup_vlan_support
   1967  ************************************************************************/
   1968 static void
   1969 ixv_setup_vlan_support(struct adapter *adapter)
   1970 {
   1971 	struct ethercom *ec = &adapter->osdep.ec;
   1972 	struct ixgbe_hw *hw = &adapter->hw;
   1973 	struct rx_ring  *rxr;
   1974 	u32		ctrl, vid, vfta, retry;
   1975 	bool		hwtagging;
   1976 
   1977 	/*
    1978 	 * This function is called from both if_init() and ifflags_cb()
    1979 	 * on NetBSD.
   1980 	 */
   1981 
    1982 	/* Enable HW tagging only if at least one VLAN is attached */
   1983 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   1984 	    && VLAN_ATTACHED(ec);
   1985 
   1986 	/* Enable the queues */
   1987 	for (int i = 0; i < adapter->num_queues; i++) {
   1988 		rxr = &adapter->rx_rings[i];
   1989 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   1990 		if (hwtagging)
   1991 			ctrl |= IXGBE_RXDCTL_VME;
   1992 		else
   1993 			ctrl &= ~IXGBE_RXDCTL_VME;
   1994 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   1995 		/*
   1996 		 * Let Rx path know that it needs to store VLAN tag
   1997 		 * as part of extra mbuf info.
   1998 		 */
   1999 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2000 	}
   2001 
   2002 #if 1
   2003 	/* XXX dirty hack. Enable all VIDs */
   2004 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
    2005 		adapter->shadow_vfta[i] = 0xffffffff;
   2006 #endif
   2007 	/*
    2008 	 * A soft reset zeroes out the VFTA, so
   2009 	 * we need to repopulate it now.
   2010 	 */
   2011 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
   2012 		if (adapter->shadow_vfta[i] == 0)
   2013 			continue;
   2014 		vfta = adapter->shadow_vfta[i];
   2015 		/*
    2016 		 * Reconstruct the VLAN IDs based on
    2017 		 * the bits set in each 32-bit word
    2018 		 * of the shadow VFTA.
   2019 		 */
   2020 		for (int j = 0; j < 32; j++) {
   2021 			retry = 0;
   2022 			if ((vfta & (1 << j)) == 0)
   2023 				continue;
   2024 			vid = (i * 32) + j;
   2025 			/* Call the shared code mailbox routine */
   2026 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
   2027 				if (++retry > 5)
   2028 					break;
   2029 			}
   2030 		}
   2031 	}
   2032 } /* ixv_setup_vlan_support */
   2033 
   2034 #if 0	/* XXX Badly need to overhaul vlan(4) on NetBSD. */
   2035 /************************************************************************
   2036  * ixv_register_vlan
   2037  *
    2038  *   Run via a vlan config EVENT; it enables us to use the
    2039  *   HW filter table since we can get the vlan id. This just
    2040  *   creates the entry in the soft version of the VFTA; init
    2041  *   will repopulate the real table.
   2042  ************************************************************************/
   2043 static void
   2044 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2045 {
   2046 	struct adapter	*adapter = ifp->if_softc;
   2047 	u16		index, bit;
   2048 
   2049 	if (ifp->if_softc != arg) /* Not our event */
   2050 		return;
   2051 
   2052 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   2053 		return;
   2054 
   2055 	IXGBE_CORE_LOCK(adapter);
   2056 	index = (vtag >> 5) & 0x7F;
   2057 	bit = vtag & 0x1F;
   2058 	adapter->shadow_vfta[index] |= (1 << bit);
   2059 	/* Re-init to load the changes */
   2060 	ixv_init_locked(adapter);
   2061 	IXGBE_CORE_UNLOCK(adapter);
   2062 } /* ixv_register_vlan */
   2063 
   2064 /************************************************************************
   2065  * ixv_unregister_vlan
   2066  *
    2067  *   Run via a vlan unconfig EVENT; removes our entry
    2068  *   from the soft VFTA.
   2069  ************************************************************************/
   2070 static void
   2071 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
   2072 {
   2073 	struct adapter	*adapter = ifp->if_softc;
   2074 	u16		index, bit;
   2075 
   2076 	if (ifp->if_softc !=  arg)
   2077 		return;
   2078 
   2079 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   2080 		return;
   2081 
   2082 	IXGBE_CORE_LOCK(adapter);
   2083 	index = (vtag >> 5) & 0x7F;
   2084 	bit = vtag & 0x1F;
   2085 	adapter->shadow_vfta[index] &= ~(1 << bit);
   2086 	/* Re-init to load the changes */
   2087 	ixv_init_locked(adapter);
   2088 	IXGBE_CORE_UNLOCK(adapter);
   2089 } /* ixv_unregister_vlan */
   2090 #endif
   2091 
   2092 /************************************************************************
   2093  * ixv_enable_intr
   2094  ************************************************************************/
   2095 static void
   2096 ixv_enable_intr(struct adapter *adapter)
   2097 {
   2098 	struct ixgbe_hw *hw = &adapter->hw;
   2099 	struct ix_queue *que = adapter->queues;
   2100 	u32             mask;
   2101 	int i;
   2102 
   2103 	/* For VTEIAC */
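         	/*
         	 * Enable auto-clear for the mailbox vector and every queue
         	 * vector, so their EICR bits clear when the MSI-X interrupt
         	 * is delivered.
         	 */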
   2104 	mask = (1 << adapter->vector);
   2105 	for (i = 0; i < adapter->num_queues; i++, que++)
   2106 		mask |= (1 << que->msix);
   2107 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2108 
   2109 	/* For VTEIMS */
   2110 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
   2111 	que = adapter->queues;
   2112 	for (i = 0; i < adapter->num_queues; i++, que++)
   2113 		ixv_enable_queue(adapter, que->msix);
   2114 
   2115 	IXGBE_WRITE_FLUSH(hw);
   2116 } /* ixv_enable_intr */
   2117 
   2118 /************************************************************************
   2119  * ixv_disable_intr
   2120  ************************************************************************/
   2121 static void
   2122 ixv_disable_intr(struct adapter *adapter)
   2123 {
   2124 	struct ix_queue	*que = adapter->queues;
   2125 
   2126 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2127 
   2128 	/* disable interrupts other than queues */
   2129 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
   2130 
   2131 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2132 		ixv_disable_queue(adapter, que->msix);
   2133 
   2134 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2135 } /* ixv_disable_intr */
   2136 
   2137 /************************************************************************
   2138  * ixv_set_ivar
   2139  *
   2140  *   Setup the correct IVAR register for a particular MSI-X interrupt
   2141  *    - entry is the register array entry
   2142  *    - vector is the MSI-X vector for this queue
   2143  *    - type is RX/TX/MISC
   2144  ************************************************************************/
   2145 static void
   2146 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
   2147 {
   2148 	struct ixgbe_hw *hw = &adapter->hw;
   2149 	u32             ivar, index;
   2150 
   2151 	vector |= IXGBE_IVAR_ALLOC_VAL;
   2152 
   2153 	if (type == -1) { /* MISC IVAR */
   2154 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
   2155 		ivar &= ~0xFF;
   2156 		ivar |= vector;
   2157 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
   2158 	} else {          /* RX/TX IVARS */
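         		/*
         		 * Each VTIVAR register holds four 8-bit entries: RX and
         		 * TX for an even/odd queue pair. 'type' (0 = RX, 1 = TX)
         		 * and the low bit of 'entry' select the byte.
         		 */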
   2159 		index = (16 * (entry & 1)) + (8 * type);
   2160 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
   2161 		ivar &= ~(0xFF << index);
   2162 		ivar |= (vector << index);
   2163 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
   2164 	}
   2165 } /* ixv_set_ivar */
   2166 
   2167 /************************************************************************
   2168  * ixv_configure_ivars
   2169  ************************************************************************/
   2170 static void
   2171 ixv_configure_ivars(struct adapter *adapter)
   2172 {
   2173 	struct ix_queue *que = adapter->queues;
   2174 
   2175 	/* XXX We should sync EITR value calculation with ixgbe.c? */
   2176 
   2177 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2178 		/* First the RX queue entry */
   2179 		ixv_set_ivar(adapter, i, que->msix, 0);
   2180 		/* ... and the TX */
   2181 		ixv_set_ivar(adapter, i, que->msix, 1);
   2182 		/* Set an initial value in EITR */
   2183 		ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
   2184 	}
   2185 
   2186 	/* For the mailbox interrupt */
   2187 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2188 } /* ixv_configure_ivars */
   2189 
   2190 
   2191 /************************************************************************
   2192  * ixv_save_stats
   2193  *
   2194  *   The VF stats registers never have a truly virgin
   2195  *   starting point, so this routine tries to make an
   2196  *   artificial one, marking ground zero on attach as
   2197  *   it were.
   2198  ************************************************************************/
   2199 static void
   2200 ixv_save_stats(struct adapter *adapter)
   2201 {
   2202 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2203 
   2204 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2205 		stats->saved_reset_vfgprc +=
   2206 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2207 		stats->saved_reset_vfgptc +=
   2208 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2209 		stats->saved_reset_vfgorc +=
   2210 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2211 		stats->saved_reset_vfgotc +=
   2212 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2213 		stats->saved_reset_vfmprc +=
   2214 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2215 	}
   2216 } /* ixv_save_stats */
   2217 
   2218 /************************************************************************
   2219  * ixv_init_stats
   2220  ************************************************************************/
   2221 static void
   2222 ixv_init_stats(struct adapter *adapter)
   2223 {
   2224 	struct ixgbe_hw *hw = &adapter->hw;
   2225 
   2226 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2227 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2228 	adapter->stats.vf.last_vfgorc |=
   2229 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2230 
   2231 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2232 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2233 	adapter->stats.vf.last_vfgotc |=
   2234 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2235 
   2236 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2237 
   2238 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2239 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2240 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2241 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2242 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2243 } /* ixv_init_stats */
   2244 
   2245 #define UPDATE_STAT_32(reg, last, count)		\
   2246 {                                                       \
   2247 	u32 current = IXGBE_READ_REG(hw, (reg));	\
   2248 	if (current < (last))				\
   2249 		count.ev_count += 0x100000000LL;	\
   2250 	(last) = current;				\
   2251 	count.ev_count &= 0xFFFFFFFF00000000LL;		\
   2252 	count.ev_count |= current;			\
   2253 }
   2254 
   2255 #define UPDATE_STAT_36(lsb, msb, last, count)           \
   2256 {                                                       \
   2257 	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
   2258 	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
   2259 	u64 current = ((cur_msb << 32) | cur_lsb);      \
   2260 	if (current < (last))				\
   2261 		count.ev_count += 0x1000000000LL;	\
   2262 	(last) = current;				\
   2263 	count.ev_count &= 0xFFFFFFF000000000LL;		\
   2264 	count.ev_count |= current;			\
   2265 }
   2266 
   2267 /************************************************************************
   2268  * ixv_update_stats - Update the board statistics counters.
   2269  ************************************************************************/
   2270 void
   2271 ixv_update_stats(struct adapter *adapter)
   2272 {
   2273 	struct ixgbe_hw *hw = &adapter->hw;
   2274 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2275 
   2276 	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
   2277 	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
   2278 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
   2279 	    stats->vfgorc);
   2280 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
   2281 	    stats->vfgotc);
   2282 	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
   2283 
   2284 	/* Fill out the OS statistics structure */
   2285 	/*
    2286 	 * NetBSD: don't override if_{i|o}{packets|bytes|mcasts} with the
    2287 	 * adapter->stats counters; doing so would break ifconfig -z
    2288 	 * (SIOCZIFDATA).
   2289 	 */
   2290 } /* ixv_update_stats */
   2291 
   2292 /************************************************************************
   2293  * ixv_sysctl_interrupt_rate_handler
   2294  ************************************************************************/
   2295 static int
   2296 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2297 {
   2298 	struct sysctlnode node = *rnode;
   2299 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2300 	struct adapter  *adapter = que->adapter;
   2301 	uint32_t reg, usec, rate;
   2302 	int error;
   2303 
   2304 	if (que == NULL)
   2305 		return 0;
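         	/*
         	 * The VTEITR interval field (bits 11:3) counts in 2us units,
         	 * so an interval of N gives roughly 500000/N interrupts/sec.
         	 */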
   2306 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
   2307 	usec = ((reg & 0x0FF8) >> 3);
   2308 	if (usec > 0)
   2309 		rate = 500000 / usec;
   2310 	else
   2311 		rate = 0;
   2312 	node.sysctl_data = &rate;
   2313 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2314 	if (error || newp == NULL)
   2315 		return error;
   2316 	reg &= ~0xfff; /* default, no limitation */
   2317 	if (rate > 0 && rate < 500000) {
   2318 		if (rate < 1000)
   2319 			rate = 1000;
   2320 		reg |= ((4000000/rate) & 0xff8);
   2321 		/*
    2322 		 * When RSC is used, the ITR interval must be larger than
    2323 		 * RSC_DELAY (currently 2us). The minimum interval already
    2324 		 * exceeds 2us at 100M (and presumably 10M, though that is
    2325 		 * undocumented), but not at 1G and higher, hence the check.
   2326 		 */
   2327 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2328 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2329 			if ((adapter->num_queues > 1)
   2330 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   2331 				return EINVAL;
   2332 		}
   2333 		ixv_max_interrupt_rate = rate;
   2334 	} else
   2335 		ixv_max_interrupt_rate = 0;
   2336 	ixv_eitr_write(adapter, que->msix, reg);
   2337 
   2338 	return (0);
   2339 } /* ixv_sysctl_interrupt_rate_handler */
   2340 
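         /************************************************************************
          * ixv_sysctl_instance - Provide the device's sysctl root node.
          ************************************************************************/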
   2341 const struct sysctlnode *
   2342 ixv_sysctl_instance(struct adapter *adapter)
   2343 {
   2344 	const char *dvname;
   2345 	struct sysctllog **log;
   2346 	int rc;
   2347 	const struct sysctlnode *rnode;
   2348 
   2349 	log = &adapter->sysctllog;
   2350 	dvname = device_xname(adapter->dev);
   2351 
   2352 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2353 	    0, CTLTYPE_NODE, dvname,
   2354 	    SYSCTL_DESCR("ixv information and settings"),
   2355 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2356 		goto err;
   2357 
   2358 	return rnode;
   2359 err:
   2360 	printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2361 	return NULL;
   2362 }
   2363 
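         /************************************************************************
          * ixv_add_device_sysctls - Create the per-device sysctl knobs
          *   (debug, enable_aim, txrx_workqueue).
          ************************************************************************/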
   2364 static void
   2365 ixv_add_device_sysctls(struct adapter *adapter)
   2366 {
   2367 	struct sysctllog **log;
   2368 	const struct sysctlnode *rnode, *cnode;
   2369 	device_t dev;
   2370 
   2371 	dev = adapter->dev;
   2372 	log = &adapter->sysctllog;
   2373 
   2374 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2375 		aprint_error_dev(dev, "could not create sysctl root\n");
   2376 		return;
   2377 	}
   2378 
   2379 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2380 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2381 	    "debug", SYSCTL_DESCR("Debug Info"),
   2382 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2383 		aprint_error_dev(dev, "could not create sysctl\n");
   2384 
   2385 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2386 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2387 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2388 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2389 		aprint_error_dev(dev, "could not create sysctl\n");
   2390 
   2391 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2392 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2393 	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   2394 		NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   2395 		aprint_error_dev(dev, "could not create sysctl\n");
   2396 }
   2397 
   2398 /************************************************************************
   2399  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2400  ************************************************************************/
   2401 static void
   2402 ixv_add_stats_sysctls(struct adapter *adapter)
   2403 {
   2404 	device_t                dev = adapter->dev;
   2405 	struct tx_ring          *txr = adapter->tx_rings;
   2406 	struct rx_ring          *rxr = adapter->rx_rings;
   2407 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2408 	struct ixgbe_hw *hw = &adapter->hw;
   2409 	const struct sysctlnode *rnode, *cnode;
   2410 	struct sysctllog **log = &adapter->sysctllog;
   2411 	const char *xname = device_xname(dev);
   2412 
   2413 	/* Driver Statistics */
   2414 	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
   2415 	    NULL, xname, "Driver tx dma soft fail EFBIG");
   2416 	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
   2417 	    NULL, xname, "m_defrag() failed");
   2418 	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
   2419 	    NULL, xname, "Driver tx dma hard fail EFBIG");
   2420 	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
   2421 	    NULL, xname, "Driver tx dma hard fail EINVAL");
   2422 	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
   2423 	    NULL, xname, "Driver tx dma hard fail other");
   2424 	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
   2425 	    NULL, xname, "Driver tx dma soft fail EAGAIN");
   2426 	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
   2427 	    NULL, xname, "Driver tx dma soft fail ENOMEM");
   2428 	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
   2429 	    NULL, xname, "Watchdog timeouts");
   2430 	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
   2431 	    NULL, xname, "TSO errors");
   2432 	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
   2433 	    NULL, xname, "Link MSI-X IRQ Handled");
   2434 
   2435 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2436 		snprintf(adapter->queues[i].evnamebuf,
   2437 		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
   2438 		    xname, i);
   2439 		snprintf(adapter->queues[i].namebuf,
   2440 		    sizeof(adapter->queues[i].namebuf), "q%d", i);
   2441 
   2442 		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2443 			aprint_error_dev(dev, "could not create sysctl root\n");
   2444 			break;
   2445 		}
   2446 
   2447 		if (sysctl_createv(log, 0, &rnode, &rnode,
   2448 		    0, CTLTYPE_NODE,
   2449 		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
   2450 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
   2451 			break;
   2452 
   2453 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2454 		    CTLFLAG_READWRITE, CTLTYPE_INT,
   2455 		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
   2456 		    ixv_sysctl_interrupt_rate_handler, 0,
   2457 		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
   2458 			break;
   2459 
   2460 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2461 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2462 		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
   2463 		    ixv_sysctl_tdh_handler, 0, (void *)txr,
   2464 		    0, CTL_CREATE, CTL_EOL) != 0)
   2465 			break;
   2466 
   2467 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2468 		    CTLFLAG_READONLY, CTLTYPE_INT,
   2469 		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
   2470 		    ixv_sysctl_tdt_handler, 0, (void *)txr,
   2471 		    0, CTL_CREATE, CTL_EOL) != 0)
   2472 			break;
   2473 
   2474 		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
   2475 		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
   2476 		evcnt_attach_dynamic(&adapter->queues[i].handleq,
   2477 		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
   2478 		    "Handled queue in softint");
   2479 		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
   2480 		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
   2481 		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
   2482 		    NULL, adapter->queues[i].evnamebuf, "TSO");
   2483 		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
   2484 		    NULL, adapter->queues[i].evnamebuf,
   2485 		    "Queue No Descriptor Available");
   2486 		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
   2487 		    NULL, adapter->queues[i].evnamebuf,
   2488 		    "Queue Packets Transmitted");
   2489 #ifndef IXGBE_LEGACY_TX
   2490 		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
   2491 		    NULL, adapter->queues[i].evnamebuf,
   2492 		    "Packets dropped in pcq");
   2493 #endif
   2494 
   2495 #ifdef LRO
   2496 		struct lro_ctrl *lro = &rxr->lro;
   2497 #endif /* LRO */
   2498 
   2499 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2500 		    CTLFLAG_READONLY,
   2501 		    CTLTYPE_INT,
   2502 		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
   2503 			ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
   2504 		    CTL_CREATE, CTL_EOL) != 0)
   2505 			break;
   2506 
   2507 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2508 		    CTLFLAG_READONLY,
   2509 		    CTLTYPE_INT,
   2510 		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
   2511 		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
   2512 		    CTL_CREATE, CTL_EOL) != 0)
   2513 			break;
   2514 
   2515 		if (sysctl_createv(log, 0, &rnode, &cnode,
   2516 		    CTLFLAG_READONLY,
   2517 		    CTLTYPE_INT,
   2518 		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
   2519 		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
   2520 		    CTL_CREATE, CTL_EOL) != 0)
   2521 			break;
   2522 
   2523 		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
   2524 		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
   2525 		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
   2526 		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
   2527 		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
   2528 		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
   2529 		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
   2530 		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
   2531 		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
   2532 		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
   2533 #ifdef LRO
   2534 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
   2535 				CTLFLAG_RD, &lro->lro_queued, 0,
   2536 				"LRO Queued");
   2537 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
   2538 				CTLFLAG_RD, &lro->lro_flushed, 0,
   2539 				"LRO Flushed");
   2540 #endif /* LRO */
   2541 	}
   2542 
   2543 	/* MAC stats get their own sub node */
   2544 
   2545 	snprintf(stats->namebuf,
   2546 	    sizeof(stats->namebuf), "%s MAC Statistics", xname);
   2547 
   2548 	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
   2549 	    stats->namebuf, "rx csum offload - IP");
   2550 	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
   2551 	    stats->namebuf, "rx csum offload - L4");
   2552 	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
   2553 	    stats->namebuf, "rx csum offload - IP bad");
   2554 	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
   2555 	    stats->namebuf, "rx csum offload - L4 bad");
   2556 
   2557 	/* Packet Reception Stats */
   2558 	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
   2559 	    xname, "Good Packets Received");
   2560 	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
   2561 	    xname, "Good Octets Received");
   2562 	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
   2563 	    xname, "Multicast Packets Received");
   2564 	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
   2565 	    xname, "Good Packets Transmitted");
   2566 	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
   2567 	    xname, "Good Octets Transmitted");
   2568 
   2569 	/* Mailbox Stats */
   2570 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
   2571 	    xname, "message TXs");
   2572 	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
   2573 	    xname, "message RXs");
   2574 	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
   2575 	    xname, "ACKs");
   2576 	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
   2577 	    xname, "REQs");
   2578 	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
   2579 	    xname, "RSTs");
   2580 
   2581 } /* ixv_add_stats_sysctls */
   2582 
   2583 /************************************************************************
   2584  * ixv_set_sysctl_value
   2585  ************************************************************************/
   2586 static void
   2587 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2588 	const char *description, int *limit, int value)
   2589 {
   2590 	device_t dev =  adapter->dev;
   2591 	struct sysctllog **log;
   2592 	const struct sysctlnode *rnode, *cnode;
   2593 
   2594 	log = &adapter->sysctllog;
   2595 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2596 		aprint_error_dev(dev, "could not create sysctl root\n");
   2597 		return;
   2598 	}
   2599 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2600 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2601 	    name, SYSCTL_DESCR(description),
   2602 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2603 		aprint_error_dev(dev, "could not create sysctl\n");
   2604 	*limit = value;
   2605 } /* ixv_set_sysctl_value */
   2606 
   2607 /************************************************************************
   2608  * ixv_print_debug_info
   2609  *
    2610  *   Called from the 'debug' sysctl handler (ixv_sysctl_debug).
   2611  *   Provides a way to take a look at important statistics
   2612  *   maintained by the driver and hardware.
   2613  ************************************************************************/
   2614 static void
   2615 ixv_print_debug_info(struct adapter *adapter)
   2616 {
    2617 	device_t	dev = adapter->dev;
    2618 	struct ixgbe_hw	*hw = &adapter->hw;
    2619 	struct ix_queue	*que = adapter->queues;
    2620 	struct rx_ring	*rxr;
    2621 	struct tx_ring	*txr;
    2622 #ifdef LRO
    2623 	struct lro_ctrl	*lro;
    2624 #endif /* LRO */
   2625 
   2626 	device_printf(dev, "Error Byte Count = %u \n",
   2627 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
   2628 
   2629 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2630 		txr = que->txr;
   2631 		rxr = que->rxr;
   2632 #ifdef LRO
   2633 		lro = &rxr->lro;
   2634 #endif /* LRO */
   2635 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2636 		    que->msix, (long)que->irqs.ev_count);
   2637 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2638 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2639 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2640 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2641 #ifdef LRO
   2642 		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
   2643 		    rxr->me, (long long)lro->lro_queued);
   2644 		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
   2645 		    rxr->me, (long long)lro->lro_flushed);
   2646 #endif /* LRO */
   2647 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2648 		    txr->me, (long)txr->total_packets.ev_count);
   2649 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2650 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2651 	}
   2652 
   2653 	device_printf(dev, "MBX IRQ Handled: %lu\n",
   2654 	    (long)adapter->link_irq.ev_count);
   2655 } /* ixv_print_debug_info */
   2656 
   2657 /************************************************************************
   2658  * ixv_sysctl_debug
   2659  ************************************************************************/
   2660 static int
   2661 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2662 {
   2663 	struct sysctlnode node = *rnode;
   2664 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   2665 	int            error, result;
   2666 
   2667 	node.sysctl_data = &result;
   2668 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2669 
   2670 	if (error || newp == NULL)
   2671 		return error;
   2672 
   2673 	if (result == 1)
   2674 		ixv_print_debug_info(adapter);
   2675 
   2676 	return 0;
   2677 } /* ixv_sysctl_debug */
   2678 
   2679 /************************************************************************
   2680  * ixv_init_device_features
   2681  ************************************************************************/
   2682 static void
   2683 ixv_init_device_features(struct adapter *adapter)
   2684 {
   2685 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2686 	                  | IXGBE_FEATURE_VF
   2687 	                  | IXGBE_FEATURE_RSS
   2688 	                  | IXGBE_FEATURE_LEGACY_TX;
   2689 
   2690 	/* A tad short on feature flags for VFs, atm. */
   2691 	switch (adapter->hw.mac.type) {
   2692 	case ixgbe_mac_82599_vf:
   2693 		break;
   2694 	case ixgbe_mac_X540_vf:
   2695 		break;
   2696 	case ixgbe_mac_X550_vf:
   2697 	case ixgbe_mac_X550EM_x_vf:
   2698 	case ixgbe_mac_X550EM_a_vf:
   2699 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2700 		break;
   2701 	default:
   2702 		break;
   2703 	}
   2704 
   2705 	/* Enabled by default... */
   2706 	/* Is a virtual function (VF) */
   2707 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2708 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2709 	/* Netmap */
   2710 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2711 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2712 	/* Receive-Side Scaling (RSS) */
   2713 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2714 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2715 	/* Needs advanced context descriptor regardless of offloads req'd */
   2716 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2717 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2718 
   2719 	/* Enabled via sysctl... */
   2720 	/* Legacy (single queue) transmit */
   2721 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2722 	    ixv_enable_legacy_tx)
   2723 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2724 } /* ixv_init_device_features */
   2725 
   2726 /************************************************************************
   2727  * ixv_shutdown - Shutdown entry point
   2728  ************************************************************************/
   2729 #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
   2730 static int
   2731 ixv_shutdown(device_t dev)
   2732 {
   2733 	struct adapter *adapter = device_private(dev);
   2734 	IXGBE_CORE_LOCK(adapter);
   2735 	ixv_stop(adapter);
   2736 	IXGBE_CORE_UNLOCK(adapter);
   2737 
   2738 	return (0);
   2739 } /* ixv_shutdown */
   2740 #endif
   2741 
   2742 static int
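         /************************************************************************
          * ixv_ifflags_cb - Handle interface flag changes.
          *
          *   Registered via ether_set_ifflags_cb(); also refreshes the VLAN
          *   hardware tagging state.
          ************************************************************************/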
   2743 ixv_ifflags_cb(struct ethercom *ec)
   2744 {
   2745 	struct ifnet *ifp = &ec->ec_if;
   2746 	struct adapter *adapter = ifp->if_softc;
   2747 	int change, rc = 0;
   2748 
   2749 	IXGBE_CORE_LOCK(adapter);
   2750 
   2751 	change = ifp->if_flags ^ adapter->if_flags;
   2752 	if (change != 0)
   2753 		adapter->if_flags = ifp->if_flags;
   2754 
   2755 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
   2756 		rc = ENETRESET;
   2757 
   2758 	/* Set up VLAN support and filter */
   2759 	ixv_setup_vlan_support(adapter);
   2760 
   2761 	IXGBE_CORE_UNLOCK(adapter);
   2762 
   2763 	return rc;
   2764 }
   2765 
   2766 
   2767 /************************************************************************
   2768  * ixv_ioctl - Ioctl entry point
   2769  *
   2770  *   Called when the user wants to configure the interface.
   2771  *
   2772  *   return 0 on success, positive on failure
   2773  ************************************************************************/
   2774 static int
   2775 ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
   2776 {
   2777 	struct adapter	*adapter = ifp->if_softc;
   2778 	struct ifcapreq *ifcr = data;
   2779 	struct ifreq	*ifr = data;
   2780 	int             error = 0;
   2781 	int l4csum_en;
   2782 	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
   2783 	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2784 
   2785 	switch (command) {
   2786 	case SIOCSIFFLAGS:
   2787 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
   2788 		break;
   2789 	case SIOCADDMULTI:
   2790 	case SIOCDELMULTI:
   2791 		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
   2792 		break;
   2793 	case SIOCSIFMEDIA:
   2794 	case SIOCGIFMEDIA:
   2795 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
   2796 		break;
   2797 	case SIOCSIFCAP:
   2798 		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
   2799 		break;
   2800 	case SIOCSIFMTU:
   2801 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
   2802 		break;
   2803 	default:
   2804 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
   2805 		break;
   2806 	}
   2807 
   2808 	switch (command) {
   2809 	case SIOCSIFMEDIA:
   2810 	case SIOCGIFMEDIA:
   2811 		return ifmedia_ioctl(ifp, ifr, &adapter->media, command);
   2812 	case SIOCSIFCAP:
   2813 		/* Layer-4 Rx checksum offload has to be turned on and
   2814 		 * off as a unit.
   2815 		 */
   2816 		l4csum_en = ifcr->ifcr_capenable & l4csum;
   2817 		if (l4csum_en != l4csum && l4csum_en != 0)
   2818 			return EINVAL;
   2819 		/*FALLTHROUGH*/
   2820 	case SIOCADDMULTI:
   2821 	case SIOCDELMULTI:
   2822 	case SIOCSIFFLAGS:
   2823 	case SIOCSIFMTU:
   2824 	default:
   2825 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
   2826 			return error;
   2827 		if ((ifp->if_flags & IFF_RUNNING) == 0)
   2828 			;
   2829 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
   2830 			IXGBE_CORE_LOCK(adapter);
   2831 			ixv_init_locked(adapter);
   2832 			IXGBE_CORE_UNLOCK(adapter);
   2833 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
   2834 			/*
   2835 			 * Multicast list has changed; set the hardware filter
   2836 			 * accordingly.
   2837 			 */
   2838 			IXGBE_CORE_LOCK(adapter);
   2839 			ixv_disable_intr(adapter);
   2840 			ixv_set_multi(adapter);
   2841 			ixv_enable_intr(adapter);
   2842 			IXGBE_CORE_UNLOCK(adapter);
   2843 		}
   2844 		return 0;
   2845 	}
   2846 } /* ixv_ioctl */
   2847 
   2848 /************************************************************************
   2849  * ixv_init
   2850  ************************************************************************/
   2851 static int
   2852 ixv_init(struct ifnet *ifp)
   2853 {
   2854 	struct adapter *adapter = ifp->if_softc;
   2855 
   2856 	IXGBE_CORE_LOCK(adapter);
   2857 	ixv_init_locked(adapter);
   2858 	IXGBE_CORE_UNLOCK(adapter);
   2859 
   2860 	return 0;
   2861 } /* ixv_init */
   2862 
   2863 /************************************************************************
   2864  * ixv_handle_que
   2865  ************************************************************************/
   2866 static void
   2867 ixv_handle_que(void *context)
   2868 {
   2869 	struct ix_queue *que = context;
   2870 	struct adapter  *adapter = que->adapter;
   2871 	struct tx_ring	*txr = que->txr;
   2872 	struct ifnet    *ifp = adapter->ifp;
   2873 	bool		more;
   2874 
   2875 	que->handleq.ev_count++;
   2876 
   2877 	if (ifp->if_flags & IFF_RUNNING) {
   2878 		more = ixgbe_rxeof(que);
   2879 		IXGBE_TX_LOCK(txr);
   2880 		more |= ixgbe_txeof(txr);
   2881 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   2882 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   2883 				ixgbe_mq_start_locked(ifp, txr);
   2884 		/* Only for queue 0 */
   2885 		/* NetBSD still needs this for CBQ */
   2886 		if ((&adapter->queues[0] == que)
   2887 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   2888 			ixgbe_legacy_start_locked(ifp, txr);
   2889 		IXGBE_TX_UNLOCK(txr);
   2890 		if (more) {
   2891 			que->req.ev_count++;
   2892 			if (adapter->txrx_use_workqueue) {
   2893 				/*
   2894 				 * "enqueued flag" is not required here
   2895 				 * the same as ixg(4). See ixgbe_msix_que().
   2896 				 */
   2897 				workqueue_enqueue(adapter->que_wq,
   2898 				    &que->wq_cookie, curcpu());
   2899 			} else
    2900 				softint_schedule(que->que_si);
   2901 			return;
   2902 		}
   2903 	}
   2904 
   2905 	/* Re-enable this interrupt */
   2906 	ixv_enable_queue(adapter, que->msix);
   2907 
   2908 	return;
   2909 } /* ixv_handle_que */
   2910 
   2911 /************************************************************************
   2912  * ixv_handle_que_work
   2913  ************************************************************************/
   2914 static void
   2915 ixv_handle_que_work(struct work *wk, void *context)
   2916 {
   2917 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   2918 
   2919 	/*
   2920 	 * "enqueued flag" is not required here the same as ixg(4).
   2921 	 * See ixgbe_msix_que().
   2922 	 */
   2923 	ixv_handle_que(que);
   2924 }
   2925 
   2926 /************************************************************************
   2927  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   2928  ************************************************************************/
   2929 static int
   2930 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   2931 {
   2932 	device_t	dev = adapter->dev;
   2933 	struct ix_queue *que = adapter->queues;
   2934 	struct		tx_ring *txr = adapter->tx_rings;
   2935 	int 		error, msix_ctrl, rid, vector = 0;
   2936 	pci_chipset_tag_t pc;
   2937 	pcitag_t	tag;
   2938 	char		intrbuf[PCI_INTRSTR_LEN];
   2939 	char		wqname[MAXCOMLEN];
   2940 	char		intr_xname[32];
   2941 	const char	*intrstr = NULL;
   2942 	kcpuset_t	*affinity;
   2943 	int		cpu_id = 0;
   2944 
   2945 	pc = adapter->osdep.pc;
   2946 	tag = adapter->osdep.tag;
   2947 
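         	/* One MSI-X vector per queue plus one for the mailbox/link interrupt */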
   2948 	adapter->osdep.nintrs = adapter->num_queues + 1;
   2949 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   2950 	    adapter->osdep.nintrs) != 0) {
   2951 		aprint_error_dev(dev,
   2952 		    "failed to allocate MSI-X interrupt\n");
   2953 		return (ENXIO);
   2954 	}
   2955 
   2956 	kcpuset_create(&affinity, false);
   2957 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   2958 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   2959 		    device_xname(dev), i);
   2960 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   2961 		    sizeof(intrbuf));
   2962 #ifdef IXGBE_MPSAFE
   2963 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   2964 		    true);
   2965 #endif
   2966 		/* Set the handler function */
   2967 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   2968 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   2969 		    intr_xname);
   2970 		if (que->res == NULL) {
   2971 			pci_intr_release(pc, adapter->osdep.intrs,
   2972 			    adapter->osdep.nintrs);
   2973 			aprint_error_dev(dev,
   2974 			    "Failed to register QUE handler\n");
   2975 			kcpuset_destroy(affinity);
   2976 			return (ENXIO);
   2977 		}
   2978 		que->msix = vector;
    2979 		adapter->active_queues |= (u64)1 << que->msix;
   2980 
   2981 		cpu_id = i;
   2982 		/* Round-robin affinity */
   2983 		kcpuset_zero(affinity);
   2984 		kcpuset_set(affinity, cpu_id % ncpu);
   2985 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   2986 		    NULL);
   2987 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   2988 		    intrstr);
   2989 		if (error == 0)
   2990 			aprint_normal(", bound queue %d to cpu %d\n",
   2991 			    i, cpu_id % ncpu);
   2992 		else
   2993 			aprint_normal("\n");
   2994 
   2995 #ifndef IXGBE_LEGACY_TX
   2996 		txr->txr_si
   2997 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   2998 			ixgbe_deferred_mq_start, txr);
   2999 #endif
   3000 		que->que_si
   3001 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   3002 			ixv_handle_que, que);
   3003 		if (que->que_si == NULL) {
   3004 			aprint_error_dev(dev,
   3005 			    "could not establish software interrupt\n");
   3006 		}
   3007 	}
   3008 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   3009 	error = workqueue_create(&adapter->txr_wq, wqname,
   3010 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3011 	    IXGBE_WORKQUEUE_FLAGS);
   3012 	if (error) {
   3013 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   3014 	}
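         	/* Per-CPU "enqueued" state for the deferred Tx workqueue */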
   3015 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   3016 
   3017 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   3018 	error = workqueue_create(&adapter->que_wq, wqname,
   3019 	    ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3020 	    IXGBE_WORKQUEUE_FLAGS);
   3021 	if (error) {
   3022 		aprint_error_dev(dev,
   3023 		    "couldn't create workqueue\n");
   3024 	}
   3025 
   3026 	/* and Mailbox */
   3027 	cpu_id++;
   3028 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   3029 	adapter->vector = vector;
   3030 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   3031 	    sizeof(intrbuf));
   3032 #ifdef IXGBE_MPSAFE
   3033 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   3034 	    true);
   3035 #endif
   3036 	/* Set the mbx handler function */
   3037 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   3038 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   3039 	    intr_xname);
   3040 	if (adapter->osdep.ihs[vector] == NULL) {
   3041 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   3042 		kcpuset_destroy(affinity);
   3043 		return (ENXIO);
   3044 	}
   3045 	/* Round-robin affinity */
   3046 	kcpuset_zero(affinity);
   3047 	kcpuset_set(affinity, cpu_id % ncpu);
   3048 	error = interrupt_distribute(adapter->osdep.ihs[vector],
   3049 	    affinity, NULL);
   3050 
   3051 	aprint_normal_dev(dev,
   3052 	    "for link, interrupting at %s", intrstr);
   3053 	if (error == 0)
   3054 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   3055 	else
   3056 		aprint_normal("\n");
   3057 
   3058 	/* Tasklets for Mailbox */
    3059 	adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
   3060 	    ixv_handle_link, adapter);
   3061 	/*
    3062 	 * Due to a broken design, QEMU will fail to properly
    3063 	 * enable the guest for MSI-X unless the vectors in
    3064 	 * the table are all set up, so we must rewrite the
    3065 	 * ENABLE bit in the MSI-X control register again at
    3066 	 * this point to cause it to successfully initialize us.
   3067 	 */
   3068 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   3069 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   3070 		rid += PCI_MSIX_CTL;
   3071 		msix_ctrl = pci_conf_read(pc, tag, rid);
   3072 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   3073 		pci_conf_write(pc, tag, rid, msix_ctrl);
   3074 	}
   3075 
   3076 	kcpuset_destroy(affinity);
   3077 	return (0);
   3078 } /* ixv_allocate_msix */
   3079 
   3080 /************************************************************************
   3081  * ixv_configure_interrupts - Setup MSI-X resources
   3082  *
   3083  *   Note: The VF device MUST use MSI-X, there is no fallback.
   3084  ************************************************************************/
   3085 static int
   3086 ixv_configure_interrupts(struct adapter *adapter)
   3087 {
   3088 	device_t dev = adapter->dev;
   3089 	int want, queues, msgs;
   3090 
   3091 	/* Must have at least 2 MSI-X vectors */
   3092 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   3093 	if (msgs < 2) {
    3094 		aprint_error_dev(dev, "MSI-X config error\n");
   3095 		return (ENXIO);
   3096 	}
   3097 	msgs = MIN(msgs, IXG_MAX_NINTR);
   3098 
   3099 	/* Figure out a reasonable auto config value */
   3100 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   3101 
   3102 	if (ixv_num_queues != 0)
   3103 		queues = ixv_num_queues;
    3104 	else if (queues > IXGBE_VF_MAX_TX_QUEUES)
   3105 		queues = IXGBE_VF_MAX_TX_QUEUES;
   3106 
   3107 	/*
   3108 	 * Want vectors for the queues,
   3109 	 * plus an additional for mailbox.
   3110 	 */
   3111 	want = queues + 1;
   3112 	if (msgs >= want)
   3113 		msgs = want;
   3114 	else {
    3115 		aprint_error_dev(dev,
   3116 		    "MSI-X Configuration Problem, "
    3117 		    "%d vectors available but %d wanted!\n",
   3118 		    msgs, want);
   3119 		return -1;
   3120 	}
   3121 
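         	/* Mark MSI-X as in use with a non-NULL placeholder value */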
   3122 	adapter->msix_mem = (void *)1; /* XXX */
   3123 	aprint_normal_dev(dev,
   3124 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   3125 	adapter->num_queues = queues;
   3126 
   3127 	return (0);
   3128 } /* ixv_configure_interrupts */
   3129 
   3130 
   3131 /************************************************************************
   3132  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
   3133  *
   3134  *   Done outside of interrupt context since the driver might sleep
   3135  ************************************************************************/
   3136 static void
   3137 ixv_handle_link(void *context)
   3138 {
   3139 	struct adapter *adapter = context;
   3140 
   3141 	IXGBE_CORE_LOCK(adapter);
   3142 
   3143 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   3144 	    &adapter->link_up, FALSE);
   3145 	ixv_update_link_status(adapter);
   3146 
   3147 	IXGBE_CORE_UNLOCK(adapter);
   3148 } /* ixv_handle_link */
   3149 
   3150 /************************************************************************
   3151  * ixv_check_link - Used in the local timer to poll for link changes
   3152  ************************************************************************/
   3153 static void
   3154 ixv_check_link(struct adapter *adapter)
   3155 {
   3156 
   3157 	KASSERT(mutex_owned(&adapter->core_mtx));
   3158 
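         	/* Request a fresh link state check */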
   3159 	adapter->hw.mac.get_link_status = TRUE;
   3160 
   3161 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   3162 	    &adapter->link_up, FALSE);
   3163 	ixv_update_link_status(adapter);
   3164 } /* ixv_check_link */
   3165