/* ixv.c revision 1.152 (NetBSD) -- source-browser navigation header removed */
      1 /*$NetBSD: ixv.c,v 1.152 2020/08/13 08:38:50 msaitoh Exp $*/
      2 
      3 /******************************************************************************
      4 
      5   Copyright (c) 2001-2017, Intel Corporation
      6   All rights reserved.
      7 
      8   Redistribution and use in source and binary forms, with or without
      9   modification, are permitted provided that the following conditions are met:
     10 
     11    1. Redistributions of source code must retain the above copyright notice,
     12       this list of conditions and the following disclaimer.
     13 
     14    2. Redistributions in binary form must reproduce the above copyright
     15       notice, this list of conditions and the following disclaimer in the
     16       documentation and/or other materials provided with the distribution.
     17 
     18    3. Neither the name of the Intel Corporation nor the names of its
     19       contributors may be used to endorse or promote products derived from
     20       this software without specific prior written permission.
     21 
     22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32   POSSIBILITY OF SUCH DAMAGE.
     33 
     34 ******************************************************************************/
     35 /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
     36 
     37 #ifdef _KERNEL_OPT
     38 #include "opt_inet.h"
     39 #include "opt_inet6.h"
     40 #include "opt_net_mpsafe.h"
     41 #endif
     42 
     43 #include "ixgbe.h"
     44 #include "vlan.h"
     45 
/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";
/* XXX NetBSD: + 1.5.17 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *
 *   Subsystem IDs of 0 act as wildcards in ixv_lookup().
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *   (indexed by the "String Index" field of ixv_vendor_info_array)
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
     78 
     79 /*********************************************************************
     80  *  Function prototypes
     81  *********************************************************************/
     82 static int	ixv_probe(device_t, cfdata_t, void *);
     83 static void	ixv_attach(device_t, device_t, void *);
     84 static int	ixv_detach(device_t, int);
     85 #if 0
     86 static int	ixv_shutdown(device_t);
     87 #endif
     88 static int	ixv_ifflags_cb(struct ethercom *);
     89 static int	ixv_ioctl(struct ifnet *, u_long, void *);
     90 static int	ixv_init(struct ifnet *);
     91 static void	ixv_init_locked(struct adapter *);
     92 static void	ixv_ifstop(struct ifnet *, int);
     93 static void	ixv_stop(void *);
     94 static void	ixv_init_device_features(struct adapter *);
     95 static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
     96 static int	ixv_media_change(struct ifnet *);
     97 static int	ixv_allocate_pci_resources(struct adapter *,
     98 		    const struct pci_attach_args *);
     99 static void	ixv_free_workqueue(struct adapter *);
    100 static int	ixv_allocate_msix(struct adapter *,
    101 		    const struct pci_attach_args *);
    102 static int	ixv_configure_interrupts(struct adapter *);
    103 static void	ixv_free_pci_resources(struct adapter *);
    104 static void	ixv_local_timer(void *);
    105 static void	ixv_handle_timer(struct work *, void *);
    106 static int	ixv_setup_interface(device_t, struct adapter *);
    107 static void	ixv_schedule_admin_tasklet(struct adapter *);
    108 static int	ixv_negotiate_api(struct adapter *);
    109 
    110 static void	ixv_initialize_transmit_units(struct adapter *);
    111 static void	ixv_initialize_receive_units(struct adapter *);
    112 static void	ixv_initialize_rss_mapping(struct adapter *);
    113 static s32	ixv_check_link(struct adapter *);
    114 
    115 static void	ixv_enable_intr(struct adapter *);
    116 static void	ixv_disable_intr(struct adapter *);
    117 static int	ixv_set_rxfilter(struct adapter *);
    118 static void	ixv_update_link_status(struct adapter *);
    119 static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
    120 static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
    121 static void	ixv_configure_ivars(struct adapter *);
    122 static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
    123 static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
    124 
    125 static void	ixv_setup_vlan_tagging(struct adapter *);
    126 static int	ixv_setup_vlan_support(struct adapter *);
    127 static int	ixv_vlan_cb(struct ethercom *, uint16_t, bool);
    128 static int	ixv_register_vlan(struct adapter *, u16);
    129 static int	ixv_unregister_vlan(struct adapter *, u16);
    130 
    131 static void	ixv_add_device_sysctls(struct adapter *);
    132 static void	ixv_save_stats(struct adapter *);
    133 static void	ixv_init_stats(struct adapter *);
    134 static void	ixv_update_stats(struct adapter *);
    135 static void	ixv_add_stats_sysctls(struct adapter *);
    136 static void	ixv_clear_evcnt(struct adapter *);
    137 
    138 /* Sysctl handlers */
    139 static void	ixv_set_sysctl_value(struct adapter *, const char *,
    140 		    const char *, int *, int);
    141 static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
    142 static int	ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
    143 static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
    144 static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
    145 static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
    146 static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
    147 
    148 /* The MSI-X Interrupt handlers */
    149 static int	ixv_msix_que(void *);
    150 static int	ixv_msix_mbx(void *);
    151 
    152 /* Event handlers running on workqueue */
    153 static void	ixv_handle_que(void *);
    154 
    155 /* Deferred workqueue handlers */
    156 static void	ixv_handle_admin(struct work *, void *);
    157 static void	ixv_handle_que_work(struct work *, void *);
    158 
    159 const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
    160 static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
    161 
/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/* FreeBSD driver/module registration glue; kept for reference only. */
#if 0
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif
    179 
/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/*
 * TUNABLE_INT is a FreeBSD-ism; on NetBSD it is defined away to nothing,
 * so the invocations below are no-ops that document the tunable names.
 */
#define	TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Which packet processing uses workqueue or softint */
static bool ixv_txrx_workqueue = false;

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/* MP-safety flags for callouts, softints and workqueues. */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

/* Unused FreeBSD-style dispatch hooks, kept for reference only. */
#if 0
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
#endif
    246 
    247 /************************************************************************
    248  * ixv_probe - Device identification routine
    249  *
    250  *   Determines if the driver should be loaded on
    251  *   adapter based on its PCI vendor/device ID.
    252  *
    253  *   return BUS_PROBE_DEFAULT on success, positive on failure
    254  ************************************************************************/
    255 static int
    256 ixv_probe(device_t dev, cfdata_t cf, void *aux)
    257 {
    258 #ifdef __HAVE_PCI_MSI_MSIX
    259 	const struct pci_attach_args *pa = aux;
    260 
    261 	return (ixv_lookup(pa) != NULL) ? 1 : 0;
    262 #else
    263 	return 0;
    264 #endif
    265 } /* ixv_probe */
    266 
    267 static const ixgbe_vendor_info_t *
    268 ixv_lookup(const struct pci_attach_args *pa)
    269 {
    270 	const ixgbe_vendor_info_t *ent;
    271 	pcireg_t subid;
    272 
    273 	INIT_DEBUGOUT("ixv_lookup: begin");
    274 
    275 	if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
    276 		return NULL;
    277 
    278 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    279 
    280 	for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
    281 		if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
    282 		    (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
    283 		    ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
    284 		     (ent->subvendor_id == 0)) &&
    285 		    ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
    286 		     (ent->subdevice_id == 0))) {
    287 			return ent;
    288 		}
    289 	}
    290 
    291 	return NULL;
    292 }
    293 
    294 /************************************************************************
    295  * ixv_attach - Device initialization routine
    296  *
    297  *   Called when the driver is being loaded.
    298  *   Identifies the type of hardware, allocates all resources
    299  *   and initializes the hardware.
    300  *
    301  *   return 0 on success, positive on failure
    302  ************************************************************************/
    303 static void
    304 ixv_attach(device_t parent, device_t dev, void *aux)
    305 {
    306 	struct adapter *adapter;
    307 	struct ixgbe_hw *hw;
    308 	int		error = 0;
    309 	pcireg_t	id, subid;
    310 	const ixgbe_vendor_info_t *ent;
    311 	const struct pci_attach_args *pa = aux;
    312 	const char *apivstr;
    313 	const char *str;
    314 	char wqname[MAXCOMLEN];
    315 	char buf[256];
    316 
    317 	INIT_DEBUGOUT("ixv_attach: begin");
    318 
    319 	/*
    320 	 * Make sure BUSMASTER is set, on a VM under
    321 	 * KVM it may not be and will break things.
    322 	 */
    323 	ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
    324 
    325 	/* Allocate, clear, and link in our adapter structure */
    326 	adapter = device_private(dev);
    327 	adapter->hw.back = adapter;
    328 	adapter->dev = dev;
    329 	hw = &adapter->hw;
    330 
    331 	adapter->init_locked = ixv_init_locked;
    332 	adapter->stop_locked = ixv_stop;
    333 
    334 	adapter->osdep.pc = pa->pa_pc;
    335 	adapter->osdep.tag = pa->pa_tag;
    336 	if (pci_dma64_available(pa))
    337 		adapter->osdep.dmat = pa->pa_dmat64;
    338 	else
    339 		adapter->osdep.dmat = pa->pa_dmat;
    340 	adapter->osdep.attached = false;
    341 
    342 	ent = ixv_lookup(pa);
    343 
    344 	KASSERT(ent != NULL);
    345 
    346 	aprint_normal(": %s, Version - %s\n",
    347 	    ixv_strings[ent->index], ixv_driver_version);
    348 
    349 	/* Core Lock Init */
    350 	IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
    351 
    352 	/* Do base PCI setup - map BAR0 */
    353 	if (ixv_allocate_pci_resources(adapter, pa)) {
    354 		aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
    355 		error = ENXIO;
    356 		goto err_out;
    357 	}
    358 
    359 	/* SYSCTL APIs */
    360 	ixv_add_device_sysctls(adapter);
    361 
    362 	/* Set up the timer callout and workqueue */
    363 	callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
    364 	snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
    365 	error = workqueue_create(&adapter->timer_wq, wqname,
    366 	    ixv_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
    367 	    IXGBE_TASKLET_WQ_FLAGS);
    368 	if (error) {
    369 		aprint_error_dev(dev,
    370 		    "could not create timer workqueue (%d)\n", error);
    371 		goto err_out;
    372 	}
    373 
    374 	/* Save off the information about this board */
    375 	id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
    376 	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    377 	hw->vendor_id = PCI_VENDOR(id);
    378 	hw->device_id = PCI_PRODUCT(id);
    379 	hw->revision_id =
    380 	    PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
    381 	hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
    382 	hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
    383 
    384 	/* A subset of set_mac_type */
    385 	switch (hw->device_id) {
    386 	case IXGBE_DEV_ID_82599_VF:
    387 		hw->mac.type = ixgbe_mac_82599_vf;
    388 		str = "82599 VF";
    389 		break;
    390 	case IXGBE_DEV_ID_X540_VF:
    391 		hw->mac.type = ixgbe_mac_X540_vf;
    392 		str = "X540 VF";
    393 		break;
    394 	case IXGBE_DEV_ID_X550_VF:
    395 		hw->mac.type = ixgbe_mac_X550_vf;
    396 		str = "X550 VF";
    397 		break;
    398 	case IXGBE_DEV_ID_X550EM_X_VF:
    399 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
    400 		str = "X550EM X VF";
    401 		break;
    402 	case IXGBE_DEV_ID_X550EM_A_VF:
    403 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
    404 		str = "X550EM A VF";
    405 		break;
    406 	default:
    407 		/* Shouldn't get here since probe succeeded */
    408 		aprint_error_dev(dev, "Unknown device ID!\n");
    409 		error = ENXIO;
    410 		goto err_out;
    411 		break;
    412 	}
    413 	aprint_normal_dev(dev, "device %s\n", str);
    414 
    415 	ixv_init_device_features(adapter);
    416 
    417 	/* Initialize the shared code */
    418 	error = ixgbe_init_ops_vf(hw);
    419 	if (error) {
    420 		aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
    421 		error = EIO;
    422 		goto err_out;
    423 	}
    424 
    425 	/* Setup the mailbox */
    426 	ixgbe_init_mbx_params_vf(hw);
    427 
    428 	/* Set the right number of segments */
    429 	adapter->num_segs = IXGBE_82599_SCATTER;
    430 
    431 	/* Reset mbox api to 1.0 */
    432 	error = hw->mac.ops.reset_hw(hw);
    433 	if (error == IXGBE_ERR_RESET_FAILED)
    434 		aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
    435 	else if (error)
    436 		aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
    437 		    error);
    438 	if (error) {
    439 		error = EIO;
    440 		goto err_out;
    441 	}
    442 
    443 	error = hw->mac.ops.init_hw(hw);
    444 	if (error) {
    445 		aprint_error_dev(dev, "...init_hw() failed!\n");
    446 		error = EIO;
    447 		goto err_out;
    448 	}
    449 
    450 	/* Negotiate mailbox API version */
    451 	error = ixv_negotiate_api(adapter);
    452 	if (error)
    453 		aprint_normal_dev(dev,
    454 		    "MBX API negotiation failed during attach!\n");
    455 	switch (hw->api_version) {
    456 	case ixgbe_mbox_api_10:
    457 		apivstr = "1.0";
    458 		break;
    459 	case ixgbe_mbox_api_20:
    460 		apivstr = "2.0";
    461 		break;
    462 	case ixgbe_mbox_api_11:
    463 		apivstr = "1.1";
    464 		break;
    465 	case ixgbe_mbox_api_12:
    466 		apivstr = "1.2";
    467 		break;
    468 	case ixgbe_mbox_api_13:
    469 		apivstr = "1.3";
    470 		break;
    471 	default:
    472 		apivstr = "unknown";
    473 		break;
    474 	}
    475 	aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
    476 
    477 	/* If no mac address was assigned, make a random one */
    478 	if (!ixv_check_ether_addr(hw->mac.addr)) {
    479 		u8 addr[ETHER_ADDR_LEN];
    480 		uint64_t rndval = cprng_strong64();
    481 
    482 		memcpy(addr, &rndval, sizeof(addr));
    483 		addr[0] &= 0xFE;
    484 		addr[0] |= 0x02;
    485 		bcopy(addr, hw->mac.addr, sizeof(addr));
    486 	}
    487 
    488 	/* Register for VLAN events */
    489 	ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);
    490 
    491 	/* Sysctls for limiting the amount of work done in the taskqueues */
    492 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
    493 	    "max number of rx packets to process",
    494 	    &adapter->rx_process_limit, ixv_rx_process_limit);
    495 
    496 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
    497 	    "max number of tx packets to process",
    498 	    &adapter->tx_process_limit, ixv_tx_process_limit);
    499 
    500 	/* Do descriptor calc and sanity checks */
    501 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
    502 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
    503 		aprint_error_dev(dev, "TXD config issue, using default!\n");
    504 		adapter->num_tx_desc = DEFAULT_TXD;
    505 	} else
    506 		adapter->num_tx_desc = ixv_txd;
    507 
    508 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
    509 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
    510 		aprint_error_dev(dev, "RXD config issue, using default!\n");
    511 		adapter->num_rx_desc = DEFAULT_RXD;
    512 	} else
    513 		adapter->num_rx_desc = ixv_rxd;
    514 
    515 	/* Setup MSI-X */
    516 	error = ixv_configure_interrupts(adapter);
    517 	if (error)
    518 		goto err_out;
    519 
    520 	/* Allocate our TX/RX Queues */
    521 	if (ixgbe_allocate_queues(adapter)) {
    522 		aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
    523 		error = ENOMEM;
    524 		goto err_out;
    525 	}
    526 
    527 	/* hw.ix defaults init */
    528 	adapter->enable_aim = ixv_enable_aim;
    529 
    530 	adapter->txrx_use_workqueue = ixv_txrx_workqueue;
    531 
    532 	error = ixv_allocate_msix(adapter, pa);
    533 	if (error) {
    534 		aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
    535 		goto err_late;
    536 	}
    537 
    538 	/* Setup OS specific network interface */
    539 	error = ixv_setup_interface(dev, adapter);
    540 	if (error != 0) {
    541 		aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
    542 		goto err_late;
    543 	}
    544 
    545 	/* Do the stats setup */
    546 	ixv_save_stats(adapter);
    547 	ixv_init_stats(adapter);
    548 	ixv_add_stats_sysctls(adapter);
    549 
    550 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    551 		ixgbe_netmap_attach(adapter);
    552 
    553 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
    554 	aprint_verbose_dev(dev, "feature cap %s\n", buf);
    555 	snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
    556 	aprint_verbose_dev(dev, "feature ena %s\n", buf);
    557 
    558 	INIT_DEBUGOUT("ixv_attach: end");
    559 	adapter->osdep.attached = true;
    560 
    561 	return;
    562 
    563 err_late:
    564 	ixgbe_free_queues(adapter);
    565 err_out:
    566 	ixv_free_pci_resources(adapter);
    567 	IXGBE_CORE_LOCK_DESTROY(adapter);
    568 
    569 	return;
    570 } /* ixv_attach */
    571 
    572 /************************************************************************
    573  * ixv_detach - Device removal routine
    574  *
    575  *   Called when the driver is being removed.
    576  *   Stops the adapter and deallocates all the resources
    577  *   that were allocated for driver operation.
    578  *
    579  *   return 0 on success, positive on failure
    580  ************************************************************************/
    581 static int
    582 ixv_detach(device_t dev, int flags)
    583 {
    584 	struct adapter	*adapter = device_private(dev);
    585 	struct ixgbe_hw *hw = &adapter->hw;
    586 	struct tx_ring *txr = adapter->tx_rings;
    587 	struct rx_ring *rxr = adapter->rx_rings;
    588 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    589 
    590 	INIT_DEBUGOUT("ixv_detach: begin");
    591 	if (adapter->osdep.attached == false)
    592 		return 0;
    593 
    594 	/* Stop the interface. Callouts are stopped in it. */
    595 	ixv_ifstop(adapter->ifp, 1);
    596 
    597 #if NVLAN > 0
    598 	/* Make sure VLANs are not using driver */
    599 	if (!VLAN_ATTACHED(&adapter->osdep.ec))
    600 		;	/* nothing to do: no VLANs */
    601 	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
    602 		vlan_ifdetach(adapter->ifp);
    603 	else {
    604 		aprint_error_dev(dev, "VLANs in use, detach first\n");
    605 		return EBUSY;
    606 	}
    607 #endif
    608 
    609 	ether_ifdetach(adapter->ifp);
    610 	callout_halt(&adapter->timer, NULL);
    611 	ixv_free_workqueue(adapter);
    612 
    613 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
    614 		netmap_detach(adapter->ifp);
    615 
    616 	ixv_free_pci_resources(adapter);
    617 #if 0 /* XXX the NetBSD port is probably missing something here */
    618 	bus_generic_detach(dev);
    619 #endif
    620 	if_detach(adapter->ifp);
    621 	ifmedia_fini(&adapter->media);
    622 	if_percpuq_destroy(adapter->ipq);
    623 
    624 	sysctl_teardown(&adapter->sysctllog);
    625 	evcnt_detach(&adapter->efbig_tx_dma_setup);
    626 	evcnt_detach(&adapter->mbuf_defrag_failed);
    627 	evcnt_detach(&adapter->efbig2_tx_dma_setup);
    628 	evcnt_detach(&adapter->einval_tx_dma_setup);
    629 	evcnt_detach(&adapter->other_tx_dma_setup);
    630 	evcnt_detach(&adapter->eagain_tx_dma_setup);
    631 	evcnt_detach(&adapter->enomem_tx_dma_setup);
    632 	evcnt_detach(&adapter->watchdog_events);
    633 	evcnt_detach(&adapter->tso_err);
    634 	evcnt_detach(&adapter->admin_irqev);
    635 	evcnt_detach(&adapter->link_workev);
    636 
    637 	txr = adapter->tx_rings;
    638 	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
    639 		evcnt_detach(&adapter->queues[i].irqs);
    640 		evcnt_detach(&adapter->queues[i].handleq);
    641 		evcnt_detach(&adapter->queues[i].req);
    642 		evcnt_detach(&txr->no_desc_avail);
    643 		evcnt_detach(&txr->total_packets);
    644 		evcnt_detach(&txr->tso_tx);
    645 #ifndef IXGBE_LEGACY_TX
    646 		evcnt_detach(&txr->pcq_drops);
    647 #endif
    648 
    649 		evcnt_detach(&rxr->rx_packets);
    650 		evcnt_detach(&rxr->rx_bytes);
    651 		evcnt_detach(&rxr->rx_copies);
    652 		evcnt_detach(&rxr->no_jmbuf);
    653 		evcnt_detach(&rxr->rx_discarded);
    654 	}
    655 	evcnt_detach(&stats->ipcs);
    656 	evcnt_detach(&stats->l4cs);
    657 	evcnt_detach(&stats->ipcs_bad);
    658 	evcnt_detach(&stats->l4cs_bad);
    659 
    660 	/* Packet Reception Stats */
    661 	evcnt_detach(&stats->vfgorc);
    662 	evcnt_detach(&stats->vfgprc);
    663 	evcnt_detach(&stats->vfmprc);
    664 
    665 	/* Packet Transmission Stats */
    666 	evcnt_detach(&stats->vfgotc);
    667 	evcnt_detach(&stats->vfgptc);
    668 
    669 	/* Mailbox Stats */
    670 	evcnt_detach(&hw->mbx.stats.msgs_tx);
    671 	evcnt_detach(&hw->mbx.stats.msgs_rx);
    672 	evcnt_detach(&hw->mbx.stats.acks);
    673 	evcnt_detach(&hw->mbx.stats.reqs);
    674 	evcnt_detach(&hw->mbx.stats.rsts);
    675 
    676 	ixgbe_free_queues(adapter);
    677 
    678 	IXGBE_CORE_LOCK_DESTROY(adapter);
    679 
    680 	return (0);
    681 } /* ixv_detach */
    682 
/************************************************************************
 * ixv_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Must be called with the core lock held.  Returns nothing; on a
 *   setup failure the interface is stopped via ixv_stop() and the
 *   function returns early.
 ************************************************************************/
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue	*que;
	int		error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	KASSERT(mutex_owned(&adapter->core_mtx));
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);
	/* Nothing is masked at this point; reset per-queue disable refcounts. */
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* On-wire frame size: payload + Ethernet header + CRC. */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	/* NOTE(review): literal 1 presumably means IXGBE_RAH_AV -- confirm. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_rxfilter(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Initialize variable holding task enqueue requests interrupts */
	adapter->task_requests = 0;

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask: admin vector plus every queue vector. */
	mask = (1 << adapter->vector);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
    808 
    809 /************************************************************************
    810  * ixv_enable_queue
    811  ************************************************************************/
    812 static inline void
    813 ixv_enable_queue(struct adapter *adapter, u32 vector)
    814 {
    815 	struct ixgbe_hw *hw = &adapter->hw;
    816 	struct ix_queue *que = &adapter->queues[vector];
    817 	u32		queue = 1UL << vector;
    818 	u32		mask;
    819 
    820 	mutex_enter(&que->dc_mtx);
    821 	if (que->disabled_count > 0 && --que->disabled_count > 0)
    822 		goto out;
    823 
    824 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    825 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
    826 out:
    827 	mutex_exit(&que->dc_mtx);
    828 } /* ixv_enable_queue */
    829 
    830 /************************************************************************
    831  * ixv_disable_queue
    832  ************************************************************************/
    833 static inline void
    834 ixv_disable_queue(struct adapter *adapter, u32 vector)
    835 {
    836 	struct ixgbe_hw *hw = &adapter->hw;
    837 	struct ix_queue *que = &adapter->queues[vector];
    838 	u32		queue = 1UL << vector;
    839 	u32		mask;
    840 
    841 	mutex_enter(&que->dc_mtx);
    842 	if (que->disabled_count++ > 0)
    843 		goto  out;
    844 
    845 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    846 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
    847 out:
    848 	mutex_exit(&que->dc_mtx);
    849 } /* ixv_disable_queue */
    850 
#if 0
/*
 * Force a software interrupt (VTEICS write) for the queue vectors set in
 * 'queues'.  Compiled out at present; the matching caller in
 * ixv_handle_timer() is also under #if 0.
 */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif
    859 
    860 
    861 /************************************************************************
    862  * ixv_msix_que - MSI-X Queue Interrupt Service routine
    863  ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Mask this vector while its work is outstanding. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	/* Clean the TX ring here in interrupt context. */
	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet over the last interval, TX and RX. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * If RX work remains, defer it to the queue softint; the softint
	 * presumably re-enables the vector when done — handler not visible
	 * here, confirm against ixv_handle_que.
	 */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
    951 
    952 /************************************************************************
    953  * ixv_msix_mbx
    954  ************************************************************************/
    955 static int
    956 ixv_msix_mbx(void *arg)
    957 {
    958 	struct adapter	*adapter = arg;
    959 	struct ixgbe_hw *hw = &adapter->hw;
    960 
    961 	++adapter->admin_irqev.ev_count;
    962 	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
    963 
    964 	/* Link status change */
    965 	hw->mac.get_link_status = TRUE;
    966 	atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MBX);
    967 	ixv_schedule_admin_tasklet(adapter);
    968 
    969 	return 1;
    970 } /* ixv_msix_mbx */
    971 
    972 static void
    973 ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
    974 {
    975 
    976 	/*
    977 	 * Newer devices than 82598 have VF function, so this function is
    978 	 * simple.
    979 	 */
    980 	itr |= IXGBE_EITR_CNT_WDIS;
    981 
    982 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
    983 }
    984 
    985 
    986 /************************************************************************
    987  * ixv_media_status - Media Ioctl callback
    988  *
    989  *   Called whenever the user queries the status of
    990  *   the interface using ifconfig.
    991  ************************************************************************/
    992 static void
    993 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
    994 {
    995 	struct adapter *adapter = ifp->if_softc;
    996 
    997 	INIT_DEBUGOUT("ixv_media_status: begin");
    998 	ixv_update_link_status(adapter);
    999 
   1000 	ifmr->ifm_status = IFM_AVALID;
   1001 	ifmr->ifm_active = IFM_ETHER;
   1002 
   1003 	if (adapter->link_active != LINK_STATE_UP) {
   1004 		ifmr->ifm_active |= IFM_NONE;
   1005 		return;
   1006 	}
   1007 
   1008 	ifmr->ifm_status |= IFM_ACTIVE;
   1009 
   1010 	switch (adapter->link_speed) {
   1011 		case IXGBE_LINK_SPEED_10GB_FULL:
   1012 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
   1013 			break;
   1014 		case IXGBE_LINK_SPEED_5GB_FULL:
   1015 			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
   1016 			break;
   1017 		case IXGBE_LINK_SPEED_2_5GB_FULL:
   1018 			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
   1019 			break;
   1020 		case IXGBE_LINK_SPEED_1GB_FULL:
   1021 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
   1022 			break;
   1023 		case IXGBE_LINK_SPEED_100_FULL:
   1024 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
   1025 			break;
   1026 		case IXGBE_LINK_SPEED_10_FULL:
   1027 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
   1028 			break;
   1029 	}
   1030 
   1031 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
   1032 } /* ixv_media_status */
   1033 
   1034 /************************************************************************
   1035  * ixv_media_change - Media Ioctl callback
   1036  *
   1037  *   Called when the user changes speed/duplex using
   1038  *   media/mediopt option with ifconfig.
   1039  ************************************************************************/
   1040 static int
   1041 ixv_media_change(struct ifnet *ifp)
   1042 {
   1043 	struct adapter *adapter = ifp->if_softc;
   1044 	struct ifmedia *ifm = &adapter->media;
   1045 
   1046 	INIT_DEBUGOUT("ixv_media_change: begin");
   1047 
   1048 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
   1049 		return (EINVAL);
   1050 
   1051 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
   1052 	case IFM_AUTO:
   1053 		break;
   1054 	default:
   1055 		device_printf(adapter->dev, "Only auto media type\n");
   1056 		return (EINVAL);
   1057 	}
   1058 
   1059 	return (0);
   1060 } /* ixv_media_change */
   1061 
   1062 static void
   1063 ixv_schedule_admin_tasklet(struct adapter *adapter)
   1064 {
   1065 	if (adapter->schedule_wqs_ok) {
   1066 		if (atomic_cas_uint(&adapter->admin_pending, 0, 1) == 0)
   1067 			workqueue_enqueue(adapter->admin_wq,
   1068 			    &adapter->admin_wc, NULL);
   1069 	}
   1070 }
   1071 
   1072 /************************************************************************
   1073  * ixv_negotiate_api
   1074  *
   1075  *   Negotiate the Mailbox API with the PF;
   1076  *   start with the most featured API first.
   1077  ************************************************************************/
   1078 static int
   1079 ixv_negotiate_api(struct adapter *adapter)
   1080 {
   1081 	struct ixgbe_hw *hw = &adapter->hw;
   1082 	int		mbx_api[] = { ixgbe_mbox_api_13,
   1083 				      ixgbe_mbox_api_12,
   1084 				      ixgbe_mbox_api_11,
   1085 				      ixgbe_mbox_api_10,
   1086 				      ixgbe_mbox_api_unknown };
   1087 	int		i = 0;
   1088 
   1089 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
   1090 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
   1091 			return (0);
   1092 		i++;
   1093 	}
   1094 
   1095 	return (EINVAL);
   1096 } /* ixv_negotiate_api */
   1097 
   1098 
   1099 /************************************************************************
   1100  * ixv_set_rxfilter - Multicast Update
   1101  *
   1102  *   Called whenever multicast address list is updated.
   1103  ************************************************************************/
static int
ixv_set_rxfilter(struct adapter *adapter)
{
	/* On-stack copy of the multicast list, capped at IXGBE_MAX_VF_MC. */
	u8	mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ifnet		*ifp = adapter->ifp;
	struct ixgbe_hw		*hw = &adapter->hw;
	u8			*update_ptr;
	int			mcnt = 0;
	struct ethercom		*ec = &adapter->osdep.ec;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	bool			overflow = false;
	int			error, rc = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");

	/* 1: For PROMISC */
	if (ifp->if_flags & IFF_PROMISC) {
		/* Ask the PF for promiscuous mode; only it can grant this. */
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_PROMISC);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support promisc mode\n");
			error = EOPNOTSUPP;
		} else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
			device_printf(adapter->dev,
			    "the PF may not in promisc mode\n");
			error = EINVAL;
		} else if (error) {
			device_printf(adapter->dev,
			    "failed to set promisc mode. error = %d\n",
			    error);
			error = EIO;
		} else
			return 0;
		/* Promisc failed; remember why and fall back to multicast. */
		rc = error;
	}

	/* 2: For ALLMULTI or normal */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Too many entries, or a range entry (addrlo != addrhi,
		 * which a simple list cannot express) -> need ALLMULTI.
		 */
		if ((mcnt >= IXGBE_MAX_VF_MC) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			overflow = true;
			break;
		}
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* 3: For ALLMULTI */
	if (overflow) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_ALLMULTI);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support allmulti mode\n");
			error = EOPNOTSUPP;
		} else if (error) {
			device_printf(adapter->dev,
			    "number of Ethernet multicast addresses "
			    "exceeds the limit (%d). error = %d\n",
			    IXGBE_MAX_VF_MC, error);
			error = ENOSPC;
		} else {
			ETHER_LOCK(ec);
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			return rc; /* Promisc might have failed */
		}

		/* Keep the earliest failure as the return value. */
		if (rc == 0)
			rc = error;

		/* Continue to update the multicast table as many as we can */
	}

	/* 4: For normal operation */
	error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
	if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
		/* Normal operation */
		ETHER_LOCK(ec);
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		error = 0;
	} else if (error) {
		device_printf(adapter->dev,
		    "failed to set Ethernet multicast address "
		    "operation to normal. error = %d\n", error);
	}

	/* Push the gathered list to hardware via the iterator callback. */
	update_ptr = mta;

	error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
	if (rc == 0)
		rc = error;

	return rc;
} /* ixv_set_rxfilter */
   1219 
   1220 /************************************************************************
   1221  * ixv_mc_array_itr
   1222  *
   1223  *   An iterator function needed by the multicast shared code.
   1224  *   It feeds the shared code routine the addresses in the
   1225  *   array of ixv_set_rxfilter() one by one.
   1226  ************************************************************************/
   1227 static u8 *
   1228 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
   1229 {
   1230 	u8 *addr = *update_ptr;
   1231 	u8 *newptr;
   1232 
   1233 	*vmdq = 0;
   1234 
   1235 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
   1236 	*update_ptr = newptr;
   1237 
   1238 	return addr;
   1239 } /* ixv_mc_array_itr */
   1240 
   1241 /************************************************************************
   1242  * ixv_local_timer - Timer routine
   1243  *
   1244  *   Checks for link status, updates statistics,
   1245  *   and runs the watchdog check.
   1246  ************************************************************************/
   1247 static void
   1248 ixv_local_timer(void *arg)
   1249 {
   1250 	struct adapter *adapter = arg;
   1251 
   1252 	if (adapter->schedule_wqs_ok) {
   1253 		if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
   1254 			workqueue_enqueue(adapter->timer_wq,
   1255 			    &adapter->timer_wc, NULL);
   1256 	}
   1257 }
   1258 
static void
ixv_handle_timer(struct work *wk, void *context)
{
	struct adapter	*adapter = context;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	IXGBE_CORE_LOCK(adapter);

	/*
	 * If the link changed, reinitialize; ixv_init_locked() restarts the
	 * callout itself, so no reschedule is needed on this path.
	 */
	if (ixv_check_link(adapter)) {
		ixv_init_locked(adapter);
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	/* Stats Update */
	ixv_update_stats(adapter);

	/* Sum the per-queue TX error counters into the adapter evcnts. */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring	*txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *	- mark hung queues so we don't schedule on them
	 *	- watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}
#endif

	/* Allow the next tick to be queued, then rearm the callout. */
	atomic_store_relaxed(&adapter->timer_pending, 0);
	IXGBE_CORE_UNLOCK(adapter);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:
	/* All queues hung: log, count, and reset the interface. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixv_handle_timer */
   1360 
   1361 /************************************************************************
   1362  * ixv_update_link_status - Update OS on link state
   1363  *
   1364  * Note: Only updates the OS on the cached link state.
   1365  *	 The real check of the hardware only happens with
   1366  *	 a link interrupt.
   1367  ************************************************************************/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t     dev = adapter->dev;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		/* Only act on an actual transition to UP. */
		if (adapter->link_active != LINK_STATE_UP) {
			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s \n",
				    bpsmsg, "Full Duplex");
			}
			adapter->link_active = LINK_STATE_UP;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else {
		/*
		 * Do it when link active changes to DOWN. i.e.
		 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
		 * b) LINK_STATE_UP	 -> LINK_STATE_DOWN
		 */
		if (adapter->link_active != LINK_STATE_DOWN) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			/*
			 * NOTE(review): the notify/assign order is reversed
			 * here relative to the UP path above — presumably
			 * intentional; confirm before "fixing".
			 */
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = LINK_STATE_DOWN;
		}
	}
} /* ixv_update_link_status */
   1424 
   1425 
   1426 /************************************************************************
   1427  * ixv_stop - Stop the hardware
   1428  *
   1429  *   Disables all traffic on the adapter by issuing a
   1430  *   global reset on the MAC and deallocates TX/RX buffers.
   1431  ************************************************************************/
   1432 static void
   1433 ixv_ifstop(struct ifnet *ifp, int disable)
   1434 {
   1435 	struct adapter *adapter = ifp->if_softc;
   1436 
   1437 	IXGBE_CORE_LOCK(adapter);
   1438 	ixv_stop(adapter);
   1439 	IXGBE_CORE_UNLOCK(adapter);
   1440 
   1441 	workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
   1442 	atomic_store_relaxed(&adapter->admin_pending, 0);
   1443 	workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
   1444 	atomic_store_relaxed(&adapter->timer_pending, 0);
   1445 }
   1446 
static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask interrupts first so no new work arrives during teardown. */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared between reset_hw and
	 * stop_adapter — presumably so stop_adapter does not short-circuit
	 * as already-stopped; confirm against the shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Don't schedule workqueues. */
	adapter->schedule_wqs_ok = false;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
   1477 
   1478 
   1479 /************************************************************************
   1480  * ixv_allocate_pci_resources
   1481  ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t	memtype, csr;
	device_t	dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* Map BAR0 (the device register window); only memory BARs accepted. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:	/* Shared failure path for pci_mapreg_info and bus_space_map. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
   1531 
   1532 static void
   1533 ixv_free_workqueue(struct adapter *adapter)
   1534 {
   1535 	struct ix_queue *que = adapter->queues;
   1536 	struct tx_ring *txr = adapter->tx_rings;
   1537 	int i;
   1538 
   1539 	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
   1540 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
   1541 			if (txr->txr_si != NULL)
   1542 				softint_disestablish(txr->txr_si);
   1543 		}
   1544 		if (que->que_si != NULL)
   1545 			softint_disestablish(que->que_si);
   1546 	}
   1547 	if (adapter->txr_wq != NULL)
   1548 		workqueue_destroy(adapter->txr_wq);
   1549 	if (adapter->txr_wq_enqueued != NULL)
   1550 		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
   1551 	if (adapter->que_wq != NULL)
   1552 		workqueue_destroy(adapter->que_wq);
   1553 
   1554 	/* Drain the Mailbox(link) queue */
   1555 	if (adapter->admin_wq != NULL) {
   1556 		workqueue_destroy(adapter->admin_wq);
   1557 		adapter->admin_wq = NULL;
   1558 	}
   1559 	if (adapter->timer_wq != NULL) {
   1560 		workqueue_destroy(adapter->timer_wq);
   1561 		adapter->timer_wq = NULL;
   1562 	}
   1563 } /* ixv_free_workqueue */
   1564 
   1565 /************************************************************************
   1566  * ixv_free_pci_resources
   1567  ************************************************************************/
   1568 static void
   1569 ixv_free_pci_resources(struct adapter * adapter)
   1570 {
   1571 	struct		ix_queue *que = adapter->queues;
   1572 	int		rid;
   1573 
   1574 	/*
   1575 	 *  Release all msix queue resources:
   1576 	 */
   1577 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   1578 		if (que->res != NULL)
   1579 			pci_intr_disestablish(adapter->osdep.pc,
   1580 			    adapter->osdep.ihs[i]);
   1581 	}
   1582 
   1583 
   1584 	/* Clean the Mailbox interrupt last */
   1585 	rid = adapter->vector;
   1586 
   1587 	if (adapter->osdep.ihs[rid] != NULL) {
   1588 		pci_intr_disestablish(adapter->osdep.pc,
   1589 		    adapter->osdep.ihs[rid]);
   1590 		adapter->osdep.ihs[rid] = NULL;
   1591 	}
   1592 
   1593 	pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
   1594 	    adapter->osdep.nintrs);
   1595 
   1596 	if (adapter->osdep.mem_size != 0) {
   1597 		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
   1598 		    adapter->osdep.mem_bus_space_handle,
   1599 		    adapter->osdep.mem_size);
   1600 	}
   1601 
   1602 	return;
   1603 } /* ixv_free_pci_resources */
   1604 
   1605 /************************************************************************
   1606  * ixv_setup_interface
   1607  *
   1608  *   Setup networking device structure and register an interface.
   1609  ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet   *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* Fill in the ifnet embedded in the ethercom. */
	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue TX unless the legacy-TX feature is forced on. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	aprint_normal_dev(dev, "Ethernet address %s\n",
	    ether_sprintf(adapter->hw.mac.addr));
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     |	IFCAP_TSOv4
			     |	IFCAP_TSOv6;
	/* Checksum/TSO offloads are available but off by default. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
			    |  ETHERCAP_VLAN_HWTAGGING
			    |  ETHERCAP_VLAN_HWCSUM
			    |  ETHERCAP_JUMBO_MTU
			    |  ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
#if 0
	/* NetBSD doesn't support LRO yet */
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status, &adapter->core_mtx);
	/* The VF only reports autoselect; speed comes from the PF. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return 0;
} /* ixv_setup_interface */
   1701 
   1702 
   1703 /************************************************************************
   1704  * ixv_initialize_transmit_units - Enable transmit unit.
   1705  ************************************************************************/
   1706 static void
   1707 ixv_initialize_transmit_units(struct adapter *adapter)
   1708 {
   1709 	struct tx_ring	*txr = adapter->tx_rings;
   1710 	struct ixgbe_hw	*hw = &adapter->hw;
   1711 	int i;
   1712 
   1713 	for (i = 0; i < adapter->num_queues; i++, txr++) {
   1714 		u64 tdba = txr->txdma.dma_paddr;
   1715 		u32 txctrl, txdctl;
   1716 		int j = txr->me;
   1717 
   1718 		/* Set WTHRESH to 8, burst writeback */
   1719 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1720 		txdctl |= (8 << 16);
   1721 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1722 
   1723 		/* Set the HW Tx Head and Tail indices */
   1724 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
   1725 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
   1726 
   1727 		/* Set Tx Tail register */
   1728 		txr->tail = IXGBE_VFTDT(j);
   1729 
   1730 		txr->txr_no_space = false;
   1731 
   1732 		/* Set Ring parameters */
   1733 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
   1734 		    (tdba & 0x00000000ffffffffULL));
   1735 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
   1736 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
   1737 		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
   1738 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
   1739 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
   1740 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
   1741 
   1742 		/* Now enable */
   1743 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
   1744 		txdctl |= IXGBE_TXDCTL_ENABLE;
   1745 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
   1746 	}
   1747 
   1748 	return;
   1749 } /* ixv_initialize_transmit_units */
   1750 
   1751 
   1752 /************************************************************************
   1753  * ixv_initialize_rss_mapping
   1754  ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD always takes the key from the kernel RSS implementation */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds (10 x 32-bit VFRSSRK registers) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/*
	 * Set up the redirection table: 64 entries, packed four 8-bit
	 * queue ids per 32-bit VFRETA register.  j walks round-robin
	 * over the Rx queues when the kernel RSS mapping is not used.
	 */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush to the register */
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/*
	 * Translate the requested hash types into VFMRQC field bits;
	 * the *_EX types have no VF equivalent, so only warn about them.
	 */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
   1848 
   1849 
   1850 /************************************************************************
   1851  * ixv_initialize_receive_units - Setup receive registers and features.
   1852  ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, psrtype;

	/* Pick the Rx buffer size (in SRRCTL BSIZEPKT units) from the MTU */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Header split types advertised in VFPSRTYPE */
	psrtype = IXGBE_PSRTYPE_TCPHDR
		| IXGBE_PSRTYPE_UDPHDR
		| IXGBE_PSRTYPE_IPV4HDR
		| IXGBE_PSRTYPE_IPV6HDR
		| IXGBE_PSRTYPE_L2HDR;

	/* Bit 29 is set when multiple Rx queues are in use */
	if (adapter->num_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue, polling up to 10ms for it to quiesce */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		IXGBE_WRITE_BARRIER(hw);
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register (buffer sizes + descriptor type) */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/*
		 * Do the queue enabling last, polling until the hardware
		 * reports the ring enabled before writing the tail below.
		 */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* RSS is only programmed on X550-class VFs and newer */
	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(adapter);
} /* ixv_initialize_receive_units */
   1966 
   1967 /************************************************************************
   1968  * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
   1969  *
   1970  *   Retrieves the TDH value from the hardware
   1971  ************************************************************************/
   1972 static int
   1973 ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
   1974 {
   1975 	struct sysctlnode node = *rnode;
   1976 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1977 	uint32_t val;
   1978 
   1979 	if (!txr)
   1980 		return (0);
   1981 
   1982 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
   1983 	node.sysctl_data = &val;
   1984 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   1985 } /* ixv_sysctl_tdh_handler */
   1986 
   1987 /************************************************************************
 * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
   1989  *
   1990  *   Retrieves the TDT value from the hardware
   1991  ************************************************************************/
   1992 static int
   1993 ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
   1994 {
   1995 	struct sysctlnode node = *rnode;
   1996 	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
   1997 	uint32_t val;
   1998 
   1999 	if (!txr)
   2000 		return (0);
   2001 
   2002 	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
   2003 	node.sysctl_data = &val;
   2004 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2005 } /* ixv_sysctl_tdt_handler */
   2006 
   2007 /************************************************************************
   2008  * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
   2009  * handler function
   2010  *
   2011  *   Retrieves the next_to_check value
   2012  ************************************************************************/
   2013 static int
   2014 ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
   2015 {
   2016 	struct sysctlnode node = *rnode;
   2017 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2018 	uint32_t val;
   2019 
   2020 	if (!rxr)
   2021 		return (0);
   2022 
   2023 	val = rxr->next_to_check;
   2024 	node.sysctl_data = &val;
   2025 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2026 } /* ixv_sysctl_next_to_check_handler */
   2027 
   2028 /************************************************************************
   2029  * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
   2030  *
   2031  *   Retrieves the RDH value from the hardware
   2032  ************************************************************************/
   2033 static int
   2034 ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
   2035 {
   2036 	struct sysctlnode node = *rnode;
   2037 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2038 	uint32_t val;
   2039 
   2040 	if (!rxr)
   2041 		return (0);
   2042 
   2043 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
   2044 	node.sysctl_data = &val;
   2045 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2046 } /* ixv_sysctl_rdh_handler */
   2047 
   2048 /************************************************************************
   2049  * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
   2050  *
   2051  *   Retrieves the RDT value from the hardware
   2052  ************************************************************************/
   2053 static int
   2054 ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
   2055 {
   2056 	struct sysctlnode node = *rnode;
   2057 	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
   2058 	uint32_t val;
   2059 
   2060 	if (!rxr)
   2061 		return (0);
   2062 
   2063 	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
   2064 	node.sysctl_data = &val;
   2065 	return sysctl_lookup(SYSCTLFN_CALL(&node));
   2066 } /* ixv_sysctl_rdt_handler */
   2067 
   2068 static void
   2069 ixv_setup_vlan_tagging(struct adapter *adapter)
   2070 {
   2071 	struct ethercom *ec = &adapter->osdep.ec;
   2072 	struct ixgbe_hw *hw = &adapter->hw;
   2073 	struct rx_ring	*rxr;
   2074 	u32		ctrl;
   2075 	int		i;
   2076 	bool		hwtagging;
   2077 
   2078 	/* Enable HW tagging only if any vlan is attached */
   2079 	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
   2080 	    && VLAN_ATTACHED(ec);
   2081 
   2082 	/* Enable the queues */
   2083 	for (i = 0; i < adapter->num_queues; i++) {
   2084 		rxr = &adapter->rx_rings[i];
   2085 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
   2086 		if (hwtagging)
   2087 			ctrl |= IXGBE_RXDCTL_VME;
   2088 		else
   2089 			ctrl &= ~IXGBE_RXDCTL_VME;
   2090 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
   2091 		/*
   2092 		 * Let Rx path know that it needs to store VLAN tag
   2093 		 * as part of extra mbuf info.
   2094 		 */
   2095 		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
   2096 	}
   2097 } /* ixv_setup_vlan_tagging */
   2098 
   2099 /************************************************************************
   2100  * ixv_setup_vlan_support
   2101  ************************************************************************/
static int
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		vid, vfta, retry;
	struct vlanid_list *vlanidp;
	int rv, error = 0;

	/*
	 *  This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/*
	 * Part 1:
	 * Setup VLAN HW tagging
	 */
	ixv_setup_vlan_tagging(adapter);

	/* No VLANs attached: the HW filter needs no programming */
	if (!VLAN_ATTACHED(ec))
		return 0;

	/*
	 * Part 2:
	 * Setup VLAN HW filter
	 */
	/* Cleanup shadow_vfta */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0;
	/* Generate shadow_vfta from ec_vids (one bit per attached VLAN ID) */
	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		uint32_t idx;

		idx = vlanidp->vid / 32;
		KASSERT(idx < IXGBE_VFTA_SIZE);
		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
	}
	ETHER_UNLOCK(ec);

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (adapter->shadow_vfta[i] == 0)
			continue;
		vfta = adapter->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & ((u32)1 << j)) == 0)
				continue;
			vid = (i * 32) + j;

			/*
			 * Call the shared code mailbox routine; it may fail
			 * transiently, so retry up to 5 times before giving
			 * up on this vid.
			 */
			while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
			    FALSE)) != 0) {
				if (++retry > 5) {
					device_printf(adapter->dev,
					    "%s: max retry exceeded\n",
						__func__);
					break;
				}
			}
			if (rv != 0) {
				device_printf(adapter->dev,
				    "failed to set vlan %d\n", vid);
				error = EACCES;
			}
		}
	}
	return error;
} /* ixv_setup_vlan_support */
   2181 
   2182 static int
   2183 ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
   2184 {
   2185 	struct ifnet *ifp = &ec->ec_if;
   2186 	struct adapter *adapter = ifp->if_softc;
   2187 	int rv;
   2188 
   2189 	if (set)
   2190 		rv = ixv_register_vlan(adapter, vid);
   2191 	else
   2192 		rv = ixv_unregister_vlan(adapter, vid);
   2193 
   2194 	if (rv != 0)
   2195 		return rv;
   2196 
   2197 	/*
   2198 	 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
   2199 	 * or 0 to 1.
   2200 	 */
   2201 	if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
   2202 		ixv_setup_vlan_tagging(adapter);
   2203 
   2204 	return rv;
   2205 }
   2206 
   2207 /************************************************************************
   2208  * ixv_register_vlan
   2209  *
   2210  *   Run via a vlan config EVENT, it enables us to use the
   2211  *   HW Filter table since we can get the vlan id. This just
   2212  *   creates the entry in the soft version of the VFTA, init
   2213  *   will repopulate the real table.
   2214  ************************************************************************/
   2215 static int
   2216 ixv_register_vlan(struct adapter *adapter, u16 vtag)
   2217 {
   2218 	struct ixgbe_hw *hw = &adapter->hw;
   2219 	u16		index, bit;
   2220 	int error;
   2221 
   2222 	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
   2223 		return EINVAL;
   2224 	IXGBE_CORE_LOCK(adapter);
   2225 	index = (vtag >> 5) & 0x7F;
   2226 	bit = vtag & 0x1F;
   2227 	adapter->shadow_vfta[index] |= ((u32)1 << bit);
   2228 	error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
   2229 	IXGBE_CORE_UNLOCK(adapter);
   2230 
   2231 	if (error != 0) {
   2232 		device_printf(adapter->dev, "failed to register vlan %hu\n",
   2233 		    vtag);
   2234 		error = EACCES;
   2235 	}
   2236 	return error;
   2237 } /* ixv_register_vlan */
   2238 
   2239 /************************************************************************
   2240  * ixv_unregister_vlan
   2241  *
   2242  *   Run via a vlan unconfig EVENT, remove our entry
   2243  *   in the soft vfta.
   2244  ************************************************************************/
   2245 static int
   2246 ixv_unregister_vlan(struct adapter *adapter, u16 vtag)
   2247 {
   2248 	struct ixgbe_hw *hw = &adapter->hw;
   2249 	u16		index, bit;
   2250 	int		error;
   2251 
   2252 	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
   2253 		return EINVAL;
   2254 
   2255 	IXGBE_CORE_LOCK(adapter);
   2256 	index = (vtag >> 5) & 0x7F;
   2257 	bit = vtag & 0x1F;
   2258 	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
   2259 	error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
   2260 	IXGBE_CORE_UNLOCK(adapter);
   2261 
   2262 	if (error != 0) {
   2263 		device_printf(adapter->dev, "failed to unregister vlan %hu\n",
   2264 		    vtag);
   2265 		error = EIO;
   2266 	}
   2267 	return error;
   2268 } /* ixv_unregister_vlan */
   2269 
   2270 /************************************************************************
   2271  * ixv_enable_intr
   2272  ************************************************************************/
   2273 static void
   2274 ixv_enable_intr(struct adapter *adapter)
   2275 {
   2276 	struct ixgbe_hw *hw = &adapter->hw;
   2277 	struct ix_queue *que = adapter->queues;
   2278 	u32		mask;
   2279 	int i;
   2280 
   2281 	/* For VTEIAC */
   2282 	mask = (1 << adapter->vector);
   2283 	for (i = 0; i < adapter->num_queues; i++, que++)
   2284 		mask |= (1 << que->msix);
   2285 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
   2286 
   2287 	/* For VTEIMS */
   2288 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
   2289 	que = adapter->queues;
   2290 	for (i = 0; i < adapter->num_queues; i++, que++)
   2291 		ixv_enable_queue(adapter, que->msix);
   2292 
   2293 	IXGBE_WRITE_FLUSH(hw);
   2294 } /* ixv_enable_intr */
   2295 
   2296 /************************************************************************
   2297  * ixv_disable_intr
   2298  ************************************************************************/
   2299 static void
   2300 ixv_disable_intr(struct adapter *adapter)
   2301 {
   2302 	struct ix_queue	*que = adapter->queues;
   2303 
   2304 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
   2305 
   2306 	/* disable interrupts other than queues */
   2307 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
   2308 
   2309 	for (int i = 0; i < adapter->num_queues; i++, que++)
   2310 		ixv_disable_queue(adapter, que->msix);
   2311 
   2312 	IXGBE_WRITE_FLUSH(&adapter->hw);
   2313 } /* ixv_disable_intr */
   2314 
   2315 /************************************************************************
   2316  * ixv_set_ivar
   2317  *
   2318  *   Setup the correct IVAR register for a particular MSI-X interrupt
   2319  *    - entry is the register array entry
   2320  *    - vector is the MSI-X vector for this queue
   2321  *    - type is RX/TX/MISC
   2322  ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ivar, index;

	/* Mark the IVAR entry as valid */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		/* Mailbox/other-cause vector lives in the low byte */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	  /* RX/TX IVARS */
		/*
		 * Each VTIVAR register holds two queues (entry>>1 selects
		 * the register, entry&1 the 16-bit half); within a half,
		 * Rx (type 0) is the low byte and Tx (type 1) the high
		 * byte.  Replace only the addressed byte.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xffUL << index);
		ivar |= ((u32)vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */
   2344 
   2345 /************************************************************************
   2346  * ixv_configure_ivars
   2347  ************************************************************************/
   2348 static void
   2349 ixv_configure_ivars(struct adapter *adapter)
   2350 {
   2351 	struct ix_queue *que = adapter->queues;
   2352 
   2353 	/* XXX We should sync EITR value calculation with ixgbe.c? */
   2354 
   2355 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2356 		/* First the RX queue entry */
   2357 		ixv_set_ivar(adapter, i, que->msix, 0);
   2358 		/* ... and the TX */
   2359 		ixv_set_ivar(adapter, i, que->msix, 1);
   2360 		/* Set an initial value in EITR */
   2361 		ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
   2362 	}
   2363 
   2364 	/* For the mailbox interrupt */
   2365 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
   2366 } /* ixv_configure_ivars */
   2367 
   2368 
   2369 /************************************************************************
   2370  * ixv_save_stats
   2371  *
   2372  *   The VF stats registers never have a truly virgin
   2373  *   starting point, so this routine tries to make an
   2374  *   artificial one, marking ground zero on attach as
   2375  *   it were.
   2376  ************************************************************************/
   2377 static void
   2378 ixv_save_stats(struct adapter *adapter)
   2379 {
   2380 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2381 
   2382 	if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
   2383 		stats->saved_reset_vfgprc +=
   2384 		    stats->vfgprc.ev_count - stats->base_vfgprc;
   2385 		stats->saved_reset_vfgptc +=
   2386 		    stats->vfgptc.ev_count - stats->base_vfgptc;
   2387 		stats->saved_reset_vfgorc +=
   2388 		    stats->vfgorc.ev_count - stats->base_vfgorc;
   2389 		stats->saved_reset_vfgotc +=
   2390 		    stats->vfgotc.ev_count - stats->base_vfgotc;
   2391 		stats->saved_reset_vfmprc +=
   2392 		    stats->vfmprc.ev_count - stats->base_vfmprc;
   2393 	}
   2394 } /* ixv_save_stats */
   2395 
   2396 /************************************************************************
   2397  * ixv_init_stats
   2398  ************************************************************************/
   2399 static void
   2400 ixv_init_stats(struct adapter *adapter)
   2401 {
   2402 	struct ixgbe_hw *hw = &adapter->hw;
   2403 
   2404 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
   2405 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
   2406 	adapter->stats.vf.last_vfgorc |=
   2407 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
   2408 
   2409 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
   2410 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
   2411 	adapter->stats.vf.last_vfgotc |=
   2412 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
   2413 
   2414 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
   2415 
   2416 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
   2417 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
   2418 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
   2419 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
   2420 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
   2421 } /* ixv_init_stats */
   2422 
/*
 * UPDATE_STAT_32 - fold a 32-bit hardware counter into a 64-bit evcnt.
 * A wrap is detected by the new reading being smaller than the previous
 * one; each wrap adds 2^32 to the accumulated count.  Wrapped in
 * do { } while (0) so the macro expands to a single statement and is
 * safe in unbraced if/else bodies.
 */
#define UPDATE_STAT_32(reg, last, count)		\
do {							\
	u32 current = IXGBE_READ_REG(hw, (reg));	\
	if (current < (last))				\
		(count).ev_count += 0x100000000LL;	\
	(last) = current;				\
	(count).ev_count &= 0xFFFFFFFF00000000LL;	\
	(count).ev_count |= current;			\
} while (0)
   2432 
/*
 * UPDATE_STAT_36 - fold a 36-bit hardware counter (split across LSB and
 * MSB registers) into a 64-bit evcnt, handling wrap at 2^36.  Wrapped in
 * do { } while (0) for single-statement expansion (see UPDATE_STAT_32).
 */
#define UPDATE_STAT_36(lsb, msb, last, count)		\
do {							\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));	\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));	\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < (last))				\
		(count).ev_count += 0x1000000000LL;	\
	(last) = current;				\
	(count).ev_count &= 0xFFFFFFF000000000LL;	\
	(count).ev_count |= current;			\
} while (0)
   2444 
   2445 /************************************************************************
   2446  * ixv_update_stats - Update the board statistics counters.
   2447  ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* 32-bit packet counters */
	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	/* 36-bit octet counters split across LSB/MSB register pairs */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
	    stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
	    stats->vfgotc);
	/* 32-bit multicast-received counter */
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* VF doesn't count errors by hardware */

} /* ixv_update_stats */
   2465 
   2466 /************************************************************************
   2467  * ixv_sysctl_interrupt_rate_handler
   2468  ************************************************************************/
   2469 static int
   2470 ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
   2471 {
   2472 	struct sysctlnode node = *rnode;
   2473 	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
   2474 	struct adapter	*adapter = que->adapter;
   2475 	uint32_t reg, usec, rate;
   2476 	int error;
   2477 
   2478 	if (que == NULL)
   2479 		return 0;
   2480 	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
   2481 	usec = ((reg & 0x0FF8) >> 3);
   2482 	if (usec > 0)
   2483 		rate = 500000 / usec;
   2484 	else
   2485 		rate = 0;
   2486 	node.sysctl_data = &rate;
   2487 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2488 	if (error || newp == NULL)
   2489 		return error;
   2490 	reg &= ~0xfff; /* default, no limitation */
   2491 	if (rate > 0 && rate < 500000) {
   2492 		if (rate < 1000)
   2493 			rate = 1000;
   2494 		reg |= ((4000000 / rate) & 0xff8);
   2495 		/*
   2496 		 * When RSC is used, ITR interval must be larger than
   2497 		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
   2498 		 * The minimum value is always greater than 2us on 100M
   2499 		 * (and 10M?(not documented)), but it's not on 1G and higher.
   2500 		 */
   2501 		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
   2502 		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
   2503 			if ((adapter->num_queues > 1)
   2504 			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
   2505 				return EINVAL;
   2506 		}
   2507 		ixv_max_interrupt_rate = rate;
   2508 	} else
   2509 		ixv_max_interrupt_rate = 0;
   2510 	ixv_eitr_write(adapter, que->msix, reg);
   2511 
   2512 	return (0);
   2513 } /* ixv_sysctl_interrupt_rate_handler */
   2514 
   2515 const struct sysctlnode *
   2516 ixv_sysctl_instance(struct adapter *adapter)
   2517 {
   2518 	const char *dvname;
   2519 	struct sysctllog **log;
   2520 	int rc;
   2521 	const struct sysctlnode *rnode;
   2522 
   2523 	log = &adapter->sysctllog;
   2524 	dvname = device_xname(adapter->dev);
   2525 
   2526 	if ((rc = sysctl_createv(log, 0, NULL, &rnode,
   2527 	    0, CTLTYPE_NODE, dvname,
   2528 	    SYSCTL_DESCR("ixv information and settings"),
   2529 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   2530 		goto err;
   2531 
   2532 	return rnode;
   2533 err:
   2534 	device_printf(adapter->dev,
   2535 	    "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
   2536 	return NULL;
   2537 }
   2538 
   2539 static void
   2540 ixv_add_device_sysctls(struct adapter *adapter)
   2541 {
   2542 	struct sysctllog **log;
   2543 	const struct sysctlnode *rnode, *cnode;
   2544 	device_t dev;
   2545 
   2546 	dev = adapter->dev;
   2547 	log = &adapter->sysctllog;
   2548 
   2549 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2550 		aprint_error_dev(dev, "could not create sysctl root\n");
   2551 		return;
   2552 	}
   2553 
   2554 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2555 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2556 	    "debug", SYSCTL_DESCR("Debug Info"),
   2557 	    ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
   2558 		aprint_error_dev(dev, "could not create sysctl\n");
   2559 
   2560 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2561 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2562 	    "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
   2563 	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
   2564 		aprint_error_dev(dev, "could not create sysctl\n");
   2565 
   2566 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2567 	    CTLFLAG_READWRITE, CTLTYPE_BOOL,
   2568 	    "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
   2569 		NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
   2570 		aprint_error_dev(dev, "could not create sysctl\n");
   2571 }
   2572 
   2573 /************************************************************************
   2574  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
   2575  ************************************************************************/
/*
 * Attach all driver, per-queue, MAC and mailbox statistics counters
 * (evcnt(9)) and the per-queue read-only sysctl leaves for the VF.
 * Called once at attach time; ixv_clear_evcnt() resets what is
 * attached here.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct tx_ring		*txr = adapter->tx_rings;
	struct rx_ring		*rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
	    NULL, xname, "Admin MSI-X IRQ Handled");
	evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
	    NULL, xname, "Admin event");

	/*
	 * Per-queue counters and sysctl leaves.  Note: any sysctl_createv
	 * failure breaks out of the loop, so later queues get neither
	 * sysctl leaves nor evcnt counters.
	 */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* rnode is deliberately reused: it becomes the q%d node. */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Read-only views of the hardware TX descriptor pointers. */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		/* Read-only views of the RX descriptor ring state. */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
			ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		/* FreeBSD-style sysctl calls; only compiled when LRO is on. */
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
				CTLFLAG_RD, &lro->lro_queued, 0,
				"LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
				CTLFLAG_RD, &lro->lro_flushed, 0,
				"LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats (PF<->VF mailbox message/handshake counters) */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
   2759 
   2760 static void
   2761 ixv_clear_evcnt(struct adapter *adapter)
   2762 {
   2763 	struct tx_ring		*txr = adapter->tx_rings;
   2764 	struct rx_ring		*rxr = adapter->rx_rings;
   2765 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
   2766 	struct ixgbe_hw *hw = &adapter->hw;
   2767 	int i;
   2768 
   2769 	/* Driver Statistics */
   2770 	adapter->efbig_tx_dma_setup.ev_count = 0;
   2771 	adapter->mbuf_defrag_failed.ev_count = 0;
   2772 	adapter->efbig2_tx_dma_setup.ev_count = 0;
   2773 	adapter->einval_tx_dma_setup.ev_count = 0;
   2774 	adapter->other_tx_dma_setup.ev_count = 0;
   2775 	adapter->eagain_tx_dma_setup.ev_count = 0;
   2776 	adapter->enomem_tx_dma_setup.ev_count = 0;
   2777 	adapter->watchdog_events.ev_count = 0;
   2778 	adapter->tso_err.ev_count = 0;
   2779 	adapter->admin_irqev.ev_count = 0;
   2780 	adapter->link_workev.ev_count = 0;
   2781 
   2782 	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
   2783 		adapter->queues[i].irqs.ev_count = 0;
   2784 		adapter->queues[i].handleq.ev_count = 0;
   2785 		adapter->queues[i].req.ev_count = 0;
   2786 		txr->tso_tx.ev_count = 0;
   2787 		txr->no_desc_avail.ev_count = 0;
   2788 		txr->total_packets.ev_count = 0;
   2789 #ifndef IXGBE_LEGACY_TX
   2790 		txr->pcq_drops.ev_count = 0;
   2791 #endif
   2792 		txr->q_efbig_tx_dma_setup = 0;
   2793 		txr->q_mbuf_defrag_failed = 0;
   2794 		txr->q_efbig2_tx_dma_setup = 0;
   2795 		txr->q_einval_tx_dma_setup = 0;
   2796 		txr->q_other_tx_dma_setup = 0;
   2797 		txr->q_eagain_tx_dma_setup = 0;
   2798 		txr->q_enomem_tx_dma_setup = 0;
   2799 		txr->q_tso_err = 0;
   2800 
   2801 		rxr->rx_packets.ev_count = 0;
   2802 		rxr->rx_bytes.ev_count = 0;
   2803 		rxr->rx_copies.ev_count = 0;
   2804 		rxr->no_jmbuf.ev_count = 0;
   2805 		rxr->rx_discarded.ev_count = 0;
   2806 	}
   2807 
   2808 	/* MAC stats get their own sub node */
   2809 
   2810 	stats->ipcs.ev_count = 0;
   2811 	stats->l4cs.ev_count = 0;
   2812 	stats->ipcs_bad.ev_count = 0;
   2813 	stats->l4cs_bad.ev_count = 0;
   2814 
   2815 	/* Packet Reception Stats */
   2816 	stats->vfgprc.ev_count = 0;
   2817 	stats->vfgorc.ev_count = 0;
   2818 	stats->vfmprc.ev_count = 0;
   2819 	stats->vfgptc.ev_count = 0;
   2820 	stats->vfgotc.ev_count = 0;
   2821 
   2822 	/* Mailbox Stats */
   2823 	hw->mbx.stats.msgs_tx.ev_count = 0;
   2824 	hw->mbx.stats.msgs_rx.ev_count = 0;
   2825 	hw->mbx.stats.acks.ev_count = 0;
   2826 	hw->mbx.stats.reqs.ev_count = 0;
   2827 	hw->mbx.stats.rsts.ev_count = 0;
   2828 
   2829 } /* ixv_clear_evcnt */
   2830 
   2831 /************************************************************************
   2832  * ixv_set_sysctl_value
   2833  ************************************************************************/
   2834 static void
   2835 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
   2836 	const char *description, int *limit, int value)
   2837 {
   2838 	device_t dev =	adapter->dev;
   2839 	struct sysctllog **log;
   2840 	const struct sysctlnode *rnode, *cnode;
   2841 
   2842 	log = &adapter->sysctllog;
   2843 	if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
   2844 		aprint_error_dev(dev, "could not create sysctl root\n");
   2845 		return;
   2846 	}
   2847 	if (sysctl_createv(log, 0, &rnode, &cnode,
   2848 	    CTLFLAG_READWRITE, CTLTYPE_INT,
   2849 	    name, SYSCTL_DESCR(description),
   2850 	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
   2851 		aprint_error_dev(dev, "could not create sysctl\n");
   2852 	*limit = value;
   2853 } /* ixv_set_sysctl_value */
   2854 
   2855 /************************************************************************
   2856  * ixv_print_debug_info
   2857  *
   2858  *   Called only when em_display_debug_stats is enabled.
   2859  *   Provides a way to take a look at important statistics
   2860  *   maintained by the driver and hardware.
   2861  ************************************************************************/
   2862 static void
   2863 ixv_print_debug_info(struct adapter *adapter)
   2864 {
   2865 	device_t	dev = adapter->dev;
   2866 	struct ix_queue *que = adapter->queues;
   2867 	struct rx_ring	*rxr;
   2868 	struct tx_ring	*txr;
   2869 #ifdef LRO
   2870 	struct lro_ctrl *lro;
   2871 #endif /* LRO */
   2872 
   2873 	for (int i = 0; i < adapter->num_queues; i++, que++) {
   2874 		txr = que->txr;
   2875 		rxr = que->rxr;
   2876 #ifdef LRO
   2877 		lro = &rxr->lro;
   2878 #endif /* LRO */
   2879 		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
   2880 		    que->msix, (long)que->irqs.ev_count);
   2881 		device_printf(dev, "RX(%d) Packets Received: %lld\n",
   2882 		    rxr->me, (long long)rxr->rx_packets.ev_count);
   2883 		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
   2884 		    rxr->me, (long)rxr->rx_bytes.ev_count);
   2885 #ifdef LRO
   2886 		device_printf(dev, "RX(%d) LRO Queued= %ju\n",
   2887 		    rxr->me, (uintmax_t)lro->lro_queued);
   2888 		device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
   2889 		    rxr->me, (uintmax_t)lro->lro_flushed);
   2890 #endif /* LRO */
   2891 		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
   2892 		    txr->me, (long)txr->total_packets.ev_count);
   2893 		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
   2894 		    txr->me, (long)txr->no_desc_avail.ev_count);
   2895 	}
   2896 
   2897 	device_printf(dev, "Admin IRQ Handled: %lu\n",
   2898 	    (long)adapter->admin_irqev.ev_count);
   2899 	device_printf(dev, "Admin work Handled: %lu\n",
   2900 	    (long)adapter->link_workev.ev_count);
   2901 } /* ixv_print_debug_info */
   2902 
   2903 /************************************************************************
   2904  * ixv_sysctl_debug
   2905  ************************************************************************/
   2906 static int
   2907 ixv_sysctl_debug(SYSCTLFN_ARGS)
   2908 {
   2909 	struct sysctlnode node = *rnode;
   2910 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
   2911 	int	       error, result;
   2912 
   2913 	node.sysctl_data = &result;
   2914 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2915 
   2916 	if (error || newp == NULL)
   2917 		return error;
   2918 
   2919 	if (result == 1)
   2920 		ixv_print_debug_info(adapter);
   2921 
   2922 	return 0;
   2923 } /* ixv_sysctl_debug */
   2924 
   2925 /************************************************************************
   2926  * ixv_init_device_features
   2927  ************************************************************************/
   2928 static void
   2929 ixv_init_device_features(struct adapter *adapter)
   2930 {
   2931 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
   2932 			  | IXGBE_FEATURE_VF
   2933 			  | IXGBE_FEATURE_RSS
   2934 			  | IXGBE_FEATURE_LEGACY_TX;
   2935 
   2936 	/* A tad short on feature flags for VFs, atm. */
   2937 	switch (adapter->hw.mac.type) {
   2938 	case ixgbe_mac_82599_vf:
   2939 		break;
   2940 	case ixgbe_mac_X540_vf:
   2941 		break;
   2942 	case ixgbe_mac_X550_vf:
   2943 	case ixgbe_mac_X550EM_x_vf:
   2944 	case ixgbe_mac_X550EM_a_vf:
   2945 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
   2946 		break;
   2947 	default:
   2948 		break;
   2949 	}
   2950 
   2951 	/* Enabled by default... */
   2952 	/* Is a virtual function (VF) */
   2953 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
   2954 		adapter->feat_en |= IXGBE_FEATURE_VF;
   2955 	/* Netmap */
   2956 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
   2957 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
   2958 	/* Receive-Side Scaling (RSS) */
   2959 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
   2960 		adapter->feat_en |= IXGBE_FEATURE_RSS;
   2961 	/* Needs advanced context descriptor regardless of offloads req'd */
   2962 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
   2963 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
   2964 
   2965 	/* Enabled via sysctl... */
   2966 	/* Legacy (single queue) transmit */
   2967 	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
   2968 	    ixv_enable_legacy_tx)
   2969 		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
   2970 } /* ixv_init_device_features */
   2971 
   2972 /************************************************************************
   2973  * ixv_shutdown - Shutdown entry point
   2974  ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
/* Disabled: stop the adapter under the core lock at system shutdown. */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
   2987 
   2988 static int
   2989 ixv_ifflags_cb(struct ethercom *ec)
   2990 {
   2991 	struct ifnet *ifp = &ec->ec_if;
   2992 	struct adapter *adapter = ifp->if_softc;
   2993 	u_short saved_flags;
   2994 	u_short change;
   2995 	int rv = 0;
   2996 
   2997 	IXGBE_CORE_LOCK(adapter);
   2998 
   2999 	saved_flags = adapter->if_flags;
   3000 	change = ifp->if_flags ^ adapter->if_flags;
   3001 	if (change != 0)
   3002 		adapter->if_flags = ifp->if_flags;
   3003 
   3004 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3005 		rv = ENETRESET;
   3006 		goto out;
   3007 	} else if ((change & IFF_PROMISC) != 0) {
   3008 		rv = ixv_set_rxfilter(adapter);
   3009 		if (rv != 0) {
   3010 			/* Restore previous */
   3011 			adapter->if_flags = saved_flags;
   3012 			goto out;
   3013 		}
   3014 	}
   3015 
   3016 	/* Check for ec_capenable. */
   3017 	change = ec->ec_capenable ^ adapter->ec_capenable;
   3018 	adapter->ec_capenable = ec->ec_capenable;
   3019 	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
   3020 	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
   3021 		rv = ENETRESET;
   3022 		goto out;
   3023 	}
   3024 
   3025 	/*
   3026 	 * Special handling is not required for ETHERCAP_VLAN_MTU.
   3027 	 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
   3028 	 */
   3029 
   3030 	/* Set up VLAN support and filter */
   3031 	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
   3032 		rv = ixv_setup_vlan_support(adapter);
   3033 
   3034 out:
   3035 	IXGBE_CORE_UNLOCK(adapter);
   3036 
   3037 	return rv;
   3038 }
   3039 
   3040 
   3041 /************************************************************************
   3042  * ixv_ioctl - Ioctl entry point
   3043  *
   3044  *   Called when the user wants to configure the interface.
   3045  *
   3046  *   return 0 on success, positive on failure
   3047  ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifcapreq *ifcr = data;
	int		error;
	int l4csum_en;
	/* L4 Rx checksum capabilities must be toggled as one unit. */
	const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	     IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	/* First switch: per-command pre-processing / debug tracing. */
	switch (command) {
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		break;
	case SIOCADDMULTI: {
		struct ether_multi *enm;
		struct ether_multistep step;
		struct ethercom *ec = &adapter->osdep.ec;
		bool overflow = false;
		int mcnt = 0;

		/*
		 * Check the number of multicast address. If it exceeds,
		 * return ENOSPC.
		 * Update this code when we support API 1.3.
		 */
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			mcnt++;

			/*
			 * This code is before adding, so one room is required
			 * at least.
			 */
			if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
				overflow = true;
				break;
			}
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
		error = 0;
		/*
		 * Filter table would overflow: ask the PF for allmulti
		 * mode instead (only once; skip if already enabled).
		 */
		if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
			error = hw->mac.ops.update_xcast_mode(hw,
			    IXGBEVF_XCAST_MODE_ALLMULTI);
			if (error == IXGBE_ERR_NOT_TRUSTED) {
				device_printf(adapter->dev,
				    "this interface is not trusted\n");
				error = EPERM;
			} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
				device_printf(adapter->dev,
				    "the PF doesn't support allmulti mode\n");
				error = EOPNOTSUPP;
			} else if (error) {
				device_printf(adapter->dev,
				    "number of Ethernet multicast addresses "
				    "exceeds the limit (%d). error = %d\n",
				    IXGBE_MAX_VF_MC, error);
				error = ENOSPC;
			} else
				ec->ec_flags |= ETHER_F_ALLMULTI;
		}
		if (error)
			return error;
	}
		/*FALLTHROUGH*/
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		break;
	case SIOCZIFDATA:
		IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
		/* Pull final values from hardware before zeroing. */
		ixv_update_stats(adapter);
		ixv_clear_evcnt(adapter);
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
		break;
	}

	/* Second switch: dispatch to ether_ioctl() and apply the result. */
	switch (command) {
	case SIOCSIFCAP:
		/* Layer-4 Rx checksum offload has to be turned on and
		 * off as a unit.
		 */
		l4csum_en = ifcr->ifcr_capenable & l4csum;
		if (l4csum_en != l4csum && l4csum_en != 0)
			return EINVAL;
		/*FALLTHROUGH*/
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFFLAGS:
	case SIOCSIFMTU:
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			return error;
		/* ENETRESET: only act if the interface is running. */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			;
		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_rxfilter(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		return 0;
	}
} /* ixv_ioctl */
   3175 
   3176 /************************************************************************
   3177  * ixv_init
   3178  ************************************************************************/
   3179 static int
   3180 ixv_init(struct ifnet *ifp)
   3181 {
   3182 	struct adapter *adapter = ifp->if_softc;
   3183 
   3184 	IXGBE_CORE_LOCK(adapter);
   3185 	ixv_init_locked(adapter);
   3186 	IXGBE_CORE_UNLOCK(adapter);
   3187 
   3188 	return 0;
   3189 } /* ixv_init */
   3190 
   3191 /************************************************************************
   3192  * ixv_handle_que
   3193  ************************************************************************/
   3194 static void
   3195 ixv_handle_que(void *context)
   3196 {
   3197 	struct ix_queue *que = context;
   3198 	struct adapter	*adapter = que->adapter;
   3199 	struct tx_ring	*txr = que->txr;
   3200 	struct ifnet	*ifp = adapter->ifp;
   3201 	bool		more;
   3202 
   3203 	que->handleq.ev_count++;
   3204 
   3205 	if (ifp->if_flags & IFF_RUNNING) {
   3206 		more = ixgbe_rxeof(que);
   3207 		IXGBE_TX_LOCK(txr);
   3208 		more |= ixgbe_txeof(txr);
   3209 		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
   3210 			if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
   3211 				ixgbe_mq_start_locked(ifp, txr);
   3212 		/* Only for queue 0 */
   3213 		/* NetBSD still needs this for CBQ */
   3214 		if ((&adapter->queues[0] == que)
   3215 		    && (!ixgbe_legacy_ring_empty(ifp, NULL)))
   3216 			ixgbe_legacy_start_locked(ifp, txr);
   3217 		IXGBE_TX_UNLOCK(txr);
   3218 		if (more) {
   3219 			que->req.ev_count++;
   3220 			if (adapter->txrx_use_workqueue) {
   3221 				/*
   3222 				 * "enqueued flag" is not required here
   3223 				 * the same as ixg(4). See ixgbe_msix_que().
   3224 				 */
   3225 				workqueue_enqueue(adapter->que_wq,
   3226 				    &que->wq_cookie, curcpu());
   3227 			} else
   3228 				  softint_schedule(que->que_si);
   3229 			return;
   3230 		}
   3231 	}
   3232 
   3233 	/* Re-enable this interrupt */
   3234 	ixv_enable_queue(adapter, que->msix);
   3235 
   3236 	return;
   3237 } /* ixv_handle_que */
   3238 
   3239 /************************************************************************
   3240  * ixv_handle_que_work
   3241  ************************************************************************/
   3242 static void
   3243 ixv_handle_que_work(struct work *wk, void *context)
   3244 {
   3245 	struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
   3246 
   3247 	/*
   3248 	 * "enqueued flag" is not required here the same as ixg(4).
   3249 	 * See ixgbe_msix_que().
   3250 	 */
   3251 	ixv_handle_que(que);
   3252 }
   3253 
   3254 /************************************************************************
   3255  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
   3256  ************************************************************************/
   3257 static int
   3258 ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
   3259 {
   3260 	device_t	dev = adapter->dev;
   3261 	struct ix_queue *que = adapter->queues;
   3262 	struct		tx_ring *txr = adapter->tx_rings;
   3263 	int		error, msix_ctrl, rid, vector = 0;
   3264 	pci_chipset_tag_t pc;
   3265 	pcitag_t	tag;
   3266 	char		intrbuf[PCI_INTRSTR_LEN];
   3267 	char		wqname[MAXCOMLEN];
   3268 	char		intr_xname[32];
   3269 	const char	*intrstr = NULL;
   3270 	kcpuset_t	*affinity;
   3271 	int		cpu_id = 0;
   3272 
   3273 	pc = adapter->osdep.pc;
   3274 	tag = adapter->osdep.tag;
   3275 
   3276 	adapter->osdep.nintrs = adapter->num_queues + 1;
   3277 	if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
   3278 	    adapter->osdep.nintrs) != 0) {
   3279 		aprint_error_dev(dev,
   3280 		    "failed to allocate MSI-X interrupt\n");
   3281 		return (ENXIO);
   3282 	}
   3283 
   3284 	kcpuset_create(&affinity, false);
   3285 	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
   3286 		snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
   3287 		    device_xname(dev), i);
   3288 		intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
   3289 		    sizeof(intrbuf));
   3290 #ifdef IXGBE_MPSAFE
   3291 		pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
   3292 		    true);
   3293 #endif
   3294 		/* Set the handler function */
   3295 		que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
   3296 		    adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
   3297 		    intr_xname);
   3298 		if (que->res == NULL) {
   3299 			pci_intr_release(pc, adapter->osdep.intrs,
   3300 			    adapter->osdep.nintrs);
   3301 			aprint_error_dev(dev,
   3302 			    "Failed to register QUE handler\n");
   3303 			kcpuset_destroy(affinity);
   3304 			return (ENXIO);
   3305 		}
   3306 		que->msix = vector;
   3307 		adapter->active_queues |= (u64)(1 << que->msix);
   3308 
   3309 		cpu_id = i;
   3310 		/* Round-robin affinity */
   3311 		kcpuset_zero(affinity);
   3312 		kcpuset_set(affinity, cpu_id % ncpu);
   3313 		error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
   3314 		    NULL);
   3315 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
   3316 		    intrstr);
   3317 		if (error == 0)
   3318 			aprint_normal(", bound queue %d to cpu %d\n",
   3319 			    i, cpu_id % ncpu);
   3320 		else
   3321 			aprint_normal("\n");
   3322 
   3323 #ifndef IXGBE_LEGACY_TX
   3324 		txr->txr_si
   3325 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
   3326 			ixgbe_deferred_mq_start, txr);
   3327 #endif
   3328 		que->que_si
   3329 		    = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
   3330 			ixv_handle_que, que);
   3331 		if (que->que_si == NULL) {
   3332 			aprint_error_dev(dev,
   3333 			    "could not establish software interrupt\n");
   3334 		}
   3335 	}
   3336 	snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
   3337 	error = workqueue_create(&adapter->txr_wq, wqname,
   3338 	    ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3339 	    IXGBE_WORKQUEUE_FLAGS);
   3340 	if (error) {
   3341 		aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
   3342 	}
   3343 	adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
   3344 
   3345 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
   3346 	error = workqueue_create(&adapter->que_wq, wqname,
   3347 	    ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3348 	    IXGBE_WORKQUEUE_FLAGS);
   3349 	if (error) {
   3350 		aprint_error_dev(dev,
   3351 		    "couldn't create workqueue\n");
   3352 	}
   3353 
   3354 	/* and Mailbox */
   3355 	cpu_id++;
   3356 	snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
   3357 	adapter->vector = vector;
   3358 	intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
   3359 	    sizeof(intrbuf));
   3360 #ifdef IXGBE_MPSAFE
   3361 	pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
   3362 	    true);
   3363 #endif
   3364 	/* Set the mbx handler function */
   3365 	adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
   3366 	    adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
   3367 	    intr_xname);
   3368 	if (adapter->osdep.ihs[vector] == NULL) {
   3369 		aprint_error_dev(dev, "Failed to register LINK handler\n");
   3370 		kcpuset_destroy(affinity);
   3371 		return (ENXIO);
   3372 	}
   3373 	/* Round-robin affinity */
   3374 	kcpuset_zero(affinity);
   3375 	kcpuset_set(affinity, cpu_id % ncpu);
   3376 	error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
   3377 	    NULL);
   3378 
   3379 	aprint_normal_dev(dev,
   3380 	    "for link, interrupting at %s", intrstr);
   3381 	if (error == 0)
   3382 		aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
   3383 	else
   3384 		aprint_normal("\n");
   3385 
   3386 	/* Tasklets for Mailbox */
   3387 	snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
   3388 	error = workqueue_create(&adapter->admin_wq, wqname,
   3389 	    ixv_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
   3390 	    IXGBE_TASKLET_WQ_FLAGS);
   3391 	if (error) {
   3392 		aprint_error_dev(dev,
   3393 		    "could not create admin workqueue (%d)\n", error);
   3394 		goto err_out;
   3395 	}
   3396 
   3397 	/*
   3398 	 * Due to a broken design QEMU will fail to properly
   3399 	 * enable the guest for MSI-X unless the vectors in
   3400 	 * the table are all set up, so we must rewrite the
   3401 	 * ENABLE in the MSI-X control register again at this
   3402 	 * point to cause it to successfully initialize us.
   3403 	 */
   3404 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
   3405 		pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
   3406 		rid += PCI_MSIX_CTL;
   3407 		msix_ctrl = pci_conf_read(pc, tag, rid);
   3408 		msix_ctrl |= PCI_MSIX_CTL_ENABLE;
   3409 		pci_conf_write(pc, tag, rid, msix_ctrl);
   3410 	}
   3411 
   3412 	kcpuset_destroy(affinity);
   3413 	return (0);
   3414 err_out:
   3415 	kcpuset_destroy(affinity);
   3416 	ixv_free_workqueue(adapter);
   3417 	ixv_free_pci_resources(adapter);
   3418 	return (error);
   3419 } /* ixv_allocate_msix */
   3420 
   3421 /************************************************************************
   3422  * ixv_configure_interrupts - Setup MSI-X resources
   3423  *
   3424  *   Note: The VF device MUST use MSI-X, there is no fallback.
   3425  ************************************************************************/
   3426 static int
   3427 ixv_configure_interrupts(struct adapter *adapter)
   3428 {
   3429 	device_t dev = adapter->dev;
   3430 	int want, queues, msgs;
   3431 
   3432 	/* Must have at least 2 MSI-X vectors */
   3433 	msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
   3434 	if (msgs < 2) {
   3435 		aprint_error_dev(dev, "MSIX config error\n");
   3436 		return (ENXIO);
   3437 	}
   3438 	msgs = MIN(msgs, IXG_MAX_NINTR);
   3439 
   3440 	/* Figure out a reasonable auto config value */
   3441 	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
   3442 
   3443 	if (ixv_num_queues != 0)
   3444 		queues = ixv_num_queues;
   3445 	else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
   3446 		queues = IXGBE_VF_MAX_TX_QUEUES;
   3447 
   3448 	/*
   3449 	 * Want vectors for the queues,
   3450 	 * plus an additional for mailbox.
   3451 	 */
   3452 	want = queues + 1;
   3453 	if (msgs >= want)
   3454 		msgs = want;
   3455 	else {
   3456 		aprint_error_dev(dev,
   3457 		    "MSI-X Configuration Problem, "
   3458 		    "%d vectors but %d queues wanted!\n",
   3459 		    msgs, want);
   3460 		return -1;
   3461 	}
   3462 
   3463 	adapter->msix_mem = (void *)1; /* XXX */
   3464 	aprint_normal_dev(dev,
   3465 	    "Using MSI-X interrupts with %d vectors\n", msgs);
   3466 	adapter->num_queues = queues;
   3467 
   3468 	return (0);
   3469 } /* ixv_configure_interrupts */
   3470 
   3471 
   3472 /************************************************************************
   3473  * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts
   3474  *
   3475  *   Done outside of interrupt context since the driver might sleep
   3476  ************************************************************************/
   3477 static void
   3478 ixv_handle_admin(struct work *wk, void *context)
   3479 {
   3480 	struct adapter *adapter = context;
   3481 	struct ixgbe_hw	*hw = &adapter->hw;
   3482 
   3483 	IXGBE_CORE_LOCK(adapter);
   3484 
   3485 	++adapter->link_workev.ev_count;
   3486 	adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
   3487 	    &adapter->link_up, FALSE);
   3488 	ixv_update_link_status(adapter);
   3489 
   3490 	adapter->task_requests = 0;
   3491 	atomic_store_relaxed(&adapter->admin_pending, 0);
   3492 
   3493 	/* Re-enable interrupts */
   3494 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
   3495 
   3496 	IXGBE_CORE_UNLOCK(adapter);
   3497 } /* ixv_handle_admin */
   3498 
   3499 /************************************************************************
   3500  * ixv_check_link - Used in the local timer to poll for link changes
   3501  ************************************************************************/
   3502 static s32
   3503 ixv_check_link(struct adapter *adapter)
   3504 {
   3505 	s32 error;
   3506 
   3507 	KASSERT(mutex_owned(&adapter->core_mtx));
   3508 
   3509 	adapter->hw.mac.get_link_status = TRUE;
   3510 
   3511 	error = adapter->hw.mac.ops.check_link(&adapter->hw,
   3512 	    &adapter->link_speed, &adapter->link_up, FALSE);
   3513 	ixv_update_link_status(adapter);
   3514 
   3515 	return error;
   3516 } /* ixv_check_link */
   3517